<?xml version="1.0" encoding="UTF-8"?>
  <rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
    <channel>
      <title>Starmorph AI Web Development Blog</title>
      <link>https://blog.starmorph.com/blog</link>
      <description>Artificial Intelligence and Web Development Blog.</description>
      <language>en-us</language>
      <managingEditor>dylan@starmorph.com (Dylan Boudro)</managingEditor>
      <webMaster>dylan@starmorph.com (Dylan Boudro)</webMaster>
      <lastBuildDate>Tue, 14 Apr 2026 10:00:00 GMT</lastBuildDate>
      <atom:link href="https://blog.starmorph.com/tags/fine-tuning/feed.xml" rel="self" type="application/rss+xml"/>
      
  <item>
    <guid>https://blog.starmorph.com/blog/how-llms-work-complete-technical-guide</guid>
    <title>How Large Language Models Work: The Complete Technical Guide to Transformers, Training, and Inference (2026)</title>
    <link>https://blog.starmorph.com/blog/how-llms-work-complete-technical-guide</link>
    <description>A deep technical guide to how LLMs actually work — from the transformer architecture and attention mechanism to tokenization, training at scale, KV caching, inference acceleration, fine-tuning, and the modern innovations powering GPT-4o, Claude, Llama 3, and beyond. Backed by 30+ research papers.</description>
    <pubDate>Tue, 14 Apr 2026 10:00:00 GMT</pubDate>
    <author>dylan@starmorph.com (Dylan Boudro)</author>
    <category>llm</category><category>transformers</category><category>machine-learning</category><category>ai</category><category>deep-learning</category><category>attention-mechanism</category><category>inference</category><category>fine-tuning</category>
  </item>

    </channel>
  </rss>
