{"version":1,"pages":[{"id":"SilL6NK0Ks0r3qBzCYAf","title":"Unsloth Docs","pathname":"/docs","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f9a5","description":"Unsloth is an open-source framework for running and training models.","breadcrumbs":[{"label":"Get Started"}]},{"id":"Du8yzXwHIEJYlMtjjyGw","title":"Fine-tuning for Beginners","pathname":"/docs/get-started/fine-tuning-for-beginners","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"2b50","description":"","breadcrumbs":[{"label":"Get Started"}]},{"id":"odJXZM9jv284RKqZ2Pna","title":"Unsloth Requirements","pathname":"/docs/get-started/fine-tuning-for-beginners/unsloth-requirements","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f6e0","description":"Here are Unsloth's requirements including system and GPU VRAM requirements.","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning for Beginners","emoji":"2b50"}]},{"id":"HP82bIzgldwxWk3OSzVy","title":"FAQ + Is Fine-tuning Right For Me?","pathname":"/docs/get-started/fine-tuning-for-beginners/faq-+-is-fine-tuning-right-for-me","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f914","description":"If you're stuck on if fine-tuning is right for you, see here! Learn about fine-tuning misconceptions, how it compared to RAG and more:","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning for Beginners","emoji":"2b50"}]},{"id":"bISOEydFwcVt8cnfyCfS","title":"Unsloth Notebooks","pathname":"/docs/get-started/unsloth-notebooks","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f4d2","description":"Fine-tuning notebooks: Explore the Unsloth catalog.","breadcrumbs":[{"label":"Get Started"}]},{"id":"nQlzs5BcvqlaEjhsgbtY","title":"Unsloth Model Catalog","pathname":"/docs/get-started/unsloth-model-catalog","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f52e","description":"","breadcrumbs":[{"label":"Get Started"}]},{"id":"WbSfE0ITQYsNqERZwnbZ","title":"Unsloth Installation","pathname":"/docs/get-started/install","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f4e5","description":"Learn to install Unsloth locally or online.","breadcrumbs":[{"label":"Get Started"}]},{"id":"LhZlVJv6yLKmWbNy4UmP","title":"Install Unsloth via pip and uv","pathname":"/docs/get-started/install/pip-install","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"desktop-arrow-down","description":"To install Unsloth locally via Pip, follow the steps below:","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]},{"id":"nQJglux1e9VKfVL4F43M","title":"Install Unsloth on MacOS","pathname":"/docs/get-started/install/mac","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"apple","description":"","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]},{"id":"Sv0QKHkAGvwTK47OKX2A","title":"How to Fine-Tune LLMs on Windows with Unsloth (Step-by-Step Guide)","pathname":"/docs/get-started/install/windows-installation","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"windows","description":"See how to install Unsloth on Windows to start fine-tuning LLMs locally.","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]},{"id":"dZaYUyA34oYX3LotyGAB","title":"Install Unsloth via Docker","pathname":"/docs/get-started/install/docker","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"docker","description":"Install Unsloth using our official Docker container","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]},{"id":"SQoCZEpSeGsxtIypEgup","title":"Updating 
Unsloth","pathname":"/docs/get-started/install/updating","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"arrow-rotate-right","description":"To update or use an old version of Unsloth, follow the steps below:","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]},{"id":"GUxDmG8LaAiQinraCXCr","title":"Fine-tuning LLMs on AMD GPUs with Unsloth Guide","pathname":"/docs/get-started/install/amd","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"square-up-right","description":"Learn how to fine-tune large language models (LLMs) on AMD GPUs with Unsloth.","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]},{"id":"FhVmcV9yU5zmKQvNYNb8","title":"Fine-tuning LLMs on Intel GPUs with Unsloth","pathname":"/docs/get-started/install/intel","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"info","description":"Learn how to train and fine-tune large language models on Intel GPUs.","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]},{"id":"nw2c1elNySGBBav8WP9B","title":"Fine-tuning LLMs Guide","pathname":"/docs/get-started/fine-tuning-llms-guide","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f9ec","description":"Learn all the basics and best practices of fine-tuning. Beginner-friendly.","breadcrumbs":[{"label":"Get Started"}]},{"id":"XgcpRfamZHmHnRHBnVE4","title":"Datasets Guide","pathname":"/docs/get-started/fine-tuning-llms-guide/datasets-guide","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f4c8","description":"Learn how to create & prepare a dataset for fine-tuning.","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning LLMs Guide","emoji":"1f9ec"}]},{"id":"y6obKRSk8TwyjIrCjuGE","title":"LoRA fine-tuning Hyperparameters Guide","pathname":"/docs/get-started/fine-tuning-llms-guide/lora-hyperparameters-guide","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f9e0","description":"Learn step-by-step the best LLM fine-tuning settings - LoRA rank & alpha, epochs, batch size + gradient accumulation, QLoRA vs. LoRA, target modules, and more.","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning LLMs Guide","emoji":"1f9ec"}]},{"id":"BSShKhLoFNlGWO5cN8VJ","title":"What Model Should I Use for Fine-tuning?","pathname":"/docs/get-started/fine-tuning-llms-guide/what-model-should-i-use","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"2753","description":"","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning LLMs Guide","emoji":"1f9ec"}]},{"id":"cECKVbf1TpF5j7WC0riJ","title":"Tutorial: How to Finetune Llama-3 and Use In Ollama","pathname":"/docs/get-started/fine-tuning-llms-guide/tutorial-how-to-finetune-llama-3-and-use-in-ollama","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f999","description":"Beginner's Guide for creating a customized personal assistant (like ChatGPT) to run locally on Ollama","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning LLMs Guide","emoji":"1f9ec"}]},{"id":"vT6jTKG1LCfN7HoJ4fVR","title":"Reinforcement Learning (RL) Guide","pathname":"/docs/get-started/reinforcement-learning-rl-guide","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f4a1","description":"Learn all about Reinforcement Learning (RL) and how to train your own DeepSeek-R1 reasoning model with Unsloth using GRPO. 
A complete guide from beginner to advanced.","breadcrumbs":[{"label":"Get Started"}]},{"id":"JdGEInjVZuXH42Y6O2Kz","title":"Reinforcement Learning GRPO with 7x Longer Context","pathname":"/docs/get-started/reinforcement-learning-rl-guide/grpo-long-context","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f300","description":"Learn how Unsloth enables ultra long context RL fine-tuning.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]},{"id":"aV6S9cmDmSv5ky4ZCo5d","title":"Vision Reinforcement Learning (VLM RL)","pathname":"/docs/get-started/reinforcement-learning-rl-guide/vision-reinforcement-learning-vlm-rl","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f441-1f5e8","description":"Train Vision/multimodal models via GRPO and RL with Unsloth!","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]},{"id":"pFyRT83vVFiXANdxamCs","title":"FP8 Reinforcement Learning","pathname":"/docs/get-started/reinforcement-learning-rl-guide/fp8-reinforcement-learning","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f3b1","description":"Train reinforcement learning (RL) and GRPO in FP8 precision with Unsloth.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]},{"id":"QbifW3DsTPYvRJcVkXYz","title":"Tutorial: Train your own Reasoning model with GRPO","pathname":"/docs/get-started/reinforcement-learning-rl-guide/tutorial-train-your-own-reasoning-model-with-grpo","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"26a1","description":"Beginner's Guide to transforming a model like Llama 3.1 (8B) into a reasoning model by using Unsloth and GRPO.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]},{"id":"P9PfsQ0BjnZuwPaXA93E","title":"Advanced Reinforcement Learning Documentation","pathname":"/docs/get-started/reinforcement-learning-rl-guide/advanced-rl-documentation","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f9e9","description":"Advanced settings and documentation for using Unsloth with GRPO.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]},{"id":"FoDSrxpEqxCs9VqqDJpp","title":"GSPO Reinforcement Learning","pathname":"/docs/get-started/reinforcement-learning-rl-guide/advanced-rl-documentation/gspo-reinforcement-learning","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"lightbulb-on","description":"Train with GSPO (Group Sequence Policy Optimization) RL in Unsloth.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"},{"label":"Advanced Reinforcement Learning Documentation","emoji":"1f9e9"}]},{"id":"PVxTK4Eal3B77LmfhdRU","title":"RL Reward Hacking","pathname":"/docs/get-started/reinforcement-learning-rl-guide/advanced-rl-documentation/rl-reward-hacking","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"treasure-chest","description":"Learn what Reward Hacking is in Reinforcement Learning and how to counter it.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"},{"label":"Advanced Reinforcement Learning Documentation","emoji":"1f9e9"}]},{"id":"wQboNZf1ZtBJ9Qk4WQxb","title":"FP16 vs BF16 for RL","pathname":"/docs/get-started/reinforcement-learning-rl-guide/advanced-rl-documentation/fp16-vs-bf16-for-rl","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"2049","description":"The paper 'Defeating the Training-Inference Mismatch via FP16' (https://arxiv.org/pdf/2510.26788) 
shows how using float16 is better than bfloat16.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"},{"label":"Advanced Reinforcement Learning Documentation","emoji":"1f9e9"}]},{"id":"VbqKpm1bqUCSPsLzKkhe","title":"Memory Efficient RL","pathname":"/docs/get-started/reinforcement-learning-rl-guide/memory-efficient-rl","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"memory","description":"","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]},{"id":"dJcI8cUqSvuD1WuVj6qU","title":"Preference Optimization Training - DPO, ORPO & KTO","pathname":"/docs/get-started/reinforcement-learning-rl-guide/preference-dpo-orpo-and-kto","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f3c6","description":"To learn about preference alignment fine-tuning with DPO, GRPO, ORPO or KTO via Unsloth, follow the steps below:","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]},{"id":"qyazJc8QbOQ0mtlu6uEv","title":"Introducing Unsloth Studio","pathname":"/docs/new/studio","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f9a5","description":"Run and train AI models locally with Unsloth Studio.","breadcrumbs":[{"label":"New"}]},{"id":"vrLQd9559vRkDY8zRR0h","title":"Get started with Unsloth Studio","pathname":"/docs/new/studio/start","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"bolt","description":"A guide for getting started with the fine-tuning studio, data recipes, model exporting, and chat.","breadcrumbs":[{"label":"New"},{"label":"Introducing Unsloth Studio","emoji":"1f9a5"}]},{"id":"FdMvLj95MbkAR4aHURvS","title":"How to Run models with Unsloth Studio","pathname":"/docs/new/studio/chat","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"comment-dots","description":"Run AI models, LLMs and GGUFs locally with Unsloth Studio.","breadcrumbs":[{"label":"New"},{"label":"Introducing Unsloth Studio","emoji":"1f9a5"}]},{"id":"XFZRr9F9hSOSIbG5lxqB","title":"Unsloth Studio Installation","pathname":"/docs/new/studio/install","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"arrow-down-to-square","description":"Learn how to install Unsloth Studio on your local device.","breadcrumbs":[{"label":"New"},{"label":"Introducing Unsloth Studio","emoji":"1f9a5"}]},{"id":"m9k4PLFmjpsAP6LsQt7u","title":"Unsloth Data Recipes","pathname":"/docs/new/studio/data-recipe","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"hat-chef","description":"Learn how to create, build and edit datasets with Unsloth Studio's Data Recipes.","breadcrumbs":[{"label":"New"},{"label":"Introducing Unsloth Studio","emoji":"1f9a5"}]},{"id":"5ZU2kPF2eJ7VK0GeEUhu","title":"Export models with Unsloth Studio","pathname":"/docs/new/studio/export","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"box-isometric","description":"Learn how to export your safetensor or LoRA model files to GGUF or other formats.","breadcrumbs":[{"label":"New"},{"label":"Introducing Unsloth Studio","emoji":"1f9a5"}]},{"id":"OktH76Rsg2WQ12B4KR5H","title":"Unsloth Updates","pathname":"/docs/new/changelog","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"sparkles","description":"Unsloth Changelog for our latest releases, improvements and fixes.","breadcrumbs":[{"label":"New"}]},{"id":"NpuhjPsxi8BKhuS8nnyY","title":"Qwen3.6 - How to Run Locally","pathname":"/docs/models/qwen3.6","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f49c","description":"Run the new Qwen3.6-27B and 35B-A3B models 
locally!","breadcrumbs":[{"label":"Models"}]},{"id":"VnmWq1kNppQrTqCI6aLH","title":"Gemma 4 - How to Run Locally","pathname":"/docs/models/gemma-4","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"2728","description":"Run Google’s new Gemma 4 models locally, including E2B, E4B, 26B A4B, and 31B.","breadcrumbs":[{"label":"Models"}]},{"id":"6iXghkDoe3jzknTq5aWx","title":"Gemma 4 Fine-tuning Guide","pathname":"/docs/models/gemma-4/train","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"flask-gear","description":"Train Gemma 4 by Google with Unsloth.","breadcrumbs":[{"label":"Models"},{"label":"Gemma 4 - How to Run Locally","emoji":"2728"}]},{"id":"GEABCCTb5KV7QKOeL4YY","title":"NVIDIA Nemotron 3 Nano Omni - How To Run Locally","pathname":"/docs/models/nemotron-3-nano-omni","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f9e9","description":"Run & fine-tune Nemotron-3-Nano-Omni-30B-A3B locally on your device!","breadcrumbs":[{"label":"Models"}]},{"id":"ftHgogOloVhCFawwwmFL","title":"Kimi K2.6 - How to Run Locally","pathname":"/docs/models/kimi-k2.6","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f95d","description":"Step-by-step guide to running Kimi-K2.6 on your own local device.","breadcrumbs":[{"label":"Models"}]},{"id":"JcwJOcoquFknfeDFxM7k","title":"Qwen3.5 - How to Run Locally","pathname":"/docs/models/qwen3.5","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f49c","description":"Run the new Qwen3.5 LLMs including Medium: Qwen3.5-35B-A3B, 27B, 122B-A10B, Small: Qwen3.5-0.8B, 2B, 4B, 9B and 397B-A17B on your local device!","breadcrumbs":[{"label":"Models"}]},{"id":"PzWNOGBEqMa4Xa1reosr","title":"Qwen3.5 Fine-tuning Guide","pathname":"/docs/models/qwen3.5/fine-tune","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"flask-gear","description":"Learn how to fine-tune Qwen3.5 LLMs with Unsloth.","breadcrumbs":[{"label":"Models"},{"label":"Qwen3.5 - How to Run Locally","emoji":"1f49c"}]},{"id":"jb9Bhr7e6quGvUmcIcZe","title":"Qwen3.5 GGUF Benchmarks","pathname":"/docs/models/qwen3.5/gguf-benchmarks","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"chart-fft","description":"See how Unsloth Dynamic GGUFs perform + analysis of perplexity, KL divergence & MXFP4.","breadcrumbs":[{"label":"Models"},{"label":"Qwen3.5 - How to Run Locally","emoji":"1f49c"}]},{"id":"HXtDvHW0zkulPTuirjRy","title":"GLM-5.1 - How to Run Locally","pathname":"/docs/models/glm-5.1","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"z","description":"Run the new GLM-5.1 model by Z.ai on your own local device!","breadcrumbs":[{"label":"Models"}]},{"id":"vVjfRn7mlHvHqCMpAlEA","title":"Qwen3-Coder-Next: How to Run Locally","pathname":"/docs/models/qwen3-coder-next","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f320","description":"Guide to run Qwen3-Coder-Next locally on your device!","breadcrumbs":[{"label":"Models"}]},{"id":"fHlQYhMbeUpdzyTKCzrD","title":"NVIDIA Nemotron 3 Nano - How To Run Guide","pathname":"/docs/models/nemotron-3","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f9e9","description":"Run & fine-tune NVIDIA Nemotron 3 Nano locally on your device!","breadcrumbs":[{"label":"Models"}]},{"id":"riDN57c9VQGIzLuWqsi1","title":"NVIDIA Nemotron-3-Super: How To Run Guide","pathname":"/docs/models/nemotron-3/nemotron-3-super","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f9e9","description":"Run & fine-tune NVIDIA Nemotron-3-Super-120B-A12B locally on your device!","breadcrumbs":[{"label":"Models"},{"label":"NVIDIA Nemotron 3 Nano - How To Run 
Guide","emoji":"1f9e9"}]},{"id":"NIBKTG6sPnvLvZmo4pDh","title":"gpt-oss: How to Run Guide","pathname":"/docs/models/gpt-oss-how-to-run-and-fine-tune","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"openai","description":"Run & fine-tune OpenAI's new open-source models!","breadcrumbs":[{"label":"Models"}]},{"id":"Yct0FY1C3kt9qMFVP0r4","title":"gpt-oss Reinforcement Learning","pathname":"/docs/models/gpt-oss-how-to-run-and-fine-tune/gpt-oss-reinforcement-learning","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"openai","description":"","breadcrumbs":[{"label":"Models"},{"label":"gpt-oss: How to Run Guide","icon":"openai"}]},{"id":"7QtQnAhNrlBbzxeoLFUj","title":"Tutorial: How to Train gpt-oss with RL","pathname":"/docs/models/gpt-oss-how-to-run-and-fine-tune/gpt-oss-reinforcement-learning/tutorial-how-to-train-gpt-oss-with-rl","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"book-open-reader","description":"Learn to train OpenAI gpt-oss with GRPO to autonomously beat 2048 locally or on Colab.","breadcrumbs":[{"label":"Models"},{"label":"gpt-oss: How to Run Guide","icon":"openai"},{"label":"gpt-oss Reinforcement Learning","icon":"openai"}]},{"id":"cYndCUtBWIAwmRZLGye8","title":"Tutorial: How to Fine-tune gpt-oss","pathname":"/docs/models/gpt-oss-how-to-run-and-fine-tune/tutorial-how-to-fine-tune-gpt-oss","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"openai","description":"Learn step-by-step how to train OpenAI gpt-oss locally with Unsloth.","breadcrumbs":[{"label":"Models"},{"label":"gpt-oss: How to Run Guide","icon":"openai"}]},{"id":"y6rLPF0QzRbsqStpQrU7","title":"Long Context gpt-oss Training","pathname":"/docs/models/gpt-oss-how-to-run-and-fine-tune/long-context-gpt-oss-training","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"openai","description":"","breadcrumbs":[{"label":"Models"},{"label":"gpt-oss: How to Run Guide","icon":"openai"}]},{"id":"BAeSP6aOxvSeDUzCgKOK","title":"Large language model (LLMs) Tutorials","pathname":"/docs/models/tutorials","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f680","description":"","breadcrumbs":[{"label":"Models"}]},{"id":"Omr0gGekk3zqhZtwQdmd","title":"Qwen3 - How to Run & Fine-tune","pathname":"/docs/models/tutorials/qwen3-how-to-run-and-fine-tune","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f320","description":"Learn to run & fine-tune Qwen3 locally with Unsloth + our Dynamic 2.0 quants","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"DM6MVuVKY1zKR9oL23wQ","title":"Qwen3-VL: How to Run Guide","pathname":"/docs/models/tutorials/qwen3-how-to-run-and-fine-tune/qwen3-vl-how-to-run-and-fine-tune","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f320","description":"Learn to fine-tune and run Qwen3-VL locally with Unsloth.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"},{"label":"Qwen3 - How to Run & Fine-tune","emoji":"1f320"}]},{"id":"o5rHE4o7g4QZc09TMMpj","title":"Qwen3-2507: Run Locally Guide","pathname":"/docs/models/tutorials/qwen3-how-to-run-and-fine-tune/qwen3-2507","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f320","description":"Run Qwen3-30B-A3B-2507 and 235B-A22B Thinking and Instruct versions locally on your device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"},{"label":"Qwen3 - How to Run & Fine-tune","emoji":"1f320"}]},{"id":"vQSH3015yDj4lD5ssFIL","title":"MiniMax-M2.7 - How to Run 
Locally","pathname":"/docs/models/tutorials/minimax-m27","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"waveform","description":"Run MiniMax-M2.7 LLM locally on your own device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"euF2aUT7116RmwwAtw3R","title":"GLM-5: How to Run Locally Guide","pathname":"/docs/models/tutorials/glm-5","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"z","description":"Run the new GLM-5 model by Z.ai on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"pPNg7FOowDGW06Ii95o6","title":"Kimi K2.5: How to Run Locally Guide","pathname":"/docs/models/tutorials/kimi-k2.5","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f95d","description":"Guide on running Kimi-K2.5 on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"zWg6fYWpGDwkPXd58ReM","title":"GLM-4.7-Flash: How To Run Locally","pathname":"/docs/models/tutorials/glm-4.7-flash","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"z","description":"Run & fine-tune GLM-4.7-Flash locally on your device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"VgFqcc02RzjCTHLApvjO","title":"MiniMax-M2.5: How to Run Guide","pathname":"/docs/models/tutorials/minimax-m25","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"waveform","description":"Run MiniMax-M2.5 locally on your own device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"rxJa5vhJrnBcNUFsBLsJ","title":"Qwen3-Coder: How to Run Locally","pathname":"/docs/models/tutorials/qwen3-coder-how-to-run-locally","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f320","description":"Run Qwen3-Coder-30B-A3B-Instruct and 480B-A35B locally with Unsloth Dynamic quants.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"U0cxXh4tBSRb2I1IZbW5","title":"Gemma 3 - How to Run Guide","pathname":"/docs/models/tutorials/gemma-3-how-to-run-and-fine-tune","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"google","description":"How to run Gemma 3 effectively with our GGUFs on llama.cpp, Ollama, Open WebUI and how to fine-tune with Unsloth!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"TkprHPXbQktLaSKR2A3L","title":"Gemma 3n: How to Run & Fine-tune","pathname":"/docs/models/tutorials/gemma-3-how-to-run-and-fine-tune/gemma-3n-how-to-run-and-fine-tune","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"google","description":"Run Google's new Gemma 3n locally with Dynamic GGUFs on llama.cpp, Ollama, Open WebUI and fine-tune with Unsloth!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"},{"label":"Gemma 3 - How to Run Guide","icon":"google"}]},{"id":"aQX8YMqzttGdCG0oWaHQ","title":"DeepSeek-OCR 2: How to Run & Fine-tune Guide","pathname":"/docs/models/tutorials/deepseek-ocr-2","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f433","description":"Guide on how to run and fine-tune DeepSeek-OCR-2 locally.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"1wSv7BAqW26rePIFITZn","title":"GLM-4.7: How to Run Locally Guide","pathname":"/docs/models/tutorials/glm-4.7","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"z","description":"A 
guide on how to run the Z.ai GLM-4.7 model on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"2jzPq5XdOzNHmR7wDnnQ","title":"How to Run Qwen-Image-2512 Locally in ComfyUI","pathname":"/docs/models/tutorials/qwen-image-2512","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f49f","description":"Step-by-step tutorial for running Qwen-Image-2512 on your local device with ComfyUI.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"sUIzzNUN3nVoZ7bi99AS","title":"Run Qwen-Image-2512 in stable-diffusion.cpp Tutorial","pathname":"/docs/models/tutorials/qwen-image-2512/stable-diffusion.cpp","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f3a8","description":"Tutorial for using Qwen-Image-2512 in stable-diffusion.cpp.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"},{"label":"How to Run Qwen-Image-2512 Locally in ComfyUI","emoji":"1f49f"}]},{"id":"3Liotgx1T4MeF1J8CO0m","title":"Devstral 2 - How to Run Guide","pathname":"/docs/models/tutorials/devstral-2","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f4d9","description":"Guide for running Mistral Devstral 2 models locally: 123B-Instruct-2512 and Small-2-24B-Instruct-2512.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"zRbjQXuLmfdZD90U410W","title":"Ministral 3 - How to Run Guide","pathname":"/docs/models/tutorials/ministral-3","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f431","description":"Guide to running or fine-tuning Mistral Ministral 3 models locally on your device","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"ckxEaylNcpFEtJmmgkoU","title":"DeepSeek-OCR: How to Run & Fine-tune","pathname":"/docs/models/tutorials/deepseek-ocr-how-to-run-and-fine-tune","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f433","description":"Guide on how to run and fine-tune DeepSeek-OCR locally.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"fLpotmQAWm06ZmVkliIj","title":"Kimi K2 Thinking: Run Locally Guide","pathname":"/docs/models/tutorials/kimi-k2-thinking-how-to-run-locally","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f319","description":"Guide on running Kimi-K2-Thinking and Kimi-K2 on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"kubJWq6dZSW06gdjy3QO","title":"GLM-4.6: Run Locally Guide","pathname":"/docs/models/tutorials/glm-4.6-how-to-run-locally","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"z","description":"A guide on how to run the Z.ai GLM-4.6 and GLM-4.6V-Flash models on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"cUiTofDNgkP12VQLa9cl","title":"Qwen3-Next: Run Locally Guide","pathname":"/docs/models/tutorials/qwen3-next","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f320","description":"Run Qwen3-Next-80B-A3B-Instruct and Thinking versions locally on your device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"J4GtBjM0f3vuIwoSfnHy","title":"FunctionGemma: How to Run & Fine-tune","pathname":"/docs/models/tutorials/functiongemma","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"google","description":"Learn how to run and 
fine-tune FunctionGemma locally on your device and phone.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"eK0BfjMHNrfvfe4HI6Pk","title":"DeepSeek-V3.1: How to Run Locally","pathname":"/docs/models/tutorials/deepseek-v3.1-how-to-run-locally","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f40b","description":"A guide on how to run DeepSeek-V3.1 and Terminus on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"Ji5D1Y22Eu0ZtCpXU9ja","title":"DeepSeek-R1-0528: How to Run Locally","pathname":"/docs/models/tutorials/deepseek-r1-0528-how-to-run-locally","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f40b","description":"A guide on how to run DeepSeek-R1-0528, including Qwen3, on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"CixcWjfA7MpVVdwbzTmg","title":"Liquid LFM2.5: How To Run & Fine-tune","pathname":"/docs/models/tutorials/lfm2.5","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f4a7","description":"Run and fine-tune LFM2.5 Instruct and Vision locally on your device!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"WYCU1j3i1h4Z2y63PJn9","title":"Magistral: How to Run & Fine-tune","pathname":"/docs/models/tutorials/magistral-how-to-run-and-fine-tune","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f4a5","description":"Meet Magistral - Mistral's new reasoning models.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"A6Kc1s4GOsHXR07kaEqe","title":"IBM Granite 4.0","pathname":"/docs/models/tutorials/ibm-granite-4.0","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"cube","description":"How to run IBM Granite-4.0 with Unsloth GGUFs on llama.cpp, Ollama and how to fine-tune!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"r2sc8WhniElY6ZuO8rZ9","title":"Llama 4: How to Run & Fine-tune","pathname":"/docs/models/tutorials/llama-4-how-to-run-and-fine-tune","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f999","description":"How to run Llama 4 locally using our dynamic GGUFs which recover accuracy compared to standard quantization.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"efAr1xJJgeUlT0oMibEg","title":"Grok 2","pathname":"/docs/models/tutorials/grok-2","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"square-x-twitter","description":"Run xAI's Grok 2 model locally!","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"haJMAru7E9BKIjoJvoam","title":"Devstral: How to Run & Fine-tune","pathname":"/docs/models/tutorials/devstral-how-to-run-and-fine-tune","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f4d9","description":"Run and fine-tune Mistral Devstral 1.1, including Small-2507 and 2505.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"tYzsGxBMlN0JKeaQktNs","title":"How to Run Local LLMs with Docker: Step-by-Step Guide","pathname":"/docs/models/tutorials/how-to-run-llms-with-docker","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"docker","description":"Learn how to run Large Language Models (LLMs) with Docker & Unsloth on your local device.","breadcrumbs":[{"label":"Models"},{"label":"Large 
language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"hGkhBczYQEpW4XkRzlks","title":"DeepSeek-V3-0324: How to Run Locally","pathname":"/docs/models/tutorials/deepseek-v3-0324-how-to-run-locally","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f433","description":"How to run DeepSeek-V3-0324 locally using our dynamic quants which recovers accuracy","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"9a6TzBKnfHALYRRFfeNU","title":"DeepSeek-R1: How to Run Locally","pathname":"/docs/models/tutorials/deepseek-r1-how-to-run-locally","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f40b","description":"A guide on how you can run our 1.58-bit Dynamic Quants for DeepSeek-R1 using llama.cpp.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"NS98qKy46bSVzgzTOyzG","title":"DeepSeek-R1 Dynamic 1.58-bit","pathname":"/docs/models/tutorials/deepseek-r1-how-to-run-locally/deepseek-r1-dynamic-1.58-bit","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f433","description":"See performance comparison tables for Unsloth's Dynamic GGUF Quants vs Standard IMatrix Quants.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"},{"label":"DeepSeek-R1: How to Run Locally","emoji":"1f40b"}]},{"id":"zLTUUiWU5VchfMnYKD44","title":"Phi-4 Reasoning: How to Run & Fine-tune","pathname":"/docs/models/tutorials/phi-4-reasoning-how-to-run-and-fine-tune","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"windows","description":"Learn to run & fine-tune Phi-4 reasoning models locally with Unsloth + our Dynamic 2.0 quants","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"yWydsLIpPKmwwG1WDiKx","title":"QwQ-32B: How to Run effectively","pathname":"/docs/models/tutorials/qwq-32b-how-to-run-effectively","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f320","description":"How to run QwQ-32B effectively with our bug fixes and without endless generations + GGUFs.","breadcrumbs":[{"label":"Models"},{"label":"Large language model (LLMs) Tutorials","emoji":"1f680"}]},{"id":"gEugERiAw2ztDNt98JVR","title":"Inference & Deployment","pathname":"/docs/basics/inference-and-deployment","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f5a5","description":"Learn how to save your finetuned model so you can run it in your favorite inference engine.","breadcrumbs":[{"label":"Basics"}]},{"id":"T7ZPf3SNAwDykZNgXptE","title":"Saving to GGUF","pathname":"/docs/basics/inference-and-deployment/saving-to-gguf","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"Saving models to 16bit for GGUF so you can use it for Ollama, Jan AI, Open WebUI and more!","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]},{"id":"KN9fwufcUfjR8cqPfQA4","title":"Speculative Decoding","pathname":"/docs/basics/inference-and-deployment/saving-to-gguf/speculative-decoding","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"Speculative Decoding with llama-server, llama.cpp, vLLM and more for 2x faster inference","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"},{"label":"Saving to GGUF"}]},{"id":"fhJtaLFFXVsGnbMUiACo","title":"vLLM Deployment & Inference Guide","pathname":"/docs/basics/inference-and-deployment/vllm-guide","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"Guide on saving and deploying LLMs to vLLM for serving LLMs in 
production","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]},{"id":"T8vAb3VMIaDyIUlVtrdK","title":"vLLM Engine Arguments","pathname":"/docs/basics/inference-and-deployment/vllm-guide/vllm-engine-arguments","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"},{"label":"vLLM Deployment & Inference Guide"}]},{"id":"mp9Evu7eg8kdy0IfITHu","title":"LoRA Hot Swapping Guide","pathname":"/docs/basics/inference-and-deployment/vllm-guide/lora-hot-swapping-guide","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"},{"label":"vLLM Deployment & Inference Guide"}]},{"id":"8UQUlu6UU8hhx3FiWc5B","title":"Saving to Ollama","pathname":"/docs/basics/inference-and-deployment/saving-to-ollama","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]},{"id":"nCFNF4eDqHqapNM3eEqt","title":"Deploying models to LM Studio","pathname":"/docs/basics/inference-and-deployment/lm-studio","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"Saving models to GGUF so you can run and deploy them to LM Studio","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]},{"id":"LfriJ46uZOQRLXZiBgXw","title":"How to install LM Studio CLI in Linux Terminal","pathname":"/docs/basics/inference-and-deployment/lm-studio/how-to-install-lm-studio-cli-in-linux-terminal","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f47e","description":"LM Studio CLI installation guide without a UI in a terminal instance.","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"},{"label":"Deploying models to LM Studio"}]},{"id":"WehjgbuawqCXogREXvGG","title":"SGLang Deployment & Inference Guide","pathname":"/docs/basics/inference-and-deployment/sglang-guide","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"Guide on saving and deploying LLMs to SGLang for serving LLMs in production","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]},{"id":"wqYCVI9bC4YRS7jlwWhR","title":"llama-server & OpenAI endpoint Deployment Guide","pathname":"/docs/basics/inference-and-deployment/llama-server-and-openai-endpoint","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"Deploying via llama-server with an OpenAI compatible endpoint","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]},{"id":"KGLcoYbGfxvjuMoQ3HrB","title":"How to Run and Deploy LLMs on your iOS or Android Phone","pathname":"/docs/basics/inference-and-deployment/deploy-llms-phone","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f4f1","description":"Tutorial for fine-tuning your own LLM and deploying it on your Android or iPhone with ExecuTorch.","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]},{"id":"e09iCJirEAJOrGDlyLre","title":"Troubleshooting Inference","pathname":"/docs/basics/inference-and-deployment/troubleshooting-inference","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"If you're experiencing issues when running or saving your model.","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]},{"id":"w020xJgdCTBtTvfHtvye","title":"How to Run Local LLMs with Claude Code","pathname":"/docs/basics/claude-code","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"claude","description":"Guide 
to using open models with Claude Code on your local device.","breadcrumbs":[{"label":"Basics"}]},{"id":"PCjZ57h5pE0QccKyJMYD","title":"How to Run Local LLMs with OpenAI Codex","pathname":"/docs/basics/codex","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"openai","description":"Use open models with OpenAI Codex on your device locally.","breadcrumbs":[{"label":"Basics"}]},{"id":"VTpEwCPKRuHGuVtz9ajd","title":"Multi-GPU Fine-tuning with Unsloth","pathname":"/docs/basics/multi-gpu-training-with-unsloth","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"rectangle-history","description":"Learn how to fine-tune LLMs on multiple GPUs with parallelism using Unsloth.","breadcrumbs":[{"label":"Basics"}]},{"id":"6aAeKwk7YLLgpBJR9JTv","title":"Multi-GPU Fine-tuning with Distributed Data Parallel (DDP)","pathname":"/docs/basics/multi-gpu-training-with-unsloth/ddp","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"Learn how to use the Unsloth CLI to train on multiple GPUs with Distributed Data Parallel (DDP)!","breadcrumbs":[{"label":"Basics"},{"label":"Multi-GPU Fine-tuning with Unsloth","icon":"rectangle-history"}]},{"id":"VlJjz852gxI14pEHjByu","title":"Fine-tuning Embedding Models with Unsloth Guide","pathname":"/docs/basics/embedding-finetuning","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f50e","description":"Learn how to easily fine-tune embedding models with Unsloth.","breadcrumbs":[{"label":"Basics"}]},{"id":"QLaEGK4QyFj4hjb4Cjdn","title":"Fine-tune MoE Models 12x Faster with Unsloth","pathname":"/docs/basics/faster-moe","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f48e","description":"Guide to training MoE LLMs locally using Unsloth.","breadcrumbs":[{"label":"Basics"}]},{"id":"bnULxAhnvp7EPaivWLiP","title":"Text-to-Speech (TTS) Fine-tuning Guide","pathname":"/docs/basics/text-to-speech-tts-fine-tuning","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f50a","description":"Learn how to fine-tune TTS & STT voice models with Unsloth.","breadcrumbs":[{"label":"Basics"}]},{"id":"QznsvWxKKvrY6PdiByzz","title":"Unsloth Dynamic 2.0 GGUFs","pathname":"/docs/basics/unsloth-dynamic-2.0-ggufs","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f9a5","description":"A big new upgrade to our Dynamic Quants!","breadcrumbs":[{"label":"Basics"}]},{"id":"jiaMFU5NqiW6tuZHwXPV","title":"Unsloth Dynamic GGUFs on Aider Polyglot","pathname":"/docs/basics/unsloth-dynamic-2.0-ggufs/unsloth-dynamic-ggufs-on-aider-polyglot","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f9a5","description":"Performance of Unsloth Dynamic GGUFs on Aider Polyglot Benchmarks","breadcrumbs":[{"label":"Basics"},{"label":"Unsloth Dynamic 2.0 GGUFs","emoji":"1f9a5"}]},{"id":"pEl4DasFmbHe97o5vK4R","title":"Tool Calling Guide for Local LLMs","pathname":"/docs/basics/tool-calling-guide-for-local-llms","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"screwdriver-wrench","description":"","breadcrumbs":[{"label":"Basics"}]},{"id":"Zo93wHRqGnzGRE62C395","title":"Vision Fine-tuning","pathname":"/docs/basics/vision-fine-tuning","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f441","description":"Learn how to fine-tune vision/multimodal LLMs with Unsloth","breadcrumbs":[{"label":"Basics"}]},{"id":"YCMcHSSIKR38pPhdq87W","title":"Troubleshooting & FAQs","pathname":"/docs/basics/troubleshooting-and-faqs","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"26a0","description":"Tips to solve issues, and frequently asked questions.","breadcrumbs":[{"label":"Basics"}]},{"id":"oJpfeo6SXe5svovA0sP6","title":"Hugging Face Hub, XET 
debugging","pathname":"/docs/basics/troubleshooting-and-faqs/hugging-face-hub-xet-debugging","siteSpaceId":"sitesp_VHa4A","lang":"en","description":"Debugging, troubleshooting stalled, stuck downloads and slow downloads","breadcrumbs":[{"label":"Basics"},{"label":"Troubleshooting & FAQs","emoji":"26a0"}]},{"id":"kuRwnzkO5NvMH5METpT8","title":"Chat Templates","pathname":"/docs/basics/chat-templates","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f4ac","description":"Learn the fundamentals and customization options of chat templates, including Conversational, ChatML, ShareGPT, Alpaca formats, and more!","breadcrumbs":[{"label":"Basics"}]},{"id":"IklKRZMgbD798u0IJAQR","title":"Unsloth Environment Flags","pathname":"/docs/basics/unsloth-environment-flags","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f6e0","description":"Advanced flags which might be useful if you see breaking finetunes, or you want to turn stuff off.","breadcrumbs":[{"label":"Basics"}]},{"id":"8LEeKIVgEgdecg1B7Ahm","title":"Continued Pretraining","pathname":"/docs/basics/continued-pretraining","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"267b","description":"AKA as Continued Finetuning. Unsloth allows you to continually pretrain so a model can learn a new language.","breadcrumbs":[{"label":"Basics"}]},{"id":"IqlfM9k57UJVtsZ2xtRu","title":"Finetuning from Last Checkpoint","pathname":"/docs/basics/finetuning-from-last-checkpoint","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f3c1","description":"Checkpointing allows you to save your finetuning progress so you can pause it and then continue.","breadcrumbs":[{"label":"Basics"}]},{"id":"C6nlcNfm92CKGlHjZq6A","title":"Unsloth Benchmarks","pathname":"/docs/basics/unsloth-benchmarks","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"1f4ca","description":"Unsloth recorded benchmarks on NVIDIA GPUs.","breadcrumbs":[{"label":"Basics"}]},{"id":"zb5JIG4b4Jhv2FaREe5I","title":"3x Faster LLM Training with Unsloth Kernels + Packing","pathname":"/docs/blog/3x-faster-training-packing","siteSpaceId":"sitesp_VHa4A","lang":"en","emoji":"26a1","description":"Learn how Unsloth increases training throughput and eliminates padding waste for fine-tuning.","breadcrumbs":[{"label":"Blog"}]},{"id":"bNA4JFPPNIXhfmQY7JfQ","title":"500K Context Length Fine-tuning","pathname":"/docs/blog/500k-context-length-fine-tuning","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"ruler-combined","description":"Learn how to enable >500K token context window fine-tuning with Unsloth.","breadcrumbs":[{"label":"Blog"}]},{"id":"OukZvlYQT7UIMT2ULKav","title":"Quantization-Aware Training (QAT)","pathname":"/docs/blog/quantization-aware-training-qat","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"down-left-and-up-right-to-center","description":"Quantize models to 4-bit with Unsloth and PyTorch to recover accuracy.","breadcrumbs":[{"label":"Blog"}]},{"id":"g1UMvrOXMWVheqG9egap","title":"Fine-Tuning LLMs on NVIDIA DGX Station with Unsloth","pathname":"/docs/blog/dgx-station","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"microchip-ai","description":"NVIDIA DGX Station tutorial on how to fine-tune with notebooks from Unsloth.","breadcrumbs":[{"label":"Blog"}]},{"id":"cP88TERn3hrjTC46YULf","title":"How to Fine-tune LLMs with Unsloth & Docker","pathname":"/docs/blog/how-to-fine-tune-llms-with-unsloth-and-docker","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"docker","description":"Learn how to fine-tune LLMs or do Reinforcement Learning (RL) with Unsloth's Docker 
image.","breadcrumbs":[{"label":"Blog"}]},{"id":"SP7tOaHurV8iKXitTy2O","title":"Fine-tuning LLMs with NVIDIA DGX Spark and Unsloth","pathname":"/docs/blog/fine-tuning-llms-with-nvidia-dgx-spark-and-unsloth","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"sparkle","description":"Tutorial on how to fine-tune and do reinforcement learning (RL) with OpenAI gpt-oss on NVIDIA DGX Spark.","breadcrumbs":[{"label":"Blog"}]},{"id":"FmZGHW74OKsd4nalCidC","title":"Fine-tuning LLMs with Blackwell, RTX 50 series & Unsloth","pathname":"/docs/blog/fine-tuning-llms-with-blackwell-rtx-50-series-and-unsloth","siteSpaceId":"sitesp_VHa4A","lang":"en","icon":"microchip","description":"Learn how to fine-tune LLMs on NVIDIA's Blackwell RTX 50 series and B200 GPUs with our step-by-step guide.","breadcrumbs":[{"label":"Blog"}]},{"id":"d223e1d473d91b0a823446588fad0295a03dee2e","title":"Unsloth 文档","pathname":"/docs/zh","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f9a5","description":"Unsloth 是一个用于运行和训练模型的开源框架。","breadcrumbs":[{"label":"开始使用"}]},{"id":"237026badf5bc822cff5a01118fb7de3da2e2153","title":"面向初学者的微调","pathname":"/docs/zh/kai-shi-shi-yong/fine-tuning-for-beginners","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"2b50","description":"","breadcrumbs":[{"label":"开始使用"}]},{"id":"fad5753068629237f814fd7a2737bf5277187b58","title":"Unsloth 需求","pathname":"/docs/zh/kai-shi-shi-yong/fine-tuning-for-beginners/unsloth-requirements","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f6e0","description":"这里列出了 Unsloth 的要求，包括系统和 GPU VRAM 要求。","breadcrumbs":[{"label":"开始使用"},{"label":"面向初学者的微调","emoji":"2b50"}]},{"id":"23f746d97a493793f0cf778956c3d1e708c15940","title":"常见问题 + 微调适合我吗？","pathname":"/docs/zh/kai-shi-shi-yong/fine-tuning-for-beginners/faq-+-is-fine-tuning-right-for-me","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f914","description":"如果你还在犹豫微调是否适合你，请看这里！了解微调误区、它与 RAG 的比较等内容：","breadcrumbs":[{"label":"开始使用"},{"label":"面向初学者的微调","emoji":"2b50"}]},{"id":"622af8e56d0924dd99cc871bc54a538197574e1a","title":"Unsloth 笔记本","pathname":"/docs/zh/kai-shi-shi-yong/unsloth-notebooks","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f4d2","description":"微调笔记本：浏览 Unsloth 目录。","breadcrumbs":[{"label":"开始使用"}]},{"id":"20805f6881460e7c3a088cff24acf0f1090f1984","title":"Unsloth 模型目录","pathname":"/docs/zh/kai-shi-shi-yong/unsloth-model-catalog","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f52e","description":"","breadcrumbs":[{"label":"开始使用"}]},{"id":"32a7ce415cf3da2e50fd777381735610343fea45","title":"Unsloth 安装","pathname":"/docs/zh/kai-shi-shi-yong/install","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f4e5","description":"了解如何在本地或在线安装 Unsloth。","breadcrumbs":[{"label":"开始使用"}]},{"id":"4597301c2f6c6df2660ec569f5a1d9e8481eb494","title":"通过 pip 和 uv 安装 Unsloth","pathname":"/docs/zh/kai-shi-shi-yong/install/pip-install","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"desktop-arrow-down","description":"要通过 Pip 在本地安装 Unsloth，请按照以下步骤操作：","breadcrumbs":[{"label":"开始使用"},{"label":"Unsloth 安装","emoji":"1f4e5"}]},{"id":"9e9449bc388de91ddda522975936cef81b928bce","title":"在 MacOS 上安装 Unsloth","pathname":"/docs/zh/kai-shi-shi-yong/install/mac","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"apple","description":"","breadcrumbs":[{"label":"开始使用"},{"label":"Unsloth 安装","emoji":"1f4e5"}]},{"id":"bc3c53c3e0f87180567dda04ad25f8d2b13374ce","title":"如何使用 Unsloth 在 Windows 上微调 
LLM（分步指南）","pathname":"/docs/zh/kai-shi-shi-yong/install/windows-installation","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"windows","description":"了解如何在 Windows 上安装 Unsloth，以便开始在本地微调 LLM。","breadcrumbs":[{"label":"开始使用"},{"label":"Unsloth 安装","emoji":"1f4e5"}]},{"id":"2143ac5379e70861b75a6ff27ff09a4a4c8034c5","title":"通过 Docker 安装 Unsloth","pathname":"/docs/zh/kai-shi-shi-yong/install/docker","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"docker","description":"使用我们的官方 Docker 容器安装 Unsloth","breadcrumbs":[{"label":"开始使用"},{"label":"Unsloth 安装","emoji":"1f4e5"}]},{"id":"754d57d7c027164b0f10301137e5ddd66e802e42","title":"更新 Unsloth","pathname":"/docs/zh/kai-shi-shi-yong/install/updating","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"arrow-rotate-right","description":"要更新或使用旧版本的 Unsloth，请按照以下步骤操作：","breadcrumbs":[{"label":"开始使用"},{"label":"Unsloth 安装","emoji":"1f4e5"}]},{"id":"58db76de3e4da663680e51bb3a7f48ee9575ee07","title":"使用 Unsloth 在 AMD GPU 上微调 LLM 指南","pathname":"/docs/zh/kai-shi-shi-yong/install/amd","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"square-up-right","description":"了解如何使用 Unsloth 在 AMD GPU 上微调大型语言模型（LLM）。","breadcrumbs":[{"label":"开始使用"},{"label":"Unsloth 安装","emoji":"1f4e5"}]},{"id":"fb0bfd00002552fa929669fb13c5aafab2f274d1","title":"使用 Unsloth 在 Intel GPU 上微调 LLM","pathname":"/docs/zh/kai-shi-shi-yong/install/intel","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"info","description":"了解如何在 Intel GPU 上训练和微调大型语言模型。","breadcrumbs":[{"label":"开始使用"},{"label":"Unsloth 安装","emoji":"1f4e5"}]},{"id":"e722e86e330786e5915445b91f900d9f9e0ba067","title":"LLM 微调指南","pathname":"/docs/zh/kai-shi-shi-yong/fine-tuning-llms-guide","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f9ec","description":"了解微调的所有基础知识和最佳实践。适合初学者。","breadcrumbs":[{"label":"开始使用"}]},{"id":"e3d8f41874867d09291eadcea96a34bf92b096a7","title":"数据集指南","pathname":"/docs/zh/kai-shi-shi-yong/fine-tuning-llms-guide/datasets-guide","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f4c8","description":"了解如何为微调创建和准备数据集。","breadcrumbs":[{"label":"开始使用"},{"label":"LLM 微调指南","emoji":"1f9ec"}]},{"id":"0c67f9d3a44c115b4fda319de5774cc3513ea3a3","title":"LoRA 微调超参数指南","pathname":"/docs/zh/kai-shi-shi-yong/fine-tuning-llms-guide/lora-hyperparameters-guide","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f9e0","description":"一步步学习最佳 LLM 微调设置——LoRA rank 与 alpha、epoch、批量大小+梯度累积、QLoRA vs. 
LoRA、目标模块等。","breadcrumbs":[{"label":"开始使用"},{"label":"LLM 微调指南","emoji":"1f9ec"}]},{"id":"fb27a5a49eec8190a033600d7b4f8f78f333f7a9","title":"我应该用什么模型进行微调？","pathname":"/docs/zh/kai-shi-shi-yong/fine-tuning-llms-guide/what-model-should-i-use","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"2753","description":"","breadcrumbs":[{"label":"开始使用"},{"label":"LLM 微调指南","emoji":"1f9ec"}]},{"id":"e36b62f99558f2c1e1e644ea9bc15aa9621d449b","title":"教程：如何微调 Llama-3 并在 Ollama 中使用","pathname":"/docs/zh/kai-shi-shi-yong/fine-tuning-llms-guide/tutorial-how-to-finetune-llama-3-and-use-in-ollama","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f999","description":"创建一个可在本地 Ollama 上运行的个性化私人助手（类似 ChatGPT）的初学者指南","breadcrumbs":[{"label":"开始使用"},{"label":"LLM 微调指南","emoji":"1f9ec"}]},{"id":"7e8be22a966d3861ff1f7ebbd178ae7144d05c51","title":"强化学习（RL）指南","pathname":"/docs/zh/kai-shi-shi-yong/reinforcement-learning-rl-guide","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f4a1","description":"全面了解强化学习（RL），以及如何使用 Unsloth 和 GRPO 训练你自己的 DeepSeek-R1 推理模型。从入门到高级的完整指南。","breadcrumbs":[{"label":"开始使用"}]},{"id":"568f528390785432e96ab3958c36bec4b8482ec7","title":"强化学习 GRPO，上下文长度提升 7 倍","pathname":"/docs/zh/kai-shi-shi-yong/reinforcement-learning-rl-guide/grpo-long-context","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f300","description":"了解 Unsloth 如何支持超长上下文的 RL 微调。","breadcrumbs":[{"label":"开始使用"},{"label":"强化学习（RL）指南","emoji":"1f4a1"}]},{"id":"1d7550c0346dc79003c120cca2e0063a9addf768","title":"视觉强化学习（VLM RL）","pathname":"/docs/zh/kai-shi-shi-yong/reinforcement-learning-rl-guide/vision-reinforcement-learning-vlm-rl","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f441-1f5e8","description":"通过 Unsloth 使用 GRPO 和 RL 训练视觉/多模态模型！","breadcrumbs":[{"label":"开始使用"},{"label":"强化学习（RL）指南","emoji":"1f4a1"}]},{"id":"f63218900ca62905f408a7e5cca35eb4279231d7","title":"FP8 强化学习","pathname":"/docs/zh/kai-shi-shi-yong/reinforcement-learning-rl-guide/fp8-reinforcement-learning","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f3b1","description":"使用 Unsloth 以 FP8 精度训练强化学习（RL）和 GRPO。","breadcrumbs":[{"label":"开始使用"},{"label":"强化学习（RL）指南","emoji":"1f4a1"}]},{"id":"289d4338990d56fc4202e77bbae62bb949a7b439","title":"教程：使用 GRPO 训练你自己的推理模型","pathname":"/docs/zh/kai-shi-shi-yong/reinforcement-learning-rl-guide/tutorial-train-your-own-reasoning-model-with-grpo","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"26a1","description":"初学者指南：通过使用 Unsloth 和 GRPO，将 Llama 3.1（8B）之类的模型转换为推理模型。","breadcrumbs":[{"label":"开始使用"},{"label":"强化学习（RL）指南","emoji":"1f4a1"}]},{"id":"6223874117c54cbdacd6f3467136ab1146fce98a","title":"高级强化学习文档","pathname":"/docs/zh/kai-shi-shi-yong/reinforcement-learning-rl-guide/advanced-rl-documentation","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f9e9","description":"使用 Unsloth 与 GRPO 时的高级文档设置。","breadcrumbs":[{"label":"开始使用"},{"label":"强化学习（RL）指南","emoji":"1f4a1"}]},{"id":"20f0796d08c62347da8d70300eda483d376f94bb","title":"GSPO 强化学习","pathname":"/docs/zh/kai-shi-shi-yong/reinforcement-learning-rl-guide/advanced-rl-documentation/gspo-reinforcement-learning","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"lightbulb-on","description":"在 Unsloth 中使用 GSPO（Group Sequence Policy Optimization，组序列策略优化）进行 RL 训练。","breadcrumbs":[{"label":"开始使用"},{"label":"强化学习（RL）指南","emoji":"1f4a1"},{"label":"高级强化学习文档","emoji":"1f9e9"}]},{"id":"5aa66df0eeb87307aba670f22bb02189b4d9e37f","title":"RL 
奖励黑客","pathname":"/docs/zh/kai-shi-shi-yong/reinforcement-learning-rl-guide/advanced-rl-documentation/rl-reward-hacking","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"treasure-chest","description":"了解强化学习中的奖励黑客是什么，以及如何应对。","breadcrumbs":[{"label":"开始使用"},{"label":"强化学习（RL）指南","emoji":"1f4a1"},{"label":"高级强化学习文档","emoji":"1f9e9"}]},{"id":"5487fa62e1aa76e8bf480388ee8d3305a4f0c4e1","title":"用于 RL 的 FP16 vs BF16","pathname":"/docs/zh/kai-shi-shi-yong/reinforcement-learning-rl-guide/advanced-rl-documentation/fp16-vs-bf16-for-rl","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"2049","description":"《通过 FP16 消除训练-推理不匹配》 https://arxiv.org/pdf/2510.26788 说明了使用 float16 比 bfloat16 更好。","breadcrumbs":[{"label":"开始使用"},{"label":"强化学习（RL）指南","emoji":"1f4a1"},{"label":"高级强化学习文档","emoji":"1f9e9"}]},{"id":"838ad11f4c8ab2c80a5f0297ee29a596bf49a0e6","title":"内存高效型 RL","pathname":"/docs/zh/kai-shi-shi-yong/reinforcement-learning-rl-guide/memory-efficient-rl","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"memory","description":"","breadcrumbs":[{"label":"开始使用"},{"label":"强化学习（RL）指南","emoji":"1f4a1"}]},{"id":"de455e62732ad7811d23f32a140e457c7d66724d","title":"偏好优化训练 - DPO、ORPO 与 KTO","pathname":"/docs/zh/kai-shi-shi-yong/reinforcement-learning-rl-guide/preference-dpo-orpo-and-kto","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f3c6","description":"了解如何通过 Unsloth 使用 DPO、GRPO、ORPO 或 KTO 进行偏好对齐微调，按照以下步骤操作：","breadcrumbs":[{"label":"开始使用"},{"label":"强化学习（RL）指南","emoji":"1f4a1"}]},{"id":"1bc06634f9646051a40b3ee9c1e88ef2179a0bc2","title":"介绍 Unsloth Studio","pathname":"/docs/zh/xin-zeng/studio","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f9a5","description":"使用 Unsloth Studio 在本地运行和训练 AI 模型。","breadcrumbs":[{"label":"新增"}]},{"id":"12f6b822ad28322267b74accad74f665b341f5a4","title":"开始使用 Unsloth Studio","pathname":"/docs/zh/xin-zeng/studio/start","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"bolt","description":"关于微调工作室、数据配方、模型导出和聊天功能的入门指南。","breadcrumbs":[{"label":"新增"},{"label":"介绍 Unsloth Studio","emoji":"1f9a5"}]},{"id":"5c2325084ff65c0303d8ec102b689868935855d3","title":"如何使用 Unsloth Studio 运行模型","pathname":"/docs/zh/xin-zeng/studio/chat","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"comment-dots","description":"使用 Unsloth Studio 在本地运行 AI 模型、LLM 和 GGUF。","breadcrumbs":[{"label":"新增"},{"label":"介绍 Unsloth Studio","emoji":"1f9a5"}]},{"id":"ef666f465ed5ec55124d17ba90cfb169b4efa95f","title":"Unsloth Studio 安装","pathname":"/docs/zh/xin-zeng/studio/install","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"arrow-down-to-square","description":"了解如何在本地设备上安装 Unsloth Studio。","breadcrumbs":[{"label":"新增"},{"label":"介绍 Unsloth Studio","emoji":"1f9a5"}]},{"id":"7223afcfd2df87e1fe32a963c0b5ef0e45f563c5","title":"Unsloth 数据配方","pathname":"/docs/zh/xin-zeng/studio/data-recipe","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"hat-chef","description":"了解如何使用 Unsloth Studio 的数据配方创建、构建和编辑数据集。","breadcrumbs":[{"label":"新增"},{"label":"介绍 Unsloth Studio","emoji":"1f9a5"}]},{"id":"f7c3389bdba9af3050e66a941596d827cdb11e0b","title":"使用 Unsloth Studio 导出模型","pathname":"/docs/zh/xin-zeng/studio/export","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"box-isometric","description":"了解如何将你的 safetensor 或 LoRA 模型文件导出为 GGUF 或其他格式。","breadcrumbs":[{"label":"新增"},{"label":"介绍 Unsloth Studio","emoji":"1f9a5"}]},{"id":"e1e43893beb1c3e2a075324e9a00800315b2e1a3","title":"Unsloth 更新","pathname":"/docs/zh/xin-zeng/changelog","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"sparkles","description":"我们最新发布、改进和修复的 Unsloth 
更新日志。","breadcrumbs":[{"label":"新增"}]},{"id":"213bd08e4302b621f4392f7ee38decb275ffab02","title":"Qwen3.6 - 如何本地运行","pathname":"/docs/zh/mo-xing/qwen3.6","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f49c","description":"本地运行全新的 Qwen3.6-35-A3B 模型！","breadcrumbs":[{"label":"模型"}]},{"id":"10f714f4a513e0d0a86b6f9d5945f9014729b035","title":"Gemma 4 - 如何本地运行","pathname":"/docs/zh/mo-xing/gemma-4","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"2728","description":"本地运行 Google 全新的 Gemma 4 模型，包括 E2B、E4B、26B A4B 和 31B。","breadcrumbs":[{"label":"模型"}]},{"id":"33fa9e3bb3ccf6a5c0011aa600e98abbe3a829e3","title":"Gemma 4 微调指南","pathname":"/docs/zh/mo-xing/gemma-4/train","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"flask-gear","description":"使用 Unsloth 训练 Google 的 Gemma 4。","breadcrumbs":[{"label":"模型"},{"label":"Gemma 4 - 如何本地运行","emoji":"2728"}]},{"id":"1427becb679b955148197a06de42a82ae44b05b6","title":"Qwen3.5 - 如何本地运行","pathname":"/docs/zh/mo-xing/qwen3.5","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f49c","description":"在你的本地设备上运行全新的 Qwen3.5 LLM，包括中型：Qwen3.5-35B-A3B、27B、122B-A10B，小型：Qwen3.5-0.8B、2B、4B、9B 和 397B-A17B！","breadcrumbs":[{"label":"模型"}]},{"id":"f5ae6c6f6b6b3c34b616c7d668668d9ab102aa23","title":"Qwen3.5 微调指南","pathname":"/docs/zh/mo-xing/qwen3.5/fine-tune","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"flask-gear","description":"了解如何使用 Unsloth 微调 Qwen3.5 LLM。","breadcrumbs":[{"label":"模型"},{"label":"Qwen3.5 - 如何本地运行","emoji":"1f49c"}]},{"id":"29170937075312be229b292fa371d86315687849","title":"Qwen3.5 GGUF 基准测试","pathname":"/docs/zh/mo-xing/qwen3.5/gguf-benchmarks","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"chart-fft","description":"查看 Unsloth Dynamic GGUF 的性能，以及困惑度、KL 散度和 MXFP4 的分析。","breadcrumbs":[{"label":"模型"},{"label":"Qwen3.5 - 如何本地运行","emoji":"1f49c"}]},{"id":"9aaee5fe955235c67f90c9c0fd454b487c5f2d80","title":"GLM-5.1 - 如何本地运行","pathname":"/docs/zh/mo-xing/glm-5.1","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"z","description":"在你自己的本地设备上运行 Z.ai 的全新 GLM-5.1 模型！","breadcrumbs":[{"label":"模型"}]},{"id":"9bee4aaebd8150d5123bcdff6d78c7cc9a85098b","title":"MiniMax-M2.7 - 如何本地运行","pathname":"/docs/zh/mo-xing/minimax-m27","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"waveform","description":"在你自己的设备上本地运行 MiniMax-M2.7 LLM！","breadcrumbs":[{"label":"模型"}]},{"id":"744e7d433d981b3fd86d1a7ad5e4f1d406c1c0eb","title":"NVIDIA Nemotron 3 Nano - 运行指南","pathname":"/docs/zh/mo-xing/nemotron-3","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f9e9","description":"在你的设备上本地运行并微调 NVIDIA Nemotron 3 Nano！","breadcrumbs":[{"label":"模型"}]},{"id":"176b0d6591d584609d74c77b61a6dd546087717f","title":"NVIDIA Nemotron-3-Super：运行指南","pathname":"/docs/zh/mo-xing/nemotron-3/nemotron-3-super","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f9e9","description":"在你的设备上本地运行并微调 NVIDIA Nemotron-3-Super-120B-A12B！","breadcrumbs":[{"label":"模型"},{"label":"NVIDIA Nemotron 3 Nano - 运行指南","emoji":"1f9e9"}]},{"id":"07558781d1c2c35e14fcaa4821ee92be27a005a8","title":"Qwen3-Coder-Next：如何本地运行","pathname":"/docs/zh/mo-xing/qwen3-coder-next","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f320","description":"在你的设备上本地运行 Qwen3-Coder-Next 指南！","breadcrumbs":[{"label":"模型"}]},{"id":"a63fe18206ba278e17e40bf31ce577cda524869c","title":"GLM-4.7-Flash：如何本地运行","pathname":"/docs/zh/mo-xing/glm-4.7-flash","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"z","description":"在你的设备上本地运行并微调 GLM-4.7-Flash！","breadcrumbs":[{"label":"模型"}]},{"id":"5c26f8be53c96fd6b94596a72ef056d730c03d86","title":"Kimi 
K2.5：如何本地运行指南","pathname":"/docs/zh/mo-xing/kimi-k2.5","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f95d","description":"在你自己的本地设备上运行 Kimi-K2.5 的指南！","breadcrumbs":[{"label":"模型"}]},{"id":"965abb08a6c7b3c74818ff7dba38c65df452a2ca","title":"gpt-oss：运行指南","pathname":"/docs/zh/mo-xing/gpt-oss-how-to-run-and-fine-tune","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"openai","description":"运行并微调 OpenAI 全新的开源模型！","breadcrumbs":[{"label":"模型"}]},{"id":"0a72c8e1ec794760a091474f18d823586f245604","title":"gpt-oss 强化学习","pathname":"/docs/zh/mo-xing/gpt-oss-how-to-run-and-fine-tune/gpt-oss-reinforcement-learning","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"openai","description":"","breadcrumbs":[{"label":"模型"},{"label":"gpt-oss：运行指南","icon":"openai"}]},{"id":"c797743132dc316262472f4d5a65a35b7d30dc04","title":"教程：如何使用 RL 训练 gpt-oss","pathname":"/docs/zh/mo-xing/gpt-oss-how-to-run-and-fine-tune/gpt-oss-reinforcement-learning/tutorial-how-to-train-gpt-oss-with-rl","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"book-open-reader","description":"了解如何使用 GRPO 训练 OpenAI gpt-oss，以便在本地或 Colab 上自动击败 2048。","breadcrumbs":[{"label":"模型"},{"label":"gpt-oss：运行指南","icon":"openai"},{"label":"gpt-oss 强化学习","icon":"openai"}]},{"id":"0148694a359883dd63c310577df8161407074855","title":"教程：如何微调 gpt-oss","pathname":"/docs/zh/mo-xing/gpt-oss-how-to-run-and-fine-tune/tutorial-how-to-fine-tune-gpt-oss","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"openai","description":"一步步学习如何使用 Unsloth 在本地训练 OpenAI gpt-oss。","breadcrumbs":[{"label":"模型"},{"label":"gpt-oss：运行指南","icon":"openai"}]},{"id":"df91ad6f2c0f993f5596bb3fe97f9d889e4403c9","title":"长上下文 gpt-oss 训练","pathname":"/docs/zh/mo-xing/gpt-oss-how-to-run-and-fine-tune/long-context-gpt-oss-training","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"openai","description":"","breadcrumbs":[{"label":"模型"},{"label":"gpt-oss：运行指南","icon":"openai"}]},{"id":"8d4117b244a368b8f80b1a9d079fa31360c8e823","title":"大型语言模型（LLM）教程","pathname":"/docs/zh/mo-xing/tutorials","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f680","description":"探索最新的 LLM，并了解如何使用 Unsloth 在本地运行和微调模型，以获得最佳性能。","breadcrumbs":[{"label":"模型"}]},{"id":"b0620b25ba70d3f855acc525e8dbd8dc4c0e7acb","title":"GLM-5：如何本地运行指南","pathname":"/docs/zh/mo-xing/tutorials/glm-5","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"z","description":"在你自己的本地设备上运行 Z.ai 的全新 GLM-5 模型！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"ea62c0023475466d68867497372502852aec44f9","title":"Qwen3 - 如何运行与微调","pathname":"/docs/zh/mo-xing/tutorials/qwen3-how-to-run-and-fine-tune","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f320","description":"学习如何使用 Unsloth + 我们的 Dynamic 2.0 量化，在本地运行和微调 Qwen3","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"27804e7e4efe89993216789d2932c5b1415cf077","title":"Qwen3-VL：运行指南","pathname":"/docs/zh/mo-xing/tutorials/qwen3-how-to-run-and-fine-tune/qwen3-vl-how-to-run-and-fine-tune","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f320","description":"了解如何使用 Unsloth 在本地微调和运行 Qwen3-VL。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"},{"label":"Qwen3 - 如何运行与微调","emoji":"1f320"}]},{"id":"7b9f9f1f51740c3843cc4f6e4c00590de39b488b","title":"Qwen3-2507：本地运行指南","pathname":"/docs/zh/mo-xing/tutorials/qwen3-how-to-run-and-fine-tune/qwen3-2507","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f320","description":"在你的设备上本地运行 Qwen3-30B-A3B-2507 和 235B-A22B Thinking 及 Instruct 
版本！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"},{"label":"Qwen3 - 如何运行与微调","emoji":"1f320"}]},{"id":"77ac2f6d69532a9ed1df88fcd4048f113fedb4ad","title":"MiniMax-M2.5：运行指南","pathname":"/docs/zh/mo-xing/tutorials/minimax-m25","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"waveform","description":"在你自己的设备上本地运行 MiniMax-M2.5！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"04b53b5f8ccc52e6530ba353ac2df848a070f75e","title":"Qwen3-Coder：如何本地运行","pathname":"/docs/zh/mo-xing/tutorials/qwen3-coder-how-to-run-locally","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f320","description":"使用 Unsloth Dynamic 量化，在本地运行 Qwen3-Coder-30B-A3B-Instruct 和 480B-A35B。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"d4856201ff076e83de868b083ab0653288b531ee","title":"Gemma 3 - 运行指南","pathname":"/docs/zh/mo-xing/tutorials/gemma-3-how-to-run-and-fine-tune","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"google","description":"了解如何使用我们的 GGUF 在 llama.cpp、Ollama、Open WebUI 上有效运行 Gemma 3，以及如何使用 Unsloth 进行微调！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"4f67226331354d381a9545a411f8f9364fd9cf27","title":"Gemma 3n：如何运行与微调","pathname":"/docs/zh/mo-xing/tutorials/gemma-3-how-to-run-and-fine-tune/gemma-3n-how-to-run-and-fine-tune","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"google","description":"使用 Dynamic GGUF 在 llama.cpp、Ollama、Open WebUI 上本地运行 Google 的全新 Gemma 3n，并使用 Unsloth 进行微调！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"},{"label":"Gemma 3 - 运行指南","icon":"google"}]},{"id":"0efbaa992ff738764da5513b9bb33e8536c93397","title":"DeepSeek-OCR 2：运行与微调指南","pathname":"/docs/zh/mo-xing/tutorials/deepseek-ocr-2","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f433","description":"关于如何在本地运行和微调 DeepSeek-OCR-2 的指南。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"f2f10d2f6d415fee991d199028e61af190b1bd9f","title":"GLM-4.7：如何本地运行指南","pathname":"/docs/zh/mo-xing/tutorials/glm-4.7","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"z","description":"关于如何在你自己的本地设备上运行 Z.ai GLM-4.7 模型的指南！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"b5c7d5b7677814317721b04088046f74558e7756","title":"如何在 ComfyUI 中本地运行 Qwen-Image-2512","pathname":"/docs/zh/mo-xing/tutorials/qwen-image-2512","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f49f","description":"在你的本地设备上使用 ComfyUI 运行 Qwen-Image-2512 的分步教程。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"36c45bfa260807b70cd911eddc881f78ab3166b4","title":"在 stable-diffusion.cpp 中运行 Qwen-Image-2512 教程","pathname":"/docs/zh/mo-xing/tutorials/qwen-image-2512/stable-diffusion.cpp","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f3a8","description":"在 stable-diffusion.cpp 中使用 Qwen-Image-2512 的教程。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"},{"label":"如何在 ComfyUI 中本地运行 Qwen-Image-2512","emoji":"1f49f"}]},{"id":"1e5e3967b00f3aee6ffa79790e93cf515cd7a47d","title":"Devstral 2 - 运行指南","pathname":"/docs/zh/mo-xing/tutorials/devstral-2","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f4d9","description":"Mistral Devstral 2 模型本地运行指南：123B-Instruct-2512 和 Small-2-24B-Instruct-2512。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"4991295da147774f87b981fb5f8227e572f31c2f","title":"Ministral 3 - 
运行指南","pathname":"/docs/zh/mo-xing/tutorials/ministral-3","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f431","description":"Mistral Ministral 3 模型指南，可在你的设备上本地运行或微调","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"18f54403c2ec31fa43d1878d7b0d9f667b94a739","title":"DeepSeek-OCR：如何运行与微调","pathname":"/docs/zh/mo-xing/tutorials/deepseek-ocr-how-to-run-and-fine-tune","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f433","description":"关于如何在本地运行和微调 DeepSeek-OCR 的指南。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"46d34a983e9c5a2763bb18c48daf83bcbd1ba871","title":"Kimi K2 Thinking：本地运行指南","pathname":"/docs/zh/mo-xing/tutorials/kimi-k2-thinking-how-to-run-locally","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f319","description":"在你自己的本地设备上运行 Kimi-K2-Thinking 和 Kimi-K2 的指南！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"c95883ea2c2bc5222ff12a80cc8e663fcb28cbdd","title":"GLM-4.6：本地运行指南","pathname":"/docs/zh/mo-xing/tutorials/glm-4.6-how-to-run-locally","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"z","description":"关于如何在你自己的本地设备上运行 Z.ai GLM-4.6 和 GLM-4.6V-Flash 模型的指南！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"8db617b6a4600d935dc1abb0de60011e111d55ff","title":"Qwen3-Next：本地运行指南","pathname":"/docs/zh/mo-xing/tutorials/qwen3-next","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f320","description":"在你的设备上本地运行 Qwen3-Next-80B-A3B-Instruct 和 Thinking 版本！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"585492ce9031c7bf11018debd8017628aaf0db18","title":"FunctionGemma：如何运行与微调","pathname":"/docs/zh/mo-xing/tutorials/functiongemma","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"google","description":"了解如何在你的设备和手机上本地运行和微调 FunctionGemma。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"e159c2414bf59eef4f0d1e98dc6013b013c34532","title":"DeepSeek-V3.1：如何本地运行","pathname":"/docs/zh/mo-xing/tutorials/deepseek-v3.1-how-to-run-locally","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f40b","description":"关于如何在你自己的本地设备上运行 DeepSeek-V3.1 和 Terminus 的指南！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"cc90024db8d8f2b8456ab21f1a604bf6329b4ade","title":"DeepSeek-R1-0528：如何本地运行","pathname":"/docs/zh/mo-xing/tutorials/deepseek-r1-0528-how-to-run-locally","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f40b","description":"关于如何在你自己的本地设备上运行 DeepSeek-R1-0528（包括 Qwen3）的指南！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"ca88dbf750085097663e25264a22a8807b3765ef","title":"Liquid LFM2.5：如何运行与微调","pathname":"/docs/zh/mo-xing/tutorials/lfm2.5","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f4a7","description":"在你的设备上本地运行和微调 LFM2.5 Instruct 与 Vision！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"3e19eb045e2102b53e82b58d156a4c97ae6e6841","title":"Magistral：如何运行与微调","pathname":"/docs/zh/mo-xing/tutorials/magistral-how-to-run-and-fine-tune","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f4a5","description":"认识 Magistral——Mistral 的全新推理模型。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"68af53cc82ad32be2fd8d907ed61998fd4ddb80c","title":"IBM Granite 4.0","pathname":"/docs/zh/mo-xing/tutorials/ibm-granite-4.0","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"cube","description":"了解如何使用 Unsloth GGUF 在 llama.cpp、Ollama 上运行 IBM 
Granite-4.0，以及如何进行微调！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"42678e5a7e3e685fdb02fdfa055add8deb255047","title":"Llama 4：如何运行与微调","pathname":"/docs/zh/mo-xing/tutorials/llama-4-how-to-run-and-fine-tune","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f999","description":"了解如何使用我们的动态 GGUF 在本地运行 Llama 4，与标准量化相比可恢复准确率。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"1834e3a31973190a1f05682cdcd1a9031464c344","title":"Grok 2","pathname":"/docs/zh/mo-xing/tutorials/grok-2","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"square-x-twitter","description":"在本地运行 xAI 的 Grok 2 模型！","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"bf18dbdaeec1eb9a55d550c53a565fcd383bbedc","title":"Devstral：如何运行与微调","pathname":"/docs/zh/mo-xing/tutorials/devstral-how-to-run-and-fine-tune","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f4d9","description":"运行并微调 Mistral Devstral 1.1，包括 Small-2507 和 2505。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"cee0a9958d4af5692dc2cea39034597e9f71ddfd","title":"如何使用 Docker 运行本地 LLM：分步指南","pathname":"/docs/zh/mo-xing/tutorials/how-to-run-llms-with-docker","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"docker","description":"了解如何使用 Docker 和 Unsloth 在你的本地设备上运行大型语言模型（LLM）。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"475f6ff513257bac96c996ccb621236825757b4e","title":"DeepSeek-V3-0324：如何本地运行","pathname":"/docs/zh/mo-xing/tutorials/deepseek-v3-0324-how-to-run-locally","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f433","description":"如何使用我们的动态量化在本地运行 DeepSeek-V3-0324，并恢复准确率","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"9aeceb5c9d6f0a75d39bba30684b3f56d8aa245f","title":"DeepSeek-R1：如何本地运行","pathname":"/docs/zh/mo-xing/tutorials/deepseek-r1-how-to-run-locally","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f40b","description":"关于如何使用 llama.cpp 运行我们针对 DeepSeek-R1 的 1.58-bit 动态量化的指南。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"6a1a38ace50da481fbc7af2ec6d45ba6f2779db6","title":"DeepSeek-R1 动态 1.58-bit","pathname":"/docs/zh/mo-xing/tutorials/deepseek-r1-how-to-run-locally/deepseek-r1-dynamic-1.58-bit","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f433","description":"查看 Unsloth 的 Dynamic GGUF 量化与标准 IMatrix 量化的性能对比表。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"},{"label":"DeepSeek-R1：如何本地运行","emoji":"1f40b"}]},{"id":"4af548c36209e6a98ab992e40cc8085cf71fe1b1","title":"Phi-4 Reasoning：如何运行与微调","pathname":"/docs/zh/mo-xing/tutorials/phi-4-reasoning-how-to-run-and-fine-tune","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"windows","description":"了解如何使用 Unsloth + 我们的 Dynamic 2.0 量化在本地运行和微调 Phi-4 推理模型","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"152e63aa9c7c5dfb094c254c117d4bd06908bf98","title":"QwQ-32B：如何高效运行","pathname":"/docs/zh/mo-xing/tutorials/qwq-32b-how-to-run-effectively","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f320","description":"了解如何使用我们的错误修复并避免无尽生成来高效运行 QwQ-32B，以及 
GGUF。","breadcrumbs":[{"label":"模型"},{"label":"大型语言模型（LLM）教程","emoji":"1f680"}]},{"id":"9a72670992feb75def412a693565c84a88c8a266","title":"推理与部署","pathname":"/docs/zh/ji-chu/inference-and-deployment","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f5a5","description":"了解如何保存你的微调模型，以便在你喜欢的推理引擎中运行。","breadcrumbs":[{"label":"基础"}]},{"id":"b83d88f106d75c3396c46f5342fb401501910093","title":"保存为 GGUF","pathname":"/docs/zh/ji-chu/inference-and-deployment/saving-to-gguf","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"将模型保存为 GGUF 所需的 16 位格式，以便在 Ollama、Jan AI、Open WebUI 等中使用！","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"}]},{"id":"8ff566c5ada3f8fb59929a32b877837ba0041924","title":"推测解码","pathname":"/docs/zh/ji-chu/inference-and-deployment/saving-to-gguf/speculative-decoding","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"使用 llama-server、llama.cpp、vLLM 等进行推测解码，实现 2 倍更快的推理","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"},{"label":"保存为 GGUF"}]},{"id":"9f0e22d200c9105481e4854b8473aba99ca44835","title":"vLLM 部署与推理指南","pathname":"/docs/zh/ji-chu/inference-and-deployment/vllm-guide","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"关于将 LLM 保存并部署到 vLLM 以在生产环境中提供服务的指南","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"}]},{"id":"160443d79a06d2d700045d140452e790dbdb1173","title":"vLLM 引擎参数","pathname":"/docs/zh/ji-chu/inference-and-deployment/vllm-guide/vllm-engine-arguments","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"},{"label":"vLLM 部署与推理指南"}]},{"id":"5536133cb2c4c06df946ff9440b26b7391a12b5c","title":"LoRA 热切换指南","pathname":"/docs/zh/ji-chu/inference-and-deployment/vllm-guide/lora-hot-swapping-guide","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"},{"label":"vLLM 部署与推理指南"}]},{"id":"d4f9cf59ddb6cd217d8f8563eeb6c00042f21972","title":"保存到 Ollama","pathname":"/docs/zh/ji-chu/inference-and-deployment/saving-to-ollama","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"}]},{"id":"771775d41e6a1596232819cdd79823d415eda744","title":"部署模型到 LM Studio","pathname":"/docs/zh/ji-chu/inference-and-deployment/lm-studio","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"将模型保存为 GGUF，以便你可以在 LM Studio 中运行和部署它们","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"}]},{"id":"b229232347528d9aad88114911fe039c42595fd3","title":"如何在 Linux 终端中安装 LM Studio CLI","pathname":"/docs/zh/ji-chu/inference-and-deployment/lm-studio/how-to-install-lm-studio-cli-in-linux-terminal","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f47e","description":"在终端实例中无需 UI 的 LM Studio CLI 安装指南。","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"},{"label":"部署模型到 LM Studio"}]},{"id":"23e76b12a72496ba4fcc9d857dd940dd6ae14736","title":"SGLang 部署与推理指南","pathname":"/docs/zh/ji-chu/inference-and-deployment/sglang-guide","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"关于将 LLM 保存并部署到 SGLang 以在生产环境中提供服务的指南","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"}]},{"id":"4a201e2f3e992b62e25a0ba283ec8b14ad3f414b","title":"llama-server 与 OpenAI 端点部署指南","pathname":"/docs/zh/ji-chu/inference-and-deployment/llama-server-and-openai-endpoint","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"通过支持 OpenAI 的兼容端点使用 llama-server 
进行部署","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"}]},{"id":"e0e826e45659eab088ef3acd7826998bc36539e9","title":"如何在你的 iOS 或 Android 手机上运行和部署 LLM","pathname":"/docs/zh/ji-chu/inference-and-deployment/deploy-llms-phone","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f4f1","description":"关于如何微调你自己的 LLM 并使用 ExecuTorch 将其部署到 Android 或 iPhone 上的教程。","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"}]},{"id":"5511a85b8b57f4cfdffedbc8f1ea2110a10d550e","title":"推理故障排查","pathname":"/docs/zh/ji-chu/inference-and-deployment/troubleshooting-inference","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"如果你在运行或保存模型时遇到问题。","breadcrumbs":[{"label":"基础"},{"label":"推理与部署","emoji":"1f5a5"}]},{"id":"1a707991086189a8e5cd8374f3ce1b81915bc159","title":"如何使用 Claude Code 运行本地 LLM","pathname":"/docs/zh/ji-chu/claude-code","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"claude","description":"关于如何在本地设备上使用 Claude Code 运行开源模型的指南。","breadcrumbs":[{"label":"基础"}]},{"id":"b71ddea7924324c058a771e5e831c3cb6fc75b18","title":"如何使用 OpenAI Codex 运行本地 LLM","pathname":"/docs/zh/ji-chu/codex","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"openai","description":"在你的设备上本地使用 OpenAI Codex 运行开源模型。","breadcrumbs":[{"label":"基础"}]},{"id":"cd472d76f8ad81236011a0337b4cec66382031e2","title":"使用 Unsloth 进行多 GPU 微调","pathname":"/docs/zh/ji-chu/multi-gpu-training-with-unsloth","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"rectangle-history","description":"了解如何使用 Unsloth 在多个 GPU 上进行 LLM 微调和并行化。","breadcrumbs":[{"label":"基础"}]},{"id":"c16d3ee721c7692f2341fe3dcb16afaf2d84858f","title":"使用分布式数据并行（DDP）进行多 GPU 微调","pathname":"/docs/zh/ji-chu/multi-gpu-training-with-unsloth/ddp","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"了解如何使用 Unsloth CLI 通过分布式数据并行（DDP）在多个 GPU 上训练！","breadcrumbs":[{"label":"基础"},{"label":"使用 Unsloth 进行多 GPU 微调","icon":"rectangle-history"}]},{"id":"a078f1a9ba6457ae124f908cfdebf7ca27afaf56","title":"使用 Unsloth 的嵌入模型微调指南","pathname":"/docs/zh/ji-chu/embedding-finetuning","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f50e","description":"了解如何轻松使用 Unsloth 微调嵌入模型。","breadcrumbs":[{"label":"基础"}]},{"id":"90adfb72dc954d9c5afd7cd406bebf264f2005ac","title":"使用 Unsloth 将 MoE 模型微调速度提升 12 倍","pathname":"/docs/zh/ji-chu/faster-moe","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f48e","description":"使用 Unsloth 指南在本地训练 MoE LLM。","breadcrumbs":[{"label":"基础"}]},{"id":"d2ec5af816022e7e65a1929ffa5d6060bf270047","title":"文本转语音（TTS）微调指南","pathname":"/docs/zh/ji-chu/text-to-speech-tts-fine-tuning","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f50a","description":"了解如何使用 Unsloth 微调 TTS 和 STT 语音模型。","breadcrumbs":[{"label":"基础"}]},{"id":"e658f01212ed739b6cc1648a22333767661730a1","title":"Unsloth Dynamic 2.0 GGUF","pathname":"/docs/zh/ji-chu/unsloth-dynamic-2.0-ggufs","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f9a5","description":"我们的 Dynamic Quants 的一次重大新升级！","breadcrumbs":[{"label":"基础"}]},{"id":"9ab45d8a6a66c5a9a735fd4c5d902c7c44b27d87","title":"Aider Polyglot 上的 Unsloth Dynamic GGUF","pathname":"/docs/zh/ji-chu/unsloth-dynamic-2.0-ggufs/unsloth-dynamic-ggufs-on-aider-polyglot","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f9a5","description":"Unsloth Dynamic GGUF 在 Aider Polyglot 基准上的表现","breadcrumbs":[{"label":"基础"},{"label":"Unsloth Dynamic 2.0 GGUF","emoji":"1f9a5"}]},{"id":"4fe123c34ab0d523b509efe2b2b56b299498fc5c","title":"本地 LLM 
的工具调用指南","pathname":"/docs/zh/ji-chu/tool-calling-guide-for-local-llms","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"screwdriver-wrench","description":"","breadcrumbs":[{"label":"基础"}]},{"id":"5fbd33a15690ccb7e121b10fec182c0112c69066","title":"视觉微调","pathname":"/docs/zh/ji-chu/vision-fine-tuning","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f441","description":"了解如何使用 Unsloth 微调视觉/多模态 LLM","breadcrumbs":[{"label":"基础"}]},{"id":"1e4bf502aabe6ddf3dfea962f3f9d713aaea2190","title":"故障排查与常见问题","pathname":"/docs/zh/ji-chu/troubleshooting-and-faqs","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"26a0","description":"解决问题的技巧，以及常见问题解答。","breadcrumbs":[{"label":"基础"}]},{"id":"d1ca1266e0caf1875e2ec49b943f99476047a4df","title":"Hugging Face Hub，XET 调试","pathname":"/docs/zh/ji-chu/troubleshooting-and-faqs/hugging-face-hub-xet-debugging","siteSpaceId":"sitesp_3cbXc","lang":"zh","description":"调试、排查卡住、停滞的下载以及慢速下载","breadcrumbs":[{"label":"基础"},{"label":"故障排查与常见问题","emoji":"26a0"}]},{"id":"5d2a5f8cc953da5c0db5753139d5f3e26a1c4635","title":"聊天模板","pathname":"/docs/zh/ji-chu/chat-templates","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f4ac","description":"了解聊天模板的基础知识和自定义选项，包括 Conversational、ChatML、ShareGPT、Alpaca 格式等！","breadcrumbs":[{"label":"基础"}]},{"id":"5efcf23278a6310dfc435b0e52b441693e4a640d","title":"Unsloth 环境标志","pathname":"/docs/zh/ji-chu/unsloth-environment-flags","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f6e0","description":"一些高级标志，在你看到微调出错或想关闭某些功能时可能会有用。","breadcrumbs":[{"label":"基础"}]},{"id":"eeac0ed07f31cdf24b72252a10d6eceef0bc3355","title":"持续预训练","pathname":"/docs/zh/ji-chu/continued-pretraining","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"267b","description":"也称为持续微调。Unsloth 允许你持续进行预训练，以便模型学习新语言。","breadcrumbs":[{"label":"基础"}]},{"id":"bf55f5975c7abaa2eb7d6c35d41d00a5ac7d3046","title":"从最后一个检查点继续微调","pathname":"/docs/zh/ji-chu/finetuning-from-last-checkpoint","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f3c1","description":"检查点机制允许你保存微调进度，这样你可以暂停后再继续。","breadcrumbs":[{"label":"基础"}]},{"id":"0faf69cc3c60f48fdf0fcaa450db46623ea2e487","title":"Unsloth 基准测试","pathname":"/docs/zh/ji-chu/unsloth-benchmarks","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"1f4ca","description":"Unsloth 在 NVIDIA GPU 上记录的基准测试。","breadcrumbs":[{"label":"基础"}]},{"id":"89384e94043ceb248ebc356b1fb9ee69559f1e62","title":"使用 Unsloth 内核 + Packing 实现 3 倍更快的 LLM 训练","pathname":"/docs/zh/bo-ke/3x-faster-training-packing","siteSpaceId":"sitesp_3cbXc","lang":"zh","emoji":"26a1","description":"了解 Unsloth 如何提高训练吞吐量并消除微调时的 padding 浪费。","breadcrumbs":[{"label":"博客"}]},{"id":"75464ca507ae7bfd1d067969e7f59dd5d631c4e3","title":"50 万上下文长度微调","pathname":"/docs/zh/bo-ke/500k-context-length-fine-tuning","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"ruler-combined","description":"了解如何使用 Unsloth 启用超过 50 万 token 上下文窗口的微调。","breadcrumbs":[{"label":"博客"}]},{"id":"67163c415f870f5b3b00cf5435820df0d5c6e7ce","title":"量化感知训练（QAT）","pathname":"/docs/zh/bo-ke/quantization-aware-training-qat","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"down-left-and-up-right-to-center","description":"使用 Unsloth 和 PyTorch 将模型量化为 4 位，以恢复准确率。","breadcrumbs":[{"label":"博客"}]},{"id":"702baf22d087bcf50380c3e6f1c28f53999562f2","title":"使用 Unsloth 在 NVIDIA DGX Station 上微调 LLM","pathname":"/docs/zh/bo-ke/dgx-station","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"microchip-ai","description":"关于如何使用 Unsloth 的笔记本在 NVIDIA DGX Station 
上进行微调的教程。","breadcrumbs":[{"label":"博客"}]},{"id":"2940860946759c1e409cb13440841f44d539b907","title":"使用 Unsloth 和 Docker 微调 LLM","pathname":"/docs/zh/bo-ke/how-to-fine-tune-llms-with-unsloth-and-docker","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"docker","description":"了解如何使用 Unsloth 的 Docker 镜像微调 LLM 或进行强化学习（RL）。","breadcrumbs":[{"label":"博客"}]},{"id":"abaf2778401f93018feb5d51ff9b83ef161cbc68","title":"使用 NVIDIA DGX Spark 和 Unsloth 微调 LLM","pathname":"/docs/zh/bo-ke/fine-tuning-llms-with-nvidia-dgx-spark-and-unsloth","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"sparkle","description":"关于如何在 NVIDIA DGX Spark 上使用 OpenAI gpt-oss 进行微调和强化学习（RL）的教程。","breadcrumbs":[{"label":"博客"}]},{"id":"402177956bb6f4d27c0bfee66439d5584b91564e","title":"使用 Blackwell、RTX 50 系列与 Unsloth 微调 LLM","pathname":"/docs/zh/bo-ke/fine-tuning-llms-with-blackwell-rtx-50-series-and-unsloth","siteSpaceId":"sitesp_3cbXc","lang":"zh","icon":"microchip","description":"通过我们的分步指南了解如何在 NVIDIA 的 Blackwell、RTX 50 系列和 B200 GPU 上微调 LLM。","breadcrumbs":[{"label":"博客"}]},{"id":"6e2677498ce15bfc11f46d16a61c61c1a7ff3edd","title":"Unsloth ドキュメント","pathname":"/docs/jp","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f9a5","description":"Unsloth は、モデルの実行と学習のためのオープンソースフレームワークです。","breadcrumbs":[{"label":"始める"}]},{"id":"f10fbc893d08889a92e28bfa2fc8fe09c696363d","title":"初心者向けファインチューニング","pathname":"/docs/jp/meru/fine-tuning-for-beginners","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"2b50","description":"","breadcrumbs":[{"label":"始める"}]},{"id":"6d173d71b33fc6bb9bbdbeb7a340a80132236305","title":"Unsloth の要件","pathname":"/docs/jp/meru/fine-tuning-for-beginners/unsloth-requirements","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f6e0","description":"ここでは、システム要件と GPU VRAM 要件を含む Unsloth の要件を紹介します。","breadcrumbs":[{"label":"始める"},{"label":"初心者向けファインチューニング","emoji":"2b50"}]},{"id":"fffb67e71b118ce93cf382a53722ac7a1a41dd7c","title":"FAQ + ファインチューニングは自分に適している？","pathname":"/docs/jp/meru/fine-tuning-for-beginners/faq-+-is-fine-tuning-right-for-me","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f914","description":"ファインチューニングが自分に合っているか迷っているなら、こちらをご覧ください！ ファインチューニングの誤解、RAG との比較などを学べます：","breadcrumbs":[{"label":"始める"},{"label":"初心者向けファインチューニング","emoji":"2b50"}]},{"id":"a3325bc180768721f1119499c1b82e6d84e53794","title":"Unsloth ノートブック","pathname":"/docs/jp/meru/unsloth-notebooks","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f4d2","description":"ファインチューニング用ノートブック：Unsloth カタログを見てみましょう。","breadcrumbs":[{"label":"始める"}]},{"id":"189a8deb3aa7eabc5be5a7563f26d4b08f252e28","title":"Unsloth モデルカタログ","pathname":"/docs/jp/meru/unsloth-model-catalog","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f52e","description":"","breadcrumbs":[{"label":"始める"}]},{"id":"78ed2140c0094b3caa1931c2127e336118049b42","title":"Unsloth のインストール","pathname":"/docs/jp/meru/install","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f4e5","description":"Unsloth をローカルまたはオンラインでインストールする方法を学びましょう。","breadcrumbs":[{"label":"始める"}]},{"id":"2668025f87c0405c2cc5a80af272adcb7670b1c1","title":"pip と uv で Unsloth をインストールする","pathname":"/docs/jp/meru/install/pip-install","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"desktop-arrow-down","description":"Pip を使ってローカルに Unsloth をインストールするには、以下の手順に従ってください：","breadcrumbs":[{"label":"始める"},{"label":"Unsloth のインストール","emoji":"1f4e5"}]},{"id":"f0aec71b27dd3fb7098c85a48ee39b11e05798fd","title":"MacOS に Unsloth 
をインストール","pathname":"/docs/jp/meru/install/mac","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"apple","description":"","breadcrumbs":[{"label":"始める"},{"label":"Unsloth のインストール","emoji":"1f4e5"}]},{"id":"3f25ebdeb2b687d90421588e1b9a97b6f91995fe","title":"Unsloth を使って Windows で LLM をファインチューニングする方法（ステップバイステップガイド）","pathname":"/docs/jp/meru/install/windows-installation","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"windows","description":"Windows に Unsloth をインストールして、ローカルで LLM のファインチューニングを始める方法を見てみましょう。","breadcrumbs":[{"label":"始める"},{"label":"Unsloth のインストール","emoji":"1f4e5"}]},{"id":"c188f95d26bbe239b9750e6595c7164791be0287","title":"Docker で Unsloth をインストール","pathname":"/docs/jp/meru/install/docker","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"docker","description":"公式 Docker コンテナを使って Unsloth をインストールする","breadcrumbs":[{"label":"始める"},{"label":"Unsloth のインストール","emoji":"1f4e5"}]},{"id":"a7e24b56702a0f7fb45f9f2ce0e6bd34dd1d2f50","title":"Unsloth の更新","pathname":"/docs/jp/meru/install/updating","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"arrow-rotate-right","description":"Unsloth を更新する、または古いバージョンを使うには、以下の手順に従ってください：","breadcrumbs":[{"label":"始める"},{"label":"Unsloth のインストール","emoji":"1f4e5"}]},{"id":"cc479234476bfcb2928aaa70e0b137d2f1b29981","title":"Unsloth ガイドで AMD GPU 上の LLM をファインチューニングする","pathname":"/docs/jp/meru/install/amd","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"square-up-right","description":"Unsloth を使って AMD GPU 上で大規模言語モデル（LLM）をファインチューニングする方法を学びましょう。","breadcrumbs":[{"label":"始める"},{"label":"Unsloth のインストール","emoji":"1f4e5"}]},{"id":"69a2c17173ec9337f4907132dee8c77a4517814a","title":"Unsloth を使って Intel GPU 上の LLM をファインチューニングする","pathname":"/docs/jp/meru/install/intel","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"info","description":"Intel GPU 上で大規模言語モデルを学習・ファインチューニングする方法を学びましょう。","breadcrumbs":[{"label":"始める"},{"label":"Unsloth のインストール","emoji":"1f4e5"}]},{"id":"ae7926a0f371a1f594e3344d85571edf864a5259","title":"LLM ファインチューニングガイド","pathname":"/docs/jp/meru/fine-tuning-llms-guide","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f9ec","description":"ファインチューニングの基本とベストプラクティスをすべて学べます。初心者向けです。","breadcrumbs":[{"label":"始める"}]},{"id":"0169f16f28bd29579ad4805156b35641f23090aa","title":"データセットガイド","pathname":"/docs/jp/meru/fine-tuning-llms-guide/datasets-guide","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f4c8","description":"ファインチューニング用のデータセットを作成・準備する方法を学びましょう。","breadcrumbs":[{"label":"始める"},{"label":"LLM ファインチューニングガイド","emoji":"1f9ec"}]},{"id":"dac5c97d7a208c227b8115ec72133012ad2d0113","title":"LoRA ファインチューニングのハイパーパラメータガイド","pathname":"/docs/jp/meru/fine-tuning-llms-guide/lora-hyperparameters-guide","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f9e0","description":"LLM ファインチューニングの最適な設定をステップバイステップで学びましょう。LoRA の rank と alpha、epochs、batch size と gradient accumulation、QLoRA と LoRA の比較、target modules などを扱います。","breadcrumbs":[{"label":"始める"},{"label":"LLM ファインチューニングガイド","emoji":"1f9ec"}]},{"id":"b0eaf2528a8f433d814b471834923a39c77f801f","title":"ファインチューニングにはどのモデルを使うべき？","pathname":"/docs/jp/meru/fine-tuning-llms-guide/what-model-should-i-use","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"2753","description":"","breadcrumbs":[{"label":"始める"},{"label":"LLM ファインチューニングガイド","emoji":"1f9ec"}]},{"id":"f46773a93603b1da44e6319f55fd3b900d7700fc","title":"チュートリアル：Llama-3 のファインチューニングと Ollama 
での利用方法","pathname":"/docs/jp/meru/fine-tuning-llms-guide/tutorial-how-to-finetune-llama-3-and-use-in-ollama","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f999","description":"Ollama 上でローカルに動かす、カスタマイズされた個人アシスタント（ChatGPT のようなもの）を作成するための初心者向けガイド","breadcrumbs":[{"label":"始める"},{"label":"LLM ファインチューニングガイド","emoji":"1f9ec"}]},{"id":"2e1ff161b8dd839d98df40642b2647753c4a80e4","title":"強化学習（RL）ガイド","pathname":"/docs/jp/meru/reinforcement-learning-rl-guide","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f4a1","description":"強化学習（RL）のすべてと、GRPO を使って Unsloth で自分専用の DeepSeek-R1 推論モデルを学習させる方法を学びましょう。初心者から上級者まで対応した完全ガイドです。","breadcrumbs":[{"label":"始める"}]},{"id":"7e5df0bd00dfa415f0af9c38f10687153fb5f588","title":"7倍の長いコンテキストを持つ強化学習 GRPO","pathname":"/docs/jp/meru/reinforcement-learning-rl-guide/grpo-long-context","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f300","description":"Unsloth が超長文コンテキストの RL ファインチューニングをどう実現するかを学びましょう。","breadcrumbs":[{"label":"始める"},{"label":"強化学習（RL）ガイド","emoji":"1f4a1"}]},{"id":"01cdfed2f29f2821ff712fcb0ccb837f8ba4baf3","title":"Vision 強化学習（VLM RL）","pathname":"/docs/jp/meru/reinforcement-learning-rl-guide/vision-reinforcement-learning-vlm-rl","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f441-1f5e8","description":"Unsloth で GRPO と RL を使って Vision/マルチモーダルモデルを学習しよう！","breadcrumbs":[{"label":"始める"},{"label":"強化学習（RL）ガイド","emoji":"1f4a1"}]},{"id":"fb9d3ff64a5035fc434979bb3b39a92149730468","title":"FP8 強化学習","pathname":"/docs/jp/meru/reinforcement-learning-rl-guide/fp8-reinforcement-learning","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f3b1","description":"Unsloth を使って FP8 精度で強化学習（RL）と GRPO を学習します。","breadcrumbs":[{"label":"始める"},{"label":"強化学習（RL）ガイド","emoji":"1f4a1"}]},{"id":"db5a7a83b913f48723eb2d7a67636b192c105cd3","title":"チュートリアル：GRPO で自分専用の推論モデルを学習する","pathname":"/docs/jp/meru/reinforcement-learning-rl-guide/tutorial-train-your-own-reasoning-model-with-grpo","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"26a1","description":"Unsloth と GRPO を使って、Llama 3.1（8B）のようなモデルを推論モデルへ変換するための初心者向けガイド。","breadcrumbs":[{"label":"始める"},{"label":"強化学習（RL）ガイド","emoji":"1f4a1"}]},{"id":"d162fad71aa9478e713b7a7dfa3d60267dfca53b","title":"高度な強化学習ドキュメント","pathname":"/docs/jp/meru/reinforcement-learning-rl-guide/advanced-rl-documentation","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f9e9","description":"Unsloth を GRPO と組み合わせて使う際の高度なドキュメント設定。","breadcrumbs":[{"label":"始める"},{"label":"強化学習（RL）ガイド","emoji":"1f4a1"}]},{"id":"a803c7bb314b96d0901b440bc199389cb1bd3663","title":"GSPO 強化学習","pathname":"/docs/jp/meru/reinforcement-learning-rl-guide/advanced-rl-documentation/gspo-reinforcement-learning","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"lightbulb-on","description":"Unsloth で GSPO（Group Sequence Policy Optimization）RL を使って学習します。","breadcrumbs":[{"label":"始める"},{"label":"強化学習（RL）ガイド","emoji":"1f4a1"},{"label":"高度な強化学習ドキュメント","emoji":"1f9e9"}]},{"id":"a653c474327e232378ecbe04adeeaf5e3b6abaaa","title":"RL における報酬ハッキング","pathname":"/docs/jp/meru/reinforcement-learning-rl-guide/advanced-rl-documentation/rl-reward-hacking","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"treasure-chest","description":"強化学習における Reward Hacking とは何か、そしてそれを防ぐ方法を学びましょう。","breadcrumbs":[{"label":"始める"},{"label":"強化学習（RL）ガイド","emoji":"1f4a1"},{"label":"高度な強化学習ドキュメント","emoji":"1f9e9"}]},{"id":"279889a232e1a4ec554827f0eb7c954d2871a4a3","title":"RL における FP16 と BF16 
の比較","pathname":"/docs/jp/meru/reinforcement-learning-rl-guide/advanced-rl-documentation/fp16-vs-bf16-for-rl","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"2049","description":"Defeating the Training-Inference Mismatch via FP16 https://arxiv.org/pdf/2510.26788 では、float16 を使う方が bfloat16 より優れていることが示されています","breadcrumbs":[{"label":"始める"},{"label":"強化学習（RL）ガイド","emoji":"1f4a1"},{"label":"高度な強化学習ドキュメント","emoji":"1f9e9"}]},{"id":"db44fb02e3caa0e241e41eb5c751f1554262cfb3","title":"メモリ効率の高い RL","pathname":"/docs/jp/meru/reinforcement-learning-rl-guide/memory-efficient-rl","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"memory","description":"","breadcrumbs":[{"label":"始める"},{"label":"強化学習（RL）ガイド","emoji":"1f4a1"}]},{"id":"7f735ce67d58d1d8b36af1a654b51e99d8461a8f","title":"選好最適化トレーニング - DPO、ORPO、KTO","pathname":"/docs/jp/meru/reinforcement-learning-rl-guide/preference-dpo-orpo-and-kto","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f3c6","description":"Unsloth を使った DPO、GRPO、ORPO、KTO による選好整合ファインチューニングについて学び、以下の手順に従ってください：","breadcrumbs":[{"label":"始める"},{"label":"強化学習（RL）ガイド","emoji":"1f4a1"}]},{"id":"a01cba7455dcbbf4a34e17600cd0b0101df41d37","title":"Unsloth Studio の紹介","pathname":"/docs/jp/xin-zhe/studio","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f9a5","description":"Unsloth Studio で AI モデルをローカルに実行・学習しましょう。","breadcrumbs":[{"label":"新着"}]},{"id":"511d3548745f95b53d7c4df809a4aeb55f219840","title":"Unsloth Studio を始める","pathname":"/docs/jp/xin-zhe/studio/start","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"bolt","description":"ファインチューニング Studio、データレシピ、モデルの書き出し、チャットの始め方ガイド。","breadcrumbs":[{"label":"新着"},{"label":"Unsloth Studio の紹介","emoji":"1f9a5"}]},{"id":"ca5ab7d9f589ffe0771972c3c3b4665342eb3baf","title":"Unsloth Studio でモデルを実行する方法","pathname":"/docs/jp/xin-zhe/studio/chat","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"comment-dots","description":"Unsloth Studio を使って、AI モデル、LLM、GGUF をローカルで実行します。","breadcrumbs":[{"label":"新着"},{"label":"Unsloth Studio の紹介","emoji":"1f9a5"}]},{"id":"8454803abb981b29f6c46027668d1416d8da0199","title":"Unsloth Studio のインストール","pathname":"/docs/jp/xin-zhe/studio/install","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"arrow-down-to-square","description":"自分のローカルデバイスに Unsloth Studio をインストールする方法を学びましょう。","breadcrumbs":[{"label":"新着"},{"label":"Unsloth Studio の紹介","emoji":"1f9a5"}]},{"id":"1efc27ff794b527bf40ca1399ab10a6c28cc4b61","title":"Unsloth データレシピ","pathname":"/docs/jp/xin-zhe/studio/data-recipe","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"hat-chef","description":"Unsloth Studio のデータレシピを使ってデータセットを作成・構築・編集する方法を学びましょう。","breadcrumbs":[{"label":"新着"},{"label":"Unsloth Studio の紹介","emoji":"1f9a5"}]},{"id":"a58a8ab897451539e1493312c6a640b4d5ee40b7","title":"Unsloth Studio でモデルを書き出す","pathname":"/docs/jp/xin-zhe/studio/export","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"box-isometric","description":"safetensors または LoRA モデルファイルを GGUF などの形式に書き出す方法を学びましょう。","breadcrumbs":[{"label":"新着"},{"label":"Unsloth Studio の紹介","emoji":"1f9a5"}]},{"id":"8a5c4db36cff597fc13ab7546333ece742a1f076","title":"Unsloth の更新情報","pathname":"/docs/jp/xin-zhe/changelog","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"sparkles","description":"最新リリース、改善、修正に関する Unsloth の変更履歴。","breadcrumbs":[{"label":"新着"}]},{"id":"693bc7a2f22dcaf0c6bc0818f2076196fe331fa7","title":"Gemma 4 - ローカルで実行する方法","pathname":"/docs/jp/moderu/gemma-4","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"2728","description":"E2B、E4B、26B A4B、31B を含む Google の新しい Gemma 4 
モデルをローカルで実行しましょう。","breadcrumbs":[{"label":"モデル"}]},{"id":"4a6e7bbec569d341f876db55593564610de4d0a8","title":"Gemma 4 ファインチューニングガイド","pathname":"/docs/jp/moderu/gemma-4/train","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"flask-gear","description":"Google の Gemma 4 を Unsloth で学習しましょう。","breadcrumbs":[{"label":"モデル"},{"label":"Gemma 4 - ローカルで実行する方法","emoji":"2728"}]},{"id":"0af04e20683a2825742edd360e0a15913f42c5a8","title":"Qwen3.5 - ローカルで実行する方法","pathname":"/docs/jp/moderu/qwen3.5","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f49c","description":"Medium の Qwen3.5-35B-A3B、27B、122B-A10B、Small の Qwen3.5-0.8B、2B、4B、9B、397B-A17B を含む新しい Qwen3.5 LLM を自分のローカルデバイスで実行しましょう！","breadcrumbs":[{"label":"モデル"}]},{"id":"a38c8e31b4301f0ec111df120e156fefb092bbd8","title":"Qwen3.5 ファインチューニングガイド","pathname":"/docs/jp/moderu/qwen3.5/fine-tune","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"flask-gear","description":"Unsloth を使って Qwen3.5 LLM をファインチューニングする方法を学びましょう。","breadcrumbs":[{"label":"モデル"},{"label":"Qwen3.5 - ローカルで実行する方法","emoji":"1f49c"}]},{"id":"bb7af419ae482d9b8efadbd19e33855d14ce5ee9","title":"Qwen3.5 GGUF ベンチマーク","pathname":"/docs/jp/moderu/qwen3.5/gguf-benchmarks","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"chart-fft","description":"Unsloth の Dynamic GGUF の性能と、perplexity、KL divergence、MXFP4 の分析をご覧ください。","breadcrumbs":[{"label":"モデル"},{"label":"Qwen3.5 - ローカルで実行する方法","emoji":"1f49c"}]},{"id":"bbfe7c6edcb58685ec7cf51092f2663c6c435010","title":"GLM-5.1 - ローカルで実行する方法","pathname":"/docs/jp/moderu/glm-5.1","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"z","description":"Z.ai の新しい GLM-5.1 モデルを自分のローカルデバイスで実行しましょう！","breadcrumbs":[{"label":"モデル"}]},{"id":"30aa264fb1374e73acfe9203ecfdbd4556a9335b","title":"MiniMax-M2.7 - ローカルで実行する方法","pathname":"/docs/jp/moderu/minimax-m27","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"waveform","description":"MiniMax-M2.7 LLM を自分のデバイスでローカル実行しましょう！","breadcrumbs":[{"label":"モデル"}]},{"id":"6b8d31fd5301efc60f6ff33d32a66c700ccee8ba","title":"NVIDIA Nemotron 3 Nano - 実行方法ガイド","pathname":"/docs/jp/moderu/nemotron-3","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f9e9","description":"NVIDIA Nemotron 3 Nano を自分のデバイスでローカル実行・ファインチューニングしましょう！","breadcrumbs":[{"label":"モデル"}]},{"id":"02308e9ea4a5db5c78449b60faccc937a081527a","title":"NVIDIA Nemotron-3-Super: 実行方法ガイド","pathname":"/docs/jp/moderu/nemotron-3/nemotron-3-super","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f9e9","description":"NVIDIA Nemotron-3-Super-120B-A12B を自分のデバイスでローカル実行・ファインチューニングしましょう！","breadcrumbs":[{"label":"モデル"},{"label":"NVIDIA Nemotron 3 Nano - 実行方法ガイド","emoji":"1f9e9"}]},{"id":"983a60d2a135fd088443d0cba9213375b211e188","title":"Qwen3-Coder-Next: ローカルで実行する方法","pathname":"/docs/jp/moderu/qwen3-coder-next","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f320","description":"Qwen3-Coder-Next を自分のデバイスでローカル実行するためのガイド！","breadcrumbs":[{"label":"モデル"}]},{"id":"68cc5d489fe86eefb1bb9cd2e351bcdb90866b49","title":"GLM-4.7-Flash: ローカルで実行する方法","pathname":"/docs/jp/moderu/glm-4.7-flash","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"z","description":"GLM-4.7-Flash を自分のデバイスでローカル実行・ファインチューニングしましょう！","breadcrumbs":[{"label":"モデル"}]},{"id":"17c4f4f0107699d7b64bfeb4b34d427df32a68f9","title":"Kimi K2.5: ローカル実行ガイド","pathname":"/docs/jp/moderu/kimi-k2.5","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f95d","description":"Kimi-K2.5 を自分のローカルデバイスで実行するためのガイド！","breadcrumbs":[{"label":"モデル"}]},{"id":"9b0e240cf2bdb4a30b27c7da0e841fea208c5864","title":"gpt-oss: 
実行方法ガイド","pathname":"/docs/jp/moderu/gpt-oss-how-to-run-and-fine-tune","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"openai","description":"OpenAI の新しいオープンソースモデルを実行・ファインチューニングしましょう！","breadcrumbs":[{"label":"モデル"}]},{"id":"b3407ceceb277abc9b29c0763a3985c7d1517595","title":"gpt-oss 強化学習","pathname":"/docs/jp/moderu/gpt-oss-how-to-run-and-fine-tune/gpt-oss-reinforcement-learning","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"openai","description":"","breadcrumbs":[{"label":"モデル"},{"label":"gpt-oss: 実行方法ガイド","icon":"openai"}]},{"id":"9b24ea31e7a9315c3db49288edcb8c4928a49bf2","title":"チュートリアル：RL で gpt-oss を学習する方法","pathname":"/docs/jp/moderu/gpt-oss-how-to-run-and-fine-tune/gpt-oss-reinforcement-learning/tutorial-how-to-train-gpt-oss-with-rl","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"book-open-reader","description":"GRPO を使って OpenAI gpt-oss を学習し、ローカルまたは Colab 上で 2048 に自律的に勝つ方法を学びましょう。","breadcrumbs":[{"label":"モデル"},{"label":"gpt-oss: 実行方法ガイド","icon":"openai"},{"label":"gpt-oss 強化学習","icon":"openai"}]},{"id":"9dc29758d13fc58c1ce64028cb44358ca432b625","title":"チュートリアル：gpt-oss のファインチューニング方法","pathname":"/docs/jp/moderu/gpt-oss-how-to-run-and-fine-tune/tutorial-how-to-fine-tune-gpt-oss","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"openai","description":"Unsloth を使って OpenAI gpt-oss をローカルで学習する方法をステップバイステップで学びましょう。","breadcrumbs":[{"label":"モデル"},{"label":"gpt-oss: 実行方法ガイド","icon":"openai"}]},{"id":"1d6b2dc04a79e2ada0b9f78168d98949b97782ad","title":"長文コンテキストの gpt-oss 学習","pathname":"/docs/jp/moderu/gpt-oss-how-to-run-and-fine-tune/long-context-gpt-oss-training","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"openai","description":"","breadcrumbs":[{"label":"モデル"},{"label":"gpt-oss: 実行方法ガイド","icon":"openai"}]},{"id":"ea13ebdc099377f5b37a2e0895aeb9ede74953ed","title":"大規模言語モデル（LLM）チュートリアル","pathname":"/docs/jp/moderu/tutorials","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f680","description":"最新の LLM を発見し、Unsloth を使って最適な性能を得るためにモデルをローカルで実行・ファインチューニングする方法を学びましょう。","breadcrumbs":[{"label":"モデル"}]},{"id":"5d12825037ce6a6f7dbf0360c12a373f136dfaf1","title":"GLM-5: ローカルで実行する方法ガイド","pathname":"/docs/jp/moderu/tutorials/glm-5","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"z","description":"Z.ai の新しい GLM-5 モデルを自分のローカルデバイスで実行しましょう！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"def6c4e623d8ca4355a9f45aa124675d9b1bc37e","title":"Qwen3 - 実行とファインチューニングの方法","pathname":"/docs/jp/moderu/tutorials/qwen3-how-to-run-and-fine-tune","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f320","description":"Unsloth と独自の Dynamic 2.0 quants を使って、Qwen3 をローカルで実行・ファインチューニングする方法を学びましょう","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"41930a9c7687512da264c245376cdad995638965","title":"Qwen3-VL: 実行方法ガイド","pathname":"/docs/jp/moderu/tutorials/qwen3-how-to-run-and-fine-tune/qwen3-vl-how-to-run-and-fine-tune","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f320","description":"Unsloth を使って Qwen3-VL をローカルでファインチューニングし、実行する方法を学びましょう。","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"},{"label":"Qwen3 - 実行とファインチューニングの方法","emoji":"1f320"}]},{"id":"fa5bc8c4a776b4aa07f35a8be3714e534db69c0d","title":"Qwen3-2507: ローカル実行ガイド","pathname":"/docs/jp/moderu/tutorials/qwen3-how-to-run-and-fine-tune/qwen3-2507","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f320","description":"Qwen3-30B-A3B-2507 と 235B-A22B の Thinking 版と Instruct 
版を自分のデバイスでローカル実行しましょう！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"},{"label":"Qwen3 - 実行とファインチューニングの方法","emoji":"1f320"}]},{"id":"051e934a3c33af7cc4b8bd92b687148f8f65e39e","title":"MiniMax-M2.5: 実行方法ガイド","pathname":"/docs/jp/moderu/tutorials/minimax-m25","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"waveform","description":"MiniMax-M2.5 を自分のデバイスでローカル実行しましょう！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"14947b7a6d6c50557f1e971fcfb24aa4e19eeb01","title":"Qwen3-Coder: ローカルで実行する方法","pathname":"/docs/jp/moderu/tutorials/qwen3-coder-how-to-run-locally","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f320","description":"Unsloth Dynamic quants を使って Qwen3-Coder-30B-A3B-Instruct と 480B-A35B をローカルで実行しましょう。","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"6248d03dd6d5a8cecb057bb4c904aa6596097dcb","title":"Gemma 3 - 実行方法ガイド","pathname":"/docs/jp/moderu/tutorials/gemma-3-how-to-run-and-fine-tune","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"google","description":"llama.cpp、Ollama、Open WebUI で GGUF を使って Gemma 3 を効果的に実行する方法と、Unsloth でファインチューニングする方法を学びましょう！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"51b993bab9cf405fd074014ca3dba97ff1a1ac56","title":"Gemma 3n: 実行とファインチューニングの方法","pathname":"/docs/jp/moderu/tutorials/gemma-3-how-to-run-and-fine-tune/gemma-3n-how-to-run-and-fine-tune","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"google","description":"Google の新しい Gemma 3n を Dynamic GGUF で llama.cpp、Ollama、Open WebUI 上でローカル実行し、Unsloth でファインチューニングしましょう！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"},{"label":"Gemma 3 - 実行方法ガイド","icon":"google"}]},{"id":"f7d04e17a76834c8a0c837ec4aee9d3d64c1c9db","title":"DeepSeek-OCR 2: 実行とファインチューニングの方法ガイド","pathname":"/docs/jp/moderu/tutorials/deepseek-ocr-2","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f433","description":"DeepSeek-OCR-2 をローカルで実行・ファインチューニングする方法のガイド。","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"d120eefd9dfbf755a96bf2cab5ea0bef3bb5e508","title":"GLM-4.7: ローカルで実行する方法ガイド","pathname":"/docs/jp/moderu/tutorials/glm-4.7","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"z","description":"Z.ai の GLM-4.7 モデルを自分のローカルデバイスで実行する方法のガイド！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"a3a6dc010c538ced0d9db88c169b13882b595c54","title":"ComfyUI で Qwen-Image-2512 をローカル実行する方法","pathname":"/docs/jp/moderu/tutorials/qwen-image-2512","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f49f","description":"ComfyUI を使って Qwen-Image-2512 を自分のローカルデバイスで実行するためのステップバイステップチュートリアル。","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"1b1608b9402b46fbfed79b5defc616e893115b63","title":"stable-diffusion.cpp で Qwen-Image-2512 を実行するチュートリアル","pathname":"/docs/jp/moderu/tutorials/qwen-image-2512/stable-diffusion.cpp","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f3a8","description":"stable-diffusion.cpp で Qwen-Image-2512 を使うためのチュートリアル。","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"},{"label":"ComfyUI で Qwen-Image-2512 をローカル実行する方法","emoji":"1f49f"}]},{"id":"6e250b10672043df657699c97a0ef9185dd3fcf8","title":"Devstral 2 - 実行方法ガイド","pathname":"/docs/jp/moderu/tutorials/devstral-2","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f4d9","description":"Mistral Devstral 2 モデル（123B-Instruct-2512 と 
Small-2-24B-Instruct-2512）のローカル実行ガイド。","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"d09407ea18ce3f2ae1895fad1fea42fea44a7084","title":"Ministral 3 - 実行方法ガイド","pathname":"/docs/jp/moderu/tutorials/ministral-3","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f431","description":"Mistral Ministral 3 モデルを、自分のデバイスでローカル実行またはファインチューニングするためのガイド","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"f6ca689c1f955e7e2dc3b6b42bebc940f63a0c1a","title":"DeepSeek-OCR: 実行とファインチューニングの方法","pathname":"/docs/jp/moderu/tutorials/deepseek-ocr-how-to-run-and-fine-tune","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f433","description":"DeepSeek-OCR をローカルで実行・ファインチューニングする方法のガイド。","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"d33b81014bee91e7c28cf5f645d19f717568e403","title":"Kimi K2 Thinking: ローカル実行ガイド","pathname":"/docs/jp/moderu/tutorials/kimi-k2-thinking-how-to-run-locally","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f319","description":"Kimi-K2-Thinking と Kimi-K2 を自分のローカルデバイスで実行するためのガイド！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"15da73ffae68627da854f843c0328406ea1ae99b","title":"GLM-4.6: ローカル実行ガイド","pathname":"/docs/jp/moderu/tutorials/glm-4.6-how-to-run-locally","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"z","description":"Z.ai の GLM-4.6 および GLM-4.6V-Flash モデルを自分のローカルデバイスで実行する方法のガイド！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"b2a04bf177dc4438c58a13a8bff973ff4b9d1cf8","title":"Qwen3-Next: ローカル実行ガイド","pathname":"/docs/jp/moderu/tutorials/qwen3-next","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f320","description":"Qwen3-Next-80B-A3B-Instruct と Thinking 版を自分のデバイスでローカル実行しましょう！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"c590d523e85962de32ad10f332254fb7ec5aa904","title":"FunctionGemma: 実行とファインチューニングの方法","pathname":"/docs/jp/moderu/tutorials/functiongemma","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"google","description":"FunctionGemma を自分のデバイスやスマートフォンでローカル実行・ファインチューニングする方法を学びましょう。","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"1076a52a5c3ebf51b7a9fff0968fcea31aee4df1","title":"DeepSeek-V3.1: ローカルで実行する方法","pathname":"/docs/jp/moderu/tutorials/deepseek-v3.1-how-to-run-locally","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f40b","description":"DeepSeek-V3.1 と Terminus を自分のローカルデバイスで実行する方法のガイド！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"25a2638b85cdb3aec85d94e5705fb8ee19a1daab","title":"DeepSeek-R1-0528: ローカルで実行する方法","pathname":"/docs/jp/moderu/tutorials/deepseek-r1-0528-how-to-run-locally","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f40b","description":"Qwen3 を含む DeepSeek-R1-0528 を自分のローカルデバイスで実行する方法のガイド！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"56706e6412f8aee419d6c7850e9d0a63951b6c8f","title":"Liquid LFM2.5: 実行とファインチューニングの方法","pathname":"/docs/jp/moderu/tutorials/lfm2.5","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f4a7","description":"LFM2.5 Instruct と Vision を自分のデバイスでローカル実行・ファインチューニングしましょう！","breadcrumbs":[{"label":"モデル"},{"label":"大規模言語モデル（LLM）チュートリアル","emoji":"1f680"}]},{"id":"8df1f0c90d5752fe77400c0eb7e1397df03d993c","title":"Magistral: 
How to Run and Fine-Tune","pathname":"/docs/jp/moderu/tutorials/magistral-how-to-run-and-fine-tune","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f4a5","description":"Introducing Magistral - Mistral's new reasoning model.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"ea8d2ac599fb9a6e666fafcc4696434ad323e8ef","title":"IBM Granite 4.0","pathname":"/docs/jp/moderu/tutorials/ibm-granite-4.0","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"cube","description":"How to run IBM Granite-4.0 on llama.cpp and Ollama using Unsloth GGUFs, and how to fine-tune it!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"49c5cefd40453c244bfe13e6bac549a8d476c3c8","title":"Llama 4: How to Run and Fine-Tune","pathname":"/docs/jp/moderu/tutorials/llama-4-how-to-run-and-fine-tune","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f999","description":"How to run Llama 4 locally using dynamic GGUFs that recover accuracy compared to standard quantization.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"4e9f2261069b3d110ae7851ab15866b1a06c1a2a","title":"Grok 2","pathname":"/docs/jp/moderu/tutorials/grok-2","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"square-x-twitter","description":"Run xAI's Grok 2 model locally!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"31e0b99ff41cc424049d2d4d44d650fb271d0392","title":"Devstral: How to Run and Fine-Tune","pathname":"/docs/jp/moderu/tutorials/devstral-how-to-run-and-fine-tune","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f4d9","description":"Run and fine-tune Mistral Devstral 1.1, including Small-2507 and 2505.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"b00c9977bf9de6379716fbcd59dde91df9f3e609","title":"How to Run Local LLMs with Docker: Step-by-Step Guide","pathname":"/docs/jp/moderu/tutorials/how-to-run-llms-with-docker","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"docker","description":"Learn how to run large language models (LLMs) on your local device using Docker and Unsloth.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"762541362c9823bb63ea9e0ca527832550a99694","title":"DeepSeek-V3-0324: How to Run Locally","pathname":"/docs/jp/moderu/tutorials/deepseek-v3-0324-how-to-run-locally","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f433","description":"How to run DeepSeek-V3-0324 locally using dynamic quants that recover accuracy","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"551e9305c991b8df9f92f34988ae3d56b2fd8357","title":"DeepSeek-R1: How to Run Locally","pathname":"/docs/jp/moderu/tutorials/deepseek-r1-how-to-run-locally","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f40b","description":"A guide on how to run the 1.58-bit Dynamic Quants for DeepSeek-R1 using llama.cpp.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"178cfb29f5efd456d045bfa2bb771ca13cb6b8ed","title":"DeepSeek-R1 Dynamic 1.58-bit","pathname":"/docs/jp/moderu/tutorials/deepseek-r1-how-to-run-locally/deepseek-r1-dynamic-1.58-bit","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f433","description":"See performance table comparisons of Unsloth's Dynamic GGUF Quants versus standard IMatrix Quants.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"},{"label":"DeepSeek-R1: How to Run Locally","emoji":"1f40b"}]}
,{"id":"9fceb033f7114c1fe5f72941ba83dc6ec92b8a1d","title":"Phi-4 Reasoning: How to Run and Fine-Tune","pathname":"/docs/jp/moderu/tutorials/phi-4-reasoning-how-to-run-and-fine-tune","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"windows","description":"Learn how to run and fine-tune Phi-4 reasoning models locally using Unsloth and our own Dynamic 2.0 quants","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"d22cc824eafc19900f391f15f9dc0e258f4479a3","title":"QwQ-32B: How to Run Effectively","pathname":"/docs/jp/moderu/tutorials/qwq-32b-how-to-run-effectively","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f320","description":"How to run QwQ-32B effectively with bug fixes, avoiding endless generations, plus using GGUFs.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"d5ae43f1915ceda3d304ad7c413cb4efbe3d1a3f","title":"Inference and Deployment","pathname":"/docs/jp/ji-ben/inference-and-deployment","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f5a5","description":"Learn how to save your fine-tuned model so you can run it in your favorite inference engine.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"9bfa988baa17c249340a58c332b8584f20d2537c","title":"Saving to GGUF","pathname":"/docs/jp/ji-ben/inference-and-deployment/saving-to-gguf","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"Save models in 16-bit for GGUF so you can use them with Ollama, Jan AI, Open WebUI, and more!","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"}]}
,{"id":"16a93a7cb5d8a8ae32bc84526191a967ae25818a","title":"Speculative Decoding","pathname":"/docs/jp/ji-ben/inference-and-deployment/saving-to-gguf/speculative-decoding","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"Do speculative decoding with llama-server, llama.cpp, vLLM, and more to make inference 2x faster","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"},{"label":"Saving to GGUF"}]}
,{"id":"0fde417d83989a8108b1d466ec2b53c46e9f4279","title":"vLLM Deployment and Inference Guide","pathname":"/docs/jp/ji-ben/inference-and-deployment/vllm-guide","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"A guide to saving and deploying LLMs to vLLM for serving LLMs in production","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"}]}
,{"id":"c81cf073f3b0e1fc8edcdbc81171084f13fcd40a","title":"vLLM Engine Arguments","pathname":"/docs/jp/ji-ben/inference-and-deployment/vllm-guide/vllm-engine-arguments","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"},{"label":"vLLM Deployment and Inference Guide"}]}
,{"id":"087d5c3a4a847726fc6a5174ab1fd71832977239","title":"LoRA Hot-Swapping Guide","pathname":"/docs/jp/ji-ben/inference-and-deployment/vllm-guide/lora-hot-swapping-guide","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"},{"label":"vLLM Deployment and Inference Guide"}]}
,{"id":"52955f68c3f0eb3d6d351166737e23bf6d88b360","title":"Saving to Ollama","pathname":"/docs/jp/ji-ben/inference-and-deployment/saving-to-ollama","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"}]}
,{"id":"6fb5e7690fd265f7cbc75a7c89a22f8e97491f6e","title":"Deploying Models to LM Studio","pathname":"/docs/jp/ji-ben/inference-and-deployment/lm-studio","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"Save models to GGUF so you can run and deploy them in LM Studio","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"}]}
,{"id":"b192549682c1e2b38258e6cf4ecfac5d454f5919","title":"How to Install the LM Studio CLI in a Linux Terminal","pathname":"/docs/jp/ji-ben/inference-and-deployment/lm-studio/how-to-install-lm-studio-cli-in-linux-terminal","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f47e","description":"A guide to installing the LM Studio CLI in a terminal without a UI.","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"},{"label":"Deploying Models to LM Studio"}]}
,{"id":"1f909a31e96a93e6296ea02f561cefd3bfad5cfb","title":"SGLang Deployment and Inference Guide","pathname":"/docs/jp/ji-ben/inference-and-deployment/sglang-guide","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"A guide to saving and deploying LLMs to SGLang for serving LLMs in production","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"}]}
,{"id":"bf15adf206552e2848b9122be6f17e403b1fc9c0","title":"llama-server & OpenAI Endpoint Deployment Guide","pathname":"/docs/jp/ji-ben/inference-and-deployment/llama-server-and-openai-endpoint","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"Deployment via llama-server with an OpenAI-compatible endpoint","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"}]}
,{"id":"6b439ad8eedcbf85fcbca5c3aa910e0d71794507","title":"How to Run and Deploy LLMs on Your iOS or Android Phone","pathname":"/docs/jp/ji-ben/inference-and-deployment/deploy-llms-phone","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f4f1","description":"A tutorial on fine-tuning your own LLM and deploying it to Android or iPhone using ExecuTorch.","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"}]}
,{"id":"d7914a52b10ea48e9158d90873a1008f60c5f306","title":"Troubleshooting Inference","pathname":"/docs/jp/ji-ben/inference-and-deployment/troubleshooting-inference","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"If you're running into issues when running or saving your model.","breadcrumbs":[{"label":"Basics"},{"label":"Inference and Deployment","emoji":"1f5a5"}]}
,{"id":"ee610b22aa43d29d8415fd27eb7de15ba88f7385","title":"How to Run Local LLMs with Claude Code","pathname":"/docs/jp/ji-ben/claude-code","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"claude","description":"A guide to using open models with Claude Code on your local device.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"c87896ff7159620f4c01bb39fe9df1fd1a55274e","title":"How to Run Local LLMs with OpenAI Codex","pathname":"/docs/jp/ji-ben/codex","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"openai","description":"Use open models locally with OpenAI Codex on your device.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"2cb214340788afbb22dd3dbc72f5295f646274e5","title":"Multi-GPU Fine-tuning with Unsloth","pathname":"/docs/jp/ji-ben/multi-gpu-training-with-unsloth","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"rectangle-history","description":"Learn how to fine-tune LLMs with multiple GPUs and parallelism using Unsloth.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"76cc4270e39e2817549ae7d884b3b38c0e064b48","title":"Multi-GPU Fine-tuning with Distributed Data Parallel (DDP)","pathname":"/docs/jp/ji-ben/multi-gpu-training-with-unsloth/ddp","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"Learn how to train on multiple GPUs with Distributed Data Parallel (DDP) using the Unsloth CLI!","breadcrumbs":[{"label":"Basics"},{"label":"Multi-GPU Fine-tuning with Unsloth","icon":"rectangle-history"}]}
,{"id":"a637d2257857c4677688add9f50f851ec564d0b8","title":"Fine-tuning Embedding Models with Unsloth Guide","pathname":"/docs/jp/ji-ben/embedding-finetuning","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f50e","description":"Learn how to easily fine-tune embedding models with Unsloth.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"084af8821a599e81fc9cc18b65615343a785fc5d","title":"Fine-tune MoE Models 12x Faster with Unsloth","pathname":"/docs/jp/ji-ben/faster-moe","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f48e","description":"Train MoE LLMs locally using the Unsloth guide.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"abcb971ab51a2b38849d11fcae86c3a06fbca382","title":"Text-to-Speech (TTS) Fine-tuning Guide","pathname":"/docs/jp/ji-ben/text-to-speech-tts-fine-tuning","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f50a","description":"Learn how to fine-tune TTS and STT voice models with Unsloth.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"021c19d75a86d940ef9b83b7b41bee381657345b","title":"Unsloth Dynamic 2.0 GGUF","pathname":"/docs/jp/ji-ben/unsloth-dynamic-2.0-ggufs","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f9a5","description":"A big new upgrade for Dynamic Quants!","breadcrumbs":[{"label":"Basics"}]}
,{"id":"bef806b79aad8a62f0bba5a87c182261ad394be4","title":"Unsloth Dynamic GGUFs on Aider Polyglot","pathname":"/docs/jp/ji-ben/unsloth-dynamic-2.0-ggufs/unsloth-dynamic-ggufs-on-aider-polyglot","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f9a5","description":"Performance of Unsloth Dynamic GGUFs on the Aider Polyglot benchmark","breadcrumbs":[{"label":"Basics"},{"label":"Unsloth Dynamic 2.0 GGUF","emoji":"1f9a5"}]}
,{"id":"34b68a962f8c73059943abdee91772400b1d1ecb","title":"Tool Calling Guide for Local LLMs","pathname":"/docs/jp/ji-ben/tool-calling-guide-for-local-llms","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"screwdriver-wrench","description":"","breadcrumbs":[{"label":"Basics"}]}
,{"id":"4c4214eb859e3837d4cc099505565e44dba70ac2","title":"Vision Fine-tuning","pathname":"/docs/jp/ji-ben/vision-fine-tuning","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f441","description":"Learn how to fine-tune vision/multimodal LLMs with Unsloth","breadcrumbs":[{"label":"Basics"}]}
,{"id":"f2407144d3c7f3017e630580e779721eb687c843","title":"Troubleshooting and FAQs","pathname":"/docs/jp/ji-ben/troubleshooting-and-faqs","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"26a0","description":"Tips for solving issues, plus frequently asked questions.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"87c1a7628195f3ad3d84a0f7d73d266d9af883f1","title":"Hugging Face Hub, XET Debugging","pathname":"/docs/jp/ji-ben/troubleshooting-and-faqs/hugging-face-hub-xet-debugging","siteSpaceId":"sitesp_8AL84","lang":"ja","description":"Debugging and troubleshooting stalled, stuck, and slow downloads","breadcrumbs":[{"label":"Basics"},{"label":"Troubleshooting and FAQs","emoji":"26a0"}]}
,{"id":"775aacfac2bd0502888931ca80934e75342ef4f6","title":"Chat Templates","pathname":"/docs/jp/ji-ben/chat-templates","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f4ac","description":"Learn the basics and customization of chat templates, including conversational, ChatML, ShareGPT, Alpaca formats, and more!","breadcrumbs":[{"label":"Basics"}]}
,{"id":"05baecaeb43111cac111ccadefa0a1e6b0c7cc6e","title":"Unsloth Environment Flags","pathname":"/docs/jp/ji-ben/unsloth-environment-flags","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f6e0","description":"Advanced flags that may be useful if your fine-tuning is broken or you want to turn things off.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"ad4913741729d4d5c70b84903c80ad8fa330db35","title":"Continued Pretraining","pathname":"/docs/jp/ji-ben/continued-pretraining","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"267b","description":"Also known as continued fine-tuning. Unsloth lets you continually pretrain so a model can learn a new language.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"a6c23d77a33eed11aa325cf186374365fbac069c","title":"Fine-tuning from the Last Checkpoint","pathname":"/docs/jp/ji-ben/finetuning-from-last-checkpoint","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f3c1","description":"Checkpointing lets you save your fine-tuning progress so you can pause and then resume.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"1a67dc318c6e7c970b0bed83a61a7f043ca05676","title":"Unsloth Benchmarks","pathname":"/docs/jp/ji-ben/unsloth-benchmarks","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"1f4ca","description":"Unsloth's recorded benchmarks on NVIDIA GPUs.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"b1418ee8202cf9a87ebe6dc21e8d3a6900e24405","title":"3x Faster LLM Training with Unsloth Kernels + Packing","pathname":"/docs/jp/burogu/3x-faster-training-packing","siteSpaceId":"sitesp_8AL84","lang":"ja","emoji":"26a1","description":"Learn how Unsloth boosts training throughput and eliminates wasted padding during fine-tuning.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"243043ca15b2f3f3a09863fbaadc89c1b0b6720b","title":"500K Context Length Fine-tuning","pathname":"/docs/jp/burogu/500k-context-length-fine-tuning","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"ruler-combined","description":"Learn how to enable fine-tuning with a context window of over 500K tokens with Unsloth.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"eb4e6f009b30d4f300c074f6d5886aac9d47065a","title":"Quantization-Aware Training (QAT)","pathname":"/docs/jp/burogu/quantization-aware-training-qat","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"down-left-and-up-right-to-center","description":"Quantize models to 4-bit with Unsloth and PyTorch to recover accuracy.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"23f137d60bd35d4f00b21fe55329fe6e6909b293","title":"Fine-tuning LLMs on NVIDIA DGX Station with Unsloth","pathname":"/docs/jp/burogu/dgx-station","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"microchip-ai","description":"An NVIDIA DGX Station tutorial on how to fine-tune using Unsloth's notebooks.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"ab3540db05c75f06746429e2d7defafc52bf32e1","title":"How to Fine-tune LLMs with Unsloth and Docker","pathname":"/docs/jp/burogu/how-to-fine-tune-llms-with-unsloth-and-docker","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"docker","description":"Learn how to fine-tune LLMs or do reinforcement learning (RL) using Unsloth's Docker image.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"1df4a60634b989f58e2dabb3b331e7cca93c8e5b","title":"Fine-tuning LLMs with NVIDIA DGX Spark and Unsloth","pathname":"/docs/jp/burogu/fine-tuning-llms-with-nvidia-dgx-spark-and-unsloth","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"sparkle","description":"A tutorial on how to fine-tune and do reinforcement learning (RL) with OpenAI gpt-oss on NVIDIA DGX Spark.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"2356c4886c1153e7e0c3288e404d25e96bc02144","title":"Fine-tuning LLMs with Blackwell, RTX 50 Series and Unsloth","pathname":"/docs/jp/burogu/fine-tuning-llms-with-blackwell-rtx-50-series-and-unsloth","siteSpaceId":"sitesp_8AL84","lang":"ja","icon":"microchip","description":"Learn how to fine-tune LLMs on NVIDIA's Blackwell, RTX 50 Series, and B200 GPUs with our step-by-step guide.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"9eee90080b64494b2b4f2d69662dc2dbef8c74c8","title":"Unsloth Documentation","pathname":"/docs/de","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f9a5","description":"Unsloth is an open-source framework for running and training models.","breadcrumbs":[{"label":"Get Started"}]}
,{"id":"2e5c28b5681fb8721dd9abf2ead7595b01aea335","title":"Fine-tuning for Beginners","pathname":"/docs/de/loslegen/fine-tuning-for-beginners","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"2b50","description":"","breadcrumbs":[{"label":"Get Started"}]}
,{"id":"53df1382354c7ace1c37120c4af6ed50511854dd","title":"Unsloth Requirements","pathname":"/docs/de/loslegen/fine-tuning-for-beginners/unsloth-requirements","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f6e0","description":"Here are Unsloth's requirements, including system and GPU VRAM requirements.","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning for Beginners","emoji":"2b50"}]}
,{"id":"247e762fc931f96d3998ecfa1a4402cf524e9e97","title":"FAQ + Is Fine-tuning Right for Me?","pathname":"/docs/de/loslegen/fine-tuning-for-beginners/faq-+-is-fine-tuning-right-for-me","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f914","description":"If you're not sure whether fine-tuning is right for you, check here! Learn about fine-tuning misconceptions, how it compares to RAG, and more:","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning for Beginners","emoji":"2b50"}]}
,{"id":"c96e3433e67c1b26226b1118128145a6ff8a990a","title":"Unsloth Notebooks","pathname":"/docs/de/loslegen/unsloth-notebooks","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f4d2","description":"Fine-tuning notebooks: Explore the Unsloth catalog.","breadcrumbs":[{"label":"Get Started"}]}
,{"id":"0d4e311c01cc0577c64d15b8a41f22ba29eab7fd","title":"Unsloth Model Catalog","pathname":"/docs/de/loslegen/unsloth-model-catalog","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f52e","description":"","breadcrumbs":[{"label":"Get Started"}]}
,{"id":"fd362a47f0e8cc55190dad52421d6fe66df3a5cb","title":"Unsloth Installation","pathname":"/docs/de/loslegen/install","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f4e5","description":"Learn to install Unsloth locally or online.","breadcrumbs":[{"label":"Get Started"}]}
,{"id":"f2e9c46bf9628a73605243a4f5d2859cf6831957","title":"Install Unsloth via pip and uv","pathname":"/docs/de/loslegen/install/pip-install","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"desktop-arrow-down","description":"To install Unsloth locally via Pip, follow the steps below:","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]}
,{"id":"c0a9741cebd0f6061894641559ed770f1819d0ae","title":"Install Unsloth on MacOS","pathname":"/docs/de/loslegen/install/mac","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"apple","description":"","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]}
,{"id":"44f4bf2eea97ad3270d9e11ab9d33f2b9479ae22","title":"How to Fine-Tune LLMs on Windows with Unsloth (Step-by-Step Guide)","pathname":"/docs/de/loslegen/install/windows-installation","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"windows","description":"See how to install Unsloth on Windows to start fine-tuning LLMs locally.","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]}
,{"id":"4ce2c4dc8c5cacd60164238ecd513f358a061597","title":"Install Unsloth via Docker","pathname":"/docs/de/loslegen/install/docker","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"docker","description":"Install Unsloth using our official Docker container","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]}
,{"id":"5216ff7aa081a61171fc75301bb42be87b4a9c8a","title":"Updating Unsloth","pathname":"/docs/de/loslegen/install/updating","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"arrow-rotate-right","description":"To update Unsloth or use an old version of Unsloth, follow the steps below:","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]}
,{"id":"6cfc4e7277de7763f13423fb2a9191134f5aba98","title":"Fine-tuning LLMs on AMD GPUs with Unsloth Guide","pathname":"/docs/de/loslegen/install/amd","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"square-up-right","description":"Learn how to fine-tune large language models (LLMs) on AMD GPUs with Unsloth.","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]}
,{"id":"ae33a3cb262bdf9cbd68b7b4495db5c0a6bb5342","title":"Fine-tuning LLMs on Intel GPUs with Unsloth","pathname":"/docs/de/loslegen/install/intel","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"info","description":"Learn how to train and fine-tune large language models on Intel GPUs.","breadcrumbs":[{"label":"Get Started"},{"label":"Unsloth Installation","emoji":"1f4e5"}]}
,{"id":"44aed34263310d67280841ab6b72ea1e5648761f","title":"Fine-tuning LLMs Guide","pathname":"/docs/de/loslegen/fine-tuning-llms-guide","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f9ec","description":"Learn all the basics and best practices of fine-tuning. Beginner-friendly.","breadcrumbs":[{"label":"Get Started"}]}
,{"id":"079b6ad30e8c25b4a6caae0d2dc5378a166d54c9","title":"Datasets Guide","pathname":"/docs/de/loslegen/fine-tuning-llms-guide/datasets-guide","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f4c8","description":"Learn how to create and prepare a dataset for fine-tuning.","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning LLMs Guide","emoji":"1f9ec"}]}
,{"id":"ce825bbf83c91ef73a7fc71d696bd3d1ecc78590","title":"LoRA Fine-tuning Hyperparameters Guide","pathname":"/docs/de/loslegen/fine-tuning-llms-guide/lora-hyperparameters-guide","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f9e0","description":"Learn step by step the best LLM fine-tuning settings - LoRA rank & alpha, epochs, batch size + gradient accumulation, QLoRA vs. LoRA, target modules, and more.","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning LLMs Guide","emoji":"1f9ec"}]}
,{"id":"e74e48e68b0d725224ca82d9e12183f70ec3dd62","title":"What Model Should I Use for Fine-tuning?","pathname":"/docs/de/loslegen/fine-tuning-llms-guide/what-model-should-i-use","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"2753","description":"","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning LLMs Guide","emoji":"1f9ec"}]}
,{"id":"3c1de2711fe52945f90b888765c1914adeb1e704","title":"Tutorial: How to Fine-tune Llama-3 and Use It in Ollama","pathname":"/docs/de/loslegen/fine-tuning-llms-guide/tutorial-how-to-finetune-llama-3-and-use-in-ollama","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f999","description":"A beginner's guide to creating a personalized assistant (like ChatGPT) that runs locally in Ollama","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning LLMs Guide","emoji":"1f9ec"}]}
,{"id":"5b0f8932321e8767d629a7ca0f24c3e9add748f5","title":"Reinforcement Learning (RL) Guide","pathname":"/docs/de/loslegen/reinforcement-learning-rl-guide","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f4a1","description":"Learn all about reinforcement learning (RL) and how to train your own DeepSeek-R1 reasoning model with Unsloth using GRPO. A complete guide from beginner to advanced.","breadcrumbs":[{"label":"Get Started"}]}
,{"id":"c040fd1976b219674df870b5332c0ed4f1ad488c","title":"Reinforcement Learning GRPO with 7x Longer Context","pathname":"/docs/de/loslegen/reinforcement-learning-rl-guide/grpo-long-context","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f300","description":"Learn how Unsloth enables extremely long-context fine-tuning in RL.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]}
,{"id":"b6b5ca32e463fa09c0f26919da403e22381129bc","title":"Vision Reinforcement Learning (VLM RL)","pathname":"/docs/de/loslegen/reinforcement-learning-rl-guide/vision-reinforcement-learning-vlm-rl","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f441-1f5e8","description":"Train vision/multimodal models with GRPO and RL using Unsloth!","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]}
,{"id":"e8e92128cfff3357409da6d50baeeb3180e259ec","title":"FP8 Reinforcement Learning","pathname":"/docs/de/loslegen/reinforcement-learning-rl-guide/fp8-reinforcement-learning","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f3b1","description":"Train reinforcement learning (RL) and GRPO in FP8 precision with Unsloth.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]}
,{"id":"2b7904fd5422b954809fd2460ec92281ba56a6fa","title":"Tutorial: Train Your Own Reasoning Model with GRPO","pathname":"/docs/de/loslegen/reinforcement-learning-rl-guide/tutorial-train-your-own-reasoning-model-with-grpo","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"26a1","description":"A beginner's guide to turning a model like Llama 3.1 (8B) into a reasoning model using Unsloth and GRPO.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]}
,{"id":"5b769b691c2ac2681f88fb5053c526306a2c1629","title":"Advanced Reinforcement Learning Documentation","pathname":"/docs/de/loslegen/reinforcement-learning-rl-guide/advanced-rl-documentation","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f9e9","description":"Advanced documentation settings when using Unsloth with GRPO.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]}
,{"id":"e2d862d499f3c8adf52168e9caf20c41478a70da","title":"GSPO Reinforcement Learning","pathname":"/docs/de/loslegen/reinforcement-learning-rl-guide/advanced-rl-documentation/gspo-reinforcement-learning","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"lightbulb-on","description":"Train RL with GSPO (Group Sequence Policy Optimization) in Unsloth.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"},{"label":"Advanced Reinforcement Learning Documentation","emoji":"1f9e9"}]}
,{"id":"547730be6638817f57e57c3a4e83ced021c3bd5b","title":"RL Reward Hacking","pathname":"/docs/de/loslegen/reinforcement-learning-rl-guide/advanced-rl-documentation/rl-reward-hacking","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"treasure-chest","description":"Learn what reward hacking in reinforcement learning is and how to counter it.","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"},{"label":"Advanced Reinforcement Learning Documentation","emoji":"1f9e9"}]}
,{"id":"d1438bae8d51e95e98d98855d18199f86e47ffa1","title":"FP16 vs BF16 for RL","pathname":"/docs/de/loslegen/reinforcement-learning-rl-guide/advanced-rl-documentation/fp16-vs-bf16-for-rl","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"2049","description":"Defeating the Training-Inference Mismatch via FP16 https://arxiv.org/pdf/2510.26788 shows that using float16 is better than bfloat16","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"},{"label":"Advanced Reinforcement Learning Documentation","emoji":"1f9e9"}]}
,{"id":"8ad88c458cbd2d3022808d6ae9392f3d311d2c81","title":"Memory-Efficient RL","pathname":"/docs/de/loslegen/reinforcement-learning-rl-guide/memory-efficient-rl","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"memory","description":"","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]}
,{"id":"43fcd2dba1e05f87b102b4cfb7f0994eeff8a1db","title":"Preference Optimization Training Guide - DPO, ORPO & KTO","pathname":"/docs/de/loslegen/reinforcement-learning-rl-guide/preference-dpo-orpo-and-kto","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f3c6","description":"Learn about preference alignment fine-tuning with DPO, GRPO, ORPO, or KTO via Unsloth; follow the steps below:","breadcrumbs":[{"label":"Get Started"},{"label":"Reinforcement Learning (RL) Guide","emoji":"1f4a1"}]}
,{"id":"0eb7cfce0eb5651e720ac0944c9e36f7124bc8de","title":"Introducing Unsloth Studio","pathname":"/docs/de/neu/studio","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f9a5","description":"Run and train AI models locally with Unsloth Studio.","breadcrumbs":[{"label":"New"}]}
,{"id":"bf6b637872d3a9f534daa54866ec7d33d9b369a2","title":"Getting Started with Unsloth Studio","pathname":"/docs/de/neu/studio/start","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"bolt","description":"A guide to getting started with the fine-tuning studio, data recipes, model output export, and chat.","breadcrumbs":[{"label":"New"},{"label":"Introducing Unsloth Studio","emoji":"1f9a5"}]}
,{"id":"73168493b14a89fa27e95b776ade6bd93679a3e5","title":"How to Run Models with Unsloth Studio","pathname":"/docs/de/neu/studio/chat","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"comment-dots","description":"Run AI models, LLMs, and GGUFs locally with Unsloth Studio.","breadcrumbs":[{"label":"New"},{"label":"Introducing Unsloth Studio","emoji":"1f9a5"}]}
,{"id":"fd472c5a40e73b8e54cee17244010f121bf38286","title":"Unsloth Studio Installation","pathname":"/docs/de/neu/studio/install","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"arrow-down-to-square","description":"Learn how to install Unsloth Studio on your local device.","breadcrumbs":[{"label":"New"},{"label":"Introducing Unsloth Studio","emoji":"1f9a5"}]}
,{"id":"dd9f9c69fff351709e4886e820d7d6facf3ec2b3","title":"Unsloth Data Recipes","pathname":"/docs/de/neu/studio/data-recipe","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"hat-chef","description":"Learn how to create, build, and edit datasets with Unsloth Studio's Data Recipes.","breadcrumbs":[{"label":"New"},{"label":"Introducing Unsloth Studio","emoji":"1f9a5"}]}
,{"id":"13c9d0063a9732a68734b74792f3e30153873bf4","title":"Exporting Models with Unsloth Studio","pathname":"/docs/de/neu/studio/export","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"box-isometric","description":"Learn how to export your safetensor or LoRA model files to GGUF or other formats.","breadcrumbs":[{"label":"New"},{"label":"Introducing Unsloth Studio","emoji":"1f9a5"}]}
,{"id":"baf914c5df639b5b868baac27a080b70396d87c2","title":"Unsloth Updates","pathname":"/docs/de/neu/changelog","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"sparkles","description":"The Unsloth changelog for our latest releases, improvements, and bug fixes.","breadcrumbs":[{"label":"New"}]}
,{"id":"774ab7ebd11d30a8067d492668e9dd61a8b209fb","title":"Gemma 4 - How to Run Locally","pathname":"/docs/de/modelle/gemma-4","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"2728","description":"Run Google's new Gemma 4 models locally, including E2B, E4B, 26B A4B, and 31B.","breadcrumbs":[{"label":"Models"}]}
,{"id":"6b0583a8449166f2d11877ed16e0b6c150d61586","title":"Gemma 4 Fine-tuning Guide","pathname":"/docs/de/modelle/gemma-4/train","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"flask-gear","description":"Train Google's Gemma 4 with Unsloth.","breadcrumbs":[{"label":"Models"},{"label":"Gemma 4 - How to Run Locally","emoji":"2728"}]}
,{"id":"53f3f20fc7a21f1ca51eb7268267793cb5975b35","title":"Qwen3.5 - How to Run Locally","pathname":"/docs/de/modelle/qwen3.5","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f49c","description":"Run the new Qwen3.5 LLMs, including Medium: Qwen3.5-35B-A3B, 27B, 122B-A10B, Small: Qwen3.5-0.8B, 2B, 4B, 9B, and 397B-A17B on your local device!","breadcrumbs":[{"label":"Models"}]}
,{"id":"5e52a979e20c6eaea490f46fcf09c99fd97ae449","title":"Qwen3.5 Fine-tuning Guide","pathname":"/docs/de/modelle/qwen3.5/fine-tune","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"flask-gear","description":"Learn how to fine-tune Qwen3.5 LLMs with Unsloth.","breadcrumbs":[{"label":"Models"},{"label":"Qwen3.5 - How to Run Locally","emoji":"1f49c"}]}
,{"id":"fffcdfcc3dce216f018aa6cc7d0b3de63f9dd887","title":"Qwen3.5 GGUF Benchmarks","pathname":"/docs/de/modelle/qwen3.5/gguf-benchmarks","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"chart-fft","description":"See how Unsloth Dynamic GGUFs perform + analysis of perplexity, KL divergence & MXFP4.","breadcrumbs":[{"label":"Models"},{"label":"Qwen3.5 - How to Run Locally","emoji":"1f49c"}]}
,{"id":"3798c366d7b3cb260558eaaf89e704289df7e2fc","title":"GLM-5.1 - How to Run Locally","pathname":"/docs/de/modelle/glm-5.1","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"z","description":"Run Z.ai's new GLM-5.1 model on your own local device!","breadcrumbs":[{"label":"Models"}]}
,{"id":"f778dfc3ae4955798c6678a508ac2c8b21a949a5","title":"MiniMax-M2.7 - How to Run Locally","pathname":"/docs/de/modelle/minimax-m27","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"waveform","description":"Run the MiniMax-M2.7 LLM locally on your own device!","breadcrumbs":[{"label":"Models"}]}
,{"id":"5b44e9e3a6e7afb8361cf47dd0a061fbfa0ed3ef","title":"NVIDIA Nemotron 3 Nano - How to Run","pathname":"/docs/de/modelle/nemotron-3","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f9e9","description":"Run and fine-tune NVIDIA Nemotron 3 Nano locally on your device!","breadcrumbs":[{"label":"Models"}]}
,{"id":"50cfc15a8017db43ddce98eaa9d2e1cc6ce66253","title":"NVIDIA Nemotron-3-Super: How to Run","pathname":"/docs/de/modelle/nemotron-3/nemotron-3-super","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f9e9","description":"Run and fine-tune NVIDIA Nemotron-3-Super-120B-A12B locally on your device!","breadcrumbs":[{"label":"Models"},{"label":"NVIDIA Nemotron 3 Nano - How to Run","emoji":"1f9e9"}]}
,{"id":"e86ee4009a49d667ee108862ac81e2e550ce7ea1","title":"Qwen3-Coder-Next: How to Run Locally","pathname":"/docs/de/modelle/qwen3-coder-next","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f320","description":"A guide to running Qwen3-Coder-Next locally on your device!","breadcrumbs":[{"label":"Models"}]}
,{"id":"1e435114456bba34bc232a642cd33933aee06bbc","title":"GLM-4.7-Flash: How to Run Locally","pathname":"/docs/de/modelle/glm-4.7-flash","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"z","description":"Run and fine-tune GLM-4.7-Flash locally on your device!","breadcrumbs":[{"label":"Models"}]}
,{"id":"f26cd5b2ebe566940595d2276192dd0ced0b1b5e","title":"Kimi K2.5: How to Run Locally","pathname":"/docs/de/modelle/kimi-k2.5","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f95d","description":"A guide to running Kimi-K2.5 on your own local device!","breadcrumbs":[{"label":"Models"}]}
,{"id":"5dd7c08c9df999657586f6bfe054918b4545dce4","title":"gpt-oss: How to Run","pathname":"/docs/de/modelle/gpt-oss-how-to-run-and-fine-tune","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"openai","description":"Run and fine-tune OpenAI's new open-source models!","breadcrumbs":[{"label":"Models"}]}
,{"id":"669ff14a509803a03d3d4b82a985c9a7a3924119","title":"gpt-oss Reinforcement Learning","pathname":"/docs/de/modelle/gpt-oss-how-to-run-and-fine-tune/gpt-oss-reinforcement-learning","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"openai","description":"","breadcrumbs":[{"label":"Models"},{"label":"gpt-oss: How to Run","icon":"openai"}]}
,{"id":"81489abc58fa75cf0b8ac7a9b0be402ff5b0cebe","title":"Tutorial: How to Train gpt-oss with RL","pathname":"/docs/de/modelle/gpt-oss-how-to-run-and-fine-tune/gpt-oss-reinforcement-learning/tutorial-how-to-train-gpt-oss-with-rl","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"book-open-reader","description":"Learn to train OpenAI gpt-oss with GRPO to autonomously beat 2048, locally or on Colab.","breadcrumbs":[{"label":"Models"},{"label":"gpt-oss: How to Run","icon":"openai"},{"label":"gpt-oss Reinforcement Learning","icon":"openai"}]}
,{"id":"795c60e5aacc9878814ca680a7342a1f57e65aad","title":"Tutorial: How to Fine-tune gpt-oss","pathname":"/docs/de/modelle/gpt-oss-how-to-run-and-fine-tune/tutorial-how-to-fine-tune-gpt-oss","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"openai","description":"Learn step by step how to train OpenAI gpt-oss locally with Unsloth.","breadcrumbs":[{"label":"Models"},{"label":"gpt-oss: How to Run","icon":"openai"}]}
,{"id":"bbf59ada9c0a24292d09f167fffb4d80eda8e794","title":"Long-Context gpt-oss Training","pathname":"/docs/de/modelle/gpt-oss-how-to-run-and-fine-tune/long-context-gpt-oss-training","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"openai","description":"","breadcrumbs":[{"label":"Models"},{"label":"gpt-oss: How to Run","icon":"openai"}]}
,{"id":"9b29615ab338f1d1924174468718cfeab406f641","title":"Large Language Model (LLM) Tutorials","pathname":"/docs/de/modelle/tutorials","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f680","description":"Explore the latest LLMs and learn how to run models locally and fine-tune them with Unsloth for optimal performance.","breadcrumbs":[{"label":"Models"}]}
,{"id":"f9b4b78ffbc074c7f10ffb0ff9b54f66e629a834","title":"GLM-5: How to Run Locally","pathname":"/docs/de/modelle/tutorials/glm-5","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"z","description":"Run Z.ai's new GLM-5 model on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"577dc1f2e661d959da353a9d4b3748912873d42e","title":"Qwen3 - How to Run and Fine-Tune","pathname":"/docs/de/modelle/tutorials/qwen3-how-to-run-and-fine-tune","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f320","description":"Learn to run and fine-tune Qwen3 locally with Unsloth + our Dynamic 2.0 quants","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"e81b513117b7976d1bb2a959ddaaaf797b39b9e0","title":"Qwen3-VL: How to Run","pathname":"/docs/de/modelle/tutorials/qwen3-how-to-run-and-fine-tune/qwen3-vl-how-to-run-and-fine-tune","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f320","description":"Learn to run and fine-tune Qwen3-VL locally with Unsloth.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"},{"label":"Qwen3 - How to Run and Fine-Tune","emoji":"1f320"}]}
,{"id":"a1f4ef520ffb1a185e8a6deff4ec2b5569d8f5a7","title":"Qwen3-2507: How to Run Locally","pathname":"/docs/de/modelle/tutorials/qwen3-how-to-run-and-fine-tune/qwen3-2507","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f320","description":"Run the Thinking and Instruct versions of Qwen3-30B-A3B-2507 and 235B-A22B locally on your device!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"},{"label":"Qwen3 - How to Run and Fine-Tune","emoji":"1f320"}]}
,{"id":"db6f61dbf018d9aa3e178cc7e880c0130989bce0","title":"MiniMax-M2.5: How to Run","pathname":"/docs/de/modelle/tutorials/minimax-m25","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"waveform","description":"Run MiniMax-M2.5 locally on your own device!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"542ecfd3ee96a444791bde7f395bdd1a59ec1ea5","title":"Qwen3-Coder: How to Run Locally","pathname":"/docs/de/modelle/tutorials/qwen3-coder-how-to-run-locally","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f320","description":"Run Qwen3-Coder-30B-A3B-Instruct and 480B-A35B locally with Unsloth Dynamic Quants.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"ed8345f0cc9e0e67f0b2d0992b6381ecd6a9899b","title":"Gemma 3 - How to Run","pathname":"/docs/de/modelle/tutorials/gemma-3-how-to-run-and-fine-tune","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"google","description":"How to run Gemma 3 effectively with our GGUFs on llama.cpp, Ollama, Open WebUI, and how to fine-tune it with Unsloth!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"6375cb14400f983c1ec3d3c0d5f8699e3ee9a5ae","title":"Gemma 3n: How to Run and Fine-Tune","pathname":"/docs/de/modelle/tutorials/gemma-3-how-to-run-and-fine-tune/gemma-3n-how-to-run-and-fine-tune","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"google","description":"Run Google's new Gemma 3n locally with Dynamic GGUFs on llama.cpp, Ollama, Open WebUI and fine-tune it with Unsloth!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"},{"label":"Gemma 3 - How to Run","icon":"google"}]}
,{"id":"1e7861e594f2ea678a28332062a23bcee10c4e3d","title":"DeepSeek-OCR 2: How to Run and Fine-Tune","pathname":"/docs/de/modelle/tutorials/deepseek-ocr-2","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f433","description":"A guide to running and fine-tuning DeepSeek-OCR-2 locally.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"9eaa51932747df2c1f3c4a4eb10410fa2cb5a2dd","title":"GLM-4.7: How to Run Locally","pathname":"/docs/de/modelle/tutorials/glm-4.7","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"z","description":"A guide on how to run the Z.ai GLM-4.7 model on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"f9a59ced7d21e1bde6109eb7cfa0f721e1602428","title":"How to Run Qwen-Image-2512 Locally in ComfyUI","pathname":"/docs/de/modelle/tutorials/qwen-image-2512","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f49f","description":"A step-by-step tutorial for running Qwen-Image-2512 on your local device with ComfyUI.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"34e394724db99601f06f4a6d709b709fc4f49ab5","title":"Run Qwen-Image-2512 in stable-diffusion.cpp Tutorial","pathname":"/docs/de/modelle/tutorials/qwen-image-2512/stable-diffusion.cpp","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f3a8","description":"A tutorial on using Qwen-Image-2512 in stable-diffusion.cpp.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"},{"label":"How to Run Qwen-Image-2512 Locally in ComfyUI","emoji":"1f49f"}]}
,{"id":"f092a1211912dae2208951f680a2b5a9944a6af4","title":"Devstral 2 - How to Run","pathname":"/docs/de/modelle/tutorials/devstral-2","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f4d9","description":"A guide to running the Mistral Devstral 2 models locally: 123B-Instruct-2512 and Small-2-24B-Instruct-2512.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"ce1a52c32558234afe580939db0fcfe220dc208b","title":"Ministral 3 - How to Run","pathname":"/docs/de/modelle/tutorials/ministral-3","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f431","description":"A guide to the Mistral Ministral 3 models for running or fine-tuning them locally on your device","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"ba9915a2be20805eb5ba7d6ebec553163ddf863e","title":"DeepSeek-OCR: How to Run and Fine-Tune","pathname":"/docs/de/modelle/tutorials/deepseek-ocr-how-to-run-and-fine-tune","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f433","description":"A guide to running and fine-tuning DeepSeek-OCR locally.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"ec4485f0cffa8c16963f28544e89a652f988d869","title":"Kimi K2 Thinking: How to Run Locally","pathname":"/docs/de/modelle/tutorials/kimi-k2-thinking-how-to-run-locally","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f319","description":"A guide to running Kimi-K2-Thinking and Kimi-K2 on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"d1a24e17aacb2e1edef96e954b901fe1f785221e","title":"GLM-4.6: How to Run Locally","pathname":"/docs/de/modelle/tutorials/glm-4.6-how-to-run-locally","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"z","description":"A guide on how to run the Z.ai GLM-4.6 and GLM-4.6V-Flash models on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"c8b6895be773f2849bd57d1b0d3578f491bfa67c","title":"Qwen3-Next: How to Run Locally","pathname":"/docs/de/modelle/tutorials/qwen3-next","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f320","description":"Run the Thinking and Instruct versions of Qwen3-Next-80B-A3B locally on your device!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"8ecff6350d3f4cbfb3de937da82a372c1385dcc6","title":"FunctionGemma: How to Run and Fine-Tune","pathname":"/docs/de/modelle/tutorials/functiongemma","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"google","description":"Learn how to run and fine-tune FunctionGemma locally on your device and phone.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"42aa6b132b83bbca34de4b7a6d3d2074272827c3","title":"DeepSeek-V3.1: How to Run Locally","pathname":"/docs/de/modelle/tutorials/deepseek-v3.1-how-to-run-locally","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f40b","description":"A guide on how to run DeepSeek-V3.1 and Terminus on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"2d93c1e9c4de5c5a267893947e9721f008ed9418","title":"DeepSeek-R1-0528: How to Run Locally","pathname":"/docs/de/modelle/tutorials/deepseek-r1-0528-how-to-run-locally","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f40b","description":"A guide on how to run DeepSeek-R1-0528, including Qwen3, on your own local device!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"0ba99f57da4d961b4b6ac0ebc82f8fc360f57cc1","title":"Liquid LFM2.5: How to Run and Fine-Tune","pathname":"/docs/de/modelle/tutorials/lfm2.5","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f4a7","description":"Run and fine-tune LFM2.5 Instruct and Vision locally on your device!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"a98dac686894198901004645b13c5c4c51f32843","title":"Magistral: How to Run and Fine-Tune","pathname":"/docs/de/modelle/tutorials/magistral-how-to-run-and-fine-tune","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f4a5","description":"Meet Magistral - Mistral's new reasoning models.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"ed18e39c4ad9797b97e59c64acb1e3bee4b53103","title":"IBM Granite 4.0","pathname":"/docs/de/modelle/tutorials/ibm-granite-4.0","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"cube","description":"How to run IBM Granite-4.0 with Unsloth GGUFs on llama.cpp and Ollama, and how to fine-tune it!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"48def9956d09f2f427c4285d05bd7567ca706ce8","title":"Llama 4: How to Run and Fine-Tune","pathname":"/docs/de/modelle/tutorials/llama-4-how-to-run-and-fine-tune","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f999","description":"How to run Llama 4 locally with our dynamic GGUFs, which recover accuracy compared to standard quantization.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"4d0909d6bdc8700fe155476c8a8804eb7bcb39a7","title":"Grok 2","pathname":"/docs/de/modelle/tutorials/grok-2","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"square-x-twitter","description":"Run xAI's Grok 2 model locally!","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"7f1818c639a40852ccefc40e5bc9f330d43c0920","title":"Devstral: How to Run and Fine-Tune","pathname":"/docs/de/modelle/tutorials/devstral-how-to-run-and-fine-tune","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f4d9","description":"Run and fine-tune Mistral Devstral 1.1, including Small-2507 and 2505.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"69b29df1a649bc85fbd042bb58c2d5bc609ad71b","title":"How to Run Local LLMs with Docker: Step-by-Step Guide","pathname":"/docs/de/modelle/tutorials/how-to-run-llms-with-docker","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"docker","description":"Learn how to run large language models (LLMs) with Docker and Unsloth on your own local device.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"6874079159971d44a046544779789075f4168545","title":"DeepSeek-V3-0324: How to Run Locally","pathname":"/docs/de/modelle/tutorials/deepseek-v3-0324-how-to-run-locally","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f433","description":"How to run DeepSeek-V3-0324 locally with our dynamic quants that recover accuracy","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"59bfd1b66766603cea7f531c9b7fd8669737f1a6","title":"DeepSeek-R1: How to Run Locally","pathname":"/docs/de/modelle/tutorials/deepseek-r1-how-to-run-locally","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f40b","description":"A guide on how to run our 1.58-bit Dynamic Quants for DeepSeek-R1 with llama.cpp.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"6996e8b117edf77aa284fea26ffc8d3c9f4a6076","title":"DeepSeek-R1 Dynamic 1.58-bit","pathname":"/docs/de/modelle/tutorials/deepseek-r1-how-to-run-locally/deepseek-r1-dynamic-1.58-bit","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f433","description":"See performance table comparisons of Unsloth's Dynamic GGUF Quants versus standard IMatrix Quants.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"},{"label":"DeepSeek-R1: How to Run Locally","emoji":"1f40b"}]}
,{"id":"9f9221053a31dd3d971a43c6bc3f7afc4b2ca60a","title":"Phi-4 Reasoning: How to Run and Fine-Tune","pathname":"/docs/de/modelle/tutorials/phi-4-reasoning-how-to-run-and-fine-tune","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"windows","description":"Learn to run and fine-tune Phi-4 reasoning models locally with Unsloth + our Dynamic 2.0 quants","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"57112ad35933fe550381d802d8f1e47ff8533916","title":"QwQ-32B: How to Run Effectively","pathname":"/docs/de/modelle/tutorials/qwq-32b-how-to-run-effectively","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f320","description":"How to run QwQ-32B effectively with our bug fixes and without endless generations + GGUFs.","breadcrumbs":[{"label":"Models"},{"label":"Large Language Model (LLM) Tutorials","emoji":"1f680"}]}
,{"id":"03532de69dfe0230fe5114e809721d8b7dd74ca6","title":"Inference & Deployment","pathname":"/docs/de/grundlagen/inference-and-deployment","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f5a5","description":"Learn how to save your fine-tuned model so you can run it in your preferred inference engine.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"9cfeafb2cc359999e3a7f6ba6ffa5468e4752653","title":"Saving to GGUF","pathname":"/docs/de/grundlagen/inference-and-deployment/saving-to-gguf","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"Save models in 16-bit format for GGUF so you can use them with Ollama, Jan AI, Open WebUI, and more!","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]}
,{"id":"a241be61d5411b52c7d9ab17486a0185949606b1","title":"Speculative Decoding","pathname":"/docs/de/grundlagen/inference-and-deployment/saving-to-gguf/speculative-decoding","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"Speculative decoding with llama-server, llama.cpp, vLLM, and more for 2x faster inference","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"},{"label":"Saving to GGUF"}]}
,{"id":"af094159d1c157db0d9afc00bd98b849fcdb8f0c","title":"vLLM Deployment and Inference Guide","pathname":"/docs/de/grundlagen/inference-and-deployment/vllm-guide","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"A guide to saving and deploying LLMs to vLLM for serving LLMs in production","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]}
,{"id":"6969b7d50e6d4e4a2fd432fda78971db7d4f929a","title":"vLLM Engine Arguments","pathname":"/docs/de/grundlagen/inference-and-deployment/vllm-guide/vllm-engine-arguments","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"},{"label":"vLLM Deployment and Inference Guide"}]}
,{"id":"7f6b2c78a8d675a1c775391d78fe85d3d0e31b3a","title":"LoRA Hot-Swapping Guide","pathname":"/docs/de/grundlagen/inference-and-deployment/vllm-guide/lora-hot-swapping-guide","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"},{"label":"vLLM Deployment and Inference Guide"}]}
,{"id":"535d0cfbde7f5f4826b1f619981b5b10742e2330","title":"Saving to Ollama","pathname":"/docs/de/grundlagen/inference-and-deployment/saving-to-ollama","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]}
,{"id":"ea71c5c0df369f2b0055399448c044aabed9efba","title":"Deploying Models to LM Studio","pathname":"/docs/de/grundlagen/inference-and-deployment/lm-studio","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"Save models to GGUF so you can run and deploy them in LM Studio","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]}
,{"id":"d373cb87b7b6b14d5e135802924614c5ccbbb8ec","title":"How to Install the LM Studio CLI in a Linux Terminal","pathname":"/docs/de/grundlagen/inference-and-deployment/lm-studio/how-to-install-lm-studio-cli-in-linux-terminal","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f47e","description":"A guide to installing the LM Studio CLI without a UI in a terminal instance.","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"},{"label":"Deploying Models to LM Studio"}]}
,{"id":"7ee5a053c7473687e3fc8557db16d666991941dc","title":"SGLang Deployment and Inference Guide","pathname":"/docs/de/grundlagen/inference-and-deployment/sglang-guide","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"A guide to saving and deploying LLMs to SGLang for serving LLMs in production","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]}
,{"id":"6f7dcba79c06230757d338618fe62a8da47076da","title":"llama-server & OpenAI Endpoint Deployment Guide","pathname":"/docs/de/grundlagen/inference-and-deployment/llama-server-and-openai-endpoint","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"Deployment via llama-server with an OpenAI-compatible endpoint","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]}
,{"id":"86b6f77ee96e4edb00c776189ff1bf5032a3aaeb","title":"How to Run and Deploy Local LLMs on Your iOS or Android Phone","pathname":"/docs/de/grundlagen/inference-and-deployment/deploy-llms-phone","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f4f1","description":"A tutorial on fine-tuning your own LLM and deploying it to your Android or iPhone with ExecuTorch.","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]}
,{"id":"14781be2f5de20d42f8771160661ee0e1bf6e874","title":"Troubleshooting Inference","pathname":"/docs/de/grundlagen/inference-and-deployment/troubleshooting-inference","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"If you're running into issues when running or saving your model.","breadcrumbs":[{"label":"Basics"},{"label":"Inference & Deployment","emoji":"1f5a5"}]}
,{"id":"d12c953ceacbd6c3e44f3aa911056928e0488f5b","title":"How to Run Local LLMs with Claude Code","pathname":"/docs/de/grundlagen/claude-code","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"claude","description":"A guide to using open models with Claude Code on your local device.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"1813c928d883d651dff92062bc0da6e96d06e50a","title":"How to Run Local LLMs with OpenAI Codex","pathname":"/docs/de/grundlagen/codex","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"openai","description":"Use open models with OpenAI Codex locally on your device.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"8b1f9407050c14f70efd7cfaa7d1daf4a67ff3e7","title":"Multi-GPU Fine-tuning with Unsloth","pathname":"/docs/de/grundlagen/multi-gpu-training-with-unsloth","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"rectangle-history","description":"Learn how to fine-tune LLMs on multiple GPUs and with parallelism using Unsloth.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"5514b4f06913bfcee7c5b9d6e0fd6c014340cdf3","title":"Multi-GPU Fine-tuning with Distributed Data Parallel (DDP)","pathname":"/docs/de/grundlagen/multi-gpu-training-with-unsloth/ddp","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"Learn how to use the Unsloth CLI to train on multiple GPUs with Distributed Data Parallel (DDP)!","breadcrumbs":[{"label":"Basics"},{"label":"Multi-GPU Fine-tuning with Unsloth","icon":"rectangle-history"}]}
,{"id":"f9cecc317356c3fa794a39ae4190a6d22c029f46","title":"Fine-tuning Embedding Models with Unsloth Guide","pathname":"/docs/de/grundlagen/embedding-finetuning","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f50e","description":"Learn how to easily fine-tune embedding models with Unsloth.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"d2073bad97e9c04578abf5444104a5502ae38941","title":"Fine-tune MoE Models 12x Faster with Unsloth","pathname":"/docs/de/grundlagen/faster-moe","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f48e","description":"Train MoE LLMs locally with the Unsloth guide.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"18ba00f910090c7a44c97dedb9a0d82ad0eccacc","title":"Text-to-Speech (TTS) Fine-tuning Guide","pathname":"/docs/de/grundlagen/text-to-speech-tts-fine-tuning","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f50a","description":"Learn how to fine-tune TTS and STT voice models with Unsloth.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"abe22dcb3049581e00371a1b82b9e9cf6821a9b0","title":"Unsloth Dynamic 2.0 GGUFs","pathname":"/docs/de/grundlagen/unsloth-dynamic-2.0-ggufs","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f9a5","description":"A big new upgrade for our Dynamic Quants!","breadcrumbs":[{"label":"Basics"}]}
,{"id":"575776db88d905467eea1b184b6780b2f9ed78e5","title":"Unsloth Dynamic GGUFs on Aider Polyglot","pathname":"/docs/de/grundlagen/unsloth-dynamic-2.0-ggufs/unsloth-dynamic-ggufs-on-aider-polyglot","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f9a5","description":"Performance of Unsloth Dynamic GGUFs on Aider Polyglot benchmarks","breadcrumbs":[{"label":"Basics"},{"label":"Unsloth Dynamic 2.0 GGUFs","emoji":"1f9a5"}]}
,{"id":"ba7e51b2382f5cf41d34361579ec54dd6bfc4e71","title":"Tool Calling Guide for Local LLMs","pathname":"/docs/de/grundlagen/tool-calling-guide-for-local-llms","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"screwdriver-wrench","description":"","breadcrumbs":[{"label":"Basics"}]}
,{"id":"57170bb26b2747b2ef94d23a7666e8a08d688b27","title":"Vision Fine-tuning","pathname":"/docs/de/grundlagen/vision-fine-tuning","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f441","description":"Learn how to fine-tune vision/multimodal LLMs with Unsloth","breadcrumbs":[{"label":"Basics"}]}
,{"id":"3fc49911b5923db7fb5a51b50c7c638ebe732f42","title":"Troubleshooting & FAQs","pathname":"/docs/de/grundlagen/troubleshooting-and-faqs","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"26a0","description":"Tips for solving issues, and frequently asked questions.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"e2d19835fc68fb83771756e84c0d98ac1e182677","title":"Hugging Face Hub, XET Debugging","pathname":"/docs/de/grundlagen/troubleshooting-and-faqs/hugging-face-hub-xet-debugging","siteSpaceId":"sitesp_L6rLB","lang":"de","description":"Debugging and troubleshooting stalled, stuck, and slow downloads","breadcrumbs":[{"label":"Basics"},{"label":"Troubleshooting & FAQs","emoji":"26a0"}]}
,{"id":"3757060b6860c23169555a4f9ab57784a90ed455","title":"Chat Templates","pathname":"/docs/de/grundlagen/chat-templates","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f4ac","description":"Learn the basics and customization options of chat templates, including the conversational, ChatML, ShareGPT, and Alpaca formats, and more!","breadcrumbs":[{"label":"Basics"}]}
,{"id":"4a52add32b0967d961ca39225bc178234e2beea9","title":"Unsloth Environment Flags","pathname":"/docs/de/grundlagen/unsloth-environment-flags","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f6e0","description":"Advanced flags that may be useful if your fine-tuning is broken or you want to turn things off.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"7833952b3c01beb5e61cbcebda8182c1be83777e","title":"Continued Pretraining","pathname":"/docs/de/grundlagen/continued-pretraining","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"267b","description":"Also known as continued fine-tuning. Unsloth lets you continually pretrain so a model can learn a new language.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"24e353a4cd4d66055901c275613af851f819330c","title":"Fine-tuning from the Last Checkpoint","pathname":"/docs/de/grundlagen/finetuning-from-last-checkpoint","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f3c1","description":"Checkpointing lets you save your fine-tuning progress so you can pause and resume later.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"8f066782276b19e21753a9a2f644fd81a8071c0f","title":"Unsloth Benchmarks","pathname":"/docs/de/grundlagen/unsloth-benchmarks","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"1f4ca","description":"Benchmarks recorded by Unsloth on NVIDIA GPUs.","breadcrumbs":[{"label":"Basics"}]}
,{"id":"c119d057de6a9fb91c2922e5fb7f498aa600bc18","title":"3x Faster LLM Training with Unsloth Kernels + Packing","pathname":"/docs/de/blog/3x-faster-training-packing","siteSpaceId":"sitesp_L6rLB","lang":"de","emoji":"26a1","description":"Learn how Unsloth increases training throughput and eliminates wasted padding during fine-tuning.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"d9c1ce434cf33ed7ef7bf701657d32b0e37edee8","title":"500K Context Length Fine-tuning","pathname":"/docs/de/blog/500k-context-length-fine-tuning","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"ruler-combined","description":"Learn how to enable fine-tuning with a context window of >500K tokens with Unsloth.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"403e1f81fd936dfabff4d1937d0bbdd84585c969","title":"Quantization-Aware Training (QAT)","pathname":"/docs/de/blog/quantization-aware-training-qat","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"down-left-and-up-right-to-center","description":"Quantize models to 4-bit with Unsloth and PyTorch to recover accuracy.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"ab4316bf164534c25b177f0a9338ead582c3201c","title":"Fine-tuning LLMs on NVIDIA DGX Station with Unsloth","pathname":"/docs/de/blog/dgx-station","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"microchip-ai","description":"An NVIDIA DGX Station tutorial on fine-tuning with Unsloth's notebooks.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"3baa81327c4b135f98ea3c328775c251feb9564f","title":"How to Fine-tune LLMs with Unsloth & Docker","pathname":"/docs/de/blog/how-to-fine-tune-llms-with-unsloth-and-docker","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"docker","description":"Learn how to fine-tune LLMs or do reinforcement learning (RL) with Unsloth's Docker image.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"6d9bae5b6da8050c1b9a805e1d9eefc6d4d02f08","title":"Fine-tuning LLMs with NVIDIA DGX Spark and Unsloth","pathname":"/docs/de/blog/fine-tuning-llms-with-nvidia-dgx-spark-and-unsloth","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"sparkle","description":"A tutorial on fine-tuning and reinforcement learning (RL) with OpenAI gpt-oss on NVIDIA DGX Spark.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"2c09033b89b260a14c0fd8f6283604d65c2ce493","title":"Fine-tuning LLMs with Blackwell, RTX 50 Series & Unsloth","pathname":"/docs/de/blog/fine-tuning-llms-with-blackwell-rtx-50-series-and-unsloth","siteSpaceId":"sitesp_L6rLB","lang":"de","icon":"microchip","description":"Learn how to fine-tune LLMs on NVIDIA Blackwell, RTX 50 Series, and B200 GPUs with our step-by-step guide.","breadcrumbs":[{"label":"Blog"}]}
,{"id":"4512843f43e3d51c6fd567ba8d1d41bd61aac871","title":"Unsloth Documentation","pathname":"/docs/fr","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f9a5","description":"Unsloth is an open-source framework for running and training models.","breadcrumbs":[{"label":"Get Started"}]}
,{"id":"fae0ce37756fb39f97798fc8e7e12aa27c33bc88","title":"Fine-tuning for Beginners","pathname":"/docs/fr/commencer/fine-tuning-for-beginners","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"2b50","description":"","breadcrumbs":[{"label":"Get Started"}]}
,{"id":"47370eb8ad54f7198da9c5711a7d50efdf003784","title":"Unsloth Requirements","pathname":"/docs/fr/commencer/fine-tuning-for-beginners/unsloth-requirements","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f6e0","description":"Here are Unsloth's requirements, including system and GPU VRAM requirements.","breadcrumbs":[{"label":"Get Started"},{"label":"Fine-tuning for Beginners","emoji":"2b50"}]}
,{"id":"a4c177d0cc0d485af2566701bc0c3f6f94175629","title":"FAQ + Is Fine-tuning Right for Me?","pathname":"/docs/fr/commencer/fine-tuning-for-beginners/faq-+-is-fine-tuning-right-for-me","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f914","description":"Si vous hésitez à savoir si le fine-tuning vous convient, voyez ici ! 
Découvrez les idées reçues sur le fine-tuning, comment il se compare à RAG, et plus encore :","breadcrumbs":[{"label":"Commencer"},{"label":"Fine-tuning pour débutants","emoji":"2b50"}]},{"id":"2bdfa5349e5d636154595876b11e5db5e1f9e9d6","title":"Carnets Unsloth","pathname":"/docs/fr/commencer/unsloth-notebooks","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f4d2","description":"Notebooks de fine-tuning : explorez le catalogue Unsloth.","breadcrumbs":[{"label":"Commencer"}]},{"id":"ca98e2e30b9d7bfca2555b29e750ac7d5e3e674c","title":"Catalogue des modèles Unsloth","pathname":"/docs/fr/commencer/unsloth-model-catalog","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f52e","description":"","breadcrumbs":[{"label":"Commencer"}]},{"id":"7ffcb5efbe9e4cf09524c2b084bceb779bedad90","title":"Installation d'Unsloth","pathname":"/docs/fr/commencer/install","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f4e5","description":"Apprenez à installer Unsloth localement ou en ligne.","breadcrumbs":[{"label":"Commencer"}]},{"id":"017499fb6962e259f964d419ed274c68a2f78f23","title":"Installer Unsloth via pip et uv","pathname":"/docs/fr/commencer/install/pip-install","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"desktop-arrow-down","description":"Pour installer Unsloth localement via Pip, suivez les étapes ci-dessous :","breadcrumbs":[{"label":"Commencer"},{"label":"Installation d'Unsloth","emoji":"1f4e5"}]},{"id":"14c4a3ccd3644f3f4905e3d528cdb66ead97b8ce","title":"Installer Unsloth sur MacOS","pathname":"/docs/fr/commencer/install/mac","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"apple","description":"","breadcrumbs":[{"label":"Commencer"},{"label":"Installation d'Unsloth","emoji":"1f4e5"}]},{"id":"897f358231280f27c42fdd0fe983b7be0dd75dfc","title":"Comment fine-tuner des LLMs sur Windows avec Unsloth (guide étape par étape)","pathname":"/docs/fr/commencer/install/windows-installation","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"windows","description":"Découvrez comment installer Unsloth sur Windows pour commencer à fine-tuner des LLMs localement.","breadcrumbs":[{"label":"Commencer"},{"label":"Installation d'Unsloth","emoji":"1f4e5"}]},{"id":"753bcf3b457799d53f6336913d21ff3056ea8655","title":"Installer Unsloth via Docker","pathname":"/docs/fr/commencer/install/docker","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"docker","description":"Installez Unsloth à l'aide de notre conteneur Docker officiel","breadcrumbs":[{"label":"Commencer"},{"label":"Installation d'Unsloth","emoji":"1f4e5"}]},{"id":"961323c1e9ab5878c28afd53cf87474039080525","title":"Mise à jour d'Unsloth","pathname":"/docs/fr/commencer/install/updating","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"arrow-rotate-right","description":"Pour mettre à jour ou utiliser une ancienne version d'Unsloth, suivez les étapes ci-dessous :","breadcrumbs":[{"label":"Commencer"},{"label":"Installation d'Unsloth","emoji":"1f4e5"}]},{"id":"4428ac8b919c6d934a1260003dc58ba0547d6273","title":"Fine-tuning de LLMs sur GPU AMD avec le guide Unsloth","pathname":"/docs/fr/commencer/install/amd","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"square-up-right","description":"Apprenez à fine-tuner de grands modèles de langage (LLMs) sur des GPU AMD avec Unsloth.","breadcrumbs":[{"label":"Commencer"},{"label":"Installation d'Unsloth","emoji":"1f4e5"}]},{"id":"b3a0a3e07dbd65d73c215148fac229cf552115ac","title":"Fine-tuning de LLMs sur GPU Intel avec 
Unsloth","pathname":"/docs/fr/commencer/install/intel","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"info","description":"Apprenez à entraîner et fine-tuner de grands modèles de langage sur des GPU Intel.","breadcrumbs":[{"label":"Commencer"},{"label":"Installation d'Unsloth","emoji":"1f4e5"}]},{"id":"9bbe6f156adffaddead7109d8475ab4a8547be46","title":"Guide du fine-tuning de LLMs","pathname":"/docs/fr/commencer/fine-tuning-llms-guide","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f9ec","description":"Apprenez toutes les bases et les bonnes pratiques du fine-tuning. Adapté aux débutants.","breadcrumbs":[{"label":"Commencer"}]},{"id":"106dfb3c53d8e4d182108e27a2e91ce2b4f38210","title":"Guide des jeux de données","pathname":"/docs/fr/commencer/fine-tuning-llms-guide/datasets-guide","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f4c8","description":"Apprenez à créer et préparer un jeu de données pour le fine-tuning.","breadcrumbs":[{"label":"Commencer"},{"label":"Guide du fine-tuning de LLMs","emoji":"1f9ec"}]},{"id":"f4254d24125c1e7ad4d5ad30f1a9c8f7d36e7c4d","title":"Guide des hyperparamètres de fine-tuning LoRA","pathname":"/docs/fr/commencer/fine-tuning-llms-guide/lora-hyperparameters-guide","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f9e0","description":"Apprenez pas à pas les meilleurs réglages de fine-tuning LLM : rang et alpha LoRA, époques, taille de lot + accumulation de gradients, QLoRA vs. LoRA, modules cibles, et plus encore.","breadcrumbs":[{"label":"Commencer"},{"label":"Guide du fine-tuning de LLMs","emoji":"1f9ec"}]},{"id":"6a7779ad1adc734a55034f8880a76401f04d84c5","title":"Quel modèle dois-je utiliser pour le fine-tuning ?","pathname":"/docs/fr/commencer/fine-tuning-llms-guide/what-model-should-i-use","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"2753","description":"","breadcrumbs":[{"label":"Commencer"},{"label":"Guide du fine-tuning de LLMs","emoji":"1f9ec"}]},{"id":"9418759bd903302ac509b6da96465b4bb2e40cfe","title":"Tutoriel : comment fine-tuner Llama-3 et l'utiliser dans Ollama","pathname":"/docs/fr/commencer/fine-tuning-llms-guide/tutorial-how-to-finetune-llama-3-and-use-in-ollama","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f999","description":"Guide du débutant pour créer un assistant personnel personnalisé (comme ChatGPT) à exécuter localement sur Ollama","breadcrumbs":[{"label":"Commencer"},{"label":"Guide du fine-tuning de LLMs","emoji":"1f9ec"}]},{"id":"85e0cbe0d1323be98ebb2569af86994406e2a2d6","title":"Guide de l'apprentissage par renforcement (RL)","pathname":"/docs/fr/commencer/reinforcement-learning-rl-guide","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f4a1","description":"Découvrez tout sur l'apprentissage par renforcement (RL) et comment entraîner votre propre modèle de raisonnement DeepSeek-R1 avec Unsloth en utilisant GRPO. 
Un guide complet du débutant au niveau avancé.","breadcrumbs":[{"label":"Commencer"}]},{"id":"93e8b0e7e053d1516b2631829beedee63c88ea4c","title":"Apprentissage par renforcement GRPO avec un contexte 7x plus long","pathname":"/docs/fr/commencer/reinforcement-learning-rl-guide/grpo-long-context","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f300","description":"Découvrez comment Unsloth permet un fine-tuning RL à très long contexte.","breadcrumbs":[{"label":"Commencer"},{"label":"Guide de l'apprentissage par renforcement (RL)","emoji":"1f4a1"}]},{"id":"828a94c1637e052b62bda4ce216caa066667b218","title":"Apprentissage par renforcement pour la vision (VLM RL)","pathname":"/docs/fr/commencer/reinforcement-learning-rl-guide/vision-reinforcement-learning-vlm-rl","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f441-1f5e8","description":"Entraînez des modèles de vision/multimodaux via GRPO et RL avec Unsloth !","breadcrumbs":[{"label":"Commencer"},{"label":"Guide de l'apprentissage par renforcement (RL)","emoji":"1f4a1"}]},{"id":"92ec1eda06a9686b604ef754fe5b88c169de1cae","title":"Apprentissage par renforcement FP8","pathname":"/docs/fr/commencer/reinforcement-learning-rl-guide/fp8-reinforcement-learning","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f3b1","description":"Entraînez l'apprentissage par renforcement (RL) et GRPO en précision FP8 avec Unsloth.","breadcrumbs":[{"label":"Commencer"},{"label":"Guide de l'apprentissage par renforcement (RL)","emoji":"1f4a1"}]},{"id":"3b308de419b284a6a192a8a4ada6d06eeb57ae12","title":"Tutoriel : entraînez votre propre modèle de raisonnement avec GRPO","pathname":"/docs/fr/commencer/reinforcement-learning-rl-guide/tutorial-train-your-own-reasoning-model-with-grpo","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"26a1","description":"Guide du débutant pour transformer un modèle comme Llama 3.1 (8B) en modèle de raisonnement en utilisant Unsloth et GRPO.","breadcrumbs":[{"label":"Commencer"},{"label":"Guide de l'apprentissage par renforcement (RL)","emoji":"1f4a1"}]},{"id":"94ab2d9623c104a6b6a1aa04195e1ef51b45b84c","title":"Documentation avancée sur l'apprentissage par renforcement","pathname":"/docs/fr/commencer/reinforcement-learning-rl-guide/advanced-rl-documentation","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f9e9","description":"Paramètres de documentation avancés lors de l'utilisation d'Unsloth avec GRPO.","breadcrumbs":[{"label":"Commencer"},{"label":"Guide de l'apprentissage par renforcement (RL)","emoji":"1f4a1"}]},{"id":"7fd0dddd3d571760e996aecd18e08f8ab58fbf5b","title":"Apprentissage par renforcement GSPO","pathname":"/docs/fr/commencer/reinforcement-learning-rl-guide/advanced-rl-documentation/gspo-reinforcement-learning","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"lightbulb-on","description":"Entraînez-vous avec le RL GSPO (Group Sequence Policy Optimization) dans Unsloth.","breadcrumbs":[{"label":"Commencer"},{"label":"Guide de l'apprentissage par renforcement (RL)","emoji":"1f4a1"},{"label":"Documentation avancée sur l'apprentissage par renforcement","emoji":"1f9e9"}]},{"id":"fc22bd16a489581349db130b2b5b1c7d4a9f2195","title":"Reward Hacking en RL","pathname":"/docs/fr/commencer/reinforcement-learning-rl-guide/advanced-rl-documentation/rl-reward-hacking","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"treasure-chest","description":"Découvrez ce qu'est le Reward Hacking en apprentissage par renforcement et comment le contrer.","breadcrumbs":[{"label":"Commencer"},{"label":"Guide de l'apprentissage par renforcement 
(RL)","emoji":"1f4a1"},{"label":"Documentation avancée sur l'apprentissage par renforcement","emoji":"1f9e9"}]},{"id":"c10868db27b3a6ca9f7465ed5087d86177024eb0","title":"FP16 vs BF16 pour le RL","pathname":"/docs/fr/commencer/reinforcement-learning-rl-guide/advanced-rl-documentation/fp16-vs-bf16-for-rl","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"2049","description":"Le document Defeating the Training-Inference Mismatch via FP16 https://arxiv.org/pdf/2510.26788 montre que l'utilisation de float16 est meilleure que bfloat16","breadcrumbs":[{"label":"Commencer"},{"label":"Guide de l'apprentissage par renforcement (RL)","emoji":"1f4a1"},{"label":"Documentation avancée sur l'apprentissage par renforcement","emoji":"1f9e9"}]},{"id":"2002066c130086ecdd2c6778ee34ab38a981a1dd","title":"RL économe en mémoire","pathname":"/docs/fr/commencer/reinforcement-learning-rl-guide/memory-efficient-rl","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"memory","description":"","breadcrumbs":[{"label":"Commencer"},{"label":"Guide de l'apprentissage par renforcement (RL)","emoji":"1f4a1"}]},{"id":"d9644059f502160bc700a9f4d6e0821533b1b59b","title":"Entraînement d'optimisation des préférences - DPO, ORPO et KTO","pathname":"/docs/fr/commencer/reinforcement-learning-rl-guide/preference-dpo-orpo-and-kto","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f3c6","description":"Découvrez le fine-tuning d'alignement des préférences avec DPO, GRPO, ORPO ou KTO via Unsloth, suivez les étapes ci-dessous :","breadcrumbs":[{"label":"Commencer"},{"label":"Guide de l'apprentissage par renforcement (RL)","emoji":"1f4a1"}]},{"id":"22a56cb154401d43a94d87fa72ce6dfde69b18e3","title":"Présentation d’Unsloth Studio","pathname":"/docs/fr/nouveau/studio","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f9a5","description":"Exécutez et entraînez des modèles d'IA localement avec Unsloth Studio.","breadcrumbs":[{"label":"Nouveau"}]},{"id":"a4ab77cc976ec484d13317b14422a754e2f1d4e8","title":"Commencer avec Unsloth Studio","pathname":"/docs/fr/nouveau/studio/start","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"bolt","description":"Un guide pour bien démarrer avec le studio de fine-tuning, les recettes de données, l'exportation des modèles et le chat.","breadcrumbs":[{"label":"Nouveau"},{"label":"Présentation d’Unsloth Studio","emoji":"1f9a5"}]},{"id":"02de109936ce31121bffae3333822baa85a115f0","title":"Comment exécuter des modèles avec Unsloth Studio","pathname":"/docs/fr/nouveau/studio/chat","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"comment-dots","description":"Exécutez localement des modèles d'IA, des LLMs et des GGUF avec Unsloth Studio.","breadcrumbs":[{"label":"Nouveau"},{"label":"Présentation d’Unsloth Studio","emoji":"1f9a5"}]},{"id":"3ca98ab552557cd126d152e81d801c8cb0caa73c","title":"Installation d'Unsloth Studio","pathname":"/docs/fr/nouveau/studio/install","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"arrow-down-to-square","description":"Apprenez à installer Unsloth Studio sur votre appareil local.","breadcrumbs":[{"label":"Nouveau"},{"label":"Présentation d’Unsloth Studio","emoji":"1f9a5"}]},{"id":"ec921eec2a37820cfd0cf432328d619c9fa1080b","title":"Recettes de données Unsloth","pathname":"/docs/fr/nouveau/studio/data-recipe","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"hat-chef","description":"Apprenez à créer, construire et modifier des jeux de données avec les Data Recipes d'Unsloth Studio.","breadcrumbs":[{"label":"Nouveau"},{"label":"Présentation d’Unsloth 
Studio","emoji":"1f9a5"}]},{"id":"817a1275219e1e8d86fe100d223ac4b0862ab3a1","title":"Exporter des modèles avec Unsloth Studio","pathname":"/docs/fr/nouveau/studio/export","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"box-isometric","description":"Apprenez à exporter vos fichiers de modèle safetensor ou LoRA vers GGUF ou d'autres formats.","breadcrumbs":[{"label":"Nouveau"},{"label":"Présentation d’Unsloth Studio","emoji":"1f9a5"}]},{"id":"17e668a82f6382e48c91ce24710a758a8d7eb23c","title":"Mises à jour d'Unsloth","pathname":"/docs/fr/nouveau/changelog","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"sparkles","description":"Journal des modifications d'Unsloth pour nos dernières versions, améliorations et correctifs.","breadcrumbs":[{"label":"Nouveau"}]},{"id":"4a2b83ac4bf0233da80a1e3b6ab9fb218108742c","title":"Qwen3.6 - Comment l'exécuter localement","pathname":"/docs/fr/modeles/qwen3.6","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f49c","description":"Exécutez le nouveau modèle Qwen3.6-35-A3B localement !","breadcrumbs":[{"label":"Modèles"}]},{"id":"fa9788be3ba8450d14c81331c0249f2201968a40","title":"Gemma 4 - Comment l'exécuter localement","pathname":"/docs/fr/modeles/gemma-4","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"2728","description":"Exécutez localement les nouveaux modèles Gemma 4 de Google, y compris E2B, E4B, 26B A4B et 31B.","breadcrumbs":[{"label":"Modèles"}]},{"id":"11ad65c8cb780dbffa6556d9554801824345ccfa","title":"Guide de fine-tuning de Gemma 4","pathname":"/docs/fr/modeles/gemma-4/train","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"flask-gear","description":"Entraînez Gemma 4 de Google avec Unsloth.","breadcrumbs":[{"label":"Modèles"},{"label":"Gemma 4 - Comment l'exécuter localement","emoji":"2728"}]},{"id":"5dc1787d57509ad87481c06d36ca9df87c35c053","title":"Qwen3.5 - Comment l'exécuter localement","pathname":"/docs/fr/modeles/qwen3.5","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f49c","description":"Exécutez les nouveaux LLMs Qwen3.5, y compris Medium : Qwen3.5-35B-A3B, 27B, 122B-A10B, Small : Qwen3.5-0.8B, 2B, 4B, 9B et 397B-A17B sur votre appareil local !","breadcrumbs":[{"label":"Modèles"}]},{"id":"f7f3dcef9bc832cb26935d1dd2773d37a53b1d88","title":"Guide de fine-tuning de Qwen3.5","pathname":"/docs/fr/modeles/qwen3.5/fine-tune","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"flask-gear","description":"Apprenez à fine-tuner les LLMs Qwen3.5 avec Unsloth.","breadcrumbs":[{"label":"Modèles"},{"label":"Qwen3.5 - Comment l'exécuter localement","emoji":"1f49c"}]},{"id":"0ca7fe7921ecd1e2e030c324a794e27e90007ffb","title":"Benchmarks Qwen3.5 GGUF","pathname":"/docs/fr/modeles/qwen3.5/gguf-benchmarks","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"chart-fft","description":"Voyez comment les GGUF dynamiques d'Unsloth se comportent + analyse de la perplexité, de la divergence KL et de MXFP4.","breadcrumbs":[{"label":"Modèles"},{"label":"Qwen3.5 - Comment l'exécuter localement","emoji":"1f49c"}]},{"id":"40461af86e39f7eb7ece0eb6f1e6393ccedb5b02","title":"GLM-5.1 - Comment l'exécuter localement","pathname":"/docs/fr/modeles/glm-5.1","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"z","description":"Exécutez le nouveau modèle GLM-5.1 de Z.ai sur votre propre appareil local !","breadcrumbs":[{"label":"Modèles"}]},{"id":"76ae8488a3005b9cbd5dd14d8f7445641a902fff","title":"MiniMax-M2.7 - Comment l'exécuter localement","pathname":"/docs/fr/modeles/minimax-m27","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"waveform","description":"Exécutez le LLM 
MiniMax-M2.7 localement sur votre propre appareil !","breadcrumbs":[{"label":"Modèles"}]},{"id":"9ed8ddde53d68481706a3e7f68f59bb62e25a895","title":"NVIDIA Nemotron 3 Nano - Guide d'exécution","pathname":"/docs/fr/modeles/nemotron-3","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f9e9","description":"Exécutez et fine-tunez NVIDIA Nemotron 3 Nano localement sur votre appareil !","breadcrumbs":[{"label":"Modèles"}]},{"id":"24835c065057f6834a3e9350195e31c7109c9b67","title":"NVIDIA Nemotron-3-Super : guide d'exécution","pathname":"/docs/fr/modeles/nemotron-3/nemotron-3-super","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f9e9","description":"Exécutez et fine-tunez NVIDIA Nemotron-3-Super-120B-A12B localement sur votre appareil !","breadcrumbs":[{"label":"Modèles"},{"label":"NVIDIA Nemotron 3 Nano - Guide d'exécution","emoji":"1f9e9"}]},{"id":"012ba9db623e0eefda3058b67f1349147eafda22","title":"Qwen3-Coder-Next : Comment l'exécuter localement","pathname":"/docs/fr/modeles/qwen3-coder-next","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f320","description":"Guide pour exécuter Qwen3-Coder-Next localement sur votre appareil !","breadcrumbs":[{"label":"Modèles"}]},{"id":"e093c577dca65e62304722a0d0d3198ab33d3de8","title":"GLM-4.7-Flash : Comment l'exécuter localement","pathname":"/docs/fr/modeles/glm-4.7-flash","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"z","description":"Exécutez et fine-tunez GLM-4.7-Flash localement sur votre appareil !","breadcrumbs":[{"label":"Modèles"}]},{"id":"54af960e93fa00757e50c6eeafca4d56f706cf90","title":"Kimi K2.5 : guide d'exécution locale","pathname":"/docs/fr/modeles/kimi-k2.5","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f95d","description":"Guide pour exécuter Kimi-K2.5 sur votre propre appareil local !","breadcrumbs":[{"label":"Modèles"}]},{"id":"d83e1c0daa996374fe44eb72ff3ba6facaab3f10","title":"gpt-oss : guide d'exécution","pathname":"/docs/fr/modeles/gpt-oss-how-to-run-and-fine-tune","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"openai","description":"Exécutez et fine-tunez les nouveaux modèles open source d'OpenAI !","breadcrumbs":[{"label":"Modèles"}]},{"id":"883ee192b41562b491f6560f506f0c1351e3e0d1","title":"Apprentissage par renforcement gpt-oss","pathname":"/docs/fr/modeles/gpt-oss-how-to-run-and-fine-tune/gpt-oss-reinforcement-learning","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"openai","description":"","breadcrumbs":[{"label":"Modèles"},{"label":"gpt-oss : guide d'exécution","icon":"openai"}]},{"id":"d3ad6045025b16e1f9154591f2652d7f8e2dbd7f","title":"Tutoriel : comment entraîner gpt-oss avec RL","pathname":"/docs/fr/modeles/gpt-oss-how-to-run-and-fine-tune/gpt-oss-reinforcement-learning/tutorial-how-to-train-gpt-oss-with-rl","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"book-open-reader","description":"Apprenez à entraîner OpenAI gpt-oss avec GRPO pour battre automatiquement 2048 localement ou sur Colab.","breadcrumbs":[{"label":"Modèles"},{"label":"gpt-oss : guide d'exécution","icon":"openai"},{"label":"Apprentissage par renforcement gpt-oss","icon":"openai"}]},{"id":"06e24894ce66a8eb390a0591426123ca7fc631ad","title":"Tutoriel : comment fine-tuner gpt-oss","pathname":"/docs/fr/modeles/gpt-oss-how-to-run-and-fine-tune/tutorial-how-to-fine-tune-gpt-oss","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"openai","description":"Apprenez pas à pas à entraîner OpenAI gpt-oss localement avec Unsloth.","breadcrumbs":[{"label":"Modèles"},{"label":"gpt-oss : guide 
d'exécution","icon":"openai"}]},{"id":"3ebb92723b7323f88ac9cb67d3d4fdfe1c7b01f2","title":"Entraînement gpt-oss à long contexte","pathname":"/docs/fr/modeles/gpt-oss-how-to-run-and-fine-tune/long-context-gpt-oss-training","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"openai","description":"","breadcrumbs":[{"label":"Modèles"},{"label":"gpt-oss : guide d'exécution","icon":"openai"}]},{"id":"41f2e6bbbe116f730cf3549300cf28972e8c72f4","title":"Tutoriels sur les grands modèles de langage (LLMs)","pathname":"/docs/fr/modeles/tutorials","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f680","description":"Découvrez les derniers LLMs et apprenez à exécuter et fine-tuner des modèles localement pour des performances optimales avec Unsloth.","breadcrumbs":[{"label":"Modèles"}]},{"id":"a8d2bea93c712c2574751649afb854b785762586","title":"GLM-5 : guide d'exécution locale","pathname":"/docs/fr/modeles/tutorials/glm-5","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"z","description":"Exécutez le nouveau modèle GLM-5 de Z.ai sur votre propre appareil local !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"cdcd2213e0d2a7563e4a8dabe621d0042b1ba8b2","title":"Qwen3 - Comment exécuter et fine-tuner","pathname":"/docs/fr/modeles/tutorials/qwen3-how-to-run-and-fine-tune","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f320","description":"Apprenez à exécuter et fine-tuner Qwen3 localement avec Unsloth + nos quantifications Dynamic 2.0","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"d45a882f894f981162b0fef555540add4b7d72e1","title":"Qwen3-VL : guide d'exécution","pathname":"/docs/fr/modeles/tutorials/qwen3-how-to-run-and-fine-tune/qwen3-vl-how-to-run-and-fine-tune","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f320","description":"Apprenez à fine-tuner et à exécuter Qwen3-VL localement avec Unsloth.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"},{"label":"Qwen3 - Comment exécuter et fine-tuner","emoji":"1f320"}]},{"id":"08b638079f720f03f4ae61f30ea4084f852da73b","title":"Qwen3-2507 : guide d'exécution locale","pathname":"/docs/fr/modeles/tutorials/qwen3-how-to-run-and-fine-tune/qwen3-2507","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f320","description":"Exécutez localement sur votre appareil les versions Thinking et Instruct de Qwen3-30B-A3B-2507 et 235B-A22B !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"},{"label":"Qwen3 - Comment exécuter et fine-tuner","emoji":"1f320"}]},{"id":"a6f4f8425254815d780258ea77aa8ed7d94c90b6","title":"MiniMax-M2.5 : guide d'exécution","pathname":"/docs/fr/modeles/tutorials/minimax-m25","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"waveform","description":"Exécutez MiniMax-M2.5 localement sur votre propre appareil !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"e602d78d631056880787b680897d774bfbdacc01","title":"Qwen3-Coder : comment l'exécuter localement","pathname":"/docs/fr/modeles/tutorials/qwen3-coder-how-to-run-locally","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f320","description":"Exécutez Qwen3-Coder-30B-A3B-Instruct et 480B-A35B localement avec les quantifications dynamiques d'Unsloth.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage 
(LLMs)","emoji":"1f680"}]},{"id":"18b45693c88a48fd1d16532bb5dfceb5166df38e","title":"Gemma 3 - guide d'exécution","pathname":"/docs/fr/modeles/tutorials/gemma-3-how-to-run-and-fine-tune","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"google","description":"Comment exécuter efficacement Gemma 3 avec nos GGUF sur llama.cpp, Ollama, Open WebUI et comment fine-tuner avec Unsloth !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"316806651eee65f5b87bb9c872d0f7ad0026b962","title":"Gemma 3n : comment exécuter et fine-tuner","pathname":"/docs/fr/modeles/tutorials/gemma-3-how-to-run-and-fine-tune/gemma-3n-how-to-run-and-fine-tune","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"google","description":"Exécutez le nouveau Gemma 3n de Google localement avec des GGUF dynamiques sur llama.cpp, Ollama, Open WebUI et fine-tunez avec Unsloth !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"},{"label":"Gemma 3 - guide d'exécution","icon":"google"}]},{"id":"2033b6a7791704447730e8398ce00576aed8425a","title":"DeepSeek-OCR 2 : guide d'exécution et de fine-tuning","pathname":"/docs/fr/modeles/tutorials/deepseek-ocr-2","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f433","description":"Guide sur la façon d'exécuter et de fine-tuner DeepSeek-OCR-2 localement.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"0774814301220b49efb3fccdce0c6102dcad153c","title":"GLM-4.7 : guide d'exécution locale","pathname":"/docs/fr/modeles/tutorials/glm-4.7","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"z","description":"Un guide sur la façon d'exécuter le modèle Z.ai GLM-4.7 sur votre propre appareil local !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"f02184c12ad2b22efb34111252f7a23753e3f4fc","title":"Comment exécuter Qwen-Image-2512 localement dans ComfyUI","pathname":"/docs/fr/modeles/tutorials/qwen-image-2512","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f49f","description":"Tutoriel étape par étape pour exécuter Qwen-Image-2512 sur votre appareil local avec ComfyUI.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"71ea20aa6692f64a79b8514a198ef941fd0c8632","title":"Exécuter Qwen-Image-2512 dans stable-diffusion.cpp Tutoriel","pathname":"/docs/fr/modeles/tutorials/qwen-image-2512/stable-diffusion.cpp","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f3a8","description":"Tutoriel pour utiliser Qwen-Image-2512 dans stable-diffusion.cpp.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"},{"label":"Comment exécuter Qwen-Image-2512 localement dans ComfyUI","emoji":"1f49f"}]},{"id":"ceadbfb0f2e489cd5e42c6a03ec971d9d0e0700e","title":"Devstral 2 - guide d'exécution","pathname":"/docs/fr/modeles/tutorials/devstral-2","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f4d9","description":"Guide pour exécuter localement les modèles Mistral Devstral 2 : 123B-Instruct-2512 et Small-2-24B-Instruct-2512.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"b7e8e6530edc0f8d90d1dd7614a8a127f98532da","title":"Ministral 3 - guide 
d'exécution","pathname":"/docs/fr/modeles/tutorials/ministral-3","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f431","description":"Guide des modèles Mistral Ministral 3, pour les exécuter ou les fine-tuner localement sur votre appareil","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"6c1e05625f15370e1907a3fcd77edd84cccd087f","title":"DeepSeek-OCR : comment l'exécuter et le fine-tuner","pathname":"/docs/fr/modeles/tutorials/deepseek-ocr-how-to-run-and-fine-tune","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f433","description":"Guide sur la façon d'exécuter et de fine-tuner DeepSeek-OCR localement.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"1be04c8e20275b503c8a5a9caf1ef49be79c8ff1","title":"Kimi K2 Thinking : guide d'exécution locale","pathname":"/docs/fr/modeles/tutorials/kimi-k2-thinking-how-to-run-locally","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f319","description":"Guide pour exécuter Kimi-K2-Thinking et Kimi-K2 sur votre propre appareil local !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"dae16239a066aab3e73d5ba62459a60853a54430","title":"GLM-4.6 : guide d'exécution locale","pathname":"/docs/fr/modeles/tutorials/glm-4.6-how-to-run-locally","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"z","description":"Un guide sur la façon d'exécuter les modèles Z.ai GLM-4.6 et GLM-4.6V-Flash sur votre propre appareil local !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"29445d0c137d738fbbd518145144f5451be337d7","title":"Qwen3-Next : guide d'exécution locale","pathname":"/docs/fr/modeles/tutorials/qwen3-next","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f320","description":"Exécutez localement sur votre appareil les versions Qwen3-Next-80B-A3B-Instruct et Thinking !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"3c9d2836b7f65414c0a313d922358338d23d3862","title":"FunctionGemma : comment l'exécuter et le fine-tuner","pathname":"/docs/fr/modeles/tutorials/functiongemma","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"google","description":"Apprenez à exécuter et fine-tuner FunctionGemma localement sur votre appareil et votre téléphone.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"b3b1fa5961974e1d851732430e0f9edd08662c7c","title":"DeepSeek-V3.1 : comment l'exécuter localement","pathname":"/docs/fr/modeles/tutorials/deepseek-v3.1-how-to-run-locally","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f40b","description":"Un guide sur la façon d'exécuter DeepSeek-V3.1 et Terminus sur votre propre appareil local !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"bf22065882c875337e6a9aea8f14f3ab25542607","title":"DeepSeek-R1-0528 : comment l'exécuter localement","pathname":"/docs/fr/modeles/tutorials/deepseek-r1-0528-how-to-run-locally","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f40b","description":"Un guide sur la façon d'exécuter DeepSeek-R1-0528, y compris Qwen3, sur votre propre appareil local !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage 
(LLMs)","emoji":"1f680"}]},{"id":"c528ad8519f4ca046cd1328b5bdfae3996e0899b","title":"Liquid LFM2.5 : comment exécuter et fine-tuner","pathname":"/docs/fr/modeles/tutorials/lfm2.5","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f4a7","description":"Exécutez et fine-tunez LFM2.5 Instruct et Vision localement sur votre appareil !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"8f26be2f0d48a809bafc8762a3ec56ade9b747af","title":"Magistral : comment exécuter et fine-tuner","pathname":"/docs/fr/modeles/tutorials/magistral-how-to-run-and-fine-tune","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f4a5","description":"Découvrez Magistral - les nouveaux modèles de raisonnement de Mistral.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"d97697685cec88b23ee4d6443b03696bf640cb58","title":"IBM Granite 4.0","pathname":"/docs/fr/modeles/tutorials/ibm-granite-4.0","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"cube","description":"Comment exécuter IBM Granite-4.0 avec les GGUF d'Unsloth sur llama.cpp, Ollama et comment le fine-tuner !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"fca3c3fee394776398920e2c9e3428f5ec51196c","title":"Llama 4 : comment exécuter et fine-tuner","pathname":"/docs/fr/modeles/tutorials/llama-4-how-to-run-and-fine-tune","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f999","description":"Comment exécuter Llama 4 localement en utilisant nos GGUF dynamiques qui récupèrent la précision par rapport à la quantification standard.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"bc5e5c47bb3bbf61a7d7e6e5b5209070925d54ee","title":"Grok 2","pathname":"/docs/fr/modeles/tutorials/grok-2","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"square-x-twitter","description":"Exécutez le modèle Grok 2 de xAI localement !","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"8d1672056da71a0aa082fbd04684a154069c6733","title":"Devstral : comment exécuter et fine-tuner","pathname":"/docs/fr/modeles/tutorials/devstral-how-to-run-and-fine-tune","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f4d9","description":"Exécutez et fine-tunez Mistral Devstral 1.1, y compris Small-2507 et 2505.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"18ff82c8ec22aac46f659c98c536562dad45be0b","title":"Comment exécuter des LLMs locaux avec Docker : guide étape par étape","pathname":"/docs/fr/modeles/tutorials/how-to-run-llms-with-docker","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"docker","description":"Apprenez à exécuter des grands modèles de langage (LLMs) avec Docker et Unsloth sur votre appareil local.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"dc11579ec5aed1393c09ae05e1a43c23c967f615","title":"DeepSeek-V3-0324 : comment l'exécuter localement","pathname":"/docs/fr/modeles/tutorials/deepseek-v3-0324-how-to-run-locally","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f433","description":"Comment exécuter DeepSeek-V3-0324 localement en utilisant nos quantifications dynamiques qui récupèrent la précision","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de 
langage (LLMs)","emoji":"1f680"}]},{"id":"77b53d22f61fd669ad87b20c49e3612460acea2d","title":"DeepSeek-R1 : comment l'exécuter localement","pathname":"/docs/fr/modeles/tutorials/deepseek-r1-how-to-run-locally","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f40b","description":"Un guide sur la façon d'exécuter nos quantifications dynamiques 1,58 bit pour DeepSeek-R1 en utilisant llama.cpp.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"7748174ae79b078e6a9c8c4be21b8560134b0eeb","title":"DeepSeek-R1 quantification dynamique 1,58 bit","pathname":"/docs/fr/modeles/tutorials/deepseek-r1-how-to-run-locally/deepseek-r1-dynamic-1.58-bit","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f433","description":"Voir les tableaux de comparaison des performances des quantifications GGUF dynamiques d'Unsloth par rapport aux quantifications IMatrix standard.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"},{"label":"DeepSeek-R1 : comment l'exécuter localement","emoji":"1f40b"}]},{"id":"3a68359f2c51acf87bf9a70a01f9fd0cdf7c5e8a","title":"Phi-4 Reasoning : comment exécuter et fine-tuner","pathname":"/docs/fr/modeles/tutorials/phi-4-reasoning-how-to-run-and-fine-tune","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"windows","description":"Apprenez à exécuter et fine-tuner localement les modèles de raisonnement Phi-4 avec Unsloth + nos quantifications Dynamic 2.0","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"3677562dc14186ff4f32a2628a809387e65fd0ea","title":"QwQ-32B : comment l'exécuter efficacement","pathname":"/docs/fr/modeles/tutorials/qwq-32b-how-to-run-effectively","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f320","description":"Comment exécuter efficacement QwQ-32B avec nos corrections de bugs et sans générations sans fin + GGUF.","breadcrumbs":[{"label":"Modèles"},{"label":"Tutoriels sur les grands modèles de langage (LLMs)","emoji":"1f680"}]},{"id":"44b6f06033c7dbf3b6521a33337058e295acc604","title":"Inférence et déploiement","pathname":"/docs/fr/bases/inference-and-deployment","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f5a5","description":"Apprenez à enregistrer votre modèle fine-tuné afin de pouvoir l'exécuter dans votre moteur d'inférence préféré.","breadcrumbs":[{"label":"Bases"}]},{"id":"0ce33fc68eed069d43cdcfb76b9793ce71c64c1f","title":"Enregistrement en GGUF","pathname":"/docs/fr/bases/inference-and-deployment/saving-to-gguf","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"Enregistrez les modèles en 16 bits pour GGUF afin de pouvoir les utiliser avec Ollama, Jan AI, Open WebUI et plus encore !","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"}]},{"id":"e11ba1106fa93f45a313adb5372f2081e3ad0b90","title":"Décodage spéculatif","pathname":"/docs/fr/bases/inference-and-deployment/saving-to-gguf/speculative-decoding","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"Décodage spéculatif avec llama-server, llama.cpp, vLLM et plus encore pour une inférence 2x plus rapide","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"},{"label":"Enregistrement en GGUF"}]},{"id":"682151c53afcf1f6d611eb29ad62b7182b5187ea","title":"Guide de déploiement et d'inférence vLLM","pathname":"/docs/fr/bases/inference-and-deployment/vllm-guide","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"Guide pour 
enregistrer et déployer des LLMs vers vLLM pour servir des LLMs en production","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"}]},{"id":"1ef02dc5c24ab1305556a9b21ced05fca5ca43d9","title":"Arguments du moteur vLLM","pathname":"/docs/fr/bases/inference-and-deployment/vllm-guide/vllm-engine-arguments","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"},{"label":"Guide de déploiement et d'inférence vLLM"}]},{"id":"4017cca7d27bd8a0f41fd28d9e47f3f9699549d8","title":"Guide de permutation à chaud LoRA","pathname":"/docs/fr/bases/inference-and-deployment/vllm-guide/lora-hot-swapping-guide","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"},{"label":"Guide de déploiement et d'inférence vLLM"}]},{"id":"169851d8ccb3cd6dc872748a239f3bf944e2cd74","title":"Enregistrement pour Ollama","pathname":"/docs/fr/bases/inference-and-deployment/saving-to-ollama","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"}]},{"id":"36cb808bf0f748d077633c7ecbb311cce41e282a","title":"Déploiement de modèles vers LM Studio","pathname":"/docs/fr/bases/inference-and-deployment/lm-studio","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"Enregistrez les modèles en GGUF afin de pouvoir les exécuter et les déployer dans LM Studio","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"}]},{"id":"4f981fad0586ec63b4986a1a83a7a1dd61f82b94","title":"Comment installer LM Studio CLI dans le terminal Linux","pathname":"/docs/fr/bases/inference-and-deployment/lm-studio/how-to-install-lm-studio-cli-in-linux-terminal","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f47e","description":"Guide d'installation de LM Studio CLI sans interface dans une instance de terminal.","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"},{"label":"Déploiement de modèles vers LM Studio"}]},{"id":"b4083297e9c4dc4c5eedc209c17ef65ddd265e4e","title":"Guide de déploiement et d'inférence SGLang","pathname":"/docs/fr/bases/inference-and-deployment/sglang-guide","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"Guide pour enregistrer et déployer des LLMs vers SGLang pour servir des LLMs en production","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"}]},{"id":"b7833386edaca08d62cb22de0c06676726d89d43","title":"Guide de déploiement de llama-server et du point de terminaison OpenAI","pathname":"/docs/fr/bases/inference-and-deployment/llama-server-and-openai-endpoint","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"Déploiement via llama-server avec un point de terminaison compatible OpenAI","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"}]},{"id":"feb774ec9526706cf5885edfba3f100fbb320ea6","title":"Comment exécuter et déployer des LLMs sur votre téléphone iOS ou Android","pathname":"/docs/fr/bases/inference-and-deployment/deploy-llms-phone","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f4f1","description":"Tutoriel pour fine-tuner votre propre LLM et le déployer sur votre Android ou iPhone avec ExecuTorch.","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"}]},{"id":"6dba44c4c4f004bdca413ea55834649bee26efe4","title":"Dépannage de 
l'inférence","pathname":"/docs/fr/bases/inference-and-deployment/troubleshooting-inference","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"Si vous rencontrez des problèmes lors de l'exécution ou de l'enregistrement de votre modèle.","breadcrumbs":[{"label":"Bases"},{"label":"Inférence et déploiement","emoji":"1f5a5"}]},{"id":"6c4a155ae35df476974e25b66af4db620dffaf2c","title":"Comment exécuter des LLMs locaux avec Claude Code","pathname":"/docs/fr/bases/claude-code","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"claude","description":"Guide pour utiliser des modèles ouverts avec Claude Code sur votre appareil local.","breadcrumbs":[{"label":"Bases"}]},{"id":"0bb2f0a13e244fd2f0ea640c96c4e297bf83db93","title":"Comment exécuter des LLMs locaux avec OpenAI Codex","pathname":"/docs/fr/bases/codex","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"openai","description":"Utilisez des modèles ouverts avec OpenAI Codex sur votre appareil localement.","breadcrumbs":[{"label":"Bases"}]},{"id":"1bdc7fef3d7496e8390edc3622f217d2cc26329f","title":"Fine-tuning multi-GPU avec Unsloth","pathname":"/docs/fr/bases/multi-gpu-training-with-unsloth","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"rectangle-history","description":"Apprenez à fine-tuner des LLMs sur plusieurs GPU et en parallèle avec Unsloth.","breadcrumbs":[{"label":"Bases"}]},{"id":"a09f64a6086d2b9e787e7c01c6d267c6ada0a6a0","title":"Fine-tuning multi-GPU avec Distributed Data Parallel (DDP)","pathname":"/docs/fr/bases/multi-gpu-training-with-unsloth/ddp","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"Apprenez à utiliser l'interface CLI d'Unsloth pour entraîner sur plusieurs GPU avec Distributed Data Parallel (DDP) !","breadcrumbs":[{"label":"Bases"},{"label":"Fine-tuning multi-GPU avec Unsloth","icon":"rectangle-history"}]},{"id":"092cf26abaedd940ff2a72e0b51363bc08782323","title":"Guide de fine-tuning des modèles d'embedding avec Unsloth","pathname":"/docs/fr/bases/embedding-finetuning","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f50e","description":"Apprenez à fine-tuner facilement des modèles d'embedding avec Unsloth.","breadcrumbs":[{"label":"Bases"}]},{"id":"658e372c1319ec78ca0371d549bc63e3fdd303d1","title":"Fine-tunez des modèles MoE 12x plus rapidement avec Unsloth","pathname":"/docs/fr/bases/faster-moe","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f48e","description":"Entraînez localement des LLMs MoE à l'aide du guide Unsloth.","breadcrumbs":[{"label":"Bases"}]},{"id":"16606095d5a697b2f0c9e168e53a334ae0d4ca27","title":"Guide de fine-tuning Text-to-Speech (TTS)","pathname":"/docs/fr/bases/text-to-speech-tts-fine-tuning","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f50a","description":"Apprenez à fine-tuner des modèles vocaux TTS et STT avec Unsloth.","breadcrumbs":[{"label":"Bases"}]},{"id":"8e3d570b34072053937ce45a7a2125f403689ec3","title":"Unsloth Dynamic 2.0 GGUF","pathname":"/docs/fr/bases/unsloth-dynamic-2.0-ggufs","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f9a5","description":"Une grande nouvelle amélioration de nos quantifications dynamiques !","breadcrumbs":[{"label":"Bases"}]},{"id":"350e0b309b3d2e742a0ce946a613e74c5e82d980","title":"Unsloth Dynamic GGUF sur Aider Polyglot","pathname":"/docs/fr/bases/unsloth-dynamic-2.0-ggufs/unsloth-dynamic-ggufs-on-aider-polyglot","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f9a5","description":"Performances des GGUF dynamiques d'Unsloth sur les benchmarks Aider Polyglot","breadcrumbs":[{"label":"Bases"},{"label":"Unsloth Dynamic 2.0 
GGUF","emoji":"1f9a5"}]},{"id":"f47c3320cb986c4fe011958fe46fbbdef0e37e35","title":"Guide d'appel d'outils pour les LLMs locaux","pathname":"/docs/fr/bases/tool-calling-guide-for-local-llms","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"screwdriver-wrench","description":"","breadcrumbs":[{"label":"Bases"}]},{"id":"d5d9395669b4b71402e8f13b4b75e8385568af1e","title":"Fine-tuning de la vision","pathname":"/docs/fr/bases/vision-fine-tuning","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f441","description":"Apprenez à fine-tuner des LLMs vision/multimodaux avec Unsloth","breadcrumbs":[{"label":"Bases"}]},{"id":"80484284c3437da1a5e2fc9cf043834fffd2943e","title":"Dépannage et FAQ","pathname":"/docs/fr/bases/troubleshooting-and-faqs","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"26a0","description":"Conseils pour résoudre les problèmes et questions fréquemment posées.","breadcrumbs":[{"label":"Bases"}]},{"id":"a198cfc28f608fe1b7c87d4b263f987c276c6a81","title":"Hugging Face Hub, débogage XET","pathname":"/docs/fr/bases/troubleshooting-and-faqs/hugging-face-hub-xet-debugging","siteSpaceId":"sitesp_TGKTc","lang":"fr","description":"Débogage, dépannage des téléchargements bloqués, coincés et lents","breadcrumbs":[{"label":"Bases"},{"label":"Dépannage et FAQ","emoji":"26a0"}]},{"id":"62fe9263ff68a062df2ebefa3b02ed09c677ebb4","title":"Modèles de chat","pathname":"/docs/fr/bases/chat-templates","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f4ac","description":"Apprenez les bases et les options de personnalisation des modèles de chat, y compris les formats Conversational, ChatML, ShareGPT, Alpaca et plus encore !","breadcrumbs":[{"label":"Bases"}]},{"id":"3e2629b7af8e46722a552d41673baae8c9e21d35","title":"Indicateurs d'environnement Unsloth","pathname":"/docs/fr/bases/unsloth-environment-flags","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f6e0","description":"Indicateurs avancés qui peuvent être utiles si vous voyez des fine-tunings cassés, ou si vous voulez désactiver certaines choses.","breadcrumbs":[{"label":"Bases"}]},{"id":"7e94174930795ab1dc771195c50c516628ce2655","title":"Pré-entraînement continu","pathname":"/docs/fr/bases/continued-pretraining","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"267b","description":"Également appelé fine-tuning continu. 
Unsloth vous permet de poursuivre le pré-entraînement afin qu'un modèle puisse apprendre une nouvelle langue.","breadcrumbs":[{"label":"Bases"}]},{"id":"be565689d8cadaf11a6d552e700eb2a0e7fd3f16","title":"Fine-tuning à partir du dernier point de contrôle","pathname":"/docs/fr/bases/finetuning-from-last-checkpoint","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f3c1","description":"Les points de contrôle vous permettent d'enregistrer la progression de votre fine-tuning afin de pouvoir le mettre en pause puis le reprendre.","breadcrumbs":[{"label":"Bases"}]},{"id":"d748fef4faffef5d30e5a815225fd0b8ce113c7f","title":"Benchmarks Unsloth","pathname":"/docs/fr/bases/unsloth-benchmarks","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"1f4ca","description":"Benchmarks enregistrés par Unsloth sur les GPU NVIDIA.","breadcrumbs":[{"label":"Bases"}]},{"id":"3da83f3b5b7b14de7eeb51382389ba36f855fca1","title":"Entraînement de LLM 3x plus rapide avec les kernels Unsloth + packing","pathname":"/docs/fr/blog/3x-faster-training-packing","siteSpaceId":"sitesp_TGKTc","lang":"fr","emoji":"26a1","description":"Apprenez comment Unsloth augmente le débit d'entraînement et élimine le gaspillage dû au padding pour le fine-tuning.","breadcrumbs":[{"label":"Blog"}]},{"id":"4a5176a47bc9d614733d5b512a5f28361c0e8c76","title":"Fine-tuning avec longueur de contexte de 500K","pathname":"/docs/fr/blog/500k-context-length-fine-tuning","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"ruler-combined","description":"Apprenez à activer le fine-tuning avec une fenêtre de contexte de plus de 500K tokens avec Unsloth.","breadcrumbs":[{"label":"Blog"}]},{"id":"e2c1707a97ba45cd1c604e94446a98e276e15828","title":"Entraînement conscient de la quantification (QAT)","pathname":"/docs/fr/blog/quantization-aware-training-qat","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"down-left-and-up-right-to-center","description":"Quantifiez des modèles en 4 bits avec Unsloth et PyTorch pour récupérer la précision.","breadcrumbs":[{"label":"Blog"}]},{"id":"4cf0bfc3d59f854858a4fffec4eab50d6dcc52ef","title":"Fine-tuning des LLMs sur NVIDIA DGX Station avec Unsloth","pathname":"/docs/fr/blog/dgx-station","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"microchip-ai","description":"Tutoriel NVIDIA DGX Station sur la façon de fine-tuner avec les notebooks d'Unsloth.","breadcrumbs":[{"label":"Blog"}]},{"id":"1cb27eb4fb55038e3ce0244a0b0ab5242959a416","title":"Comment fine-tuner des LLMs avec Unsloth et Docker","pathname":"/docs/fr/blog/how-to-fine-tune-llms-with-unsloth-and-docker","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"docker","description":"Apprenez à fine-tuner des LLMs ou à faire de l'apprentissage par renforcement (RL) avec l'image Docker d'Unsloth.","breadcrumbs":[{"label":"Blog"}]},{"id":"6b63496de50aea16b20eb35d7ed91459bc683484","title":"Fine-tuning des LLMs avec NVIDIA DGX Spark et Unsloth","pathname":"/docs/fr/blog/fine-tuning-llms-with-nvidia-dgx-spark-and-unsloth","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"sparkle","description":"Tutoriel sur la façon de fine-tuner et de faire de l'apprentissage par renforcement (RL) avec OpenAI gpt-oss sur NVIDIA DGX Spark.","breadcrumbs":[{"label":"Blog"}]},{"id":"fe305725c6722e64d44672e8c15ea8ebdabed30c","title":"Fine-tuning des LLMs avec Blackwell, la série RTX 50 et Unsloth","pathname":"/docs/fr/blog/fine-tuning-llms-with-blackwell-rtx-50-series-and-unsloth","siteSpaceId":"sitesp_TGKTc","lang":"fr","icon":"microchip","description":"Apprenez à fine-tuner des LLMs sur les GPU NVIDIA 
Blackwell, série RTX 50 et B200 avec notre guide étape par étape.","breadcrumbs":[{"label":"Blog"}]}]}