diff --git a/ext/doom/config.el b/ext/doom/config.el
index 7441ee0..3275017 100644
--- a/ext/doom/config.el
+++ b/ext/doom/config.el
@@ -624,12 +624,18 @@
   ;; Default Model
   ;; TODO Consider switching to coder
   (setq! gptel-backend
-         (gptel-make-openai "llama-cpp"
+         (gptel-make-openai "llama-cpp-chat"
            :stream t
            :protocol "http"
-           :host "100.64.0.3:18080"
-           :models '("llama-cpp-qwen-coder")))
-  (setq! gptel-model "llama-cpp-qwen-coder")
+           :host "100.64.0.3:18082"
+           :models '("llama-cpp-qwen3")))
+  (gptel-make-openai "llama-cpp"
+    :stream t
+    :protocol "http"
+    :host "100.64.0.3:18080"
+    :models '("llama-cpp-qwen-coder"))
+
+  (setq! gptel-model "llama-cpp-qwen3")
 
   ;; nicer org mode foo
   (setq! gptel-default-mode 'org-mode)
@@ -638,13 +644,6 @@
   (setf (alist-get 'org-mode gptel-prompt-prefix-alist) "@user ")
   (setf (alist-get 'org-mode gptel-response-prefix-alist) "@assistant ")
 
-  ;; Qwen-2.5-Coder-32B / Tabby model
-  (gptel-make-openai "llama-cpp-chat"
-    :stream t
-    :protocol "http"
-    :host "100.64.0.3:18082"
-    :models '("llama-cpp-qwen3"))
-
   ;; Groq API
   (gptel-make-openai "groq"
     :host "api.groq.com"
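
Not part of the patch: a minimal sketch of flipping gptel back to the coder endpoint at runtime, using only the variables and calls that appear in the diff above (`gptel-backend`, `gptel-model`, `gptel-make-openai`, `setq!`); the function name `my/gptel-use-coder` is hypothetical.

;; Sketch only -- assumes the same llama-cpp coder endpoint as config.el.
(defun my/gptel-use-coder ()
  "Point gptel at the llama-cpp coder backend from config.el."
  (interactive)
  ;; `gptel-make-openai' registers the backend under its name and returns
  ;; the backend object, so it can feed `gptel-backend' directly.
  (setq! gptel-backend (gptel-make-openai "llama-cpp"
                         :stream t
                         :protocol "http"
                         :host "100.64.0.3:18080"
                         :models '("llama-cpp-qwen-coder")))
  (setq! gptel-model "llama-cpp-qwen-coder"))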