gptel-openai: default :header key to simplify config
* gptel.el (gptel--openai): Don't specify header.

* gptel-openai.el (gptel-make-openai): Use a key-aware lambda for the header argument. This should make it easier to define new OpenAI-style API backends (see #177, #184).

* README.org: Update with instructions for together.ai and Anyscale, both of which provide OpenAI-style APIs. Clean up the config blocks for the other backends.
This commit is contained in:
parent
d0c685e501
commit
8a25058eed
3 changed files with 83 additions and 37 deletions
89
README.org
89
README.org
|
@@ -15,7 +15,8 @@ GPTel is a simple Large Language Model chat client for Emacs, with support for m
|
|||
| Llamafile | ✓ | [[https://github.com/Mozilla-Ocho/llamafile#quickstart][Local Llamafile server]] |
|
||||
| Kagi FastGPT | ✓ | [[https://kagi.com/settings?p=api][API key]] |
|
||||
| Kagi Summarizer | ✓ | [[https://kagi.com/settings?p=api][API key]] |
|
||||
| PrivateGPT | Planned | - |
|
||||
| together.ai | ✓ | [[https://api.together.xyz/settings/api-keys][API key]] |
|
||||
| Anyscale | ✓ | [[https://docs.endpoints.anyscale.com/][API key]] |
|
||||
|
||||
*General usage*: ([[https://www.youtube.com/watch?v=bsRnh_brggM][YouTube Demo]])
|
||||
|
||||
|
@@ -52,6 +53,8 @@ GPTel uses Curl if available, but falls back to url-retrieve to work without ext
|
|||
- [[#gemini][Gemini]]
|
||||
- [[#llamacpp-or-llamafile][Llama.cpp or Llamafile]]
|
||||
- [[#kagi-fastgpt--summarizer][Kagi (FastGPT & Summarizer)]]
|
||||
- [[#togetherai][together.ai]]
|
||||
- [[#anyscale][Anyscale]]
|
||||
- [[#usage][Usage]]
|
||||
- [[#in-any-buffer][In any buffer:]]
|
||||
- [[#in-a-dedicated-chat-buffer][In a dedicated chat buffer:]]
|
||||
|
@@ -133,9 +136,8 @@ machine api.openai.com login apikey password TOKEN
|
|||
|
||||
Register a backend with
|
||||
#+begin_src emacs-lisp
|
||||
(gptel-make-azure
|
||||
"Azure-1" ;Name, whatever you'd like
|
||||
:protocol "https" ;optional -- https is the default
|
||||
(gptel-make-azure "Azure-1" ;Name, whatever you'd like
|
||||
:protocol "https" ;Optional -- https is the default
|
||||
:host "YOUR_RESOURCE_NAME.openai.azure.com"
|
||||
:endpoint "/openai/deployments/YOUR_DEPLOYMENT_NAME/chat/completions?api-version=2023-05-15" ;or equivalent
|
||||
:stream t ;Enable streaming responses
|
||||
|
@@ -148,10 +150,8 @@ You can pick this backend from the menu when using gptel. (see [[#usage][Usage]]
|
|||
|
||||
If you want it to be the default, set it as the default value of =gptel-backend=:
|
||||
#+begin_src emacs-lisp
|
||||
(setq-default gptel-backend
|
||||
(gptel-make-azure
|
||||
"Azure-1"
|
||||
...))
|
||||
(setq-default gptel-backend (gptel-make-azure "Azure-1" ...)
|
||||
gptel-model "gpt-3.5-turbo")
|
||||
#+end_src
|
||||
#+html: </details>
|
||||
|
||||
|
@@ -161,8 +161,7 @@ If you want it to be the default, set it as the default value of =gptel-backend=
|
|||
|
||||
Register a backend with
|
||||
#+begin_src emacs-lisp
|
||||
(gptel-make-gpt4all
|
||||
"GPT4All" ;Name of your choosing
|
||||
(gptel-make-gpt4all "GPT4All" ;Name of your choosing
|
||||
:protocol "http"
|
||||
:host "localhost:4891" ;Where it's running
|
||||
:models '("mistral-7b-openorca.Q4_0.gguf")) ;Available models
|
||||
|
@@ -186,11 +185,10 @@ You can pick this backend from the menu when using gptel (see [[#usage][Usage]])
|
|||
|
||||
Register a backend with
|
||||
#+begin_src emacs-lisp
|
||||
(gptel-make-ollama
|
||||
"Ollama" ;Any name of your choosing
|
||||
(gptel-make-ollama "Ollama" ;Any name of your choosing
|
||||
:host "localhost:11434" ;Where it's running
|
||||
:models '("mistral:latest") ;Installed models
|
||||
:stream t) ;Stream responses
|
||||
:stream t ;Stream responses
|
||||
:models '("mistral:latest")) ;List of models
|
||||
#+end_src
|
||||
These are the required parameters, refer to the documentation of =gptel-make-ollama= for more.
|
||||
|
||||
|
@@ -211,8 +209,7 @@ You can pick this backend from the menu when using gptel (see [[#usage][Usage]])
|
|||
Register a backend with
|
||||
#+begin_src emacs-lisp
|
||||
;; :key can be a function that returns the API key.
|
||||
(gptel-make-gemini
|
||||
"Gemini"
|
||||
(gptel-make-gemini "Gemini"
|
||||
:key "YOUR_GEMINI_API_KEY"
|
||||
:stream t)
|
||||
#+end_src
|
||||
|
@@ -237,18 +234,18 @@ You can pick this backend from the menu when using gptel (see [[#usage][Usage]])
|
|||
|
||||
Register a backend with
|
||||
#+begin_src emacs-lisp
|
||||
(gptel-make-openai ;Not a typo, same API as OpenAI
|
||||
"llama-cpp" ;Any name
|
||||
;; Llama.cpp offers an OpenAI compatible API
|
||||
(gptel-make-openai "llama-cpp" ;Any name
|
||||
:stream t ;Stream responses
|
||||
:protocol "http"
|
||||
:host "localhost:8000" ;Llama.cpp server location, typically localhost:8080 for Llamafile
|
||||
:key nil ;No key needed
|
||||
:host "localhost:8000" ;Llama.cpp server location
|
||||
:models '("test")) ;Any names, doesn't matter for Llama
|
||||
#+end_src
|
||||
These are the required parameters, refer to the documentation of =gptel-make-openai= for more.
|
||||
|
||||
You can pick this backend from the menu when using gptel (see [[#usage][Usage]]), or set this as the default value of =gptel-backend=:
|
||||
#+begin_src emacs-lisp
|
||||
;; OPTIONAL configuration
|
||||
(setq-default gptel-backend (gptel-make-openai "llama-cpp" ...)
|
||||
gptel-model "test")
|
||||
#+end_src
|
||||
|
@@ -266,9 +263,8 @@ Kagi's FastGPT model and the Universal Summarizer are both supported. A couple
|
|||
|
||||
Register a backend with
|
||||
#+begin_src emacs-lisp
|
||||
(gptel-make-kagi
|
||||
"Kagi" ;any name
|
||||
:key "YOUR_KAGI_API_KEY") ;:key can be a function
|
||||
(gptel-make-kagi "Kagi" ;any name
|
||||
:key "YOUR_KAGI_API_KEY") ;can be a function that returns the key
|
||||
#+end_src
|
||||
These are the required parameters, refer to the documentation of =gptel-make-kagi= for more.
|
||||
|
||||
|
@@ -282,6 +278,53 @@ You can pick this backend and the model (fastgpt/summarizer) from the transient
|
|||
|
||||
The alternatives to =fastgpt= include =summarize:cecil=, =summarize:agnes=, =summarize:daphne= and =summarize:muriel=. The difference between the summarizer engines is [[https://help.kagi.com/kagi/api/summarizer.html#summarization-engines][documented here]].
|
||||
|
||||
#+html: </details>
|
||||
#+html: <details><summary>
|
||||
**** together.ai
|
||||
#+html: </summary>
|
||||
|
||||
Register a backend with
|
||||
#+begin_src emacs-lisp
|
||||
;; Together.ai offers an OpenAI compatible API
|
||||
(gptel-make-openai "TogetherAI" ;Any name you want
|
||||
:host "api.together.xyz"
|
||||
:key "your-api-key" ;can be a function that returns the key
|
||||
:stream t
|
||||
:models '(;; has many more, check together.ai
|
||||
"mistralai/Mixtral-8x7B-Instruct-v0.1"
|
||||
"codellama/CodeLlama-13b-Instruct-hf"
|
||||
"codellama/CodeLlama-34b-Instruct-hf"))
|
||||
#+end_src
|
||||
|
||||
You can pick this backend from the menu when using gptel (see [[#usage][Usage]]), or set this as the default value of =gptel-backend=:
|
||||
#+begin_src emacs-lisp
|
||||
;; OPTIONAL configuration
|
||||
(setq-default gptel-backend (gptel-make-openai "TogetherAI" ...)
|
||||
gptel-model "mistralai/Mixtral-8x7B-Instruct-v0.1")
|
||||
#+end_src
|
||||
|
||||
#+html: </details>
|
||||
#+html: <details><summary>
|
||||
**** Anyscale
|
||||
#+html: </summary>
|
||||
|
||||
Register a backend with
|
||||
#+begin_src emacs-lisp
|
||||
;; Anyscale offers an OpenAI compatible API
|
||||
(gptel-make-openai "Anyscale" ;Any name you want
|
||||
:host "api.endpoints.anyscale.com"
|
||||
:key "your-api-key" ;can be a function that returns the key
|
||||
:models '(;; has many more, check anyscale
|
||||
"mistralai/Mixtral-8x7B-Instruct-v0.1"))
|
||||
#+end_src
|
||||
|
||||
You can pick this backend from the menu when using gptel (see [[#usage][Usage]]), or set this as the default value of =gptel-backend=:
|
||||
#+begin_src emacs-lisp
|
||||
;; OPTIONAL configuration
|
||||
(setq-default gptel-backend (gptel-make-openai "Anyscale" ...)
|
||||
gptel-model "mistralai/Mixtral-8x7B-Instruct-v0.1")
|
||||
#+end_src
|
||||
|
||||
#+html: </details>
|
||||
|
||||
** Usage
|
||||
|
|
|
@@ -115,7 +115,10 @@
|
|||
|
||||
;;;###autoload
|
||||
(cl-defun gptel-make-openai
|
||||
(name &key header models stream key
|
||||
(name &key models stream key
|
||||
(header
|
||||
(lambda () (when-let (key (gptel--get-api-key))
|
||||
`(("Authorization" . ,(concat "Bearer " key))))))
|
||||
(host "api.openai.com")
|
||||
(protocol "https")
|
||||
(endpoint "/v1/chat/completions"))
|
||||
|
|
4
gptel.el
4
gptel.el
|
@@ -431,10 +431,10 @@ with differing settings.")
|
|||
(defvar gptel--openai
|
||||
(gptel-make-openai
|
||||
"ChatGPT"
|
||||
:header (lambda () `(("Authorization" . ,(concat "Bearer " (gptel--get-api-key)))))
|
||||
:key 'gptel-api-key
|
||||
:stream t
|
||||
:models '("gpt-3.5-turbo" "gpt-3.5-turbo-16k" "gpt-4" "gpt-4-1106-preview")))
|
||||
:models '("gpt-3.5-turbo" "gpt-3.5-turbo-16k"
|
||||
"gpt-4" "gpt-4-1106-preview")))
|
||||
|
||||
(defcustom gptel-backend gptel--openai
|
||||
"LLM backend to use.
|
||||
|
|
Loading…
Add table
Reference in a new issue