gptel: Fix prompt collection bug + linting
* gptel.el: Update package description.

* gptel-gemini.el (gptel--request-data, gptel--parse-buffer): Add
model temperature to request correctly.

* gptel-ollama.el (gptel--parse-buffer): Ensure that newlines are
trimmed correctly even when `gptel-prompt-prefix-string` and
`gptel-response-prefix-string` are absent. Fix formatting and
linter warnings.

* gptel-openai.el (gptel--parse-buffer): Ditto.
parent 3dd00a7457
commit 38095eaed5
4 changed files with 37 additions and 21 deletions
gptel-gemini.el

@@ -27,6 +27,10 @@
 (require 'cl-generic)
 (require 'map)
+(declare-function prop-match-value "text-property-search")
+(declare-function text-property-search-backward "text-property-search")
+(declare-function json-read "json")
 
 ;;; Gemini
 (cl-defstruct
   (gptel-gemini (:constructor gptel--make-gemini)
@@ -61,7 +65,7 @@
     (when gptel-temperature
       (setq params
             (plist-put params
-                       :temperature (max temperature 1.0))))
+                       :temperature (max gptel-temperature 1.0))))
     (when gptel-max-tokens
       (setq params
             (plist-put params
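For context, a small standalone sketch (the temperature value is hypothetical, not from this diff) of what the corrected clause evaluates to; the fix makes it read the user option `gptel-temperature' rather than an unbound `temperature':

    ;; Hypothetical value for illustration only.
    (let ((gptel-temperature 0.7)
          (params nil))
      (when gptel-temperature
        (setq params (plist-put params :temperature (max gptel-temperature 1.0))))
      params)
    ;; => (:temperature 1.0)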
@@ -88,8 +92,7 @@
              (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
                      (regexp-quote (gptel-prompt-prefix-string)))
              (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
-                     (regexp-quote (gptel-response-prefix-string))))
-             )
+                     (regexp-quote (gptel-response-prefix-string))))))
        prompts)
       (and max-entries (cl-decf max-entries)))
     prompts))
gptel-ollama.el

@@ -92,9 +92,12 @@ Ollama models.")
            (string-trim
             (buffer-substring-no-properties (prop-match-beginning prop)
                                             (prop-match-end prop))
-            (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-prompt-prefix-string)))
-            (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-response-prefix-string))))
-           ""))))))
+            (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                    (regexp-quote (gptel-prompt-prefix-string)))
+            (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                    (regexp-quote (gptel-response-prefix-string))))
+           "")))
+      prompts)))
 
 ;;;###autoload
 (cl-defun gptel-make-ollama
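The trimming change above can be illustrated with a small standalone sketch (the prefix and text below are hypothetical, not taken from gptel): wrapping the prefix in an optional shy group lets `string-trim' strip the surrounding newlines even when the prefix is not actually present in the text.

    (require 'subr-x)  ; for `string-trim'
    (let* ((prefix "### ")                  ; hypothetical prompt prefix
           (text "\n\nHello there\n")       ; prefix is absent from the text
           (old (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote prefix)))
           (new (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*" (regexp-quote prefix))))
      (list (string-trim text old old)      ; => "\n\nHello there\n", nothing trimmed
            (string-trim text new new)))    ; => "Hello there"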
@@ -118,13 +121,13 @@ ENDPOINT (optional) is the API endpoint for completions, defaults to
 \"/api/generate\".
 
 HEADER (optional) is for additional headers to send with each
 request. It should be an alist or a function that retuns an
 alist, like:
 ((\"Content-Type\" . \"application/json\"))
 
 KEY (optional) is a variable whose value is the API key, or
-function that returns the key. This is typically not required for
-local models like Ollama.
+function that returns the key. This is typically not required
+for local models like Ollama.
 
 Example:
 -------
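A hedged usage sketch for the backend this docstring describes (the host and model name are placeholders, not part of this change):

    (gptel-make-ollama
     "Ollama"                       ; name of the backend
     :host "localhost:11434"        ; where the Ollama server is listening
     :models '("mistral:latest")    ; models available locally
     :stream t)                     ; stream responses as they arrive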
gptel-openai.el

@@ -40,6 +40,8 @@
 (declare-function prop-match-value "text-property-search")
 (declare-function text-property-search-backward "text-property-search")
 (declare-function json-read "json")
+(declare-function gptel-prompt-prefix-string "gptel")
+(declare-function gptel-response-prefix-string "gptel")
 
 ;;; Common backend struct for LLM support
 (cl-defstruct

@@ -100,8 +102,10 @@
            (string-trim
             (buffer-substring-no-properties (prop-match-beginning prop)
                                             (prop-match-end prop))
-            (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-prompt-prefix-string)))
-            (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-response-prefix-string)))))
+            (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                    (regexp-quote (gptel-prompt-prefix-string)))
+            (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                    (regexp-quote (gptel-response-prefix-string)))))
       prompts)
      (and max-entries (cl-decf max-entries)))
    (cons (list :role "system"
gptel.el (24 changed lines)
@@ -41,13 +41,14 @@
 ;; - You can go back and edit your previous prompts or LLM responses when
 ;;   continuing a conversation. These will be fed back to the model.
 ;;
-;; Requirements for ChatGPT/Azure:
+;; Requirements for ChatGPT, Azure or Gemini:
 ;;
-;; - You need an OpenAI API key. Set the variable `gptel-api-key' to the key or
-;;   to a function of no arguments that returns the key. (It tries to use
+;; - You need an appropriate API key. Set the variable `gptel-api-key' to the
+;;   key or to a function of no arguments that returns the key. (It tries to use
 ;;   `auth-source' by default)
 ;;
 ;; - For Azure: define a gptel-backend with `gptel-make-azure', which see.
+;; - For Gemini: define a gptel-backend with `gptel-make-gemini', which see.
 ;;
 ;; For local models using Ollama or GPT4All:
 ;;
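As the updated commentary notes, a Gemini backend is defined with `gptel-make-gemini'; a minimal hedged sketch (the key string is a placeholder):

    ;; Hypothetical key for illustration; see `gptel-make-gemini' for details.
    (gptel-make-gemini "Gemini"
      :key "YOUR-GEMINI-API-KEY"    ; or a function that returns the key
      :stream t)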
@@ -72,15 +73,20 @@
 ;; model, or choose to redirect the input or output elsewhere (such as to the
 ;; kill ring).
 ;;
-;; - If using `org-mode': You can save this buffer to a file. When opening this
-;;   file, turning on `gptel-mode' will allow resuming the conversation.
+;; - You can save this buffer to a file. When opening this file, turning on
+;;   `gptel-mode' will allow resuming the conversation.
 ;;
 ;; To use this in any buffer:
 ;;
-;; - Select a region of text and call `gptel-send'. Call with a prefix argument
-;;   to access the menu. The contents of the buffer up to (point) are used
-;;   if no region is selected.
-;; - You can select previous prompts and responses to continue the conversation.
+;; - Call `gptel-send' to send the text up to the cursor. Select a region to
+;;   send only the region.
+;;
+;; - You can select previous prompts and responses to
+;;   continue the conversation.
+;;
+;; - Call `gptel-send' with a prefix argument to access a menu where you can set
+;;   your backend, model and other parameters, or to redirect the
+;;   prompt/response.
 ;;
 ;; Finally, gptel offers a general purpose API for writing LLM ineractions
 ;; that suit how you work, see `gptel-request'.
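A small optional convenience, not part of this change: bind `gptel-send' so it can be called from any buffer as the commentary describes.

    ;; Illustrative keybinding; any free key works.
    (global-set-key (kbd "C-c RET") #'gptel-send)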