gptel: Fix prompt collection bug + linting

* gptel.el: Update package description.

* gptel-gemini.el (gptel--request-data, gptel--parse-buffer): Add
the model temperature to the request correctly.

* gptel-ollama.el (gptel--parse-buffer): Ensure that newlines are
trimmed correctly even when `gptel-prompt-prefix-string` and
`gptel-response-prefix-string` are absent.  Fix formatting and
linter warnings.

* gptel-openai.el (gptel--parse-buffer): Ditto.
Karthik Chikmagalur 2023-12-20 14:23:45 -08:00
parent 3dd00a7457
commit 38095eaed5
4 changed files with 37 additions and 21 deletions

gptel-gemini.el

@@ -27,6 +27,10 @@
 (require 'cl-generic)
 (require 'map)
+(declare-function prop-match-value "text-property-search")
+(declare-function text-property-search-backward "text-property-search")
+(declare-function json-read "json")
+
 ;;; Gemini
 (cl-defstruct
     (gptel-gemini (:constructor gptel--make-gemini)
@@ -61,7 +65,7 @@
     (when gptel-temperature
       (setq params
             (plist-put params
-                       :temperature (max temperature 1.0))))
+                       :temperature (max gptel-temperature 1.0))))
     (when gptel-max-tokens
       (setq params
             (plist-put params
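This hunk is the temperature fix named in the commit message: the old code read the free variable `temperature', which would signal a void-variable error (or silently pick up an unrelated binding) whenever a Gemini request was built; the user option is `gptel-temperature'. A minimal sketch of the corrected parameter assembly, assuming `gptel-temperature' is bound:

    ;; Sketch only: mirrors the corrected plist construction above.
    (let ((params nil))
      (when gptel-temperature
        (setq params (plist-put params
                                :temperature (max gptel-temperature 1.0))))
      params)
    ;; With `gptel-temperature' at its default of 1.0 this yields
    ;; (:temperature 1.0); `max' clamps the value to at least 1.0.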
@@ -88,8 +92,7 @@
                 (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
                         (regexp-quote (gptel-prompt-prefix-string)))
                 (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
-                        (regexp-quote (gptel-response-prefix-string))))
-                )
+                        (regexp-quote (gptel-response-prefix-string))))))
           prompts)
          (and max-entries (cl-decf max-entries)))
     prompts))
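The regexp shape used here, and introduced in gptel-ollama.el and gptel-openai.el below, wraps the prefix in an optional shy group, \\(?:...\\)?. With the old mandatory form, a missing prefix meant `string-trim' found no match at either end and left stray newlines in place. A minimal sketch of the difference, using a hypothetical "### " prefix (not a gptel default):

    (require 'subr-x)  ; for `string-trim' on older Emacs versions

    (let ((text "\n\nHello there\n\n")
          (prefix "### "))              ; hypothetical prefix string
      ;; Old pattern: the prefix is mandatory, nothing matches at
      ;; either end of TEXT, so the stray newlines survive:
      (string-trim text
                   (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote prefix))
                   (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote prefix)))
      ;; => "\n\nHello there\n\n"
      ;; New pattern: the prefix is optional, so whitespace alone matches:
      (string-trim text
                   (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*" (regexp-quote prefix))
                   (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*" (regexp-quote prefix))))
    ;; => "Hello there"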

gptel-ollama.el

@@ -20,7 +20,7 @@
 ;;; Commentary:

-;; This file adds support for the Ollama LLM API to gptel
+;; This file adds support for the Ollama LLM API to gptel

 ;;; Code:

 (require 'gptel)
@@ -92,9 +92,12 @@ Ollama models.")
           (string-trim
            (buffer-substring-no-properties (prop-match-beginning prop)
                                            (prop-match-end prop))
-           (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-prompt-prefix-string)))
-           (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-response-prefix-string))))
-          ""))))))
+           (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                   (regexp-quote (gptel-prompt-prefix-string)))
+           (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                   (regexp-quote (gptel-response-prefix-string))))
+          "")))
     prompts)))

 ;;;###autoload
 (cl-defun gptel-make-ollama
@@ -118,13 +121,13 @@ ENDPOINT (optional) is the API endpoint for completions, defaults to
 \"/api/generate\".

 HEADER (optional) is for additional headers to send with each
-request. It should be an alist or a function that returns an
+request.  It should be an alist or a function that returns an
 alist, like:
 ((\"Content-Type\" . \"application/json\"))

 KEY (optional) is a variable whose value is the API key, or
-function that returns the key. This is typically not required for
-local models like Ollama.
+function that returns the key.  This is typically not required
+for local models like Ollama.

 Example:
 -------
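The docstring above belongs to `gptel-make-ollama'. For context, a typical registration looks like the sketch below; the host, port and model name are illustrative assumptions, not part of this commit:

    ;; Register a local Ollama backend (example values).
    (gptel-make-ollama "Ollama"
      :host "localhost:11434"           ; Ollama's default address
      :stream t                         ; stream responses as they arrive
      :models '("mistral:latest"))      ; models you have pulled locally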

gptel-openai.el

@@ -40,6 +40,8 @@
 (declare-function prop-match-value "text-property-search")
 (declare-function text-property-search-backward "text-property-search")
 (declare-function json-read "json")
+(declare-function gptel-prompt-prefix-string "gptel")
+(declare-function gptel-response-prefix-string "gptel")

 ;;; Common backend struct for LLM support
 (cl-defstruct
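The two new `declare-function' forms are compile-time declarations only: they tell the byte-compiler that these functions are defined in gptel.el, silencing "function is not known to be defined" warnings without requiring that file (which would presumably create a load cycle, since gptel.el loads this one). The general shape, with placeholder names:

    ;; Declare, for the byte-compiler only, that `some-helper' (one
    ;; argument) lives in some-file.el; nothing is loaded at runtime.
    (declare-function some-helper "some-file" (arg))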
@@ -100,8 +102,10 @@
           (string-trim
            (buffer-substring-no-properties (prop-match-beginning prop)
                                            (prop-match-end prop))
-           (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-prompt-prefix-string)))
-           (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-response-prefix-string)))))
+           (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                   (regexp-quote (gptel-prompt-prefix-string)))
+           (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                   (regexp-quote (gptel-response-prefix-string)))))
          prompts)
         (and max-entries (cl-decf max-entries)))
     (cons (list :role "system"

gptel.el

@@ -41,13 +41,14 @@
 ;; - You can go back and edit your previous prompts or LLM responses when
 ;;   continuing a conversation.  These will be fed back to the model.
 ;;
-;; Requirements for ChatGPT/Azure:
+;; Requirements for ChatGPT, Azure or Gemini:
 ;;
-;; - You need an OpenAI API key.  Set the variable `gptel-api-key' to the key or
-;;   to a function of no arguments that returns the key.  (It tries to use
+;; - You need an appropriate API key.  Set the variable `gptel-api-key' to the
+;;   key or to a function of no arguments that returns the key.  (It tries to use
 ;;   `auth-source' by default)
 ;;
 ;; - For Azure: define a gptel-backend with `gptel-make-azure', which see.
+;; - For Gemini: define a gptel-backend with `gptel-make-gemini', which see.
 ;;
 ;; For local models using Ollama or GPT4All:
 ;;
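As the updated commentary says, `gptel-api-key' accepts either the key itself or a function of no arguments. A minimal sketch of both, where the auth-source host is an illustrative assumption:

    ;; Either a literal key...
    (setq gptel-api-key "sk-...")       ; placeholder, not a real key
    ;; ...or a function that fetches it, e.g. from auth-source:
    (setq gptel-api-key
          (lambda ()
            (auth-source-pick-first-password :host "api.openai.com")))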
@@ -72,15 +73,20 @@
 ;;   model, or choose to redirect the input or output elsewhere (such as to the
 ;;   kill ring).
 ;;
-;; - If using `org-mode': You can save this buffer to a file.  When opening this
-;;   file, turning on `gptel-mode' will allow resuming the conversation.
+;; - You can save this buffer to a file.  When opening this file, turning on
+;;   `gptel-mode' will allow resuming the conversation.
 ;;
 ;; To use this in any buffer:
 ;;
-;; - Select a region of text and call `gptel-send'.  Call with a prefix argument
-;;   to access the menu.  The contents of the buffer up to (point) are used
-;;   if no region is selected.
-;; - You can select previous prompts and responses to continue the conversation.
+;; - Call `gptel-send' to send the text up to the cursor.  Select a region to
+;;   send only the region.
+;;
+;; - You can select previous prompts and responses to
+;;   continue the conversation.
+;;
+;; - Call `gptel-send' with a prefix argument to access a menu where you can set
+;;   your backend, model and other parameters, or to redirect the
+;;   prompt/response.
 ;;
 ;; Finally, gptel offers a general purpose API for writing LLM interactions
 ;; that suit how you work, see `gptel-request'.
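A minimal sketch of that API, with an illustrative prompt and callback (the callback signature, a response string plus an info plist, is gptel's documented convention):

    ;; Send a one-off prompt and handle the reply asynchronously.
    (gptel-request "Write a haiku about regexps"
      :callback (lambda (response info)
                  (if response
                      (message "gptel: %s" response)
                    (message "gptel request failed: %s"
                             (plist-get info :status)))))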