diff --git a/gptel-gemini.el b/gptel-gemini.el
index ca1c06b..f05f33b 100644
--- a/gptel-gemini.el
+++ b/gptel-gemini.el
@@ -27,6 +27,10 @@
 (require 'cl-generic)
 (require 'map)
 
+(declare-function prop-match-value "text-property-search")
+(declare-function text-property-search-backward "text-property-search")
+(declare-function json-read "json")
+
 ;;; Gemini
 (cl-defstruct (gptel-gemini (:constructor gptel--make-gemini)
@@ -61,7 +65,7 @@
     (when gptel-temperature
       (setq params
             (plist-put params
-                       :temperature (max temperature 1.0))))
+                       :temperature (max gptel-temperature 1.0))))
     (when gptel-max-tokens
       (setq params
             (plist-put params
@@ -88,8 +92,7 @@
                (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
                        (regexp-quote (gptel-prompt-prefix-string)))
                (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
-                       (regexp-quote (gptel-response-prefix-string))))
-               )
+                       (regexp-quote (gptel-response-prefix-string))))))
           prompts)
      (and max-entries (cl-decf max-entries)))
    prompts))
diff --git a/gptel-ollama.el b/gptel-ollama.el
index 2bfd306..08b6db8 100644
--- a/gptel-ollama.el
+++ b/gptel-ollama.el
@@ -20,7 +20,7 @@
 ;;; Commentary:
 
-;; This file adds support for the Ollama LLM API to gptel 
+;; This file adds support for the Ollama LLM API to gptel
 
 ;;; Code:
 
 (require 'gptel)
@@ -92,9 +92,12 @@ Ollama models.")
           (string-trim
            (buffer-substring-no-properties (prop-match-beginning prop)
                                            (prop-match-end prop))
-           (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-prompt-prefix-string)))
-           (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-response-prefix-string))))
-          "")))))) 
+           (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                   (regexp-quote (gptel-prompt-prefix-string)))
+           (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                   (regexp-quote (gptel-response-prefix-string)))
+           "")))
+    prompts)))
 
 ;;;###autoload
 (cl-defun gptel-make-ollama
@@ -118,13 +121,13 @@ ENDPOINT (optional) is the API endpoint for completions, defaults to
 \"/api/generate\".
 
 HEADER (optional) is for additional headers to send with each
-request. It should be an alist or a function that retuns an 
+request. It should be an alist or a function that returns an
 alist, like:
 ((\"Content-Type\" . \"application/json\"))
 
 KEY (optional) is a variable whose value is the API key, or
-function that returns the key. This is typically not required for
-local models like Ollama.
+function that returns the key. This is typically not required
+for local models like Ollama.
 
 Example:
 -------
diff --git a/gptel-openai.el b/gptel-openai.el
index cfd2b98..018cc4b 100644
--- a/gptel-openai.el
+++ b/gptel-openai.el
@@ -40,6 +40,8 @@
 (declare-function prop-match-value "text-property-search")
 (declare-function text-property-search-backward "text-property-search")
 (declare-function json-read "json")
+(declare-function gptel-prompt-prefix-string "gptel")
+(declare-function gptel-response-prefix-string "gptel")
 
 ;;; Common backend struct for LLM support
 (cl-defstruct
@@ -100,8 +102,10 @@
              (string-trim
               (buffer-substring-no-properties (prop-match-beginning prop)
                                               (prop-match-end prop))
-              (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-prompt-prefix-string)))
-              (format "[\t\r\n ]*%s[\t\r\n ]*" (regexp-quote (gptel-response-prefix-string))))
+              (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                      (regexp-quote (gptel-prompt-prefix-string)))
+              (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*"
+                      (regexp-quote (gptel-response-prefix-string)))))
            prompts)
      (and max-entries (cl-decf max-entries)))
    (cons (list :role "system"
diff --git a/gptel.el b/gptel.el
index 0f9bdf9..1a61e85 100644
--- a/gptel.el
+++ b/gptel.el
@@ -41,13 +41,14 @@
 ;; - You can go back and edit your previous prompts or LLM responses when
 ;;   continuing a conversation. These will be fed back to the model.
 ;;
-;; Requirements for ChatGPT/Azure:
+;; Requirements for ChatGPT, Azure or Gemini:
 ;;
-;; - You need an OpenAI API key. Set the variable `gptel-api-key' to the key or
-;;   to a function of no arguments that returns the key. (It tries to use
+;; - You need an appropriate API key. Set the variable `gptel-api-key' to the
+;;   key or to a function of no arguments that returns the key. (It tries to use
 ;;   `auth-source' by default)
 ;;
 ;; - For Azure: define a gptel-backend with `gptel-make-azure', which see.
+;; - For Gemini: define a gptel-backend with `gptel-make-gemini', which see.
 ;;
 ;; For local models using Ollama or GPT4All:
 ;;
@@ -72,15 +73,20 @@
 ;;   model, or choose to redirect the input or output elsewhere (such as to the
 ;;   kill ring).
 ;;
-;; - If using `org-mode': You can save this buffer to a file. When opening this
-;;   file, turning on `gptel-mode' will allow resuming the conversation.
+;; - You can save this buffer to a file. When opening this file, turning on
+;;   `gptel-mode' will allow resuming the conversation.
 ;;
 ;; To use this in any buffer:
 ;;
-;; - Select a region of text and call `gptel-send'. Call with a prefix argument
-;;   to access the menu. The contents of the buffer up to (point) are used
-;;   if no region is selected.
-;; - You can select previous prompts and responses to continue the conversation.
+;; - Call `gptel-send' to send the text up to the cursor. Select a region to
+;;   send only the region.
+;;
+;; - You can select previous prompts and responses to
+;;   continue the conversation.
+;;
+;; - Call `gptel-send' with a prefix argument to access a menu where you can set
+;;   your backend, model and other parameters, or to redirect the
+;;   prompt/response.
 ;;
 ;; Finally, gptel offers a general purpose API for writing LLM ineractions
 ;; that suit how you work, see `gptel-request'.
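
Reviewer note, not part of the patch: the `gptel--parse-buffer' hunks all make
the same change, wrapping the prompt/response prefix in an optional shy group
(`\\(?:%s\\)?') inside the regexps handed to `string-trim'. With the old
pattern the prefix was mandatory, so a prompt that lacked it was not trimmed
at all; the new pattern strips the surrounding whitespace either way. A
minimal sketch, using a hypothetical "### " prefix:

(require 'subr-x)                       ; string-trim
(let* ((prefix (regexp-quote "### "))
       (old-re (format "[\t\r\n ]*%s[\t\r\n ]*" prefix))
       (new-re (format "[\t\r\n ]*\\(?:%s\\)?[\t\r\n ]*" prefix)))
  (list
   ;; Prefix present: both patterns strip it.
   (string-trim "### hello" old-re old-re)    ; => "hello"
   (string-trim "### hello" new-re new-re)    ; => "hello"
   ;; Prefix absent: the old pattern fails to match, so the
   ;; whitespace survives; the optional group still trims it.
   (string-trim "  hello  " old-re old-re)    ; => "  hello  "
   (string-trim "  hello  " new-re new-re)))  ; => ("hello" last)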
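
Also not part of the patch: the added `declare-function' forms are
compile-time declarations only. They silence the byte-compiler's "not known
to be defined" warnings for functions that live in libraries gptel loads
lazily; they emit no code and load nothing at runtime. For example:

;; Promise the byte-compiler that this function exists in
;; text-property-search.el ...
(declare-function text-property-search-backward "text-property-search")
;; ... the library itself must still be loaded before the first call:
(require 'text-property-search)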