From 9b3db255e85f669758cfffb8d23828bd545eef5a Mon Sep 17 00:00:00 2001
From: Karthik Chikmagalur
Date: Fri, 24 Mar 2023 17:30:02 -0700
Subject: [PATCH] gptel: Turn API parameters into defcustoms

* gptel.el (gptel--request-data, gptel--system-message-alist,
gptel--model, gptel--temperature, gptel--max-tokens): Rename API
parameters and turn them into customizable variables. They are still
buffer-local.

Rename:
`gptel--system-message-alist' to `gptel-directives'
`gptel--max-tokens' to `gptel-max-tokens'
`gptel--model' to `gptel-model'
`gptel--temperature' to `gptel-temperature'

* gptel-transient.el (gptel-system-prompt, gptel--infix-max-tokens,
gptel--infix-model, gptel--infix-temperature): Accommodating changes
when setting the renamed parameters.
---
 gptel-transient.el | 21 ++++++------
 gptel.el           | 81 +++++++++++++++++++++++++++++++++++++++-------
 2 files changed, 80 insertions(+), 22 deletions(-)

diff --git a/gptel-transient.el b/gptel-transient.el
index 79dc955..a94cc05 100644
--- a/gptel-transient.el
+++ b/gptel-transient.el
@@ -57,8 +57,7 @@ You are a helpful assistant. Answer as concisely as possible.
 Reply only with shell commands and no prose.
 You are a poet. Reply only in verse.
 
-Customize `gptel--system-message-alist' for task-specific
-prompts."
+Customize `gptel-directives' for task-specific prompts."
   [:description
    (lambda () (format "Directive: %s"
                  (truncate-string-to-width gptel--system-message (max (- (window-width) 14) 20) nil nil t)))
@@ -68,22 +67,22 @@ prompts."
    ("p" "Programming"
     (lambda () (interactive)
       (setq gptel--system-message
-            (alist-get 'programming gptel--system-message-alist)))
+            (alist-get 'programming gptel-directives)))
     :transient t)
    ("d" "Default"
     (lambda () (interactive)
       (setq gptel--system-message
-            (alist-get 'default gptel--system-message-alist)))
+            (alist-get 'default gptel-directives)))
     :transient t)
    ("w" "Writing"
     (lambda () (interactive)
       (setq gptel--system-message
-            (alist-get 'writing gptel--system-message-alist)))
+            (alist-get 'writing gptel-directives)))
     :transient t)
    ("c" "Chat"
     (lambda () (interactive)
       (setq gptel--system-message
-            (alist-get 'chat gptel--system-message-alist)))
+            (alist-get 'chat gptel-directives)))
     :transient t)])
 
 ;; TODO: Switch to dynamic Transient menus (below) once there's a new Transient release
@@ -99,7 +98,7 @@ prompts."
 ;;   "Set up suffixes for system prompt."
 ;;   (transient-parse-suffixes
 ;;    'gptel-system-prompt
-;;    (cl-loop for (type . prompt) in gptel--system-message-alist
+;;    (cl-loop for (type . prompt) in gptel-directives
 ;;             for name = (symbol-name type)
 ;;             for key = (substring name 0 1)
 ;;             collect (list (key-description key) (capitalize name)
@@ -137,7 +136,7 @@ count of the conversation so far in each message, so messages
 will get progressively longer!"
   :description "Response length (tokens)"
   :class 'transient-lisp-variable
-  :variable 'gptel--max-tokens
+  :variable 'gptel-max-tokens
   :key "<"
   :prompt "Response length in tokens (leave empty: default, 80-200: short, 200-500: long): "
   :reader 'transient-read-number-N+)
@@ -146,7 +145,7 @@ will get progressively longer!"
   "AI Model for Chat."
   :description "GPT Model: "
   :class 'transient-lisp-variable
-  :variable 'gptel--model
+  :variable 'gptel-model
   :key "m"
   :choices '("gpt-3.5-turbo-0301" "gpt-3.5-turbo" "gpt-4")
   :reader (lambda (prompt &rest _)
@@ -158,11 +157,11 @@ will get progressively longer!"
   "Temperature of request."
   :description "Randomness (0 - 2.0)"
   :class 'transient-lisp-variable
-  :variable 'gptel--temperature
+  :variable 'gptel-temperature
   :key "t"
   :reader (lambda (&rest _)
             (read-from-minibuffer "Set temperature (0.0-2.0, leave empty for default): "
-                                  (number-to-string gptel--temperature))))
+                                  (number-to-string gptel-temperature))))
 
 (transient-define-suffix gptel--suffix-send-existing ()
   "Send query in existing chat session."
diff --git a/gptel.el b/gptel.el
index 0f1f738..566df72 100644
--- a/gptel.el
+++ b/gptel.el
@@ -124,17 +124,76 @@ is only inserted in dedicated gptel buffers.
 ;; Model and interaction parameters
 (defvar-local gptel--system-message
   "You are a large language model living in Emacs and a helpful assistant. Respond concisely.")
-(defvar gptel--system-message-alist
+
+(defcustom gptel-directives
   `((default . ,gptel--system-message)
     (programming . "You are a large language model and a careful programmer. Provide code and only code as output without any additional text, prompt or note.")
     (writing . "You are a large language model and a writing assistant. Respond concisely.")
     (chat . "You are a large language model and a conversation partner. Respond concisely."))
-  "Prompt templates (directives).")
-(defvar gptel--debug nil)
-(defvar-local gptel--max-tokens nil)
-(defvar-local gptel--model "gpt-3.5-turbo")
-(defvar-local gptel--temperature 1.0)
+  "System prompts (directives) for ChatGPT.
+
+These are system instructions sent at the beginning of each
+request to ChatGPT.
+
+Each entry in this alist maps a symbol naming the directive to
+the string that is sent. To set the directive for a chat session
+interactively call `gptel-send' with a prefix argument.
+
+Note: Currently the names (default, programming, writing and
+chat) are hard-coded and only their values may be customized.
+This will be fixed in an upcoming release."
+  :group 'gptel
+  :type '(alist :key-type symbol :value-type string))
+
+(defcustom gptel-max-tokens nil
+  "Max tokens per response.
+
+This is roughly the number of words in the response. 100-300 is a
+reasonable range for short answers, 400 or more for longer
+responses.
+
+To set the target token count for a chat session interactively
+call `gptel-send' with a prefix argument.
+
+If left unset, ChatGPT will target about 40% of the total token
+count of the conversation so far in each message, so messages
+will get progressively longer!"
+  :local t
+  :group 'gptel
+  :type '(choice (integer :tag "Specify Token count")
+                 (const :tag "Default" nil)))
+
+(defcustom gptel-model "gpt-3.5-turbo"
+  "GPT Model for chat.
+
+The current options are
+- \"gpt-3.5-turbo\"
+- \"gpt-3.5-turbo-0301\"
+- \"gpt-4\" (experimental)
+
+To set the model for a chat session interactively call
+`gptel-send' with a prefix argument."
+  :local t
+  :group 'gptel
+  :type '(choice
+          (const :tag "GPT 3.5 turbo" "gpt-3.5-turbo")
+          (const :tag "GPT 3.5 turbo 0301" "gpt-3.5-turbo-0301")
+          (const :tag "GPT 4 (experimental)" "gpt-4")))
+
+(defcustom gptel-temperature 1.0
+  "\"Temperature\" of ChatGPT response.
+
+This is a number between 0.0 and 2.0 that controls the randomness
+of the response, with 2.0 being the most random.
+
+To set the temperature for a chat session interactively call
+`gptel-send' with a prefix argument."
+  :local t
+  :group 'gptel
+  :type 'number)
+
 (defvar-local gptel--num-messages-to-send nil)
+(defvar gptel--debug nil)
 
 (defun gptel-api-key-from-auth-source (&optional host user)
   "Lookup api key in the auth source.
@@ -285,12 +344,12 @@ there.
 (defun gptel--request-data (prompts)
   "JSON encode PROMPTS for sending to ChatGPT."
   (let ((prompts-plist
-         `(:model ,gptel--model
+         `(:model ,gptel-model
            :messages [,@prompts])))
-    (when gptel--temperature
-      (plist-put prompts-plist :temperature (gptel--numberize gptel--temperature)))
-    (when gptel--max-tokens
-      (plist-put prompts-plist :max_tokens (gptel--numberize gptel--max-tokens)))
+    (when gptel-temperature
+      (plist-put prompts-plist :temperature (gptel--numberize gptel-temperature)))
+    (when gptel-max-tokens
+      (plist-put prompts-plist :max_tokens (gptel--numberize gptel-max-tokens)))
     prompts-plist))
 
 ;; TODO: Use `run-hook-wrapped' with an accumulator instead to handle
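
Usage note: after this change the API parameters are ordinary user options, so
they can be set from an init file as well as from gptel's transient menu
(`gptel-send' with a prefix argument). A minimal, untested sketch follows; the
values shown are illustrative examples, not package defaults:

;; gptel-model, gptel-temperature and gptel-max-tokens are declared with
;; :local t (buffer-local), so `setq-default' sets their global defaults;
;; per-session values can still be changed from the transient menu.
(setq-default gptel-model "gpt-4"        ; one of the :type choices above
              gptel-temperature 0.7      ; 0.0-2.0, higher is more random
              gptel-max-tokens 300)      ; max tokens per response

;; Only the values of the fixed directive keys (default, programming,
;; writing, chat) are customizable for now, as the gptel-directives
;; docstring notes; an example replacement value:
(setf (alist-get 'writing gptel-directives)
      "You are a large language model and a technical editor. Respond concisely.")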