|
@@ -23,26 +23,48 @@ class ChatConfig(QueryConfig):
|
|
|
"""
|
|
|
Config for the `chat` method, inherits from `QueryConfig`.
|
|
|
"""
|
|
|
- def __init__(self, template: Template = None, model = None, temperature = None, max_tokens = None, top_p = None, stream: bool = False):
|
|
|
+
|
|
|
+ def __init__(
|
|
|
+ self,
|
|
|
+ template: Template = None,
|
|
|
+ model=None,
|
|
|
+ temperature=None,
|
|
|
+ max_tokens=None,
|
|
|
+ top_p=None,
|
|
|
+ stream: bool = False,
|
|
|
+ ):
|
|
|
"""
|
|
|
Initializes the ChatConfig instance.
|
|
|
|
|
|
- :param template: Optional. The `Template` instance to use as a template for prompt.
|
|
|
+ :param template: Optional. The `Template` instance to use as a template for
|
|
|
+ prompt.
|
|
|
:param model: Optional. Controls the OpenAI model used.
|
|
|
- :param temperature: Optional. Controls the randomness of the model's output.
|
|
|
- Higher values (closer to 1) make output more random, lower values make it more deterministic.
|
|
|
+ :param temperature: Optional. Controls the randomness of the model's output.
|
|
|
+ Higher values (closer to 1) make output more random, lower values make it more
|
|
|
+ deterministic.
|
|
|
:param max_tokens: Optional. Controls how many tokens are generated.
|
|
|
- :param top_p: Optional. Controls the diversity of words. Higher values (closer to 1) make word selection more diverse, lower values make words less diverse.
|
|
|
+ :param top_p: Optional. Controls the diversity of words. Higher values
|
|
|
+ (closer to 1) make word selection more diverse, lower values make words less
|
|
|
+ diverse.
|
|
|
:param stream: Optional. Control if response is streamed back to the user
|
|
|
- :raises ValueError: If the template is not valid as template should contain $context and $query and $history
|
|
|
+ :raises ValueError: If the template is not valid as template should contain
|
|
|
+ $context and $query and $history
|
|
|
"""
|
|
|
if template is None:
|
|
|
template = DEFAULT_PROMPT_TEMPLATE
|
|
|
|
|
|
-
|
|
|
- # History is set as 0 to ensure that there is always a history, that way, there don't have to be two templates.
|
|
|
- # Having two templates would make it complicated because the history is not user controlled.
|
|
|
- super().__init__(template, model=model, temperature=temperature, max_tokens=max_tokens, top_p=top_p, history=[0], stream=stream)
|
|
|
+ # History is set as 0 to ensure that there is always a history, that way,
|
|
|
+ # there don't have to be two templates. Having two templates would make it
|
|
|
+ # complicated because the history is not user controlled.
|
|
|
+ super().__init__(
|
|
|
+ template,
|
|
|
+ model=model,
|
|
|
+ temperature=temperature,
|
|
|
+ max_tokens=max_tokens,
|
|
|
+ top_p=top_p,
|
|
|
+ history=[0],
|
|
|
+ stream=stream,
|
|
|
+ )
|
|
|
|
|
|
def set_history(self, history):
|
|
|
"""
|