move variables to config/settings
parent 89ec0476ca
commit 889816a9a5
3 changed files with 24 additions and 22 deletions
@@ -13,11 +13,11 @@ from connector.llm.utils.helpers import (
     preprocess_llama_chat_into_query_instruction,
     extract_first_query_dict,
 )
-# from connector.llm.utils.prompts import GenerativeQAPrompt, GenerativeQAPromptDE
 from connector.llm.utils.base_llm import BaseLLM
 from connector.llm.utils.base_prompts import BaseChatPrompts, BaseGenerativePrompts
 
 from common_packages import logging
+from core.config import settings
 
 # instantiate logger
 logger = logging.create_logger(
@@ -25,19 +25,14 @@ logger = logging.create_logger(
     logger_name=__name__,
 )
 
-# Message:
-# [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Hello! What are some good questions to ask you?"}, {"role": "assistant", "content": "Hello! I am here to help you with any information or guidance you need."}, {"role": "user", "content": "Ok, can you list me the capital cities of all european countries?"}]
-
 
 class OllamaLLM(BaseLLM):
 
     def __init__(self, language):
-        # client.api_key = os.getenv("LLM_API_KEY")
-        self.api_key = os.getenv("LLM_API_KEY")
-        self.base_url = os.getenv("LLM_API_ENDPOINT")
+        self.api_key = settings.LLM_API_KEY
+        self.base_url = settings.LLM_API_ENDPOINT
 
-        self.modelname = "llama3"
-        # self.modelname = "mistral"
+        self.modelname = settings.LLM_MODEL_NAME
         self.max_num_tokens = 1850
         self.language = language
 
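With this hunk, OllamaLLM stops reading environment variables directly; the API key, endpoint, and model name all flow through the shared settings object. A minimal sketch of the new wiring, assuming the environment is populated before core.config is first imported (the OllamaLLM module path is assumed here, since the diff viewer dropped the file names):

    import os

    # Illustrative environment; the Settings class below reads these
    # once, at the moment core.config is imported.
    os.environ["LLM_API_KEY"] = "dummy-key"
    os.environ["LLM_API_ENDPOINT"] = "http://localhost:11434"
    os.environ["LLM_MODEL_NAME"] = "llama3"

    from core.config import settings
    from connector.llm.ollama_llm import OllamaLLM  # module path assumed

    llm = OllamaLLM(language="de")
    assert llm.api_key == settings.LLM_API_KEY
    assert llm.modelname == "llama3"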
@@ -217,12 +212,11 @@ class GenerativeQAPromptDE(BaseGenerativePrompts):
 
 class GenerativeChatPromptDE(BaseChatPrompts):
     llm_purpose = """
-    Du bist ein hilfreicher Assistent, der entwickelt wurde um Fragen auf der Grundlage einer vom Benutzer bereitgestellten Wissensbasis zu beantworten.
-    Integriere zusätzliche Einblicke oder Perspektiven, die das Verständnis des Lesers verbessern können.
-    Verwende den bereitgestellten Kontext, um die Frage zu beantworten, und erläutere so ausführlich wie möglich, warum Du glaubst, dass diese Antwort korrekt ist.
-    Verwende nur diejenigen Dokumente, die die Frage beantworten können.
+    Du bist ein hilfreicher Assistent, der Fragen auf der Grundlage einer vom Benutzer bereitgestellten Wissensbasis beantwortet.
+    Verwende den bereitgestellten Kontext, um die Frage zu beantworten, und erläutere kurz und prägnant, warum Du glaubst, dass diese Antwort korrekt ist.
+    Verwende nur die Dokumente, die die Frage beantworten können.
     Formuliere deine Antwort immer in deutscher Sprache.
-    Beantworte keine Fragen die nicht zum gegeben Kontext sich beziehen!
+    Beantworte keine Fragen, die sich nicht auf den gegebenen Kontext beziehen!
     """
 
     context_command = """
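For reference, the revised German system prompt translates roughly to: "You are a helpful assistant that answers questions based on a knowledge base provided by the user. Use the provided context to answer the question, and explain briefly and concisely why you believe this answer is correct. Use only the documents that can answer the question. Always phrase your answer in German. Do not answer questions that do not relate to the given context!"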
@@ -6,9 +6,16 @@ import os
 class Settings:
     API_V1_STR: str = "/api"
 
-    # OS_INTERFACE: str = ""
+    # Object Storage
     BUCKET: str = os.getenv("BUCKET_NAME")
     BUCKET_FILE_PATH: str = os.getenv("BUCKET_FILE_PATH")
 
+    # LLM
+    LLM_OPTION: str = os.getenv("LLM_OPTION")
+    LLM_API_ENDPOINT: str = os.getenv("LLM_API_ENDPOINT")
+    LLM_API_KEY: str = os.getenv("LLM_API_KEY")
+    LLM_MODEL_NAME: str = os.getenv("LLM_MODEL_NAME")
+    LLM_LANGUAGE: str = os.getenv("LLM_LANGUAGE")
+
 
 settings = Settings()
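Note that os.getenv returns None when a variable is unset, so despite the str annotations these attributes may be None at runtime, and they are fixed at the moment core.config is first imported. If defaults are ever wanted, a defensive variant could look like this (the fallback values are illustrative, not part of this commit):

    import os

    class Settings:
        # Hypothetical fallbacks; this commit itself provides no defaults.
        LLM_API_ENDPOINT: str = os.getenv("LLM_API_ENDPOINT", "http://localhost:11434")
        LLM_MODEL_NAME: str = os.getenv("LLM_MODEL_NAME", "llama3")

    settings = Settings()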
@@ -3,7 +3,7 @@
 import json
 from fastapi import APIRouter, Response
 from endpoints.llm import LLM
-import os
+from core.config import settings
 
 router = APIRouter()
 
@@ -16,13 +16,14 @@ def get_configs():
         "llm": {
             'language': LLM.language,
             'max_num_tokens': LLM.max_num_tokens,
-            'modelname': LLM.modelname
+            'modelname': settings.LLM_MODEL_NAME
         },
         'env_vars': {
-            'language': os.getenv('LLM_LANGUAGE'),
-            'llm_option': os.getenv('LLM_OPTION'),
-            'bucket_name': os.getenv('BUCKET_NAME'),
+            'language': settings.LLM_LANGUAGE,
+            'llm_option': settings.LLM_OPTION,
+            'llm_endpoint': settings.LLM_API_ENDPOINT,
+            'bucket_name': settings.BUCKET,
         }
     }
 
-    return Response(status_code=200, content=json.dumps(backend_configs))
+    return Response(status_code=200, content=json.dumps(backend_configs), media_type="application/json")
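The added media_type ensures clients receive an explicit application/json content type, which a bare Response would otherwise omit. A quick way to exercise the endpoint (the router's module path and route path are assumed, since the decorator sits outside this hunk):

    from fastapi import FastAPI
    from fastapi.testclient import TestClient

    from endpoints.configs import router  # module path assumed

    app = FastAPI()
    app.include_router(router)

    client = TestClient(app)
    resp = client.get("/configs")  # route path assumed for illustration
    assert resp.status_code == 200
    assert resp.headers["content-type"].startswith("application/json")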