Refactor chat_with_assistant function to support messages array input: Enhance the chatbot API by allowing an array of messages for context, extracting system messages, and updating the response format. Maintain backward compatibility with the previous prompt structure while improving error handling for empty inputs.
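For illustration, here is a minimal sketch of the two request shapes the refactored endpoint accepts and the shared response shape. The route path /api/chat and the local host are assumptions for the example, not taken from this commit.

    # Minimal sketch; the route path "/api/chat" and the host are assumed, not part of this commit.
    import requests

    BASE = "http://localhost:5000"

    # New format: a messages array, optionally including a system message.
    r = requests.post(f"{BASE}/api/chat", json={
        "messages": [
            {"role": "system", "content": "Du bist ein hilfreicher Assistent."},
            {"role": "user", "content": "Fasse meine letzten Gedanken zusammen."}
        ]
    })
    print(r.json().get("response"))

    # Old format, still supported: a single prompt plus optional context.
    # Note that the reply key is now 'response' here as well, no longer 'answer'.
    r = requests.post(f"{BASE}/api/chat", json={
        "prompt": "Fasse meine letzten Gedanken zusammen.",
        "context": "Gedanke 1 ... Gedanke 2 ..."
    })
    print(r.json().get("response"))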
@@ -932,15 +932,38 @@ def too_many_requests(e):
 def chat_with_assistant():
     """Chatbot API with OpenAI integration."""
     data = request.json
-    prompt = data.get('prompt', '')
-    context = data.get('context', '')
-
-    if not prompt:
-        return jsonify({
-            'error': 'Prompt darf nicht leer sein.'
-        }), 400

     try:
+        # Check whether the request carries a single prompt or a messages array
+        if 'messages' in data:
+            messages = data.get('messages', [])
+            if not messages:
+                return jsonify({
+                    'error': 'Keine Nachrichten vorhanden.'
+                }), 400
+
+            # Extract the system message if present, otherwise use the default system message
+            system_message = next((msg['content'] for msg in messages if msg['role'] == 'system'),
+                                  "Du bist ein hilfreicher Assistent, der Menschen dabei hilft, "
+                                  "Wissen zu organisieren und zu verknüpfen. Liefere informative, "
+                                  "sachliche und gut strukturierte Antworten.")
+
+            # Format the messages for the OpenAI API
+            api_messages = [{"role": "system", "content": system_message}]
+
+            # Add user and assistant messages
+            for msg in messages:
+                if msg['role'] in ['user', 'assistant']:
+                    api_messages.append({"role": msg['role'], "content": msg['content']})
+        else:
+            # Old implementation for a direct prompt
+            prompt = data.get('prompt', '')
+            context = data.get('context', '')
+
+            if not prompt:
+                return jsonify({
+                    'error': 'Prompt darf nicht leer sein.'
+                }), 400
+
             # Summarize several thoughts or request an analysis
             system_message = (
                 "Du bist ein hilfreicher Assistent, der Menschen dabei hilft, "
@@ -951,20 +974,24 @@ def chat_with_assistant():
             if context:
                 system_message += f"\n\nKontext: {context}"

+            api_messages = [
+                {"role": "system", "content": system_message},
+                {"role": "user", "content": prompt}
+            ]
+
         try:
             response = client.chat.completions.create(
                 model="gpt-3.5-turbo-16k",
-                messages=[
-                    {"role": "system", "content": system_message},
-                    {"role": "user", "content": prompt}
-                ],
+                messages=api_messages,
                 max_tokens=300,
                 temperature=0.7
             )

             answer = response.choices[0].message.content

+            # For the new format we expect 'response' instead of 'answer'
             return jsonify({
-                'answer': answer
+                'response': answer
             })

         except Exception as e:
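The message normalization in the new branch leans on next() with a default: the first system message wins, otherwise the fallback string is used, and only user and assistant turns are forwarded. A standalone sketch of that step follows; the helper name build_api_messages is hypothetical and not part of this commit.

    # Hypothetical helper mirroring the inline normalization in chat_with_assistant.
    DEFAULT_SYSTEM = ("Du bist ein hilfreicher Assistent, der Menschen dabei hilft, "
                      "Wissen zu organisieren und zu verknüpfen. Liefere informative, "
                      "sachliche und gut strukturierte Antworten.")

    def build_api_messages(messages):
        # Take the first system message if present, otherwise fall back to the default.
        system_message = next(
            (msg['content'] for msg in messages if msg['role'] == 'system'),
            DEFAULT_SYSTEM
        )
        api_messages = [{"role": "system", "content": system_message}]
        # Only user and assistant turns are forwarded; other roles are dropped.
        for msg in messages:
            if msg['role'] in ['user', 'assistant']:
                api_messages.append({"role": msg['role'], "content": msg['content']})
        return api_messages

    # A client-supplied system message overrides the default:
    print(build_api_messages([
        {"role": "system", "content": "Antworte knapp."},
        {"role": "user", "content": "Hallo"}
    ]))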