Update document_generator.py

document_generator.py  CHANGED  (+26 -79)
@@ -161,14 +161,12 @@ from typing import List, Dict, Optional, Any, Callable, Union
 from openai import OpenAI
 import logging
 import functools
-from fastapi import APIRouter, HTTPException, Request
+from fastapi import APIRouter, HTTPException, Request
 from fastapi.responses import StreamingResponse
 from pydantic import BaseModel
 from fastapi_cache.decorator import cache
 import psycopg2
 from datetime import datetime
-import base64
-


 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -266,43 +264,6 @@ class AIClient:
         return response.choices[0].message.content


-class VisionTools:
-    def __init__(self, ai_client):
-        self.ai_client = ai_client
-
-    async def extract_images_info(self, images: List[UploadFile]) -> str:
-        try:
-            image_contents = []
-            for image in images:
-                image_content = await image.read()
-                base64_image = base64.b64encode(image_content).decode('utf-8')
-                image_contents.append({
-                    "type": "image_url",
-                    "image_url": {
-                        "url": f"data:image/jpeg;base64,{base64_image}"
-                    }
-                })
-
-            messages = [
-                {
-                    "role": "user",
-                    "content": [
-                        {
-                            "type": "text",
-                            "text": "Extract the contents of these images in detail in a structured format, focusing on any text, tables, diagrams, or visual elements that might be relevant for document generation."
-                        },
-                        *image_contents
-                    ]
-                }
-            ]
-
-            image_context = self.ai_client.generate_vision_response(messages)
-            return image_context
-        except Exception as e:
-            print(f"Error processing images: {str(e)}")
-            return ""
-
-
 class DatabaseManager:
     """Manages database operations."""

@@ -352,14 +313,11 @@ class DocumentGenerator:
         return content.lstrip()

     @log_execution
-    def generate_document_outline(self, query: str, template: bool = False,
+    def generate_document_outline(self, query: str, template: bool = False, max_retries: int = 3) -> Optional[Dict]:
         messages = [
             {"role": "system", "content": DOCUMENT_OUTLINE_PROMPT_SYSTEM if not template else DOCUMENT_TEMPLATE_OUTLINE_PROMPT_SYSTEM},
-            {"role": "user", "content": DOCUMENT_OUTLINE_PROMPT_USER.format(query=query) if not template else DOCUMENT_TEMPLATE_PROMPT_USER.format(query=query
+            {"role": "user", "content": DOCUMENT_OUTLINE_PROMPT_USER.format(query=query) if not template else DOCUMENT_TEMPLATE_PROMPT_USER.format(query=query)}
         ]
-        # Update user content to include image context if provided
-        if image_context:
-            messages[1]["content"] += f"<attached_images>\n\n{image_context}\n\n</attached_images>"

         for attempt in range(max_retries):
             outline_response = self.ai_client.generate_response(messages, model="openai/gpt-4o")
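The new signature drops the image_context parameter entirely: callers pass only the query, the template flag, and a retry budget, and the GPT-4o call is retried up to max_retries times. Below is a minimal calling sketch under the assumption that AIClient and DocumentGenerator are importable from this module; anything not shown in the diff is hypothetical.

# Hypothetical usage of the new generate_document_outline signature.
# Assumes document_generator.py exposes AIClient and DocumentGenerator as in the diff above.
from document_generator import AIClient, DocumentGenerator

generator = DocumentGenerator(AIClient())

# template=False asks for a full outline; max_retries bounds the model attempts.
outline = generator.generate_document_outline(
    query="Product requirements document for an AI chatbot",
    template=False,
    max_retries=3,
)
if outline is None:
    raise RuntimeError("No valid outline produced within the retry budget")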
@@ -448,13 +406,12 @@ class MarkdownConverter:

 router = APIRouter()

-class JsonDocumentResponse(BaseModel):
-    json_document: Dict
-
-class JsonDocumentRequest(BaseModel):
+class DocumentRequest(BaseModel):
     query: str
     template: bool = False
-
+
+class JsonDocumentResponse(BaseModel):
+    json_document: Dict

 class MarkdownDocumentRequest(BaseModel):
     json_document: Dict
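After this reshuffle, DocumentRequest carries the request fields and JsonDocumentResponse wraps the generated outline. A short sketch of the payloads these Pydantic models accept follows; the field values and the outline structure are illustrative only.

# Illustrative payloads for the request/response models defined in this hunk.
from typing import Dict
from pydantic import BaseModel

class DocumentRequest(BaseModel):
    query: str
    template: bool = False

class JsonDocumentResponse(BaseModel):
    json_document: Dict

req = DocumentRequest(query="Write a PRD for an AI chatbot", template=False)
# The real outline structure depends on the outline prompts; this dict is a placeholder.
resp = JsonDocumentResponse(json_document={"title": "AI Chatbot PRD", "sections": []})
print(req)
print(resp)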
@@ -529,6 +486,22 @@ async def generate_document_stream(document_generator: DocumentGenerator, docume
     db_manager.update_database("elevatics", query, markdown_document)


+@cache(expire=600*24*7)
+@router.post("/generate-document/json", response_model=JsonDocumentResponse)
+async def generate_document_outline_endpoint(request: DocumentRequest):
+    ai_client = AIClient()
+    document_generator = DocumentGenerator(ai_client)
+
+    try:
+        json_document = document_generator.generate_document_outline(request.query, request.template)
+
+        if json_document is None:
+            raise HTTPException(status_code=500, detail="Failed to generate a valid document outline")
+
+        return JsonDocumentResponse(json_document=json_document)
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
 @router.post("/generate-document/markdown-stream")
 async def generate_markdown_document_stream_endpoint(request: MarkdownDocumentRequest):
     ai_client = AIClient()
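The JSON endpoint now accepts a plain DocumentRequest body and sits behind fastapi-cache; note that expire=600*24*7 evaluates to 100,800 seconds, roughly 28 hours (a full week in seconds would be 60*60*24*7 = 604,800). A hedged client-side sketch, assuming the app is served locally on port 8000 and the router is mounted without a prefix:

# Hypothetical client call for the cached /generate-document/json endpoint.
# Base URL and router prefix are assumptions; adjust to the deployed app.
import requests

resp = requests.post(
    "http://localhost:8000/generate-document/json",
    json={"query": "PRD for an AI chatbot", "template": False},
    timeout=120,
)
resp.raise_for_status()
json_document = resp.json()["json_document"]
print(json_document)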
@@ -547,34 +520,6 @@ async def generate_markdown_document_stream_endpoint(request: MarkdownDocumentRe
     return StreamingResponse(stream_generator(), media_type="application/octet-stream")


-@cache(expire=600*24*7)
-@router.post("/generate-document/json")
-async def generate_document_outline_endpoint(request: jsonDocumentRequest):
-):
-    ai_client = AIClient()
-    document_generator = DocumentGenerator(ai_client)
-    vision_tools = VisionTools(ai_client)
-
-    try:
-        image_context = ""
-        if request.images:
-            image_context = await vision_tools.extract_images_info(request.images)
-
-        json_document = document_generator.generate_document_outline(
-            request.query,
-            request.template,
-            image_context=image_context
-        )
-
-        if json_document is None:
-            raise HTTPException(status_code=500, detail="Failed to generate a valid document outline")
-
-        return JsonDocumentResponse(json_document=json_document)
-    except Exception as e:
-        raise HTTPException(status_code=500, detail=str(e))
-
-
-
 ## OBSERVABILITY
 from uuid import uuid4
 import csv
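The surviving streaming endpoint shown as context above takes a MarkdownDocumentRequest (the json_document produced by the JSON endpoint) and returns an application/octet-stream response. A minimal consumer sketch, with the base URL again an assumption and the json_document placeholder illustrative:

# Hypothetical streaming consumer for /generate-document/markdown-stream.
# In practice the json_document would come from the cached JSON endpoint above.
import requests

with requests.post(
    "http://localhost:8000/generate-document/markdown-stream",
    json={"json_document": {"title": "AI Chatbot PRD", "sections": []}},
    stream=True,
    timeout=300,
) as resp:
    resp.raise_for_status()
    parts = [chunk for chunk in resp.iter_content(chunk_size=8192)]

markdown_document = b"".join(parts).decode("utf-8")
print(markdown_document)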
@@ -631,12 +576,14 @@ async def get_last_observations(limit: int = 10, format: str = "json"):
         raise HTTPException(status_code=500, detail=f"Failed to retrieve observations: {str(e)}")


+
+
 ###########################################
 class MarkdownDocumentResponse(BaseModel):
     markdown_document: str

 @router.post("/generate-document-test", response_model=MarkdownDocumentResponse)
-async def test_generate_document_endpoint(request:
+async def test_generate_document_endpoint(request: DocumentRequest):
     try:
         # Load JSON document from file
         json_path = os.path.join("output/document_generator", "ai-chatbot-prd.json")