Killian Mahé committed on
Commit aea1727 · 1 Parent(s): b5953b6

Update agent

Files changed (5)
  1. agent.py +3 -2
  2. app.py +160 -89
  3. pyproject.toml +1 -0
  4. tools.py +18 -1
  5. uv.lock +34 -0
agent.py CHANGED
@@ -8,9 +8,9 @@ from langgraph.graph.message import add_messages
 from langgraph.prebuilt import ToolNode, tools_condition
 from langchain_openai import ChatOpenAI
 
-from tools import multiply, divide, add, substract
+from tools import multiply, divide, add, substract, wiki_search
 
-available_tools = [multiply, divide, add, substract, DuckDuckGoSearchRun(output_format="list", max_results=5)]
+available_tools = [multiply, divide, add, substract, DuckDuckGoSearchRun(), wiki_search]
 
 with open("system_prompt.txt", "r") as file:
     sys_prompt = SystemMessage(content=file.read())
@@ -18,6 +18,7 @@ with open("system_prompt.txt", "r") as file:
 class AgentState(TypedDict):
     messages: Annotated[list[AnyMessage], add_messages]
 
+
 chat = ChatOpenAI(model="o4-mini",)
 chat.bind_tools(available_tools)
 
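For context, a minimal sketch of how the pieces shown in this file are typically assembled into the gaia_agent graph that app.py imports. This wiring is not part of the commit; the node names are illustrative and only the imports and definitions visible above are assumed. Note that bind_tools returns a new runnable, so the sketch keeps its return value instead of discarding it.

from langgraph.graph import StateGraph, START

# Assumed graph assembly (illustrative), reusing AgentState, chat, sys_prompt
# and available_tools from agent.py above.
llm_with_tools = chat.bind_tools(available_tools)  # keep the bound model

def assistant(state: AgentState):
    # Prepend the system prompt and let the tool-aware model respond.
    return {"messages": [llm_with_tools.invoke([sys_prompt] + state["messages"])]}

builder = StateGraph(AgentState)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(available_tools))
builder.add_edge(START, "assistant")
builder.add_conditional_edges("assistant", tools_condition)  # route to tools or end
builder.add_edge("tools", "assistant")
gaia_agent = builder.compile()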
app.py CHANGED
@@ -1,146 +1,217 @@
 import os
+import re
+import base64
+from enum import Enum
+from pydantic import BaseModel
+from io import BytesIO
+from tempfile import SpooledTemporaryFile
+from typing import Optional
+import logging
 import gradio as gr
 import requests
 import pandas as pd
 from langchain_core.messages import HumanMessage
+from concurrent.futures import ThreadPoolExecutor, as_completed
 
 from agent import gaia_agent
 
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+class ContentType(Enum):
+    IMAGE = "image"
+    PDF = "pdf"
+    AUDIO = "audio"
+    TEXT = "text"
+
 # (Keep Constants as is)
 # --- Constants ---
 DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
+MAX_WORKERS = 8
+
+class LLMFile(BaseModel):
+    filename: str
+    file: bytes
+    mime: str
+    content_type: ContentType
 
 # --- Basic Agent Definition ---
 # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
 class BasicAgent:
     def __init__(self):
         print("BasicAgent initialized.")
-    def __call__(self, question: str) -> str:
+    def __call__(self, question: str, content: Optional[LLMFile]) -> str:
         print(f"Agent received question (first 50 chars): {question[:50]}...")
-        messages = gaia_agent.invoke({"messages": [HumanMessage(content=question)]})
-        answer = messages['messages'][-1].content[14:]
+
+        message_content = [{"type": "text", "text": question}]
+
+        if content:
+            if content.content_type == ContentType.AUDIO:
+                media = {
+                    "type": "input_audio",
+                    "input_audio": {"data": base64.b64encode(content.file).decode("ascii"), "format": "wav"}
+                }
+            elif content.content_type == ContentType.IMAGE:
+                media = {
+                    "type": "image_url",
+                    "image_url": {"url": f"data:image/jpeg;base64,{base64.b64encode(content.file).decode('ascii')}"}
+                }
+            elif content.content_type == ContentType.PDF:
+                media = {
+                    "type": "file",
+                    "file": {
+                        "filename": content.filename,
+                        "file_data": f"data:application/pdf;base64,{base64.b64encode(content.file).decode('ascii')}",
+                    }
+                }
+
+            message_content.append(media)
+
+        messages = gaia_agent.invoke({"messages": [
+            HumanMessage(content=message_content)
+        ]})
+        message = messages['messages'][-1].content
+
+        match = re.search(r'FINAL ANSWER:\s*(.*)', message)
+        if match:
+            answer = match.group(1)
+        else:
+            answer = "ERROR"
+
         print(f"Agent returning answer: {answer}")
         return answer
 
-def run_and_submit_all( profile: gr.OAuthProfile | None):
-    """
-    Fetches all questions, runs the BasicAgent on them, submits all answers,
-    and displays the results.
-    """
-    # --- Determine HF Space Runtime URL and Repo URL ---
-    space_id = os.getenv("SPACE_ID") # Get the SPACE_ID for sending link to the code
-
-    if profile:
-        username= f"{profile.username}"
-        print(f"User logged in: {username}")
-    else:
-        print("User not logged in.")
+
+def run_and_submit_all(profile: Optional[gr.OAuthProfile]):
+    if not profile:
+        logger.warning("User not logged in.")
         return "Please Login to Hugging Face with the button.", None
 
-    api_url = DEFAULT_API_URL
-    questions_url = f"{api_url}/questions"
-    submit_url = f"{api_url}/submit"
+    username = profile.username.strip()
+    logger.info(f"User logged in: {username}")
 
-    # 1. Instantiate Agent ( modify this part to create your agent)
-    try:
-        agent = BasicAgent()
-    except Exception as e:
-        print(f"Error instantiating agent: {e}")
-        return f"Error initializing agent: {e}", None
-    # In the case of an app running as a hugging Face space, this link points toward your codebase ( usefull for others so please keep it public)
+    session = requests.Session()
+    space_id = os.getenv("SPACE_ID")
     agent_code = f"https://huggingface.co/spaces/{space_id}/tree/main"
-    print(agent_code)
 
-    # 2. Fetch Questions
-    print(f"Fetching questions from: {questions_url}")
+    # --- Fetch questions ---
+    questions_url = f"{DEFAULT_API_URL}/questions"
     try:
-        response = requests.get(questions_url, timeout=15)
+        response = session.get(questions_url, timeout=15)
         response.raise_for_status()
         questions_data = response.json()
         if not questions_data:
-            print("Fetched questions list is empty.")
-            return "Fetched questions list is empty or invalid format.", None
-        print(f"Fetched {len(questions_data)} questions.")
-    except requests.exceptions.RequestException as e:
-        print(f"Error fetching questions: {e}")
+            raise ValueError("Fetched questions list is empty or invalid.")
+        logger.info(f"Fetched {len(questions_data)} questions.")
+    except Exception as e:
+        logger.exception("Error fetching questions.")
         return f"Error fetching questions: {e}", None
-    except requests.exceptions.JSONDecodeError as e:
-        print(f"Error decoding JSON response from questions endpoint: {e}")
-        print(f"Response text: {response.text[:500]}")
-        return f"Error decoding server response for questions: {e}", None
+
+
+    # --- Instantiate agent ---
+    try:
+        agent = BasicAgent()
     except Exception as e:
-        print(f"An unexpected error occurred fetching questions: {e}")
-        return f"An unexpected error occurred fetching questions: {e}", None
+        logger.exception("Error initializing agent.")
+        return f"Error initializing agent: {e}", None
 
-    # 3. Run your Agent
-    results_log = []
-    answers_payload = []
-    print(f"Running agent on {len(questions_data)} questions...")
-    for item in questions_data:
+    # --- Run agent in parallel ---
+    def process_question(item):
         task_id = item.get("task_id")
-        question_text = item.get("question")
-        if not task_id or question_text is None:
-            print(f"Skipping item with missing task_id or question: {item}")
-            continue
+        question = item.get("question")
+        if not task_id or question is None:
+            return None, {"Task ID": task_id, "Question": question, "Submitted Answer": "INVALID QUESTION FORMAT"}
+
+        if item.get("filename", None):
+            # --- Fetch file ---
+            file_url = f"{DEFAULT_API_URL}/files/{task_id}"
+            try:
+                response = session.get(file_url, timeout=15)
+                response.raise_for_status()
+
+                content_disposition = response.headers.get("content-disposition", "")
+                filename = task_id + ".bin"
+                if "filename=" in content_disposition:
+                    filename = content_disposition.split("filename=")[-1].strip('"')
+
+                mime = response.headers.get("content-type", "")
+
+                if mime.startswith("audio/"):
+                    media = LLMFile(filename=filename, mime=mime, content_type=ContentType.AUDIO, file=response.content)
+                elif mime.startswith("image/"):
+                    media = LLMFile(filename=filename, mime=mime, content_type=ContentType.IMAGE, file=response.content)
+                elif mime == "application/pdf":
+                    media = LLMFile(filename=filename, mime=mime, content_type=ContentType.PDF, file=response.content)
+                elif mime.startswith("text/"):
+                    media = LLMFile(filename=filename, mime=mime, content_type=ContentType.TEXT, file=response.content)
+
+            except Exception as e:
+                logger.exception("Error fetching file for task id %s.", str(task_id))
+                return None, {"Task ID": task_id, "Question": question, "Submitted Answer": f"FILE FETCH ERROR: {e}"}
+
+
         try:
-            submitted_answer = agent(question_text)
-            answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
-            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
+            answer = agent(question, media if item.get("filename", None) else None)
+            return {"task_id": task_id, "submitted_answer": answer}, {
+                "Task ID": task_id, "Question": question, "Submitted Answer": answer
+            }
         except Exception as e:
-            print(f"Error running agent on task {task_id}: {e}")
-            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
+            logger.warning(f"Agent error on task {task_id}: {e}")
+            return None, {
+                "Task ID": task_id, "Question": question, "Submitted Answer": f"AGENT ERROR: {e}"
+            }
+
+    answers_payload = []
+    results_log = []
+
+    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
+        futures = [executor.submit(process_question, item) for item in questions_data]
+        for future in as_completed(futures):
+            answer, log = future.result()
+            if answer:
+                answers_payload.append(answer)
+            results_log.append(log)
 
     if not answers_payload:
-        print("Agent did not produce any answers to submit.")
         return "Agent did not produce any answers to submit.", pd.DataFrame(results_log)
 
-    # 4. Prepare Submission
-    submission_data = {"username": username.strip(), "agent_code": agent_code, "answers": answers_payload}
-    status_update = f"Agent finished. Submitting {len(answers_payload)} answers for user '{username}'..."
-    print(status_update)
+    # --- Submit answers ---
+    submit_url = f"{DEFAULT_API_URL}/submit"
+    submission_data = {
+        "username": username,
+        "agent_code": agent_code,
+        "answers": answers_payload,
+    }
 
-    # 5. Submit
-    print(f"Submitting {len(answers_payload)} answers to: {submit_url}")
+    logger.info(f"Submitting {len(answers_payload)} answers to: {submit_url}")
     try:
-        response = requests.post(submit_url, json=submission_data, timeout=60)
+        response = session.post(submit_url, json=submission_data, timeout=60)
         response.raise_for_status()
-        result_data = response.json()
+        result = response.json()
         final_status = (
             f"Submission Successful!\n"
-            f"User: {result_data.get('username')}\n"
-            f"Overall Score: {result_data.get('score', 'N/A')}% "
-            f"({result_data.get('correct_count', '?')}/{result_data.get('total_attempted', '?')} correct)\n"
-            f"Message: {result_data.get('message', 'No message received.')}"
+            f"User: {result.get('username')}\n"
+            f"Overall Score: {result.get('score', 'N/A')}% "
+            f"({result.get('correct_count', '?')}/{result.get('total_attempted', '?')} correct)\n"
+            f"Message: {result.get('message', 'No message received.')}"
         )
-        print("Submission successful.")
-        results_df = pd.DataFrame(results_log)
-        return final_status, results_df
+        return final_status, pd.DataFrame(results_log)
+
     except requests.exceptions.HTTPError as e:
-        error_detail = f"Server responded with status {e.response.status_code}."
         try:
-            error_json = e.response.json()
-            error_detail += f" Detail: {error_json.get('detail', e.response.text)}"
-        except requests.exceptions.JSONDecodeError:
-            error_detail += f" Response: {e.response.text[:500]}"
+            error_detail = e.response.json().get("detail", e.response.text)
+        except Exception:
+            error_detail = e.response.text[:500]
         status_message = f"Submission Failed: {error_detail}"
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
     except requests.exceptions.Timeout:
         status_message = "Submission Failed: The request timed out."
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
     except requests.exceptions.RequestException as e:
         status_message = f"Submission Failed: Network error - {e}"
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
     except Exception as e:
-        status_message = f"An unexpected error occurred during submission: {e}"
-        print(status_message)
-        results_df = pd.DataFrame(results_log)
-        return status_message, results_df
+        status_message = f"Unexpected error during submission: {e}"
+
+    logger.error(status_message)
+    return status_message, pd.DataFrame(results_log)
 
 
 # --- Build Gradio Interface using Blocks ---
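The reworked BasicAgent.__call__ above packs any fetched file into a base64 data URL inside a multimodal HumanMessage and then pulls the text after "FINAL ANSWER:" out of the model's reply. A small standalone sketch of those two helpers; the function names and sample data are illustrative, not part of the commit:

import base64
import re

def to_data_url(raw: bytes, mime: str = "image/jpeg") -> str:
    # b64encode returns bytes, so decode to str before embedding it in the URL.
    return f"data:{mime};base64,{base64.b64encode(raw).decode('ascii')}"

def extract_final_answer(message: str) -> str:
    # Mirrors the regex used in __call__: capture the rest of the line
    # after "FINAL ANSWER:".
    match = re.search(r"FINAL ANSWER:\s*(.*)", message)
    return match.group(1) if match else "ERROR"

print(to_data_url(b"\x89PNG...", mime="image/png")[:40])
print(extract_final_answer("Some reasoning...\nFINAL ANSWER: Paris"))  # -> Paris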
pyproject.toml CHANGED
@@ -14,4 +14,5 @@ dependencies = [
     "pandas>=2.3.0",
     "gradio[oauth]>=5.35.0",
     "duckduckgo-search>=8.0.4",
+    "wikipedia>=1.4.0",
 ]
tools.py CHANGED
@@ -1,4 +1,6 @@
 from langchain.tools import tool
+from langchain_community.document_loaders import WikipediaLoader
+
 
 @tool
 def multiply(a: float, b: float) -> float:
@@ -43,4 +45,19 @@ def divide(a: float, b: float) -> float:
     :return: The result of dividing a by b.
     :raises ZeroDivisionError: If b is equal to zero.
     """
-    return a / b
+    return a / b
+
+
+@tool
+def wiki_search(query: str) -> str:
+    """Search Wikipedia for a query and return a maximum of 2 results.
+
+    Args:
+        query: The search query."""
+    search_docs = WikipediaLoader(query=query, load_max_docs=2).load()
+    formatted_search_docs = "\n\n---\n\n".join(
+        [
+            f'<Document source="{doc.metadata["source"]}" page="{doc.metadata.get("page", "")}">\n{doc.page_content}\n</Document>'
+            for doc in search_docs
+        ])
+    return formatted_search_docs
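The new wiki_search tool wraps WikipediaLoader from langchain_community, which relies on the wikipedia package added in pyproject.toml and uv.lock. A brief usage sketch; the query string is illustrative and not part of the commit:

# @tool-decorated functions are LangChain tools; call them via .invoke().
from tools import wiki_search

docs = wiki_search.invoke({"query": "Alan Turing"})
print(docs[:500])  # start of up to two formatted Wikipedia documents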
uv.lock CHANGED
@@ -170,6 +170,19 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/84/29/587c189bbab1ccc8c86a03a5d0e13873df916380ef1be461ebe6acebf48d/authlib-1.6.0-py2.py3-none-any.whl", hash = "sha256:91685589498f79e8655e8a8947431ad6288831d643f11c55c2143ffcc738048d", size = 239981 },
 ]
 
+[[package]]
+name = "beautifulsoup4"
+version = "4.13.4"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "soupsieve" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/d8/e4/0c4c39e18fd76d6a628d4dd8da40543d136ce2d1752bd6eeeab0791f4d6b/beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195", size = 621067 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/50/cd/30110dc0ffcf3b131156077b90e9f60ed75711223f306da4db08eff8403b/beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b", size = 187285 },
+]
+
 [[package]]
 name = "certifi"
 version = "2025.6.15"
@@ -385,6 +398,7 @@ dependencies = [
     { name = "langgraph" },
     { name = "pandas" },
     { name = "scrapingant-client" },
+    { name = "wikipedia" },
 ]
 
 [package.metadata]
@@ -398,6 +412,7 @@ requires-dist = [
     { name = "langgraph", specifier = ">=0.5.0" },
     { name = "pandas", specifier = ">=2.3.0" },
     { name = "scrapingant-client", specifier = ">=2.1.0" },
+    { name = "wikipedia", specifier = ">=1.4.0" },
 ]
 
 [[package]]
@@ -1725,6 +1740,15 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 },
 ]
 
+[[package]]
+name = "soupsieve"
+version = "2.7"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/3f/f4/4a80cd6ef364b2e8b65b15816a843c0980f7a5a2b4dc701fc574952aa19f/soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a", size = 103418 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677 },
+]
+
 [[package]]
 name = "sqlalchemy"
 version = "2.0.41"
@@ -1956,6 +1980,16 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743 },
 ]
 
+[[package]]
+name = "wikipedia"
+version = "1.4.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "beautifulsoup4" },
+    { name = "requests" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/67/35/25e68fbc99e672127cc6fbb14b8ec1ba3dfef035bf1e4c90f78f24a80b7d/wikipedia-1.4.0.tar.gz", hash = "sha256:db0fad1829fdd441b1852306e9856398204dc0786d2996dd2e0c8bb8e26133b2", size = 27748 }
+
 [[package]]
 name = "xxhash"
 version = "3.5.0"