jarguello76 committed (verified)
Commit e1a463d · 1 Parent(s): 69759af

Update app.py

Files changed (1): app.py (+37, -4)
app.py CHANGED
@@ -26,7 +26,7 @@ class QuestionValidation
         self.client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta", token=HF_TOKEN)
 
         self.llm2_model = "HuggingFaceH4/zephyr-7b-beta"
-        self.embedding_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
+        self.embedding_model = SentenceTransformer("sentence-transformers/paraphrase-MiniLM-L3-v2")
 
     def guess_question(self, answer: str) -> str:
         prompt = f"This was the answer: {answer}\nWhat question would likely have led to it?"
@@ -53,13 +53,46 @@ def search_web(query: str, max_results: int = 5) -> List[Dict[str, str]]:
     except Exception as e:
         return [{"error": str(e)}]
 
+import ast
+import operator as op
+
+OPERATORS = {
+    ast.Add: op.add,
+    ast.Sub: op.sub,
+    ast.Mult: op.mul,
+    ast.Div: op.truediv,
+    ast.Mod: op.mod,
+    ast.Pow: op.pow,
+    ast.USub: op.neg,
+    ast.UAdd: op.pos,
+    ast.FloorDiv: op.floordiv,
+}
+
 def evaluate_math_expression(expr: str) -> str:
     try:
-        result = eval(expr, {"__builtins__": {}})
+        node = ast.parse(expr, mode="eval")
+
+        def _eval(node):
+            if isinstance(node, ast.Expression):
+                return _eval(node.body)
+            elif isinstance(node, ast.Num):
+                return node.n
+            elif isinstance(node, ast.Constant):
+                if isinstance(node.value, (int, float)):
+                    return node.value
+            elif isinstance(node, ast.BinOp):
+                return OPERATORS[type(node.op)](_eval(node.left), _eval(node.right))
+            elif isinstance(node, ast.UnaryOp):
+                return OPERATORS[type(node.op)](_eval(node.operand))
+            else:
+                raise ValueError(f"Unsupported expression: {ast.dump(node)}")
+
+        result = _eval(node)
         return str(result)
     except Exception as e:
         return f"Error evaluating expression: {e}"
 
+
 validator = QuestionValidation(hf_token=HF_TOKEN)
 
 validate_tool = FunctionTool.from_defaults(
@@ -93,7 +126,7 @@ llm = HuggingFaceLLM(
     generate_kwargs={"temperature": 0.7, "top_p": 0.95},
     tokenizer_name="HuggingFaceH4/zephyr-7b-beta",
     model_name="HuggingFaceH4/zephyr-7b-beta",
-    # device="cpu", # or "cuda" if available
+
 )
 
 agent = ReActAgent.from_tools(
@@ -127,7 +160,7 @@ iface_loop = gr.Interface(
     fn=question_loop_agent,
     inputs=gr.Textbox(lines=2, placeholder="Ask me a question..."),
     outputs="text",
-    title="🧠 Question Similarity Loop Agent",
+    title="Question Similarity Loop Agent",
     description="Loops until the guessed question has a similarity score > 0.6."
 )
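
The main functional change replaces the bare eval() call in evaluate_math_expression with an AST walk that only accepts numeric constants plus the unary and binary operators whitelisted in OPERATORS. Below is a minimal, self-contained sketch of the same pattern; the helper name safe_eval and the sample inputs are illustrative and not part of app.py.

# Self-contained sketch of the safe-eval pattern used in this commit.
# safe_eval is an illustrative name, not a function defined in app.py.
import ast
import operator as op

# Whitelisted arithmetic operators, mirroring the OPERATORS table above.
OPERATORS = {
    ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul, ast.Div: op.truediv,
    ast.Mod: op.mod, ast.Pow: op.pow, ast.USub: op.neg, ast.UAdd: op.pos,
    ast.FloorDiv: op.floordiv,
}

def safe_eval(expr: str):
    """Evaluate a purely arithmetic expression; reject everything else."""
    def _eval(node):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in OPERATORS:
            return OPERATORS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in OPERATORS:
            return OPERATORS[type(node.op)](_eval(node.operand))
        raise ValueError(f"Unsupported expression: {ast.dump(node)}")
    return _eval(ast.parse(expr, mode="eval"))

print(safe_eval("2 ** 10 - (3 + 4) // 2"))        # 1021
try:
    safe_eval("__import__('os').system('ls')")    # names/calls are not whitelisted
except ValueError as err:
    print("rejected:", err)

Arithmetic expressions evaluate as before, while anything involving names, calls, or attribute access (the escape routes that remain open to eval even with an empty __builtins__) now raises ValueError and comes back through the except branch as an error string.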
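The commit also swaps the sentence embedding model from all-MiniLM-L6-v2 to the smaller paraphrase-MiniLM-L3-v2. The similarity scoring itself is not shown in this diff, but the sketch below illustrates how such a model is typically used for the check the interface describes ("similarity score > 0.6"); the function name question_similarity and the example strings are assumptions for illustration.

# Illustrative only: the actual scoring code inside app.py is not part of this diff.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("sentence-transformers/paraphrase-MiniLM-L3-v2")

def question_similarity(guessed: str, original: str) -> float:
    # Embed both questions and compare them with cosine similarity.
    embeddings = model.encode([guessed, original], convert_to_tensor=True)
    return float(util.cos_sim(embeddings[0], embeddings[1]))

score = question_similarity(
    "What is the capital of France?",
    "Which city is France's capital?",
)
print(f"{score:.3f}", "accept" if score > 0.6 else "keep looping")

paraphrase-MiniLM-L3-v2 uses three transformer layers instead of six, trading some accuracy for faster CPU inference, which is a plausible motivation for the swap on a Space, though the commit message does not say.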