"""Anomaly-detection Gradio app.

Two tabs:
  * BID ANOMALY    — flags suspicious vendor bids (IsolationForest-style model
                     plus a 2-sigma price-range check) and escalates sanctions.
  * REVIEW ANOMALY — flags spam/toxic/low-quality product reviews and
                     temporarily suspends the offending user.
"""

import re
import sqlite3
from datetime import datetime, timedelta

import gradio as gr
import joblib
import numpy as np
import pandas as pd
from better_profanity import profanity
from transformers import pipeline

# ---------------------- Shared DB Setup ----------------------
profanity.load_censor_words()
# check_same_thread=False: Gradio may invoke handlers from worker threads.
db = sqlite3.connect("anomaly1.db", check_same_thread=False)
cursor = db.cursor()

# ---------------------- Load Models ----------------------
# Bid anomaly models
bid_model = joblib.load("anomaly_model.pkl")
label_encoder = joblib.load("label_encoder.pkl")
# Historical prices used for the per-product 2-sigma sanity range.
df = pd.read_csv("cleaned_dataset.csv")[['Product Name', 'Price']].dropna()

# Review anomaly models
spam_model = joblib.load("spam_classifier.pkl")
toxicity_model = pipeline("text-classification", model="unitary/toxic-bert")

# Timestamp format shared by every DB write (avoids relying on sqlite3's
# default datetime adapter, deprecated since Python 3.12).
_TS_FMT = "%Y-%m-%d %H:%M:%S"


# ---------------------- BID ANOMALY FUNCTION ----------------------
def connect_to_db():
    """Open a fresh connection to the vendors/bids database."""
    return sqlite3.connect("sql.db")


def detect_and_act(product_name, bid_price):
    """Classify a bid and apply escalating sanctions to the vendor.

    A bid is "fake" when the ML model flags it (-1) AND it falls outside the
    product's mean +/- 2*std historical price range; it is merely "suspicious"
    when only one of the two checks fails.

    Args:
        product_name: Product label known to ``label_encoder``.
        bid_price:    Bid amount (string or number; coerced to float).

    Returns:
        A human-readable status message describing the action taken.
    """
    vendor_id = 1  # Static for now
    conn = None  # Pre-bind so the finally-clause is safe if connect fails.
    try:
        conn = connect_to_db()
        cursor = conn.cursor()

        bid_price = float(bid_price)
        # Raises ValueError for unseen products; reported via the except path.
        encoded_product = label_encoder.transform([product_name])[0]
        input_data = np.array([[encoded_product, bid_price]])
        prediction = bid_model.predict(input_data)

        # Statistical sanity check: within mean +/- 2 std of historical prices.
        product_data = df[df['Product Name'] == product_name]
        mean_price = product_data['Price'].mean()
        std_price = product_data['Price'].std()
        min_range = mean_price - 2 * std_price
        max_range = mean_price + 2 * std_price
        bid_valid = min_range <= bid_price <= max_range

        cursor.execute("SELECT * FROM vendors WHERE vendor_id = ?", (vendor_id,))
        vendor = cursor.fetchone()
        if not vendor:
            return "⚠️ Vendor not found!"

        cursor.execute(
            "SELECT bid_id FROM bids WHERE vendor_id = ? ORDER BY bid_id DESC LIMIT 1",
            (vendor_id,),
        )
        bid = cursor.fetchone()
        if not bid:
            return "⚠️ No bid found in the system to associate with a vendor."
        bid_id = bid[0]

        if prediction[0] == -1 and not bid_valid:
            # Both checks failed: count it as a confirmed fake bid.
            cursor.execute(
                "UPDATE vendors SET anomaly_count = anomaly_count + 1 WHERE vendor_id = ?",
                (vendor_id,),
            )
            conn.commit()
            cursor.execute(
                "SELECT anomaly_count FROM vendors WHERE vendor_id = ?", (vendor_id,)
            )
            anomaly_count = cursor.fetchone()[0]

            if anomaly_count >= 3:
                cursor.execute(
                    "UPDATE vendors SET blocked_status = 1 WHERE vendor_id = ?",
                    (vendor_id,),
                )
                conn.commit()
                return "❌ Vendor permanently blocked after 3 fake bids."
            elif anomaly_count == 2:
                block_until = datetime.now() + timedelta(hours=24)
                cursor.execute(
                    "UPDATE vendors SET suspended_until = ? WHERE vendor_id = ?",
                    (block_until.strftime(_TS_FMT), vendor_id),
                )
                conn.commit()
                return "⚠️ Vendor temporarily blocked for 24 hours (2nd fake bid)."
            else:
                return "⚠️ Fake bid detected. Warning issued."

        elif prediction[0] == -1 or not bid_valid:
            # Only one check failed: suspicious, not counted as fake.
            block_until = datetime.now() + timedelta(hours=24)
            cursor.execute(
                "UPDATE vendors SET suspended_until = ? WHERE vendor_id = ?",
                (block_until.strftime(_TS_FMT), vendor_id),
            )
            conn.commit()
            return "⚠️ Bid suspicious. Vendor temporarily blocked for 24 hours."
        else:
            return "✅ Bid is normal."

    except Exception as e:
        return f"❌ Error: {str(e)}"
    finally:
        if conn is not None:
            conn.close()


# ---------------------- REVIEW ANOMALY FUNCTIONS ----------------------
def is_toxic(text):
    """Return True when toxic-bert labels the text 'toxic' with score > 0.7."""
    try:
        result = toxicity_model(text)[0]
        return result['label'].lower() == "toxic" and result['score'] > 0.7
    except Exception:
        # Best-effort: a model failure is treated as "not toxic".
        return False


def is_low_quality(text):
    """Heuristic: very short, all-caps, or a character repeated 4+ times."""
    return bool(
        len(text.strip()) < 10
        or text.strip().isupper()
        or re.search(r"(.)\1{3,}", text)
    )


def contains_suspicious_content(text):
    """Detect contact-sharing attempts (phone numbers, 'call me', etc.)."""
    patterns = [r"\b\d{10}\b", r"\bcall me\b", r"\bwhatsapp\b",
                r"\bnumber\b", r"\bcontact\b", r"\bemail\b"]
    return any(re.search(p, text.lower()) for p in patterns)


def is_nonsensical_structure(text):
    """Detect phrasings typical of off-topic / template reviews."""
    patterns = [r"\bi am a\b", r"\bi will be a\b", r"\bthis is my\b"]
    return any(re.search(p, text.lower()) for p in patterns)


def basic_anomaly_score(text):
    """Combine the heuristics into a 0.0–1.0 anomaly score."""
    score = 0
    if is_low_quality(text):
        score += 0.3
    if contains_suspicious_content(text):
        score += 0.3
    if is_nonsensical_structure(text):
        score += 0.2
    if len(text.split()) < 3:
        score += 0.2
    return score


def predict_review(text):
    """Flag a review, persist it, and suspend the user if it is anomalous.

    Args:
        text: Raw review text from the UI.

    Returns:
        A status message listing every flag raised (or "Normal"), plus the
        suspension notice when the review scored as anomalous.
    """
    text = text.strip()
    if not text:
        return "⚠️ Please enter a review."

    flags = []
    try:
        if spam_model.predict([text])[0]:
            flags.append("Spam")
    except Exception:
        flags.append("Spam Detection Failed")

    if is_toxic(text):
        flags.append("Toxic")
    if is_low_quality(text):
        flags.append("Low Quality")
    if contains_suspicious_content(text):
        flags.append("Suspicious")
    if is_nonsensical_structure(text):
        flags.append("Nonsensical")
    if len(text.split()) < 3:
        flags.append("Too Short")

    score = basic_anomaly_score(text)
    if score >= 0.5:
        flags.append("Anomalous")

    prediction = ", ".join(flags) if flags else "Normal"
    now = datetime.now()
    is_anomaly = 1 if "Anomalous" in flags else 0

    try:
        # Attribute the review to the most recent user (demo behavior).
        cursor.execute("SELECT user_id FROM users ORDER BY user_id DESC LIMIT 1")
        result = cursor.fetchone()
        user_id = result[0] if result else 1
        vendor_id = 1
        cursor.execute(
            """ INSERT INTO reviews (user_id, vendor_id, review_text, timestamp, is_anomaly, prediction, review) VALUES (?, ?, ?, ?, ?, ?, ?) """,
            (user_id, vendor_id, text, now.strftime(_TS_FMT), is_anomaly, prediction, text),
        )
        db.commit()

        if is_anomaly:
            suspend_until = now + timedelta(hours=24)
            cursor.execute(
                "UPDATE users SET suspended_until = ? WHERE user_id = ?",
                (suspend_until.strftime(_TS_FMT), user_id),
            )
            db.commit()
            return f"❌ {prediction}\nUser temporarily suspended until {suspend_until.strftime('%Y-%m-%d %H:%M:%S')}."

        return f"✅ Prediction: {prediction}"
    except Exception as e:
        return f"⚠️ Database Error: {str(e)}"


# ---------------------- Gradio UI ----------------------
bid_interface = gr.Interface(
    fn=detect_and_act,
    inputs=["text", "text"],
    outputs="text",
    title="🛡️ BID ANOMALY",
    description="Enter Product Name and Bid Price",
)

review_interface = gr.Interface(
    fn=predict_review,
    inputs=gr.Textbox(lines=4, placeholder="Type a product review here...", label="Review Text"),
    outputs=gr.Textbox(label="Prediction"),
    title="🛍️ REVIEW ANOMALY",
    description="Enter a review to check for spam, toxicity, or fake content",
)

# ---------------------- Launch Tabbed Interface ----------------------
gr.TabbedInterface(
    interface_list=[review_interface, bid_interface],
    tab_names=["REVIEW ANOMALY", "BID ANOMALY"],
).launch()