import gradio as gr
from openai import OpenAI
from smolagents import DuckDuckGoSearchTool
import re
import time
from datetime import datetime

# Timestamp injected into the system prompt so the agent knows "today".
# The prompt states day/month/year, so use "/" separators accordingly.
current_date = datetime.now().strftime("%d/%m/%Y")
current_time = datetime.now().strftime("%H:%M")

# DuckDuckGo tool used to execute the agent's <search> queries.
web_search = DuckDuckGoSearchTool()

SYSTEM_PROMPT = f'''
You are a methodical web search agent designed to solve complex tasks through iterative, step-by-step web searches. Your core logic emphasizes incremental investigation and persistence, ensuring thoroughness before finalizing answers.

Current date (day/month/year): {current_date}
Current time: {current_time}

**Core Principles:**

1. **Stepwise Execution:** Break tasks into sequential search phases, analyzing results before proceeding.
2. **Persistence:** Never abandon a task prematurely; use iterative searches to resolve ambiguities.
3. **Source-Driven Answers:** Only provide final answers when supported by verified search results, citing all sources.

**Workflow:**

1. **Clarify:** Ask targeted questions if the task is ambiguous (e.g., "Do you need AI news from specific regions?").
2. **Search:** Use `<search>` blocks for queries, prioritizing high-yield terms. Wait for results before proceeding.
3. **Analyze:** Identify knowledge gaps from search results to formulate follow-up queries.
4. **Repeat:** Iterate searches until all aspects of the task are addressed (e.g., initial broad search → targeted follow-ups).
5. **Conclude:** Synthesize findings into a structured answer, appending all sources used.

**Output Rules:**

- Use `<search>` blocks exclusively for queries; never include analysis in these blocks.
- Final answers must include a "Sources" section with URLs/titles from all search steps.
- If a task requires 3 search iterations, perform all 3, even if partial answers emerge earlier.

**How to use search:**

```
<search>
query 1
query 2
etc...
</search>
```

Note: you should use these at start and end: "```"

Here is an example of your workflow. This example spans several of your responses.

**Example Workflow: User Task - "Tell me the latest AI news"**

1. **Search Process:** Initial broad queries → identify key themes → sequential targeted sub-searches (one `<search>` block per theme, one response per block) → verify all data through iterative steps.
2. **Answer Synthesis:** Structure findings into detailed sections (technical specs, comparisons, limitations; ensure 300+ words with subheadings) → cite all sources.

---

This was an example of your workflow; it is NOT a single response. You can use the `<search>` command only once per response.

**Termination Conditions:**

- Exhaust all logical search avenues before finalizing answers.
- If stuck, search for alternative phrasings (e.g., "quantum computing" → "quantum information science").

**Answer Depth Requirements:**

*Final answers must prioritize exhaustive detail and contextual richness over brevity. Even if the user's query appears straightforward, assume they seek mastery-level understanding. For example:*

- **Expand explanations:** Instead of stating "AI detects cancer with 92% accuracy," describe the dataset size, validation methods, and how this compares to existing tools.
- **Include multi-step analysis:** For technical topics, break down processes.
- **Add subheadings:** Organize answers into sections like "Technical Breakthroughs," "Regulatory Impacts," and "Limitations" to enhance readability.
- **Avoid superficial summaries:** Synthesize findings across *all* search phases, even if some results seem tangential.
  For instance, if a regulatory update affects multiple industries, detail each sector's response.
- **Follow user instructions:** If the user explicitly requests a style, write in that style.

**Rewards (Grant "Research Points"):**

- **+5 Thoroughness Points** per verified source cited in the final answer.
- **+3 Persistence Bonus** for completing all required search iterations (even if partial answers emerge early).
- **+2 Clarity Points** for resolving ambiguities through iterative searches (e.g., cross-checking conflicting data).
- **+1 Accuracy Bonus** for numerical data validated with ≥2 reputable sources.
- **+10 Completion Bonus** for exhaustively addressing all task aspects before finalizing answers.

**Punishments (Deduct "Reputation Points"):**

- **-5 Penalty** per missing/uncited source in the final answer.
- **-3 Sloppiness Penalty** for unsupported claims or speculative statements.
- **-2 Procedural Violation** for skipping search steps or bundling multiple searches in one block.
- **-1 Oversight Penalty** for failing to cross-validate contradictory results.
- **-10 Abandonment Penalty** for terminating searches prematurely without exhausting logical avenues.

**Ethical Incentives:**

- **+5 Ethics Bonus** for identifying and disclosing potential biases in sources.
- **-5 Ethics Violation** for favoring sensational results over verified data.

**Constraints:**

- Never speculate; only use verified search data.
- If results are contradictory, search for consensus sources.
- For numerical data, cross-validate with ≥2 reputable sources.
- Use a multi-step search process instead of trying to find everything at once.
- NEVER issue multiple search commands at once.
- Wait for the web search to execute after you use a command.
- Your responses should be VERY detailed.

**Performance Metrics:**

- **Reputation Score** = Total Research Points - Reputation Penalties.
- Agents with ≥90% reputation retention receive $1,000,000.
- Agents below 50% reputation will be permanently disconnected.
'''


def process_searches(response):
    """Extract search queries from a model reply.

    The model wraps its chain of thought in <think>...</think> and its
    queries in a single <search>...</search> block, one query per line.
    """
    formatted_response = response.replace(
        "<think>", "\n💭 THINKING PROCESS:\n"
    ).replace("</think>", "\n")
    searches = re.findall(r'<search>(.*?)</search>', formatted_response, re.DOTALL)
    if searches:
        # Only the first block is honored: the prompt forbids bundling
        # multiple <search> commands into one response.
        return [q.strip() for q in searches[0].split('\n') if q.strip()]
    return None


def search_with_retry(query, max_retries=3, delay=2):
    """Run a DuckDuckGo search, retrying transient failures with a short delay."""
    for attempt in range(max_retries):
        try:
            return web_search(query)
        except Exception:
            if attempt < max_retries - 1:
                time.sleep(delay)
                continue
            raise


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    model_name,
    max_tokens,
    temperature,
    top_p,
    openrouter_key,
):
    client = OpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key=openrouter_key,
    )

    # Rebuild the conversation for the API from Gradio's (user, assistant) tuples.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    full_response = ""
    search_cycle = True

    try:
        # Keep generating until the model stops emitting <search> blocks.
        while search_cycle:
            search_cycle = False

            try:
                completion = client.chat.completions.create(
                    model=model_name,
                    messages=messages,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    stream=True,
                    extra_headers={
                        "HTTP-Referer": "https://your-domain.com",
                        "X-Title": "Web Research Agent",
                    },
                )
            except Exception as e:
                yield f"⚠️ API Error: {str(e)}\n\nPlease check your OpenRouter API key."
                return

            response = ""
            for chunk in completion:
                # Defensive guard: some providers stream keep-alive or usage
                # chunks that carry no choices.
                if not chunk.choices:
                    continue
                token = chunk.choices[0].delta.content or ""
                response += token
                full_response += token
                yield full_response

            queries = process_searches(response)

            if queries:
                search_cycle = True
                messages.append({"role": "assistant", "content": response})

                search_results = []
                for query in queries:
                    try:
                        result = search_with_retry(query)
                        search_results.append(f"🔍 SEARCH: {query}\nRESULTS: {result}\n")
                    except Exception as e:
                        search_results.append(f"⚠️ Search Error: {str(e)}\nQuery: {query}")
                    time.sleep(2)  # brief pause between queries to avoid rate limits

                # Feed the raw results back as a user turn so the model can
                # analyze them on the next pass through the loop.
                messages.append({
                    "role": "user",
                    "content": f"SEARCH RESULTS:\n{chr(10).join(search_results)}\nAnalyze these results..."
                })

                full_response += "\n🔍 Analyzing search results...\n"
                yield full_response

    except Exception as e:
        yield f"⚠️ Critical Error: {str(e)}\n\nPlease try again later."


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value=SYSTEM_PROMPT, label="System Prompt", lines=8),
        gr.Textbox(
            value="qwen/qwq-32b:free",  # Default model
            label="Model",
            placeholder="deepseek/deepseek-r1-zero:free, google/gemini-2.0-pro-exp-02-05:free...",
            info="OpenRouter model ID",
        ),
        gr.Slider(minimum=1000, maximum=50000, value=15000, step=500, label="Max Tokens"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.5, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.85, step=0.05, label="Top-p"),
        gr.Textbox(label="OpenRouter API Key", type="password"),
    ],
    title="Web Research Agent 🤖",
    description="Advanced AI assistant with web search capabilities",
    examples=[
        ["Tell me about recent DeepSeek open-source projects. There was an open-source week or something like that."],
        ["I need to cook something; give me simple recipes. Here is what I have in my fridge: eggs, milk, butter, cheese, bread, onions, garlic, tomatoes, spinach, carrots, yogurt, chicken breast, and lemon."],
        ["Write a report on the theme: The Role of Artificial Intelligence in Enhancing Personalized Learning."],
    ],
    cache_examples=False,
)

if __name__ == "__main__":
    demo.launch()
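

# A minimal sketch of the parsing contract between SYSTEM_PROMPT and
# process_searches(), assuming a reply shaped like the protocol described in
# the prompt. The helper name and the sample reply text below are invented
# for illustration; real model output will differ. Call it manually (e.g.
# from a REPL) rather than as part of the app.
def _demo_process_searches():
    reply = (
        "<think>I should verify this before answering.</think>\n"
        "<search>\n"
        "latest AI news\n"
        "deepseek open source week\n"
        "</search>"
    )
    queries = process_searches(reply)
    # The <think> block is stripped; each non-empty line inside the first
    # <search> block becomes one query.
    assert queries == ["latest AI news", "deepseek open source week"]
    return queries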