Kaynağa Gözat

make model a parameter

david 11 ay önce
ebeveyn
işleme
0e4f69e2a7
1 değiştirilmiş dosya ile 12 ekleme ve 12 silme
    main.py · +12 −12

+ 12 - 12
main.py

@@ -42,8 +42,7 @@ def extract_text_from_links(links, timeout=5):
         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
     }
     
-    for link in links:
-        print("downloading text from: " + link)
+    for link in links:        
         try:
             response = requests.get(link, headers=headers, timeout=timeout)
             if response.status_code == 200:
@@ -59,13 +58,13 @@ def extract_text_from_links(links, timeout=5):
     return extracted_texts
 
 
-def summarize_individual_texts(texts_and_urls, query, ollama_url="http://localhost:11434/api/generate"):
+def summarize_individual_texts(texts_and_urls, query, model, ollama_url="http://localhost:11434/api/generate"):
     summaries = []
     for url, text in texts_and_urls:
         prompt = f"Extract the relevant information from the following text with regards to the original \
         query: '{query}'\n\n{text}\n"
         payload = {
-            "model": "command-r",
+            "model": model,
             "prompt": prompt,
             "stream": False,
             "max_tokens": 1000
@@ -84,7 +83,7 @@ def summarize_individual_texts(texts_and_urls, query, ollama_url="http://localho
     return summaries
 
 
-def summarize_with_ollama(texts_and_urls, query, ollama_url="http://localhost:11434/api/generate"):
+def summarize_with_ollama(texts_and_urls, query, model, ollama_url="http://localhost:11434/api/generate"):
     # Prepare the context and prompt
     context = "\n".join([f"URL: {url}\nText: {text}" for url, text in texts_and_urls])
     prompt = f"Summarize the following search results with regards to the original query: '{query}' \
@@ -93,7 +92,7 @@ def summarize_with_ollama(texts_and_urls, query, ollama_url="http://localhost:11
     
     # Create the payload for the POST request
     payload = {        
-        "model": "command-r",
+        "model": model,
         "prompt": prompt,
         "stream": False,
         "max_tokens": 1500
@@ -114,14 +113,14 @@ def summarize_with_ollama(texts_and_urls, query, ollama_url="http://localhost:11
         return None
 
 
-def optimize_search_query(query, ollama_url="http://localhost:11434/api/generate"):
+def optimize_search_query(query, model, ollama_url="http://localhost:11434/api/generate"):
     # Prepare the prompt for optimizing the search query
     prompt = f"Optimize the following natural language query to improve its effectiveness in a web search.\
         Make it very concise. query: '{query}'"
     
     # Create the payload for the POST request
     payload = {        
-        "model": "command-r",
+        "model": model,
         "prompt": prompt,
         "stream": False,
         "max_tokens": 50
@@ -158,8 +157,10 @@ if __name__ == "__main__":
     args = parser.parse_args()
     
     original_query = args.query
+    model = "qc"
+
     # Optimize the search query
-    optimized_query = optimize_search_query(original_query)
+    optimized_query = optimize_search_query(original_query, model)
     print(f"Original Query: {original_query}")
     print(f"Optimized Query: {optimized_query}")
 
@@ -173,9 +174,8 @@ if __name__ == "__main__":
     texts_and_urls = extract_text_from_links(links)
     
     print("Summarizing individual search results")
-    intermediate_summaries = summarize_individual_texts(texts_and_urls, original_query)
-    
-    final_summary = summarize_with_ollama(intermediate_summaries, original_query)
+    intermediate_summaries = summarize_individual_texts(texts_and_urls, original_query, model)
+    final_summary = summarize_with_ollama(intermediate_summaries, original_query, model)
     
     if final_summary:
         print("\nFinal Summary of search results:\n")