@@ -78,6 +78,7 @@ def summarize_individual_texts(texts_and_urls, query, model, api_url="http://loc
         if response.status_code == 200:
             #result = json.loads(response.text)["response"]
             result_json = json.loads(response.text)
+            print(result_json)
             result = result_json["choices"][0]["text"].strip()
             summaries.append((url, result))
         else:
@@ -100,7 +101,7 @@ def summarize(texts_and_urls, query, model, api_url="http://localhost:8000/api/v
         "model": model,
         "prompt": prompt,
         "stream": False,
-        "max_tokens": 1500,
+        # "max_tokens": 1500,
         "options": {
             "num_ctx": 16384
         }
@@ -168,6 +169,11 @@ if __name__ == "__main__":
 
     original_query = args.query
     query_model = "Gemma-3-4b-it-GGUF"
+    #query_model = "Qwen3-1.7B-GGUF"
+    #summary_model = "Qwen3-4B-Instruct-2507-GGUF"
+    summary_model = "Qwen3-1.7B-GGUF"
+    #final_summary_model = "Gemma-3-4b-it-GGUF"
+    final_summary_model = "Qwen3-1.7B-GGUF"
 
     # Optimize the search query
     optimized_query = optimize_search_query(original_query, query_model)
@@ -182,10 +188,10 @@ if __name__ == "__main__":
         print(f"{i}. {link}")
 
     texts_and_urls = extract_text_from_links(links)
-    summary_model = "Gemma-3-4b-it-GGUF"
+
     print("Summarizing individual search results")
     intermediate_summaries = summarize_individual_texts(texts_and_urls, original_query, summary_model)
-    final_summary = summarize(intermediate_summaries, original_query, summary_model)
+    final_summary = summarize(intermediate_summaries, original_query, final_summary_model)
 
     if final_summary:
         print("\nFinal Summary of search results:\n")
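
For reference, a minimal sketch (not part of the patch) of the request/response round trip the modified functions assume: the payload keys and the choices[0].text parsing are taken from the hunks above, while the helper name and the explicit api_url argument are assumptions, since the endpoint path is truncated in the hunk headers.

import json
import requests

# Hypothetical helper (not in the patch) showing the payload and response
# shape the patched code relies on. The real endpoint path is truncated in
# the hunk headers, so api_url must be supplied by the caller.
def complete(prompt, model, api_url):
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": False,
        # "max_tokens": 1500,  # left commented out, as in the patch
        "options": {
            "num_ctx": 16384
        }
    }
    response = requests.post(api_url, json=payload)
    if response.status_code != 200:
        return None
    result_json = json.loads(response.text)
    # OpenAI-style completions body: generated text sits under choices[0].text
    return result_json["choices"][0]["text"].strip()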