import streamlit as st
import requests
import os
from typing import Literal, List
from tavily import TavilyClient
from pydantic import BaseModel
from dotenv import load_dotenv
import logging
from together import Together
import json

# Set up basic logger
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


load_dotenv()

# API keys are read from the environment (e.g. a .env file) instead of being hard-coded in source.
# The environment variable names used here (ELEVEN_LABS_API_KEY, TAVILY_API_KEY, TOGETHER_API_KEY) are assumed.
ELEVEN_LABS_API_KEY = os.getenv("ELEVEN_LABS_API_KEY")
TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
TOGETHER_API_KEY = os.getenv("TOGETHER_API_KEY")

def call_llm(prompt):
    """Send a single-turn prompt to the Together chat completions API and return the response text."""
    client = Together(api_key=TOGETHER_API_KEY)
    response = client.chat.completions.create(
        model="meta-llama/Llama-3.3-70B-Instruct-Turbo",
        messages=[
            {
                "role": "user",
                "content": prompt
            }
        ]
    )
    return response.choices[0].message.content

def fetch_from_web(query):
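    """Search recent news for the given query via Tavily and return the result list under a "sources" key."""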
    tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
    response = tavily_client.search(
        query,
        include_raw_content=True,
        max_results=10,
        topic="news",
        search_depth="basic"
    )
    return {"sources": response['results']}

class Sentiment(BaseModel):
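    """Structured output expected from the LLM for a single article."""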
    summary: str
    reasoning: str
    topics: List[str]
    sentiment: Literal['positive', 'negative', 'neutral']

def analyze_sentiment(article):
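    """Summarize one article, classify its sentiment, and extract key topics; returns a dict or None on error."""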
    sentiment_prompt = f"""
                Analyze the following news article about a company:

                1. **Summary**: Provide a comprehensive summary of the article's key points.
                
                2. **Sentiment Analysis**: 
                - Classify the overall sentiment toward the company as: POSITIVE, NEGATIVE, or NEUTRAL
                - Support your classification with specific quotes, tone analysis, and factual evidence from the article
                - Explain your reasoning for this sentiment classification in 2 to 3 lines.
                
                3. **Key Topics**: 
                - Identify 3-5 main topics discussed in the article
                - Only give the name of the topics

                Be as detailed and objective as possible in your reasoning.

                Article Title: {article['title']}

                Article: {article['raw_content']}
                """

    try:
        client = Together(api_key=TOGETHER_API_KEY)

        # Ask the model for JSON that conforms to the Sentiment schema.
        extract = client.chat.completions.create(
            messages=[
                {
                    "role": "user",
                    "content": sentiment_prompt,
                },
            ],
            model="meta-llama/Llama-3.3-70B-Instruct-Turbo",
            response_format={
                "type": "json_object",
                "schema": Sentiment.model_json_schema(),
            },
        )

        output = json.loads(extract.choices[0].message.content)

        final_dict = {
            "title": article["title"],
            "summary": output.get("summary"),
            "reasoning": output.get("reasoning"),
            "topics": output.get("topics"),
            "sentiment": output.get("sentiment"),
        }

        return final_dict

    except Exception as e:
        logger.error(f"Error analyzing sentiment for article '{article.get('title', 'unknown')}': {e}")
        return None
    
def generate_comparative_sentiment(articles):
    """Aggregate per-article results into sentiment counts and topic-overlap statistics."""
    sentiment_counts = {"Positive": 0, "Negative": 0, "Neutral": 0}

    for article in articles:
        sentiment = article.get("sentiment", "").lower()
        if sentiment == "positive":
            sentiment_counts["Positive"] += 1
        elif sentiment == "negative":
            sentiment_counts["Negative"] += 1
        elif sentiment == "neutral":
            sentiment_counts["Neutral"] += 1

    # Count how often each topic appears across all articles.
    all_topics = []
    for article in articles:
        all_topics.extend(article.get("topics", []))

    topic_counts = {}
    for topic in set(all_topics):
        topic_counts[topic] = all_topics.count(topic)

    # Topics mentioned in more than one article.
    common_topics = [topic for topic, count in topic_counts.items() if count > 1]

    # For each article, the topics that appear in no other article.
    unique_topics_per_article = {}
    for i, article in enumerate(articles):
        article_topics = set(article.get("topics", []))
        other_topics = set()
        for j, other_article in enumerate(articles):
            if i != j:
                other_topics.update(other_article.get("topics", []))
        unique_topics_per_article[f"Unique Topics in Article {i + 1}"] = list(article_topics - other_topics)

    comparative_sentiment = {
        "Sentiment Distribution": sentiment_counts,
        "Coverage Differences": "coverage_differences",  # placeholder; not computed yet
        "Topic Overlap": {
            "Common Topics": common_topics,
            **unique_topics_per_article,
        },
    }

    return comparative_sentiment

def get_summaries_by_sentiment(articles):
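    """Group article titles and summaries into positive, negative, and neutral text blocks."""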
    pos_sum = []
    neg_sum = []
    neutral_sum = []
    
    for article in articles:
        sentiment = article.get("sentiment", "").lower()
        title = article.get("title", "No Title")
        summary = article.get("summary", "No Summary")
        
        article_text = f'Title: {title}\nSummary: {summary}'
        
        if sentiment == "positive":
            pos_sum.append(article_text)
        elif sentiment == "negative":
            neg_sum.append(article_text)
        elif sentiment == "neutral":
            neutral_sum.append(article_text)

    pos_sum = "\n\n".join(pos_sum) if pos_sum else "No positive articles available."
    neg_sum = "\n\n".join(neg_sum) if neg_sum else "No negative articles available."
    neutral_sum = "\n\n".join(neutral_sum) if neutral_sum else "No neutral articles available."

    return pos_sum, neg_sum, neutral_sum

def comparative_analysis(pos_sum, neg_sum, neutral_sum):
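    """Run an LLM-based comparative analysis across the positive, negative, and neutral article summaries."""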
    prompt = f"""
    Perform a detailed comparative analysis of the sentiment across three categories of articles (Positive, Negative, and Neutral) about a specific company. Address the following aspects:

    1. **Sentiment Breakdown**: Identify how each category (positive, negative, and neutral) portrays the company. Highlight the language, tone, and emotional cues that shape the sentiment.

    2. **Key Themes and Topics**: Compare the primary themes and narratives within each sentiment group. What aspects of the company's operations, performance, or reputation does each category focus on?

    3. **Perceived Company Image**: Analyze how each sentiment type influences public perception of the company. What impression is created by positive vs. negative vs. neutral coverage?

    4. **Bias and Framing**: Evaluate whether any of the articles reflect explicit biases or specific agendas regarding the company. Are there patterns in how the company is framed across different sentiments?

    5. **Market or Stakeholder Impact**: Discuss potential effects on stakeholders (e.g., investors, customers, regulators) based on the sentiment of each article type.

    6. **Comparative Insights**: Provide a concise summary of the major differences and commonalities between the three sentiment groups. What overall narrative emerges about the company?

    ### Positive Articles:
    {pos_sum}

    ### Negative Articles:
    {neg_sum}

    ### Neutral Articles:
    {neutral_sum}
    """

    output = call_llm(prompt)
    return output

def generate_final_report(pos_sum, neg_sum, neutral_sum, comparative_sentiment):
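    """Build the final report prompt from the grouped summaries and sentiment counts, then generate it with the LLM."""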
    final_report_prompt = f"""
    Corporate News Sentiment Analysis Report:

    ### 1. Executive Summary
    - Overview of sentiment distribution: {comparative_sentiment["Sentiment Distribution"]['Positive']} positive, {comparative_sentiment["Sentiment Distribution"]['Negative']} negative, {comparative_sentiment["Sentiment Distribution"]['Neutral']} neutral.
    - Highlight the dominant narrative shaping the company's perception.
    - Summarize key drivers behind positive and negative sentiments.

    ### 2. Media Coverage Analysis
    - Identify major news sources covering the company.
    - Highlight patterns in coverage across platforms (e.g., frequency, timing).
    - Identify whether media sentiment shifts over time.

    ### 3. Sentiment Breakdown
    - **Positive Sentiment:**
        * Titles and sources: {pos_sum}
        * Key themes, notable quotes, and focal areas (e.g., product, leadership).
    - **Negative Sentiment:**
        * Titles and sources: {neg_sum}
        * Key themes, notable quotes, and areas of concern.
    - **Neutral Sentiment:**
        * Titles and sources: {neutral_sum}
        * Key themes and neutral narratives.

    ### 4. Narrative Analysis
    - Identify primary storylines about the company.
    - Analyze how the company is positioned (positive, neutral, negative).
    - Detect shifts or emerging narratives over time.

    ### 5. Key Drivers of Sentiment
    - Identify specific events, announcements, or actions driving media sentiment.
    - Evaluate sentiment linked to industry trends vs. company-specific factors.
    - Highlight company strengths and weaknesses based on media portrayal.

    ### 6. Competitive Context
    - Identify competitor comparisons.
    - Analyze how media sentiment about the company compares to industry standards.
    - Highlight competitive advantages or concerns raised by the media.

    ### 7. Stakeholder Perspective
    - Identify how key stakeholders (e.g., investors, customers, regulators) are represented.
    - Analyze stakeholder concerns and reputation risks/opportunities.

    ### 8. Recommendations
    - Suggest strategies to mitigate negative sentiment.
    - Recommend approaches to amplify positive narratives.
    - Provide messaging suggestions for future announcements.

    ### 9. Appendix
    - Full article details (title, publication, date, author, URL).
    - Media monitoring metrics (reach, engagement, etc.).
    """

    response = call_llm(final_report_prompt)
    
    return response
    
def translate(report, target_language):
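    """Translate the report into the target language using the LLM."""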

    translation_prompt = f"""
    Translate the following corporate sentiment analysis report into {target_language}:

    {report}

    Ensure the translation maintains professional tone and structure while accurately conveying key insights and details.
    """
    translation = call_llm(translation_prompt)
    return translation

def text_to_speech(text):
    """Convert text to speech with the ElevenLabs API and return the MP3 bytes, or None on failure."""
    url = "https://api.elevenlabs.io/v1/text-to-speech/JBFqnCBsd6RMkjVDRZzb?output_format=mp3_44100_128"
    model_id = "eleven_multilingual_v2"

    headers = {
        "xi-api-key": ELEVEN_LABS_API_KEY,
        "Content-Type": "application/json"
    }

    payload = {
        "text": text,
        "model_id": model_id
    }

    response = requests.post(url, headers=headers, json=payload)

    if response.status_code == 200:
        return response.content

    logger.error(f"Text-to-speech request failed: {response.status_code} - {response.text}")
    return None
        

st.title("Company Sentiment Analyzer")

company_name = st.text_input("Enter Company Name")
target_language = st.text_input("Enter Target Language for Translation")


if st.button("Fetch Sentiment Data"):
    try:
        logger.info(f"Fetching web results for {company_name}")
        web_results = fetch_from_web(company_name)

        if "sources" not in web_results:
            st.error("No sources found.")
        else:
            # Analyze each article and drop any that failed to parse.
            sentiment_output = [
                analyze_sentiment(article)
                for article in web_results["sources"]
            ]
            sentiment_output = [s for s in sentiment_output if s is not None]

            logger.info("Generating comparative sentiment")
            comparative_sentiment = generate_comparative_sentiment(sentiment_output)

            logger.info("Summarizing report by sentiment")
            positive_summary, negative_summary, neutral_summary = get_summaries_by_sentiment(
                sentiment_output
            )

            logger.info("Generating final report")
            final_report = generate_final_report(
                positive_summary,
                negative_summary,
                neutral_summary,
                comparative_sentiment
            )

            logger.info("Translating report")
            translated_report = translate(final_report, target_language=target_language)

            # Audio generation is disabled by default to avoid extra API calls.
            # audio_data = text_to_speech(translated_report)

            output_dict = {
                "company_name": company_name,
                "articles": sentiment_output,
                "comparative_sentiment": comparative_sentiment,
                "final_report": final_report,
                "translated_report": translated_report,
                "audio_text": "",
            }

            st.subheader("Company Name")
            st.write(output_dict.get("company_name"))

            st.subheader("Final Report")
            st.write(output_dict.get("final_report"))

            st.subheader("Translated Report")
            st.write(output_dict.get("translated_report"))

            st.subheader("Text to Speech")
            st.write("Audio generation is currently disabled.")

    except requests.exceptions.RequestException as e:
        st.error(f"Error fetching data: {e}")
        logger.error(f"Error fetching data: {e}")