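"""Streamlit dashboard for Indonesian news sentiment.

Pulls Google News headlines (via GNews) that match a user-supplied keyword and
were published in the past week, classifies each headline with a fine-tuned
Indonesian BERT model, and visualises the positive/negative split with Plotly
gauges and a donut chart, alongside the raw data and a CSV download.
"""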
import streamlit as st
import datetime
import pandas as pd
from gnews import GNews
from transformers import pipeline
import plotly.graph_objects as go

# Load the sentiment analysis model once and cache it, so Streamlit reruns
# (triggered on every widget interaction) do not reload the weights
@st.cache_resource
def load_model():
    return pipeline("text-classification", model="pramudyalyza/bert-indonesian-finetuned-news")

pipe = load_model()

# Function to process the keyword and get sentiment analysis
def process_keyword(keyword):
    # GNews reports 'published date' in GMT, so compare against a naive UTC timestamp
    one_week_ago = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) - datetime.timedelta(days=7)
    
    news = GNews(language='id', country='ID', max_results=100)
    
    search_results = news.get_news(keyword)
    
    filtered_headlines = []
    for article in search_results:
        published_date = datetime.datetime.strptime(article['published date'], '%a, %d %b %Y %H:%M:%S %Z')
        if published_date > one_week_ago:
            filtered_headlines.append(article['title'])
    
    df = pd.DataFrame(filtered_headlines, columns=['title'])
    # .copy() avoids pandas' SettingWithCopyWarning when the sentiment column is added below
    df_clean = df.drop_duplicates().copy()

    df_clean['sentiment'] = df_clean['title'].apply(lambda x: pipe(x)[0]['label'])

    positive_count = (df_clean['sentiment'] == 'Positive').sum()
    negative_count = (df_clean['sentiment'] == 'Negative').sum()
    total_count = len(df_clean)

    return positive_count, negative_count, total_count, df_clean

# Streamlit app layout
st.title("News Sentiment Analysis Dashboard")

keyword_input = st.text_input("Enter a keyword to search for news", placeholder="Type a keyword...")

if st.button("Analyze"):
    if keyword_input:
        with st.spinner('Scraping and analyzing the data...'):
            positive_count, negative_count, total_count, df_clean = process_keyword(keyword_input)

        if total_count == 0:
            st.warning("No news articles from the past week matched this keyword.")
            st.stop()

        # Create plots
        fig_positive = go.Figure(go.Indicator(
            mode="gauge+number",
            value=positive_count,
            title={'text': "Positive Sentiment"},
            gauge={'axis': {'range': [0, total_count]},
                   'bar': {'color': "green"}}
        ))

        fig_negative = go.Figure(go.Indicator(
            mode="gauge+number",
            value=negative_count,
            title={'text': "Negative Sentiment"},
            gauge={'axis': {'range': [0, total_count]},
                   'bar': {'color': "red"}}
        ))

        fig_donut = go.Figure(go.Pie(
            labels=['Positive', 'Negative'],
            values=[positive_count, negative_count],
            hole=0.5,
            marker=dict(colors=['green', 'red'])
        ))
        fig_donut.update_layout(title_text='Sentiment Distribution')

        # Create a horizontal layout using st.columns
        col1, col2, col3 = st.columns(3)

        # Display results in each column
        col1.plotly_chart(fig_positive, use_container_width=True)
        col2.plotly_chart(fig_negative, use_container_width=True)
        col3.plotly_chart(fig_donut, use_container_width=True)

        st.write(f"Unique headlines analyzed: {total_count}")

        # Show DataFrame
        st.dataframe(df_clean, use_container_width=True)

        # Download CSV
        csv = df_clean.to_csv(index=False).encode('utf-8')
        st.download_button(
            label="Download CSV",
            data=csv,
            file_name='news_sentiment_analysis.csv',
            mime='text/csv',
        )
    else:
        st.error("Please enter a keyword.")