Spaces:
Sleeping
Sleeping
updating app.py files
Browse files- Balance_sheet.py +121 -0
- Cashflows.py +69 -0
- PL.py +126 -0
- YOU_FINANCE.ipynb +0 -0
- app.py +105 -0
- daily_prices.py +229 -0
- macd_prices.py +186 -0
- news.py +158 -0
- requirements.txt +8 -0
Balance_sheet.py
ADDED
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import pandas as pd
|
3 |
+
import yfinance as yf
|
4 |
+
import seaborn as sns
|
5 |
+
import matplotlib.pyplot as plt
|
6 |
+
|
7 |
+
def load_and_display_BS(ticker1, ticker2):
    """Fetch, analyse and display balance-sheet data for two tickers side by side.

    For each ticker this pulls the balance sheet from yfinance, computes
    solvency / liquidity ratios, a vertical analysis (each line item as a
    percentage of its base total) and a horizontal analysis (percentage change
    between reporting periods), and renders everything in two Streamlit
    columns.

    Args:
        ticker1 (str): First Yahoo Finance ticker symbol.
        ticker2 (str): Second Yahoo Finance ticker symbol.
    """
    # Balance-sheet line items used for the vertical-analysis views.
    # NOTE(review): these are yfinance row labels — assumed present for both
    # tickers; missing labels raise KeyError and land in the error handler.
    asset_items = ['Total Assets', 'Current Assets',
                   'Cash Cash Equivalents And Short Term Investments',
                   'Inventory', 'Gross PPE', 'Properties',
                   'Land And Improvements', 'Accumulated Depreciation',
                   'Goodwill And Other Intangible Assets', 'Invested Capital',
                   'Net Tangible Assets']
    debt_items = ['Total Liabilities Net Minority Interest', 'Total Debt',
                  'Current Liabilities', 'Current Provisions',
                  'Other Current Liabilities',
                  'Total Non Current Liabilities Net Minority Interest',
                  'Long Term Debt', 'Long Term Debt And Capital Lease Obligation',
                  'Long Term Provisions', 'Stockholders Equity']

    def _add_ratios(df):
        """Add solvency / liquidity ratio columns to one balance-sheet frame."""
        df['Debt-to-Equity Ratio'] = df['Total Debt'] / df['Common Stock Equity']
        df['Debt-to-Assets Ratio'] = df['Total Debt'] / df['Total Assets']
        df['Current Ratio'] = df['Current Assets'] / df['Current Liabilities']
        df['Working Capital Ratio'] = df['Working Capital'] / df['Total Assets']
        df['Cash Ratio'] = (df['Cash Cash Equivalents And Short Term Investments']
                            / df['Current Liabilities'])

    def _show(container, ticker, df):
        """Render one ticker's ratios and analyses inside a Streamlit column."""
        solvency = df[['Debt-to-Equity Ratio', 'Debt-to-Assets Ratio',
                       'Current Ratio']]
        cash_and_working_capital = df[['Working Capital Ratio', 'Cash Ratio']]
        assets = df[asset_items]
        debts = df[debt_items]

        # Vertical analysis: every line item as a % of its base total
        # (Total Assets for assets, Total Liabilities for debts).
        assets_vertical = assets.T.div(assets['Total Assets'],
                                       axis=1).round(2) * 100
        debts_vertical = debts.T.div(
            debts['Total Liabilities Net Minority Interest'],
            axis=1).round(2) * 100

        # Horizontal analysis: % change of each asset item between periods.
        assets_pct_change = assets.pct_change(axis=1) * 100

        with container:
            st.title(f"{ticker}:Ratios")
            st.line_chart(solvency)
            st.dataframe(cash_and_working_capital)
            st.subheader(f"Vertical Analysis of Assets in % : {ticker}")
            st.dataframe(assets_vertical)
            st.subheader(f"Vertical Analysis of Debts in % : {ticker}")
            fig, ax = plt.subplots()
            ax.set_title(f"Vertical Analysis of Debts in %: {ticker}")
            sns.heatmap(debts_vertical, annot=True, fmt='.2f',
                        cmap='coolwarm', ax=ax)
            st.pyplot(fig)
            st.dataframe(assets_pct_change)

    try:
        col1, col2 = st.columns(2)
        for container, ticker in ((col1, ticker1), (col2, ticker2)):
            # Fetch, orient periods as rows, and treat missing values as 0.
            df = yf.Ticker(ticker).balance_sheet.transpose().fillna(0)
            _add_ratios(df)
            _show(container, ticker, df)
    except Exception as e:
        # Surface data/rendering problems in the UI instead of crashing the app.
        st.error(f"An error occurred: {e}")
Cashflows.py
ADDED
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import pandas as pd
|
3 |
+
import yfinance as yf
|
4 |
+
import seaborn as sns
|
5 |
+
import matplotlib.pyplot as plt
|
6 |
+
|
7 |
+
def load_and_display_cashflows(ticker1, ticker2):
    """Fetch, analyse and display cash-flow ratios for two tickers side by side.

    For each ticker this pulls the cash-flow statement from yfinance, computes
    two conversion ratios, and shows a line chart plus a table in one of two
    Streamlit columns.

    Args:
        ticker1 (str): First Yahoo Finance ticker symbol.
        ticker2 (str): Second Yahoo Finance ticker symbol.
    """
    def _cashflow_ratios(ticker):
        """Return a frame of cash-flow conversion ratios for one ticker."""
        df = yf.Ticker(ticker).cashflow.transpose().fillna(0)
        # NOTE(review): despite the *_to_Revenue names, the denominator used
        # here is net income from continuing operations, not revenue — kept
        # as-is to preserve behaviour; confirm intent with the author.
        df['CFO_to_Revenue'] = (df['Operating Cash Flow']
                                / df['Net Income From Continuing Operations'])
        df['FCF_to_Revenue'] = (df['Free Cash Flow']
                                / df['Net Income From Continuing Operations'])
        return df[['CFO_to_Revenue', 'FCF_to_Revenue']]

    try:
        ratios1 = _cashflow_ratios(ticker1)
        ratios2 = _cashflow_ratios(ticker2)

        # Display side-by-side.
        col1, col2 = st.columns(2)
        for container, ticker, ratios in ((col1, ticker1, ratios1),
                                          (col2, ticker2, ratios2)):
            with container:
                st.title(f"{ticker}:Ratios")
                st.line_chart(ratios)
                st.dataframe(ratios)
    except Exception as e:
        # Surface data/rendering problems in the UI instead of crashing the app.
        st.error(f"An error occurred: {e}")
PL.py
ADDED
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import pandas as pd
|
3 |
+
import yfinance as yf
|
4 |
+
import seaborn as sns
|
5 |
+
import matplotlib.pyplot as plt
|
6 |
+
# import openai
|
7 |
+
|
8 |
+
|
9 |
+
def load_and_display_pl(ticker1, ticker2):
    """Fetch, analyse and display income-statement (P&L) data for two tickers.

    For each ticker this pulls the income statement from yfinance, computes
    vertical analysis (each item as % of total revenue), horizontal analysis
    (% change between periods), a growth-trend chart, and a heatmap of margin
    and coverage ratios, rendered side by side in two Streamlit columns.

    Args:
        ticker1 (str): First Yahoo Finance ticker symbol.
        ticker2 (str): Second Yahoo Finance ticker symbol.
    """
    # Income-statement line items kept for the analyses (yfinance row labels).
    pl_items = ['Total Revenue', 'Cost Of Revenue', 'Gross Profit',
                'Operating Income', 'Operating Expense', 'EBIT', 'EBITDA',
                'Pretax Income', 'Tax Provision', 'Net Interest Income',
                'Net Income', 'Basic EPS']
    margin_cols = ['Operating_profit_Margin', 'Operating_Expense_Ratio',
                   'Interest_Coverage_Ratio', 'Effective_Tax_Rate',
                   'Net_profit_Margin']

    def _prepare(ticker):
        """Fetch one income statement and derive all analyses.

        Returns (vertical, pct_change, trend, margins) DataFrames.
        """
        df = yf.Ticker(ticker).financials.transpose().fillna(0)
        df = df[pl_items]

        # Horizontal analysis must be computed BEFORE the ratio columns are
        # added, so the % changes cover only the raw statement items.
        pct_change = df.pct_change(axis=1) * 100
        trend = pct_change[['Gross Profit', 'Pretax Income', 'Net Income']]

        # Vertical analysis: each item as a % of total revenue.
        vertical = df.T.div(df['Total Revenue'], axis=1).round(2) * 100

        # Margins & ratios.
        df['Operating_profit_Margin'] = df['Operating Income'] / df['Total Revenue'] * 100
        df['Operating_Expense_Ratio'] = df['Operating Expense'] / df['Total Revenue'] * 100
        # abs() so net interest *income* still yields a positive coverage ratio.
        df['Interest_Coverage_Ratio'] = df['EBIT'] / abs(df['Net Interest Income'])
        df['Effective_Tax_Rate'] = df['Tax Provision'] / df['Pretax Income'] * 100
        df['Net_profit_Margin'] = df['Net Income'] / df['Total Revenue'] * 100

        return vertical, pct_change, trend, df[margin_cols]

    def _show(ticker, vertical, pct_change, trend, margins):
        """Render one ticker's P&L analyses in the current Streamlit container."""
        st.header(f"Vertical Analysis : {ticker}")
        st.dataframe(vertical)
        st.header(f"Horizontal Analysis : {ticker}")
        st.dataframe(pct_change)
        st.header(f"Trend Analysis : {ticker}")
        st.line_chart(trend)
        fig, ax = plt.subplots()
        ax.set_title(f"Margins & Tax coverage Ratio of : {ticker}")
        sns.heatmap(margins, annot=True, fmt='.2f', cmap='coolwarm', ax=ax)
        st.pyplot(fig)

    try:
        results1 = _prepare(ticker1)
        results2 = _prepare(ticker2)

        # Display side-by-side.
        col1, col2 = st.columns(2)
        with col1:
            _show(ticker1, *results1)
        with col2:
            _show(ticker2, *results2)
    except Exception as e:
        # Surface data/rendering problems in the UI instead of crashing the app.
        st.error(f"An error occurred: {e}")
YOU_FINANCE.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|
app.py
ADDED
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import pandas as pd
|
3 |
+
import yfinance as yf
|
4 |
+
import plotly.graph_objects as go
|
5 |
+
from PL import load_and_display_pl
|
6 |
+
from Balance_sheet import load_and_display_BS
|
7 |
+
from Cashflows import load_and_display_cashflows
|
8 |
+
# from news import classify_sentiment
|
9 |
+
from daily_prices import load_and_display_prices
|
10 |
+
from macd_prices import load_and_display_macd
|
11 |
+
|
12 |
+
|
13 |
+
def main():
    """Render the stock-comparison Streamlit app.

    Reads two Yahoo Finance ticker symbols and offers a tabbed interface
    comparing their P&L statements, balance sheets, cash-flow statements,
    daily prices (with LSTM forecasts), and MACD indicators.
    """
    # Bold page title via Markdown.
    st.markdown("# **SURF THROUGH YOUR STOCKS**")

    # User input for ticker symbols.
    ticker1 = st.text_input("Your Yahoo Ticker Symbol-1")
    ticker2 = st.text_input("Your Yahoo Ticker Symbol-2")

    # Tabbed interface. The news tab (tab4) is currently disabled, hence the
    # gap in the variable names.
    tab1, tab2, tab3, tab5, tab6 = st.tabs(
        ["P/L Statement", "Balance Sheet", "Cashflow",
         "Compare Prices", "MACD Indicators"])

    with tab1:
        # Button to trigger data loading and display of the P&L comparison.
        if st.button("Load and Compare Profit and loss statement"):
            load_and_display_pl(ticker1, ticker2)

    with tab2:
        # Button to trigger data loading and display of balance-sheet ratios.
        if st.button("Load and Compare Balance statement"):
            load_and_display_BS(ticker1, ticker2)

    with tab3:
        # Button to trigger data loading and display of cash-flow ratios.
        if st.button("Load and Compare cashflow statement"):
            load_and_display_cashflows(ticker1, ticker2)

    with tab5:
        if st.button("Load and Compare Prices"):
            load_and_display_prices(ticker1, ticker2)

    with tab6:
        # User input for MACD parameters (defaults follow the standard
        # 12/26/9 MACD configuration).
        price_column = st.selectbox("Price Column",
                                    ["High", "Low", "Close", "Open"],
                                    index=2)  # default "Close"
        ema_fast = st.number_input("Fast EMA Span", min_value=1, step=1, value=12)
        ema_slow = st.number_input("Slow EMA Span", min_value=1, step=1, value=26)
        signal_span = st.number_input("Signal Span", min_value=1, step=1, value=9)
        user_inputs = {
            "price_column": price_column,
            "ema_fast": ema_fast,
            "ema_slow": ema_slow,
            "signal_span": signal_span,
        }

        if st.button("Load and Compare MACD Indicators"):
            load_and_display_macd(ticker1, ticker2, user_inputs)

    # Add the fixed footer with HTML.
    st.markdown(
        """
        <style>
        .footer {
            position: fixed;
            left: 0;
            bottom: 0;
            width: 100%;
            background-color: white;
            color: black;
            text-align: center;
            padding: 10px;
        }
        </style>
        <div class="footer">
            <p>Made with ❤️ by TJ and Auttribe Community</p>
        </div>
        """,
        unsafe_allow_html=True,
    )

    # Ensure that the layout does not extend beyond the footer.
    st.write("\n" * 10)


if __name__ == "__main__":
    main()
daily_prices.py
ADDED
@@ -0,0 +1,229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import numpy as np
|
3 |
+
import yfinance as yf
|
4 |
+
import pandas as pd
|
5 |
+
import plotly.graph_objects as go
|
6 |
+
from sklearn.preprocessing import MinMaxScaler
|
7 |
+
from tensorflow.keras.models import Sequential
|
8 |
+
from tensorflow.keras.layers import LSTM, Dense, Dropout
|
9 |
+
|
10 |
+
def load_and_display_prices(ticker1, ticker2):
    """Compare two tickers' daily price action and forecast closes with LSTMs.

    Downloads five years of history for each ticker, charts the daily change
    in closing price and candlesticks with 50/100-day moving averages, then
    trains a small LSTM per ticker on min-max-scaled closing prices and
    iteratively predicts the next `prediction_days` closes (chosen by the
    user via a slider). Predicted prices and their deviation from the current
    price are shown as tables.

    Args:
        ticker1 (str): First Yahoo Finance ticker symbol.
        ticker2 (str): Second Yahoo Finance ticker symbol.
    """
    # ---- Fetch historical data ----
    try:
        data1 = yf.download(ticker1, period="5y")
        data2 = yf.download(ticker2, period="5y")
    except Exception as e:
        st.error(f"Error fetching data: {e}")
        return

    # ---- Chart of daily changes in closing price ----
    data1['Daily Change (' + ticker1 + ')'] = data1['Close'].diff()
    data2['Daily Change (' + ticker2 + ')'] = data2['Close'].diff()

    fig = go.Figure()
    for data, ticker, color in ((data1, ticker1, 'royalblue'),
                                (data2, ticker2, 'darkorange')):
        fig.add_trace(go.Scatter(
            x=data.index,
            y=data['Daily Change (' + ticker + ')'],
            name='Daily Change (' + ticker + ')',
            mode='lines',
            marker=dict(color=color)
        ))
    fig.update_layout(
        xaxis_title='Date',
        yaxis_title='Daily Change',
        title='Daily Change in Closing Price for ' + ticker1 + ' and ' + ticker2
    )
    st.plotly_chart(fig)

    # ---- Candlestick chart with 50/100-day moving averages ----
    for data, ticker in ((data1, ticker1), (data2, ticker2)):
        data['50-Day MA (' + ticker + ')'] = data['Close'].rolling(window=50).mean()
        data['100-Day MA (' + ticker + ')'] = data['Close'].rolling(window=100).mean()

    fig_1 = go.Figure()
    for data, ticker in ((data1, ticker1), (data2, ticker2)):
        fig_1.add_trace(go.Candlestick(
            x=data.index,
            open=data['Open'],
            high=data['High'],
            low=data['Low'],
            close=data['Close'],
            name=ticker
        ))
    # One moving-average overlay per (series, colour) pair.
    ma_traces = (
        (data1, '50-Day MA (' + ticker1 + ')', 'blue'),
        (data1, '100-Day MA (' + ticker1 + ')', 'green'),
        (data2, '50-Day MA (' + ticker2 + ')', 'orange'),
        (data2, '100-Day MA (' + ticker2 + ')', 'teal'),
    )
    for data, column, color in ma_traces:
        fig_1.add_trace(go.Scatter(
            x=data.index,
            y=data[column],
            name=column,
            mode='lines',
            line=dict(color=color, width=2)
        ))
    fig_1.update_layout(xaxis_title='Date', yaxis_title='Price')
    st.plotly_chart(fig_1)

    # ---- Machine-learning model for future price prediction ----
    def create_model(x_train, y_train):
        """Build and fit a two-layer LSTM regressor on the given sequences."""
        model = Sequential()
        model.add(LSTM(100, return_sequences=True,
                       input_shape=(x_train.shape[1], x_train.shape[2])))
        model.add(Dropout(0.2))
        model.add(LSTM(100))
        model.add(Dropout(0.01))
        model.add(Dense(1))
        model.compile(loss='mse', optimizer='adam')
        model.fit(x_train, y_train, epochs=5, batch_size=32, verbose=1)
        return model

    # Hyperparameters (adjust as needed).
    look_back = 500   # days of history fed to each prediction
    n_features = 1    # closing price is the only feature

    # Scale each series independently so each model's output can be
    # inverse-scaled with the matching scaler.
    scaler_1 = MinMaxScaler(feature_range=(0, 1))
    scaler_2 = MinMaxScaler(feature_range=(0, 1))
    scaled_data1 = scaler_1.fit_transform(data1[['Close']])
    scaled_data2 = scaler_2.fit_transform(data2[['Close']])

    def _build_sequences(scaled):
        """Slice a scaled series into (look_back window -> next value) pairs."""
        x, y = [], []
        for i in range(look_back, len(scaled)):
            x.append(scaled[i - look_back:i, 0])
            y.append(scaled[i, 0])
        x, y = np.array(x), np.array(y)
        # LSTM expects 3-D input: (samples, timesteps, features).
        return np.reshape(x, (x.shape[0], x.shape[1], n_features)), y

    x_train1, y_train1 = _build_sequences(scaled_data1)
    x_train2, y_train2 = _build_sequences(scaled_data2)

    model1 = create_model(x_train1, y_train1)
    model2 = create_model(x_train2, y_train2)

    # ---- User interface for prediction ----
    prediction_days = st.slider('Number of Days to Predict', 1, 30, 7)

    predicted_prices1 = []
    predicted_prices2 = []

    for _ in range(prediction_days):
        # Feed the most recent 'look_back' points back into each model.
        x_test1 = np.array([scaled_data1[-look_back:]]).reshape(1, look_back, n_features)
        x_test2 = np.array([scaled_data2[-look_back:]]).reshape(1, look_back, n_features)

        predicted_price1 = model1.predict(x_test1)
        predicted_price2 = model2.predict(x_test2)

        # Inverse-scale the predictions back to price space for display.
        predicted_price1 = scaler_1.inverse_transform(predicted_price1)
        predicted_price2 = scaler_2.inverse_transform(predicted_price2)

        predicted_prices1.append(predicted_price1.flatten()[0])
        predicted_prices2.append(predicted_price2.flatten()[0])

        # Roll the window forward: drop the oldest point and append the new
        # prediction RE-SCALED into [0, 1]. (Bug fix: the original appended
        # the raw price into the scaled series, corrupting later steps.)
        scaled_data1 = np.append(scaled_data1[1:],
                                 scaler_1.transform(predicted_price1).flatten())
        scaled_data2 = np.append(scaled_data2[1:],
                                 scaler_2.transform(predicted_price2).flatten())

    # ---- Display predictions ----
    prediction_df = pd.DataFrame(
        {'Predicted Price (' + ticker1 + ')': predicted_prices1,
         'Predicted Price (' + ticker2 + ')': predicted_prices2},
        index=pd.date_range(start=data1.index[-1] + pd.Timedelta(days=1),
                            periods=prediction_days))

    st.write("Predicted Prices for the Next", prediction_days, "Days:")
    st.dataframe(prediction_df)

    # ---- Current prices ----
    try:
        # .iloc avoids integer *label* lookup on a DatetimeIndex, which
        # raises KeyError in modern pandas.
        current_price1 = yf.download(ticker1, period="1d")['Close'].iloc[0]
        current_price2 = yf.download(ticker2, period="1d")['Close'].iloc[0]
    except Exception as e:
        st.error(f"Error fetching current prices: {e}")
        return

    # Display current prices.
    st.write(f"**Current Price ({ticker1}): ** {current_price1:.2f}")
    st.write(f"**Current Price ({ticker2}): ** {current_price2:.2f}")

    # Prediction error vs. today's price for each forecast day.
    prediction_df['Error (' + ticker1 + ')'] = (
        prediction_df['Predicted Price (' + ticker1 + ')'] - current_price1)
    prediction_df['Error (' + ticker2 + ')'] = (
        prediction_df['Predicted Price (' + ticker2 + ')'] - current_price2)

    # Display the prediction errors along with predicted prices.
    st.write("Predicted Prices and Errors for the Next", prediction_days, "Days:")
    st.dataframe(prediction_df)
macd_prices.py
ADDED
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import yfinance as yf
|
3 |
+
import pandas as pd
|
4 |
+
import numpy as np
|
5 |
+
import plotly.graph_objects as go
|
6 |
+
from sklearn.preprocessing import StandardScaler
|
7 |
+
from tensorflow.keras.models import Sequential
|
8 |
+
from tensorflow.keras.layers import LSTM, Dense, Conv1D, MaxPooling1D, Flatten
|
9 |
+
|
10 |
+
def load_and_display_macd(ticker1, ticker2, user_inputs):
    """
    Fetch historical data, compute MACD/EMA indicators from user input,
    train one LSTM-CNN model per ticker, and display charts and predictions.

    Args:
        ticker1 (str): First ticker symbol.
        ticker2 (str): Second ticker symbol.
        user_inputs (dict): User-specified parameters:
            - price_column (str): "High", "Low", "Close", or "Open" (default: "Close")
            - ema_fast (int): Span for fast EMA (default: 12)
            - ema_slow (int): Span for slow EMA (default: 26)
            - signal_span (int): Span for signal EMA (default: 9)

    Returns:
        None. Renders Plotly charts and a prediction table via Streamlit.
    """
    try:
        data1 = yf.download(ticker1, period="3mo", interval="1h")
        data2 = yf.download(ticker2, period="3mo", interval="1h")
    except Exception as e:
        st.error(f"Error fetching data: {e}")
        return

    # Get user-specified parameters (with defaults).
    price_column = user_inputs.get("price_column", "Close")
    ema_fast = user_inputs.get("ema_fast", 12)
    ema_slow = user_inputs.get("ema_slow", 26)
    signal_span = user_inputs.get("signal_span", 9)

    # Calculate MACD, Signal and Histogram for ticker 1.
    data1['ema_fast'] = data1[price_column].ewm(span=ema_fast, adjust=False).mean()
    data1['ema_slow'] = data1[price_column].ewm(span=ema_slow, adjust=False).mean()
    data1['MACD'] = data1['ema_fast'] - data1['ema_slow']
    data1['Signal'] = data1['MACD'].ewm(span=signal_span, adjust=False).mean()
    data1['Histogram'] = data1['MACD'] - data1['Signal']

    # Calculate MACD, Signal and Histogram for ticker 2.
    data2['ema_fast'] = data2[price_column].ewm(span=ema_fast, adjust=False).mean()
    data2['ema_slow'] = data2[price_column].ewm(span=ema_slow, adjust=False).mean()
    data2['MACD'] = data2['ema_fast'] - data2['ema_slow']
    data2['Signal'] = data2['MACD'].ewm(span=signal_span, adjust=False).mean()
    # BUG FIX: previously computed as data1['MACD'] - data2['Signal'],
    # mixing ticker1's MACD into ticker2's histogram.
    data2['Histogram'] = data2['MACD'] - data2['Signal']

    # Create candlestick charts with EMAs.
    fig1_candlestick = go.Figure()
    fig1_candlestick.add_trace(go.Candlestick(x=data1.index,
                                              open=data1['Open'],
                                              high=data1['High'],
                                              low=data1['Low'],
                                              close=data1['Close'],
                                              name=f'{ticker1} Candlestick'))
    fig1_candlestick.add_trace(go.Scatter(x=data1.index, y=data1['ema_fast'], name=f'{ticker1} {ema_fast}-EMA', line=dict(color='red')))
    fig1_candlestick.add_trace(go.Scatter(x=data1.index, y=data1['ema_slow'], name=f'{ticker1} {ema_slow}-EMA', line=dict(color='blue')))
    fig1_candlestick.update_layout(title=f'{ticker1} Candlestick Chart with EMAs')

    fig2_candlestick = go.Figure()
    fig2_candlestick.add_trace(go.Candlestick(x=data2.index,
                                              open=data2['Open'],
                                              high=data2['High'],
                                              low=data2['Low'],
                                              close=data2['Close'],
                                              name=f'{ticker2} Candlestick'))
    fig2_candlestick.add_trace(go.Scatter(x=data2.index, y=data2['ema_fast'], name=f'{ticker2} {ema_fast}-EMA', line=dict(color='red')))
    fig2_candlestick.add_trace(go.Scatter(x=data2.index, y=data2['ema_slow'], name=f'{ticker2} {ema_slow}-EMA', line=dict(color='blue')))
    fig2_candlestick.update_layout(title=f'{ticker2} Candlestick Chart with EMAs')

    # Create MACD/Signal line charts.
    # NOTE(review): go.Histogram plots a value distribution of the MACD
    # histogram, not bars over time — confirm that is the intended chart.
    fig1_macd = go.Figure()
    fig1_macd.add_trace(go.Histogram(x=data1['Histogram'], name=f'{ticker1} MACD Histogram', opacity=0.5))
    fig1_macd.add_trace(go.Scatter(x=data1.index, y=data1['MACD'], name=f'{ticker1} MACD', line=dict(color='red')))
    fig1_macd.add_trace(go.Scatter(x=data1.index, y=data1['Signal'], name=f'{ticker1} Signal', line=dict(color='green')))
    fig1_macd.update_layout(title=f'{ticker1} MACD and Signal & Histogram')

    fig2_macd = go.Figure()
    fig2_macd.add_trace(go.Histogram(x=data2['Histogram'], name=f'{ticker2} MACD Histogram', opacity=0.5))
    fig2_macd.add_trace(go.Scatter(x=data2.index, y=data2['MACD'], name=f'{ticker2} MACD', line=dict(color='red')))
    fig2_macd.add_trace(go.Scatter(x=data2.index, y=data2['Signal'], name=f'{ticker2} Signal', line=dict(color='green')))
    fig2_macd.update_layout(title=f'{ticker2} MACD and Signal & Histogram')

    # Display charts side by side.
    col1, col2 = st.columns(2)
    with col1:
        st.plotly_chart(fig1_candlestick)
        st.plotly_chart(fig1_macd)
    with col2:
        st.plotly_chart(fig2_candlestick)
        st.plotly_chart(fig2_macd)

    # Create features and target for training.
    features_1 = data1[['ema_fast', 'ema_slow', 'MACD', 'Signal']]
    target_1 = data1[price_column]

    features_2 = data2[['ema_fast', 'ema_slow', 'MACD', 'Signal']]
    target_2 = data2[price_column]

    # Split data into training and testing sets (80/20 split).
    train_size_1 = int(len(data1) * 0.8)
    train_data_1, test_data_1 = features_1.iloc[:train_size_1], features_1.iloc[train_size_1:]
    train_target_1, test_target_1 = target_1.iloc[:train_size_1], target_1.iloc[train_size_1:]

    train_size_2 = int(len(data2) * 0.8)
    train_data_2, test_data_2 = features_2.iloc[:train_size_2], features_2.iloc[train_size_2:]
    train_target_2, test_target_2 = target_2.iloc[:train_size_2], target_2.iloc[train_size_2:]

    # BUG FIX: use one scaler per ticker. A single shared scaler was
    # re-fitted on ticker2's data, so ticker1's predictions were later
    # inverse-transformed with ticker2's statistics.
    scaler1 = StandardScaler()
    scaled_train_data_1 = scaler1.fit_transform(train_data_1)
    scaled_test_data_1 = scaler1.transform(test_data_1)

    scaler2 = StandardScaler()
    scaled_train_data_2 = scaler2.fit_transform(train_data_2)
    scaled_test_data_2 = scaler2.transform(test_data_2)

    # Reshape data for LSTM-CNN input: (samples, time steps, features).
    train_data_reshaped_1 = scaled_train_data_1.reshape(train_data_1.shape[0], train_data_1.shape[1], 1)
    test_data_reshaped_1 = scaled_test_data_1.reshape(test_data_1.shape[0], test_data_1.shape[1], 1)

    train_data_reshaped_2 = scaled_train_data_2.reshape(train_data_2.shape[0], train_data_2.shape[1], 1)
    test_data_reshaped_2 = scaled_test_data_2.reshape(test_data_2.shape[0], test_data_2.shape[1], 1)

    # Create LSTM-CNN models (Conv1D front end, stacked LSTMs, dense head).
    model1 = Sequential()
    model1.add(Conv1D(filters=32, kernel_size=3, activation='relu', input_shape=(train_data_reshaped_1.shape[1], 1)))
    model1.add(MaxPooling1D(pool_size=2))
    model1.add(LSTM(50, return_sequences=True))
    model1.add(LSTM(50))
    # Flatten before the Dense layer.
    model1.add(Flatten())
    model1.add(Dense(features_1.shape[1]))  # output width = number of features
    model1.compile(loss='mse', optimizer='adam')

    model2 = Sequential()
    model2.add(Conv1D(filters=32, kernel_size=3, activation='relu', input_shape=(train_data_reshaped_2.shape[1], 1)))
    model2.add(MaxPooling1D(pool_size=2))
    model2.add(LSTM(50, return_sequences=True))
    model2.add(LSTM(50))
    model2.add(Flatten())
    model2.add(Dense(features_2.shape[1]))  # output width = number of features
    model2.compile(loss='mse', optimizer='adam')

    # Train the models.
    model1.fit(train_data_reshaped_1, train_target_1, epochs=50, batch_size=32, verbose=1)
    model2.fit(train_data_reshaped_2, train_target_2, epochs=50, batch_size=32, verbose=1)

    # Make predictions on the held-out windows.
    predictions_1 = model1.predict(test_data_reshaped_1)
    predictions_2 = model2.predict(test_data_reshaped_2)

    # Inverse transform predictions with each ticker's own scaler.
    # NOTE(review): the scalers were fitted on the feature columns while the
    # model target is the price column — confirm this un-scaling is intended.
    predictions_1 = scaler1.inverse_transform(predictions_1)
    predictions_2 = scaler2.inverse_transform(predictions_2)

    # Duplicate the last prediction as a naive next-step forecast.
    last_prediction_1 = predictions_1[-1].reshape(1, -1)
    predictions_1 = np.concatenate((predictions_1, last_prediction_1), axis=0)

    last_prediction_2 = predictions_2[-1].reshape(1, -1)
    predictions_2 = np.concatenate((predictions_2, last_prediction_2), axis=0)

    # Create a DataFrame of predicted prices indexed from the day after the
    # last observed bar.
    prediction_df = pd.DataFrame({'Predicted Price (' + ticker1 + ')': predictions_1[:, 0],  # single-step price prediction
                                  'Predicted Price (' + ticker2 + ')': predictions_2[:, 0]},
                                 index=pd.date_range(start=data1.index[-1] + pd.Timedelta(days=1),
                                                     periods=len(predictions_1)))

    # Display the predicted prices.
    st.write("Predicted Prices for the Next Day:")
    st.dataframe(prediction_df)
|
news.py
ADDED
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import pandas as pd
|
3 |
+
import yfinance as yf
|
4 |
+
from textblob import TextBlob
|
5 |
+
import re
|
6 |
+
from gensim import corpora, models
|
7 |
+
from nltk import word_tokenize
|
8 |
+
from nltk.corpus import stopwords
|
9 |
+
from sklearn.feature_extraction.text import TfidfVectorizer
|
10 |
+
from sklearn.decomposition import LatentDirichletAllocation
|
11 |
+
from nltk import pos_tag
|
12 |
+
from nltk.chunk import conlltags2tree, tree2conlltags
|
13 |
+
import spacy
|
14 |
+
from spacy import displacy
|
15 |
+
|
16 |
+
def fetch_news_data(ticker1, ticker2):
    """
    Fetch news items for the given tickers from Yahoo Finance.

    Args:
        ticker1 (str): First ticker symbol.
        ticker2 (str): Second ticker symbol.

    Returns:
        tuple: (ticker1_news, ticker2_news) lists of news dicts; two empty
        lists if the fetch fails.
    """
    try:
        ticker1_data = yf.Ticker(ticker1).news
        ticker2_data = yf.Ticker(ticker2).news
        return ticker1_data, ticker2_data
    except Exception as e:
        st.error(f"An error occurred while fetching news data: {e}")
        # BUG FIX: previously `return fetch_news_data()` — a recursive call
        # with no arguments that raised TypeError instead of degrading
        # gracefully. Return empty result lists so callers can proceed.
        return [], []
|
27 |
+
|
28 |
+
def clean_news_data(news_data):
    """
    Clean news headlines: strip punctuation/special characters and collapse
    runs of whitespace to single spaces.

    Args:
        news_data: iterable of news dicts, each carrying a 'title' string.

    Returns:
        list[str]: cleaned headline strings, one per input item.
    """
    return [
        " ".join(re.sub(r"[^\w\s]", "", item['title']).split())
        for item in news_data
    ]
|
38 |
+
|
39 |
+
def perform_sentiment_analysis(news_data):
    """
    Score each news string with TextBlob sentiment polarity.

    Args:
        news_data: iterable of text strings.

    Returns:
        list[float]: polarity score in [-1.0, 1.0] for each input string.
    """
    return [TextBlob(text).sentiment.polarity for text in news_data]
|
48 |
+
|
49 |
+
def classify_sentiment(sentiment_scores):
    """
    Map polarity scores to sentiment labels.

    Args:
        sentiment_scores: iterable of numeric polarity scores.

    Returns:
        list[str]: 'Positive' for score > 0, 'Negative' for score < 0,
        'Neutral' for exactly 0 — one label per score.
    """
    return [
        'Positive' if score > 0 else 'Negative' if score < 0 else 'Neutral'
        for score in sentiment_scores
    ]
|
62 |
+
|
63 |
+
def topic_modeling(news_data):
    """
    Fit a 5-topic LDA model over the given news texts.

    Args:
        news_data: iterable of cleaned text strings.

    Returns:
        tuple: (lda_model, dictionary) — the trained gensim LdaMulticore
        model and the gensim Dictionary built from the tokenized texts.
    """
    # Tokenize each text and drop English stop words.
    english_stops = set(stopwords.words('english'))
    documents = [
        [token for token in word_tokenize(text) if token not in english_stops]
        for text in news_data
    ]

    # Build the token dictionary and bag-of-words corpus.
    dictionary = corpora.Dictionary(documents)
    corpus = [dictionary.doc2bow(doc) for doc in documents]

    # Train the LDA model.
    lda_model = models.LdaMulticore(corpus=corpus, id2word=dictionary, num_topics=5, passes=10)

    return lda_model, dictionary
|
80 |
+
|
81 |
+
def extract_topics(lda_model, dictionary, num_words=5):
    """
    Extract human-readable topic summaries from a fitted LDA model.

    Args:
        lda_model: fitted gensim LDA model.
        dictionary: gensim Dictionary (kept for interface compatibility;
            the model's own id2word mapping already resolves tokens).
        num_words (int): number of top words to include per topic.

    Returns:
        list[str]: one "Topic <idx>: <weighted words>" string per topic.
    """
    # BUG FIX: the old code iterated dictionary.items() — (id, token)
    # pairs — and tested `word in topic` with an int left operand, which
    # raises TypeError; it also ignored num_words entirely.
    # print_topics already returns (idx, formatted-word-string) pairs.
    topics = []
    for idx, topic in lda_model.print_topics(num_topics=-1, num_words=num_words):
        topics.append(f"Topic {idx}: {topic}")
    return topics
|
90 |
+
|
91 |
+
def topic_sentiment_analysis(news_data, lda_model, dictionary):
    """
    Group TextBlob sentiment scores by each document's dominant LDA topic.

    Args:
        news_data: iterable of text strings.
        lda_model: fitted gensim LDA model.
        dictionary: gensim Dictionary used to build bag-of-words vectors.

    Returns:
        dict[int, list[float]]: topic index -> polarity scores of the
        documents whose dominant topic it is.
    """
    topic_sentiments = {}
    for news in news_data:
        # BUG FIX: get_document_topics() was called with no arguments,
        # which raises TypeError — it requires a bag-of-words vector.
        bow = dictionary.doc2bow(word_tokenize(news))
        doc_topics = lda_model.get_document_topics(bow)
        if not doc_topics:
            continue
        # Dominant topic = the (topic, probability) pair with max probability.
        topic_idx = max(doc_topics, key=lambda x: x[1])[0]
        if topic_idx not in topic_sentiments:
            topic_sentiments[topic_idx] = []
        topic_sentiments[topic_idx].append(TextBlob(news).sentiment.polarity)

    return topic_sentiments
|
103 |
+
|
104 |
+
def ner_and_event_detection(news_data):
    """
    Run spaCy Named Entity Recognition over each news string.

    Args:
        news_data: iterable of text strings.

    Returns:
        list[tuple[str, str]]: flat list of (entity_text, entity_label)
        pairs across all input strings.
    """
    nlp = spacy.load("en_core_web_sm")
    detected = []
    for text in news_data:
        detected.extend((ent.text, ent.label_) for ent in nlp(text).ents)
    return detected
|
116 |
+
|
117 |
+
def display_results(ticker1, ticker2, ticker1_sentiments, ticker2_sentiments,
                    ticker1_topics, ticker2_topics,
                    ticker1_topic_sentiments=None, ticker2_topic_sentiments=None):
    """
    Display side-by-side sentiment analysis results in Streamlit.

    Args:
        ticker1: The first ticker symbol.
        ticker2: The second ticker symbol.
        ticker1_sentiments: Sentiment classifications for ticker1.
        ticker2_sentiments: Sentiment classifications for ticker2.
        ticker1_topics: List of topic strings for ticker1.
        ticker2_topics: List of topic strings for ticker2.
        ticker1_topic_sentiments: Optional dict of topic -> polarity scores.
        ticker2_topic_sentiments: Optional dict of topic -> polarity scores.

    Returns:
        None. (BUG FIX: previously returned the function object itself via
        `return display_results`, which was meaningless to callers.)
    """
    st.title(f"{ticker1} vs. {ticker2} Sentiment Analysis")

    # The two columns render identically, so share one helper.
    col1, col2 = st.columns(2)
    with col1:
        _render_ticker_panel(ticker1, ticker1_sentiments, ticker1_topics, ticker1_topic_sentiments)
    with col2:
        _render_ticker_panel(ticker2, ticker2_sentiments, ticker2_topics, ticker2_topic_sentiments)


def _render_ticker_panel(ticker, sentiments, topics, topic_sentiments):
    """Render one ticker's sentiment chart, topic list, and topic averages."""
    st.header(f"{ticker} Sentiment")
    st.bar_chart(pd.Series(sentiments).value_counts())
    st.header(f"{ticker} Topics")
    for topic in topics:
        st.write(topic)
    if topic_sentiments:
        st.header(f"{ticker} Topic Sentiments")
        for topic_idx, scores in topic_sentiments.items():
            st.write(f"Topic {topic_idx}: Average Sentiment = {sum(scores) / len(scores)}")
|
requirements.txt
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
plotly
seaborn
yfinance
streamlit
scipy
scikit-learn
textblob
tensorflow
matplotlib
gensim
nltk
spacy