QuantumLearner commited on
Commit
9928397
·
verified ·
1 Parent(s): ed1e6f7

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +1341 -0
app.py ADDED
@@ -0,0 +1,1341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import requests
3
+ import pandas as pd
4
+ import numpy as np
5
+ import yfinance as yf
6
+ import plotly.graph_objects as go
7
+ import plotly.figure_factory as ff
8
+ from datetime import datetime, date
9
+ from dateutil.relativedelta import relativedelta
10
+ import datetime as dt
11
+ import warnings
12
+ warnings.filterwarnings("ignore")
13
+ import os
14
+ from scipy.optimize import fsolve
15
+ from scipy.stats import norm
16
+
17
###############################################################################
# SET WIDE LAYOUT AND PAGE TITLE
###############################################################################
st.set_page_config(page_title="Default Risk Estimation", layout="wide")

###############################################################################
# GLOBALS & SESSION STATE
###############################################################################
# API key for the financial-data endpoints is read from the environment.
# NOTE(review): os.getenv returns None when FMP_API_KEY is unset, which makes
# every downstream request URL contain "apikey=None" and fail — confirm the
# deployment sets this variable.
FMP_API_KEY = os.getenv("FMP_API_KEY")

# Model outputs are kept in st.session_state so they survive Streamlit reruns
# (e.g. switching pages) without refetching; both start as None until the
# user clicks "Run Analysis".
if "altman_results" not in st.session_state:
    st.session_state["altman_results"] = None

if "dtd_results" not in st.session_state:
    st.session_state["dtd_results"] = None
32
+
33
+ ###############################################################################
34
+ # HELPER FUNCTIONS (Altman Z)
35
+ ###############################################################################
36
def get_fmp_json(url):
    """
    Retrieve JSON from *url* and return it as a list.

    Returns an empty list when the request fails, the body is not valid
    JSON, or the payload is not a JSON array.  Error messages omit any
    direct mention of the data source.
    """
    try:
        # Fix: the original issued requests.get() outside the try block and
        # without a timeout — a network error would propagate (contradicting
        # the "return []" contract) and a stalled connection would hang the
        # Streamlit app forever, since requests has no default timeout.
        r = requests.get(url, timeout=30)
        data = r.json()
    except Exception:
        return []
    # Non-list payloads (e.g. an error object) are treated as "no data".
    return data if isinstance(data, list) else []
49
+
50
def fetch_fmp_annual(endpoint):
    """
    Fetch annual records from *endpoint* into a DataFrame.

    When the result has a 'date' column it is parsed to datetime and the
    frame is returned in ascending date order; otherwise the raw frame
    (possibly empty) is returned unchanged.
    """
    frame = pd.DataFrame(get_fmp_json(endpoint))
    has_dates = (not frame.empty) and ('date' in frame.columns)
    if has_dates:
        frame['date'] = pd.to_datetime(frame['date'])
        frame = frame.sort_values('date')
    return frame
60
+
61
+ ###############################################################################
62
+ # HELPER FUNCTIONS (Distance-to-Default)
63
+ ###############################################################################
64
def solve_merton(E, sigma_E, D, T, r):
    """
    Solve the Merton structural model for asset value and asset volatility.

    Finds (A, sigma_A) satisfying the simultaneous system:
        E       = A * N(d1) - D * exp(-r*T) * N(d2)
        sigma_E = (A / E) * N(d1) * sigma_A

    Parameters: E equity value, sigma_E equity volatility, D debt face
    value, T horizon in years, r risk-free rate.  Returns the fsolve root
    (A_star, sigmaA_star).
    """
    sqrt_T = np.sqrt(T)

    def residuals(unknowns):
        asset_val, asset_vol = unknowns
        d1 = (np.log(asset_val / D) + (r + 0.5 * asset_vol ** 2) * T) / (asset_vol * sqrt_T)
        d2 = d1 - asset_vol * sqrt_T
        equity_gap = asset_val * norm.cdf(d1) - D * np.exp(-r * T) * norm.cdf(d2) - E
        vol_gap = sigma_E - (asset_val / E) * norm.cdf(d1) * asset_vol
        return (equity_gap, vol_gap)

    # Standard starting point: assets ~ equity + debt, asset vol scaled
    # down by the equity share of the capital structure.
    initial_guess = [E + D, sigma_E * (E / (E + D))]
    A_star, sigmaA_star = fsolve(residuals, initial_guess, maxfev=3000)
    return A_star, sigmaA_star
82
+
83
def distance_to_default(A, D, T, r, sigmaA):
    """
    Merton distance to default (the d2 term of the model):

        d2 = [ln(A/D) + (r - 0.5*sigmaA^2)*T] / (sigmaA * sqrt(T))

    A is asset value, D the default barrier (debt), T the horizon,
    r the risk-free rate and sigmaA the asset volatility.
    """
    risk_neutral_drift = (r - 0.5 * sigmaA ** 2) * T
    return (np.log(A / D) + risk_neutral_drift) / (sigmaA * np.sqrt(T))
89
+
90
+ ###############################################################################
91
+ # ALTMAN Z-SCORE EXECUTION (From Provided Code)
92
+ ###############################################################################
93
def run_altman_zscore_calculations(ticker, years_back):
    """
    Fetch annual financial statements and historical market cap for *ticker*,
    then compute the partial contributions and totals for three Altman
    Z-score variants (original 1968 Z, Z'' and Z''').

    Parameters
    ----------
    ticker : str
        Stock ticker symbol to query.
    years_back : int
        How many years of history (from today) to include.

    Returns
    -------
    pandas.DataFrame
        One row per fiscal year with columns 'date', the o_/d_/t_ partial
        terms, and 'z_original' / 'z_double_prime' / 'z_triple_prime_service'.
        Empty DataFrame (after an st.warning) when no usable data is found.
    """
    # 1) FETCH ANNUAL STATEMENTS
    income_url = f"https://financialmodelingprep.com/api/v3/income-statement/{ticker}?period=annual&limit=100&apikey={FMP_API_KEY}"
    balance_url = f"https://financialmodelingprep.com/api/v3/balance-sheet-statement/{ticker}?period=annual&limit=100&apikey={FMP_API_KEY}"

    income_df = fetch_fmp_annual(income_url)
    balance_df = fetch_fmp_annual(balance_url)

    # Inner join keeps only fiscal years present in BOTH statements.
    merged_bi = pd.merge(balance_df, income_df, on='date', how='inner', suffixes=('_bal','_inc'))
    merged_bi.sort_values('date', inplace=True)

    if merged_bi.empty:
        st.warning("No statements to analyze for this ticker/date range.")
        return pd.DataFrame()

    # 2) FILTER TO LAST X YEARS
    end_date = pd.Timestamp.today()
    start_date = end_date - relativedelta(years=years_back)

    merged_bi = merged_bi[(merged_bi['date'] >= start_date) & (merged_bi['date'] <= end_date)]
    merged_bi.sort_values('date', inplace=True)

    if merged_bi.empty:
        st.warning("No financial statements found in the chosen range.")
        return pd.DataFrame()

    # 3) FETCH HISTORICAL MARKET CAP
    # The market-cap endpoint is queried in 5-year windows, so ceil(years/5)
    # requests are issued and concatenated.
    mktcap_df = pd.DataFrame()
    iterations = (years_back // 5) + (1 if years_back % 5 != 0 else 0)

    for i in range(iterations):
        period_end_date = end_date - relativedelta(years=i * 5)
        period_start_date = period_end_date - relativedelta(years=5)

        # Clamp the oldest window to the requested start date.
        if period_start_date < start_date:
            period_start_date = start_date

        mktcap_url = (
            f"https://financialmodelingprep.com/api/v3/historical-market-capitalization/{ticker}"
            f"?from={period_start_date.date()}&to={period_end_date.date()}&apikey={FMP_API_KEY}"
        )
        mktcap_data = get_fmp_json(mktcap_url)
        mktcap_period_df = pd.DataFrame(mktcap_data)

        if not mktcap_period_df.empty and 'date' in mktcap_period_df.columns:
            mktcap_period_df['date'] = pd.to_datetime(mktcap_period_df['date'])
            mktcap_period_df.rename(columns={'marketCap': 'historical_market_cap'}, inplace=True)
            mktcap_df = pd.concat([mktcap_df, mktcap_period_df], ignore_index=True)

    # NOTE(review): if every window returned no data, mktcap_df has no 'date'
    # column and this sort_values raises KeyError — verify upstream failures
    # cannot reach this line.
    mktcap_df = mktcap_df.sort_values('date').drop_duplicates(subset=['date'])
    if not mktcap_df.empty and 'date' in mktcap_df.columns:
        mktcap_df['date'] = pd.to_datetime(mktcap_df['date'])
        mktcap_df = mktcap_df[(mktcap_df['date'] >= start_date) & (mktcap_df['date'] <= end_date)]
        mktcap_df.sort_values('date', inplace=True)
    else:
        mktcap_df = pd.DataFrame(columns=['date','historical_market_cap'])

    # Align each statement date with the nearest market-cap observation
    # (statement dates rarely match trading days exactly).
    if not merged_bi.empty and not mktcap_df.empty:
        merged_bi = pd.merge_asof(
            merged_bi.sort_values('date'),
            mktcap_df.sort_values('date'),
            on='date',
            direction='nearest'
        )
    else:
        merged_bi['historical_market_cap'] = np.nan

    # 4) COMPUTE PARTIAL CONTRIBUTIONS
    z_rows = []
    for _, row in merged_bi.iterrows():
        # Rows missing the denominators (or with zero denominators) are
        # skipped rather than producing inf/NaN scores.
        ta = row.get('totalAssets', np.nan)
        tl = row.get('totalLiabilities', np.nan)
        if pd.isnull(ta) or pd.isnull(tl) or ta == 0 or tl == 0:
            continue

        rev = row.get('revenue', 0)
        hist_mcap = row.get('historical_market_cap', np.nan)
        if pd.isnull(hist_mcap):
            continue

        tca = row.get('totalCurrentAssets', np.nan)
        tcl = row.get('totalCurrentLiabilities', np.nan)
        if pd.isnull(tca) or pd.isnull(tcl):
            continue

        wc = (tca - tcl)
        re = row.get('retainedEarnings', 0)
        # EBIT proxy: operating income, falling back to EBITDA when missing.
        ebit = row.get('operatingIncome', np.nan)
        if pd.isnull(ebit):
            ebit = row.get('ebitda', 0)

        # The five classic Altman ratios.
        X1 = wc / ta
        X2 = re / ta
        X3 = ebit / ta
        X4 = hist_mcap / tl
        X5 = rev / ta if ta != 0 else 0

        # Original Z (Altman 1968 coefficients)
        o_part1 = 1.2 * X1
        o_part2 = 1.4 * X2
        o_part3 = 3.3 * X3
        o_part4 = 0.6 * X4
        o_part5 = 1.0 * X5
        z_original = o_part1 + o_part2 + o_part3 + o_part4 + o_part5

        # Z'' (non-manufacturing variant; drops the X5 turnover term)
        d_part1 = 6.56 * X1
        d_part2 = 3.26 * X2
        d_part3 = 6.72 * X3
        d_part4 = 1.05 * X4
        z_double_prime = d_part1 + d_part2 + d_part3 + d_part4

        # Z''' (service-firm variant)
        t_part1 = 3.25 * X1
        t_part2 = 2.85 * X2
        t_part3 = 4.15 * X3
        t_part4 = 0.95 * X4
        z_triple_prime_service = t_part1 + t_part2 + t_part3 + t_part4

        z_rows.append({
            'date': row['date'],

            # Original partials
            'o_part1': o_part1,
            'o_part2': o_part2,
            'o_part3': o_part3,
            'o_part4': o_part4,
            'o_part5': o_part5,
            'z_original': z_original,

            # Z'' partials
            'd_part1': d_part1,
            'd_part2': d_part2,
            'd_part3': d_part3,
            'd_part4': d_part4,
            'z_double_prime': z_double_prime,

            # Z''' partials
            't_part1': t_part1,
            't_part2': t_part2,
            't_part3': t_part3,
            't_part4': t_part4,
            'z_triple_prime_service': z_triple_prime_service
        })

    # NOTE(review): if every row was skipped, z_rows is empty and this
    # sort_values('date') raises KeyError — confirm callers never hit that.
    z_df = pd.DataFrame(z_rows)
    z_df.sort_values('date', inplace=True)
    return z_df
245
+
246
+ ###############################################################################
247
+ # DTD EXECUTION (From Provided Code)
248
+ ###############################################################################
249
def calculate_yearly_distance_to_default(
    symbol="AAPL",
    years_back=10,
    debt_method="TOTAL",
    risk_free_ticker="^TNX",
    apikey="YOUR_FMP_API_KEY",
):
    """
    Compute the Merton Distance to Default for each of the last *years_back*
    years of *symbol*.

    Fetches yearly average market cap and annual balance-sheet debt from the
    financial-data API, yearly average risk-free yields from yfinance
    (*risk_free_ticker*), and per-year daily prices to estimate equity
    volatility.  Debt is chosen per *debt_method*: "STPLUSLT" (short + long
    term), "STPLUSHALFLT" (short + half long term), or total debt otherwise.

    Returns
    -------
    pandas.DataFrame
        One row per year with market cap, debt components, chosen debt,
        risk-free rate, equity vol, solved asset value/vol and "DTD".

    Raises
    ------
    ValueError
        When market cap, balance sheet, or risk-free data is unavailable.
    """
    end_date = date.today()
    start_date = end_date - dt.timedelta(days=365 * years_back)

    # Market cap: the endpoint is limited to ~5-year windows, so issue
    # ceil(years_back / 5) requests and concatenate the results.
    df_mcap = pd.DataFrame()
    iterations = (years_back // 5) + (1 if years_back % 5 != 0 else 0)
    for i in range(iterations):
        period_end_date = end_date - dt.timedelta(days=365 * i * 5)
        period_start_date = period_end_date - dt.timedelta(days=365 * 5)
        url_mcap = (
            f"https://financialmodelingprep.com/api/v3/historical-market-capitalization/"
            f"{symbol}?from={period_start_date}&to={period_end_date}&apikey={apikey}"
        )
        # Timeouts keep a stalled connection from hanging the app
        # (requests has no default timeout).
        resp_mcap = requests.get(url_mcap, timeout=30)
        data_mcap = resp_mcap.json() if resp_mcap.status_code == 200 else []
        df_mcap_period = pd.DataFrame(data_mcap)
        df_mcap = pd.concat([df_mcap, df_mcap_period], ignore_index=True)

    if df_mcap.empty or "date" not in df_mcap.columns:
        raise ValueError("No market cap data returned. Check your inputs.")
    # Collapse daily market caps to a yearly mean, newest year first.
    df_mcap["year"] = pd.to_datetime(df_mcap["date"]).dt.year
    df_mcap = (
        df_mcap.groupby("year", as_index=False)
        .agg({"marketCap": "mean"})
        .sort_values("year", ascending=False)
    )

    # Balance Sheet: annual debt figures.
    url_bs = f"https://financialmodelingprep.com/api/v3/balance-sheet-statement/{symbol}?period=annual&apikey={apikey}"
    resp_bs = requests.get(url_bs, timeout=30)
    data_bs = resp_bs.json() if resp_bs.status_code == 200 else []
    df_bs = pd.DataFrame(data_bs)
    if df_bs.empty or "date" not in df_bs.columns:
        raise ValueError("No balance sheet data returned. Check your inputs.")
    df_bs["year"] = pd.to_datetime(df_bs["date"]).dt.year
    keep_cols = ["year", "shortTermDebt", "longTermDebt", "totalDebt", "date"]
    df_bs = df_bs[keep_cols].sort_values("year", ascending=False)

    # Risk-free from yfinance: yearly mean of the index close, converted
    # from percentage points to a decimal rate.
    rf_ticker_obj = yf.Ticker(risk_free_ticker)
    rf_data = rf_ticker_obj.history(start=start_date, end=end_date, auto_adjust=False)
    if rf_data.empty or "Close" not in rf_data.columns:
        raise ValueError("No valid risk-free rate data found. Check your inputs.")
    rf_data = rf_data.reset_index()
    rf_data["year"] = rf_data["Date"].dt.year
    rf_data = rf_data[["year", "Close"]]
    rf_yearly = rf_data.groupby("year", as_index=False)["Close"].mean()
    rf_yearly.rename(columns={"Close": "rf_yield"}, inplace=True)
    rf_yearly["rf_yield"] = rf_yearly["rf_yield"] / 100.0  # decimal

    # Merge the three yearly tables on the year key.
    df_all = pd.merge(df_mcap, df_bs, on="year", how="left")
    df_all = pd.merge(df_all, rf_yearly, on="year", how="left")

    # Merton each year
    results = []
    for _, row in df_all.iterrows():
        yr = row["year"]
        E = row["marketCap"]
        if pd.isna(E) or E <= 0:
            continue

        # `or 0` maps both missing keys and None/NaN-ish falsy values to 0.
        shortD = row.get("shortTermDebt", 0) or 0
        longD = row.get("longTermDebt", 0) or 0
        totalD = row.get("totalDebt", 0) or 0
        if debt_method.upper() == "STPLUSLT":
            D = shortD + longD
        elif debt_method.upper() == "STPLUSHALFLT":
            D = shortD + 0.5 * longD
        else:
            D = totalD
        # Non-positive debt makes the Merton barrier undefined; flag with NaN.
        if not D or D <= 0:
            D = np.nan

        # NOTE(review): Series.get only substitutes the default for a missing
        # key — a present-but-NaN rf_yield passes through as NaN; confirm
        # the merge cannot leave NaN yields for years with market cap data.
        r_val = row.get("rf_yield", 0.03)

        # Equity volatility: annualized stdev of daily log returns for the
        # calendar year, with a 30% fallback when prices are unavailable
        # or the estimate is degenerate.
        from_dt = f"{yr}-01-01"
        to_dt = f"{yr}-12-31"
        url_hist = (
            f"https://financialmodelingprep.com/api/v3/historical-price-full/{symbol}"
            f"?from={from_dt}&to={to_dt}&apikey={apikey}"
        )
        resp_hist = requests.get(url_hist, timeout=30)
        data_hist = resp_hist.json() if resp_hist.status_code == 200 else {}
        daily_prices = data_hist.get("historical", [])

        if not daily_prices:
            sigma_E = 0.30
        else:
            df_prices = pd.DataFrame(daily_prices)
            df_prices.sort_values("date", inplace=True)
            close_vals = df_prices["close"].values
            log_rets = np.diff(np.log(close_vals))
            daily_vol = np.std(log_rets)
            sigma_E = daily_vol * np.sqrt(252)
            if sigma_E < 1e-4:
                sigma_E = 0.30

        T = 1.0
        if not np.isnan(D):
            try:
                A_star, sigmaA_star = solve_merton(E, sigma_E, D, T, r_val)
                dtd_value = distance_to_default(A_star, D, T, r_val, sigmaA_star)
            # Fix: was a bare `except:`, which also swallows SystemExit and
            # KeyboardInterrupt; only solver failures should be absorbed.
            except Exception:
                A_star, sigmaA_star, dtd_value = np.nan, np.nan, np.nan
        else:
            A_star, sigmaA_star, dtd_value = np.nan, np.nan, np.nan

        results.append({
            "year": yr,
            "marketCap": E,
            "shortTermDebt": shortD,
            "longTermDebt": longD,
            "totalDebt": totalD,
            "chosenDebt": D,
            "rf": r_val,
            "sigma_E": sigma_E,
            "A_star": A_star,
            "sigmaA_star": sigmaA_star,
            "DTD": dtd_value
        })

    result_df = pd.DataFrame(results).sort_values("year")
    return result_df
385
+
386
+ ###############################################################################
387
+ # PLOTTING HELPERS
388
+ ###############################################################################
389
def plot_zscore_figure(df, date_col, partial_cols, total_col, partial_names, total_name, title_text, zones, ticker):
    """
    Render one Z-score variant as a full-width dark-themed chart: stacked
    bars for each partial contribution, an annotated line for the total,
    and shaded distress / gray / safe bands behind the data.
    """
    fig = go.Figure()

    # Axis extents: x spans the data dates; y covers the larger of the
    # total line and the stacked-bar peak, padded by 20%, and dips below
    # zero only when the total itself goes negative.
    x_lo = df[date_col].min()
    x_hi = df[date_col].max()
    stacked_peak = df[partial_cols].sum(axis=1).max() if not df[partial_cols].empty else 0
    y_hi = max(df[total_col].max(), stacked_peak, 0) * 1.2
    lowest_total = df[total_col].min()
    y_lo = min(lowest_total, 0) * 1.2 if lowest_total < 0 else 0

    # Background bands, drawn below the data: (fill color, bottom, top).
    bands = [
        ("red", y_lo, zones['distress']),            # distress zone
        ("gray", zones['gray_lower'], zones['gray_upper']),  # gray area
        ("green", zones['safe'], y_hi),              # safe zone
    ]
    for fill, band_lo, band_hi in bands:
        fig.add_shape(
            type="rect",
            x0=x_lo, x1=x_hi,
            y0=band_lo, y1=band_hi,
            fillcolor=fill,
            opacity=0.2,
            layer="below",
            line=dict(width=0)
        )

    # One stacked-bar trace per partial contribution.
    for column, label, bar_color in partial_names:
        fig.add_trace(go.Bar(
            x=df[date_col],
            y=df[column],
            name=label,
            marker_color=bar_color
        ))

    # Total score as a white line with per-point value labels.
    fig.add_trace(go.Scatter(
        x=df[date_col],
        y=df[total_col],
        mode='lines+markers+text',
        text=df[total_col].round(2),
        textposition='top center',
        textfont=dict(size=16),
        name=total_name,
        line=dict(color='white', width=2)
    ))

    fig.update_layout(
        title=dict(
            text=f"{title_text} for {ticker}",
            font=dict(size=26, color="white")
        ),
        legend=dict(
            font=dict(color="white", size=18)
        ),
        barmode="stack",
        template="plotly_dark",
        paper_bgcolor="#0e1117",
        plot_bgcolor="#0e1117",
        xaxis=dict(
            title="Year",
            tickangle=45,
            tickformat="%Y",
            dtick="M12",
            showgrid=True,
            gridcolor="rgba(255, 255, 255, 0.1)"
        ),
        yaxis=dict(
            title="Z-Score Contribution",
            showgrid=True,
            gridcolor="rgba(255, 255, 255, 0.1)"
        ),
        margin=dict(l=40, r=40, t=80, b=80),
        height=700
    )
    st.plotly_chart(fig, use_container_width=True)
485
+
486
+ ###############################################################################
487
+ # STREAMLIT APP
488
+ ###############################################################################
489
st.title("Default Risk Estimation")

#st.write("## Overview")
st.write("This tool assesses a firm's default risk using two widely recognized models:")
st.write("1) **Altman Z-Score**: A financial distress predictor based on accounting ratios.")
st.write("2) **Merton Distance-to-Default (DTD)**: A market-based risk measure derived from option pricing theory.")
#st.write("Select a page from the sidebar to explore each model’s estimates and methodology.")

# Sidebar for user inputs: page selection plus the ticker/lookback settings
# shared by both models.
with st.sidebar:
    st.write("## Input Parameters")

    # Page selector in an open-by-default expander
    with st.expander("Page Selector", expanded=True):
        page = st.radio("Select Page:", ["Altman Z Score", "Distance-to-Default"])

    with st.expander("General Settings", expanded=True):
        ticker = st.text_input("Ticker", value="AAPL", help="Enter a valid stock ticker")
        years_back = st.number_input("Years back", min_value=1, max_value=30, value=10, step=1,
                                     help="How many years of data to retrieve?")
        run_button = st.button("Run Analysis", help="Fetch data and compute metrics")

# If user clicks to run, fetch data for BOTH models up front and cache the
# results in session state so page switches do not refetch.
if run_button:
    # Altman Z
    z_data = run_altman_zscore_calculations(ticker, years_back)
    st.session_state["altman_results"] = z_data

    # DTD — the calculator raises ValueError when any of its data sources
    # come back empty; surface that as a warning instead of crashing.
    try:
        dtd_df = calculate_yearly_distance_to_default(
            symbol=ticker,
            years_back=years_back,
            debt_method="TOTAL",
            risk_free_ticker="^TNX",
            apikey=FMP_API_KEY
        )
        st.session_state["dtd_results"] = dtd_df
    except ValueError:
        st.warning("No valid data was returned. Check your inputs.")
529
+
530
+ ###############################################################################
531
+ # PAGE 1: ALTMAN Z
532
+ ###############################################################################
533
+ if page == "Altman Z Score":
534
+ z_df = st.session_state.get("altman_results", None)
535
+ if z_df is None or z_df.empty:
536
+ st.info("Select Page, input the paramters and click 'Run Analysis' on the sidebar.")
537
+ else:
538
+ # Original
539
+ #st.subheader("Original Altman Z-Score (1968)")
540
+
541
+ with st.expander("Methodology: Original Altman Z-Score (1968)", expanded=False):
542
+ st.write("The **Altman Z-Score** is a financial distress prediction model developed by Edward Altman in 1968. It combines five financial ratios to assess the likelihood of corporate bankruptcy.")
543
+
544
+ # Formula
545
+ st.latex(r"Z = 1.2 \times X_1 + 1.4 \times X_2 + 3.3 \times X_3 + 0.6 \times X_4 + 1.0 \times X_5")
546
+
547
+ # Definitions of variables
548
+ st.latex(r"X_1 = \frac{\text{Working Capital}}{\text{Total Assets}}")
549
+ st.write("**Liquidity (X₁)**: Measures short-term financial health by comparing working capital to total assets. Higher values suggest better liquidity and lower default risk.")
550
+
551
+ st.latex(r"X_2 = \frac{\text{Retained Earnings}}{\text{Total Assets}}")
552
+ st.write("**Accumulated Profitability (X₂)**: Indicates the proportion of assets financed through retained earnings. Firms with strong retained earnings are less dependent on external financing.")
553
+
554
+ st.latex(r"X_3 = \frac{\text{EBIT}}{\text{Total Assets}}")
555
+ st.write("**Earnings Strength (X₃)**: EBIT (Earnings Before Interest and Taxes) relative to total assets reflects operating profitability and efficiency.")
556
+
557
+ st.latex(r"X_4 = \frac{\text{Market Value of Equity}}{\text{Total Liabilities}}")
558
+ st.write("**Leverage (X₄)**: Compares a firm's market capitalization to its total liabilities. A higher ratio suggests lower financial risk, as equity holders have a stronger claim.")
559
+
560
+ st.latex(r"X_5 = \frac{\text{Revenue}}{\text{Total Assets}}")
561
+ st.write("**Asset Turnover (X₅)**: Assesses how efficiently a company generates revenue from its assets. High turnover suggests better asset utilization.")
562
+
563
+ # Academic Justification
564
+ st.write("##### Academic Justification")
565
+ st.write(
566
+ "The Altman Z-Score was developed using **discriminant analysis** on a dataset of manufacturing firms. "
567
+ "The Altman Z-Score was found to correctly predict bankruptcy **72-80%** of the time in original studies, typically with a **one-year lead time** before actual default."
568
+ "The model’s strength lies in its ability to quantify financial health across multiple dimensions—liquidity, profitability, leverage, and efficiency."
569
+ )
570
+
571
+ # Interpretation
572
+ st.write("##### Interpretation")
573
+ st.write(
574
+ "**Z > 2.99**: Company is considered financially healthy (Low risk of bankruptcy). \n"
575
+ "**1.81 ≤ Z ≤ 2.99**: 'Gray Area' where financial stability is uncertain. \n"
576
+ "**Z < 1.81**: High financial distress, indicating potential bankruptcy risk."
577
+ )
578
+
579
+ # Downsides / Limitations
580
+ st.write("##### Limitations")
581
+ st.write(
582
+ "- Developed using **only manufacturing firms**, which limits its applicability to other industries.\n"
583
+ "- Uses **historical accounting data**, which may not reflect current market conditions.\n"
584
+ "- Market Value of Equity (X₄) makes the score **sensitive to stock price volatility**.\n"
585
+ "- Does not incorporate forward-looking indicators such as market sentiment or macroeconomic risks."
586
+ )
587
+
588
+
589
+ orig_partial_names = [
590
+ ('o_part1', "1.2 × (WC/TA)", 'blue'),
591
+ ('o_part2', "1.4 × (RE/TA)", 'orange'),
592
+ ('o_part3', "3.3 × (EBIT/TA)", 'green'),
593
+ ('o_part4', "0.6 × (MktCap/TL)", 'red'),
594
+ ('o_part5', "1.0 × (Rev/TA)", 'purple'),
595
+ ]
596
+ orig_zones = {
597
+ 'distress': 1.81,
598
+ 'gray_lower': 1.81,
599
+ 'gray_upper': 2.99,
600
+ 'safe': 2.99
601
+ }
602
+ plot_zscore_figure(
603
+ df=z_df,
604
+ date_col='date',
605
+ partial_cols=['o_part1','o_part2','o_part3','o_part4','o_part5'],
606
+ total_col='z_original',
607
+ partial_names=orig_partial_names,
608
+ total_name="Original Z (Total)",
609
+ title_text="Original Altman Z-Score (1968)",
610
+ zones=orig_zones,
611
+ ticker=ticker
612
+ )
613
+
614
+
615
+ with st.expander("Interpretation", expanded=False):
616
+ # EXACT TEXT from user code (Original Z)
617
+ latest_z = z_df['z_original'].iloc[-1]
618
+ # For time-series logic:
619
+ first_val = z_df['z_original'].iloc[0]
620
+ if latest_z > first_val:
621
+ trend = "increased"
622
+ elif latest_z < first_val:
623
+ trend = "decreased"
624
+ else:
625
+ trend = "remained the same"
626
+ min_val = z_df['z_original'].min()
627
+ max_val = z_df['z_original'].max()
628
+ min_idx = z_df['z_original'].idxmin()
629
+ max_idx = z_df['z_original'].idxmax()
630
+ min_year = z_df.loc[min_idx, 'date'].year
631
+ max_year = z_df.loc[max_idx, 'date'].year
632
+
633
+ st.write("**--- Rich Interpretation for Original Z-Score ---")
634
+ st.write(f"Over the entire time series, the Z-Score has {trend}.")
635
+ st.write(f"The lowest value was {min_val:.2f} in {min_year}.")
636
+ st.write(f"The highest value was {max_val:.2f} in {max_year}.")
637
+
638
+ if latest_z < orig_zones['distress']:
639
+ st.write("Current reading is in distress zone. This suggests high financial risk.")
640
+ elif latest_z < orig_zones['gray_upper']:
641
+ st.write("Current reading is in the gray area. This signals mixed financial stability.")
642
+ else:
643
+ st.write("Current reading is in the safe zone. This implies a stronger financial condition.")
644
+
645
+ latest_data = z_df.iloc[-1]
646
+ orig_partials = {
647
+ 'o_part1': latest_data['o_part1'],
648
+ 'o_part2': latest_data['o_part2'],
649
+ 'o_part3': latest_data['o_part3'],
650
+ 'o_part4': latest_data['o_part4'],
651
+ 'o_part5': latest_data['o_part5']
652
+ }
653
+ key_driver = max(orig_partials, key=orig_partials.get)
654
+ if key_driver == 'o_part1':
655
+ st.write("The most significant factor is Working Capital. This suggests the company's ability to cover short-term obligations with current assets. ")
656
+ st.write("A high contribution from Working Capital means strong liquidity, but too much could indicate inefficient capital allocation. ")
657
+ st.write("If the company holds excess current assets, it may not be deploying resources efficiently for growth.")
658
+ elif key_driver == 'o_part2':
659
+ st.write("The most significant factor is Retained Earnings. This reflects the company's history of profitability and reinvestment. ")
660
+ st.write("A high retained earnings contribution indicates that past profits have been reinvested rather than paid out as dividends. ")
661
+ st.write("This can be a positive sign of financial stability, but if earnings retention is excessive, investors may question the company’s capital allocation strategy.")
662
+ elif key_driver == 'o_part3':
663
+ st.write("The most significant factor is EBIT (Earnings Before Interest and Taxes). This underscores the company’s ability to generate profits from operations. ")
664
+ st.write("A high EBIT contribution suggests that core business activities are profitable and drive financial health. ")
665
+ st.write("However, if EBIT dominates the Z-Score, it may mean the company is heavily reliant on operational earnings, making it vulnerable to downturns in revenue.")
666
+ elif key_driver == 'o_part4':
667
+ st.write("The most significant factor is Market Cap to Liabilities. This reflects investor confidence in the company’s future performance relative to its debt burden. ")
668
+ st.write("A strong market cap contribution means investors perceive the company as having high equity value compared to liabilities, reducing bankruptcy risk. ")
669
+ st.write("However, if this is the dominant driver, financial stability may be tied to market sentiment, which can be volatile.")
670
+ elif key_driver == 'o_part5':
671
+ st.write("The most significant factor is Revenue. This indicates that top-line growth is a major driver of financial stability. ")
672
+ st.write("A high revenue contribution is positive if it translates to strong margins, but if costs are rising at the same pace, profitability may not improve. ")
673
+ st.write("If revenue dominates the Z-Score, the company must ensure sustainable cost management and profitability to maintain financial strength.")
674
+
675
+ st.write("If management seeks to lower this Z-Score, they might reduce liquidity or raise liabilities.")
676
+ st.write("A higher liability base or weaker earnings can press the score downward.")
677
+
678
    # Z''
    #st.subheader("Z'' (1993, Non-Manufacturing)")

    # Static methodology write-up for the Altman Z''-Score (1993,
    # non-manufacturing variant): formula, the four inputs, academic
    # rationale, score thresholds, and caveats. Presentation only —
    # no computation happens in this block.
    with st.expander("Methodology: Z'' (1993, Non-Manufacturing)", expanded=False):
        st.write("The **Z''-Score (1993)** is an adaptation of the original Altman Z-Score, developed to assess financial distress in **non-manufacturing firms**, particularly service and retail sectors. It removes the revenue-based efficiency metric (X₅) and adjusts weightings to better fit firms with different asset structures.")

        # Formula
        st.latex(r"Z'' = 6.56 \times X_1 + 3.26 \times X_2 + 6.72 \times X_3 + 1.05 \times X_4")

        # Definitions of variables
        st.latex(r"X_1 = \frac{\text{Working Capital}}{\text{Total Assets}}")
        st.write("**Liquidity (X₁)**: Measures short-term financial flexibility. Firms with higher working capital relative to assets are better positioned to meet short-term obligations.")

        st.latex(r"X_2 = \frac{\text{Retained Earnings}}{\text{Total Assets}}")
        st.write("**Cumulative Profitability (X₂)**: Higher retained earnings relative to total assets suggest long-term profitability and financial resilience.")

        st.latex(r"X_3 = \frac{\text{EBIT}}{\text{Total Assets}}")
        st.write("**Operating Profitability (X₃)**: Measures how efficiently a company generates profit from its assets, reflecting core business strength.")

        st.latex(r"X_4 = \frac{\text{Market Value of Equity}}{\text{Total Liabilities}}")
        st.write("**Leverage (X₄)**: A firm's ability to cover its liabilities with market value equity. A lower ratio suggests greater financial risk.")

        # Academic Justification
        st.write("##### Academic Justification")
        st.write(
            "The original Z-Score was optimized for **manufacturing firms**, making it less effective for firms with fewer tangible assets. "
            "Z'' (1993) improves bankruptcy prediction for **service and retail firms**, as it excludes the revenue turnover component (X₅) "
            "and places greater emphasis on profitability and liquidity. Empirical studies found Z'' to be **better suited for firms with lower capital intensity**."
        )

        # Interpretation
        # Thresholds (1.10 / 2.60) must stay in sync with `double_zones`
        # used by the Z'' plot and interpretation further down the page.
        st.write("##### Interpretation")
        st.write(
            "**Z'' > 2.60**: Firm is financially stable, with low bankruptcy risk. \n"
            "**1.10 ≤ Z'' ≤ 2.60**: 'Gray Area'—financial condition is uncertain. \n"
            "**Z'' < 1.10**: Firm is in financial distress, at a higher risk of default."
        )

        # Downsides / Limitations
        st.write("##### Limitations")
        st.write(
            "- Developed for **non-manufacturing firms**, but may not be applicable to banks or financial institutions.\n"
            "- Still **relies on historical accounting data**, which may not fully capture real-time financial conditions.\n"
            "- Market-based variable (X₄) makes the score **sensitive to stock market fluctuations**.\n"
            "- Does not consider external macroeconomic risks or qualitative factors like management decisions."
        )
724
+
725
+
726
+ double_partial_names = [
727
+ ('d_part1', "6.56 × (WC/TA)", 'blue'),
728
+ ('d_part2', "3.26 × (RE/TA)", 'orange'),
729
+ ('d_part3', "6.72 × (EBIT/TA)", 'green'),
730
+ ('d_part4', "1.05 × (MktCap/TL)", 'red'),
731
+ ]
732
+ double_zones = {
733
+ 'distress': 1.1,
734
+ 'gray_lower': 1.1,
735
+ 'gray_upper': 2.6,
736
+ 'safe': 2.6
737
+ }
738
+ plot_zscore_figure(
739
+ df=z_df,
740
+ date_col='date',
741
+ partial_cols=['d_part1','d_part2','d_part3','d_part4'],
742
+ total_col='z_double_prime',
743
+ partial_names=double_partial_names,
744
+ total_name="Z'' (Total)",
745
+ title_text="Z'' (1993, Non-Manufacturing)",
746
+ zones=double_zones,
747
+ ticker=ticker
748
+ )
749
+
750
    # Narrative interpretation of the Z'' series: overall trend, extremes,
    # current zone, and a drill-down into whichever weighted component
    # contributes most to the latest score.
    with st.expander("Interpretation", expanded=False):
        # Trend = simple first-vs-last comparison over the selected window.
        latest_z_double = z_df['z_double_prime'].iloc[-1]
        first_val = z_df['z_double_prime'].iloc[0]
        if latest_z_double > first_val:
            trend_d = "increased"
        elif latest_z_double < first_val:
            trend_d = "decreased"
        else:
            trend_d = "remained the same"

        # Extremes and the calendar years they occurred in.
        min_val_d = z_df['z_double_prime'].min()
        max_val_d = z_df['z_double_prime'].max()
        min_idx_d = z_df['z_double_prime'].idxmin()
        max_idx_d = z_df['z_double_prime'].idxmax()
        min_year_d = z_df.loc[min_idx_d, 'date'].year
        max_year_d = z_df.loc[max_idx_d, 'date'].year

        st.write("**--- Rich Interpretation for Z'' (Non-Manufacturing) ---**")
        st.write(f"Over the chosen period, the Z-Score has {trend_d}.")
        st.write(f"Lowest: {min_val_d:.2f} in {min_year_d}.")
        st.write(f"Highest: {max_val_d:.2f} in {max_year_d}.")

        # Zone classification against the thresholds in `double_zones`
        # (defined alongside the Z'' plot above).
        if latest_z_double < double_zones['distress']:
            st.write("Current reading is in distress zone. Financial risk is elevated.")
        elif latest_z_double < double_zones['gray_upper']:
            st.write("Current reading is in the gray zone. Financial signals are not clear.")
        else:
            st.write("Current reading is in the safe zone. Financial picture seems stable.")

        # Key driver = the largest weighted component in the latest row.
        latest_data_double = z_df.iloc[-1]
        double_partials = {
            'd_part1': latest_data_double['d_part1'],
            'd_part2': latest_data_double['d_part2'],
            'd_part3': latest_data_double['d_part3'],
            'd_part4': latest_data_double['d_part4']
        }
        key_driver_double = max(double_partials, key=double_partials.get)

        if key_driver_double == 'd_part1':
            st.write("The key factor is Working Capital. This measures the company’s ability to cover short-term liabilities with current assets.")
            st.write("A strong working capital contribution means the company has a healthy liquidity buffer, reducing short-term financial risk.")
            st.write("However, excessive working capital can signal inefficient capital deployment, where too much cash is tied up in receivables or inventory.")
        elif key_driver_double == 'd_part2':
            st.write("The key factor is Retained Earnings. This represents accumulated profits that have been reinvested rather than distributed as dividends.")
            st.write("A high retained earnings contribution suggests financial discipline and the ability to self-finance operations, reducing reliance on external funding.")
            st.write("However, if retained earnings are excessive, investors may question whether the company is efficiently reinvesting in growth opportunities or hoarding cash.")
        elif key_driver_double == 'd_part3':
            st.write("The key factor is EBIT (Earnings Before Interest and Taxes). This highlights the strength of the company’s core operations in driving profitability.")
            st.write("A high EBIT contribution is a strong indicator of financial health, as it suggests the company generates consistent earnings before financing costs.")
            st.write("However, if EBIT is the dominant driver, the company may be vulnerable to economic downturns or market shifts that impact its ability to sustain margins.")
        elif key_driver_double == 'd_part4':
            st.write("The key factor is Market Cap vs. Liabilities. This shows how the market values the company relative to its total debt obligations.")
            st.write("A strong contribution from this metric suggests investor confidence in the company’s financial future, lowering perceived bankruptcy risk.")
            st.write("However, if market sentiment is the main driver, the company could be vulnerable to stock price fluctuations rather than underlying business fundamentals.")

        st.write("To decrease this score, raising debt or reducing EBIT can cause the drop.")
        st.write("An increase in liabilities often pulls down the ratio.")
807
+
808
    # Z'''
    #st.subheader("Z''' (2023, Service/Tech)")

    # Static methodology write-up for the Z'''-Score (2023, service/tech
    # variant): formula, inputs, rationale, thresholds, and caveats.
    # Presentation only — no computation happens in this block.
    with st.expander("Methodology: Z''' (2023, Service/Tech)", expanded=False):
        st.write("The **Z'''-Score (2023)** is a further refinement of the Altman Z models, designed to assess financial distress in **modern service and technology firms**. This version accounts for the **intangible asset-heavy nature** of these companies, where traditional balance sheet metrics may not fully capture financial health.")

        # Formula
        st.latex(r"Z''' = 3.25 \times X_1 + 2.85 \times X_2 + 4.15 \times X_3 + 0.95 \times X_4")

        # Definitions of variables
        st.latex(r"X_1 = \frac{\text{Working Capital}}{\text{Total Assets}}")
        st.write("**Liquidity (X₁)**: Measures short-term financial flexibility. A strong working capital position helps firms cover immediate liabilities.")

        st.latex(r"X_2 = \frac{\text{Retained Earnings}}{\text{Total Assets}}")
        st.write("**Accumulated Profitability (X₂)**: Indicates the extent to which a firm’s assets are funded by retained earnings rather than external debt or equity.")

        st.latex(r"X_3 = \frac{\text{EBIT}}{\text{Total Assets}}")
        st.write("**Core Earnings Strength (X₃)**: Measures profitability before interest and taxes, reflecting operational efficiency.")

        st.latex(r"X_4 = \frac{\text{Market Value of Equity}}{\text{Total Liabilities}}")
        st.write("**Market Confidence (X₄)**: Assesses how the market values the firm relative to its total liabilities. Higher values suggest lower financial risk.")

        # Academic Justification
        st.write("##### Academic Justification")
        st.write(
            "Unlike traditional manufacturing firms, **service and tech firms rely heavily on intangible assets** (e.g., software, R&D, brand equity), "
            "which are often not reflected on the balance sheet. **Z''' (2023) adjusts for this** by rebalancing weightings to better account for profitability "
            "and market valuation. It provides a more relevant measure for industries where physical assets play a reduced role in financial stability."
        )

        # Interpretation
        # Thresholds (1.50 / 2.90) must stay in sync with `triple_zones`
        # used by the Z''' plot and interpretation further down the page.
        st.write("##### Interpretation")
        st.write(
            "**Z''' > 2.90**: Firm is financially stable, with a low probability of distress. \n"
            "**1.50 ≤ Z''' ≤ 2.90**: 'Gray Area'—financial condition is uncertain. \n"
            "**Z''' < 1.50**: Firm is in financial distress, with an elevated bankruptcy risk."
        )

        # Downsides / Limitations
        st.write("##### Limitations")
        st.write(
            "- Developed for **service and tech firms**, but may not generalize well to capital-intensive industries.\n"
            "- **Still based on historical financial data**, which may lag behind real-time market shifts.\n"
            "- Market value component (X₄) **introduces volatility**, making results sensitive to stock price swings.\n"
            "- Does not explicitly factor in **R&D investment or future revenue potential**, which are key in tech sectors."
        )
854
+
855
+
856
+ triple_partial_names = [
857
+ ('t_part1', "3.25 × (WC/TA)", 'blue'),
858
+ ('t_part2', "2.85 × (RE/TA)", 'orange'),
859
+ ('t_part3', "4.15 × (EBIT/TA)", 'green'),
860
+ ('t_part4', "0.95 × (MktCap/TL)", 'red'),
861
+ ]
862
+ triple_zones = {
863
+ 'distress': 1.5,
864
+ 'gray_lower': 1.5,
865
+ 'gray_upper': 2.9,
866
+ 'safe': 2.9
867
+ }
868
+ plot_zscore_figure(
869
+ df=z_df,
870
+ date_col='date',
871
+ partial_cols=['t_part1','t_part2','t_part3','t_part4'],
872
+ total_col='z_triple_prime_service',
873
+ partial_names=triple_partial_names,
874
+ total_name="Z''' (Total)",
875
+ title_text="Z''' (2023, Service/Tech)",
876
+ zones=triple_zones,
877
+ ticker=ticker
878
+ )
879
+
880
    # Narrative interpretation of the Z''' series: trend, extremes, current
    # zone, and a drill-down into the dominant weighted component.
    with st.expander("Interpretation", expanded=False):
        # Trend = first-vs-last comparison over the selected window.
        latest_z_triple = z_df['z_triple_prime_service'].iloc[-1]
        first_val_t = z_df['z_triple_prime_service'].iloc[0]
        if latest_z_triple > first_val_t:
            trend_t = "increased"
        elif latest_z_triple < first_val_t:
            trend_t = "decreased"
        else:
            trend_t = "remained the same"

        # Extremes and the calendar years they occurred in.
        min_val_t = z_df['z_triple_prime_service'].min()
        max_val_t = z_df['z_triple_prime_service'].max()
        min_idx_t = z_df['z_triple_prime_service'].idxmin()
        max_idx_t = z_df['z_triple_prime_service'].idxmax()
        min_year_t = z_df.loc[min_idx_t, 'date'].year
        max_year_t = z_df.loc[max_idx_t, 'date'].year

        st.write("**--- Rich Interpretation for Z''' (Service/Tech) ---**")
        st.write(f"Across the selected years, this Z-Score has {trend_t}.")
        st.write(f"Minimum was {min_val_t:.2f} in {min_year_t}.")
        st.write(f"Maximum was {max_val_t:.2f} in {max_year_t}.")

        # Zone classification against `triple_zones` (defined with the plot).
        if latest_z_triple < triple_zones['distress']:
            st.write("Current reading is in the distress zone. This indicates possible financial strain.")
        elif latest_z_triple < triple_zones['gray_upper']:
            st.write("Current reading is in the gray range. This means uncertain financial signals.")
        else:
            st.write("Current reading is in the safe zone. Financial health looks positive.")

        # Key driver = the largest weighted component in the latest row.
        latest_data_triple = z_df.iloc[-1]
        triple_partials = {
            't_part1': latest_data_triple['t_part1'],
            't_part2': latest_data_triple['t_part2'],
            't_part3': latest_data_triple['t_part3'],
            't_part4': latest_data_triple['t_part4']
        }
        key_driver_triple = max(triple_partials, key=triple_partials.get)
        if key_driver_triple == 't_part1':
            st.write("Working Capital stands out as the main influence, emphasizing the company's short-term financial flexibility.")
            st.write("A strong working capital contribution indicates a well-managed balance between current assets and liabilities, reducing liquidity risk.")
            st.write("However, if too much capital is tied up in cash or inventory, it may suggest inefficiency in deploying assets for growth.")
        elif key_driver_triple == 't_part2':
            st.write("Retained Earnings plays the biggest role, highlighting the company's ability to reinvest past profits into future growth.")
            st.write("A high retained earnings contribution suggests the company has a history of profitability and financial discipline, reducing reliance on external financing.")
            st.write("However, if retained earnings dominate, it raises questions about whether capital is allocated effectively.")
        elif key_driver_triple == 't_part3':
            st.write("EBIT is the dominant factor, meaning the company’s operational efficiency is the primary driver of financial stability.")
            st.write("A strong EBIT contribution indicates that core business activities are profitable. This supports the firm's financial health.")
            st.write("But if EBIT is the largest driver, the company may be heavily dependent on margins, making it vulnerable to cost pressures.")
        elif key_driver_triple == 't_part4':
            st.write("Market Cap vs. Liabilities leads, suggesting that investor confidence and market valuation are key drivers of financial stability.")
            st.write("A high contribution from this metric means the company’s equity is valued significantly higher than its liabilities.")
            st.write("Reliance on market sentiment can expose the firm to stock price volatility.")

        st.write("If the goal is to reduce this Z-Score, rising debt or shrinking EBIT will push it downward.")
        st.write("Lower liquidity or lower equity value can also move the score lower.")
936
+
937
    # Show raw data
    # Expose the underlying Altman Z DataFrame for transparency / debugging.
    with st.expander("Raw Altman Z Data", expanded=False):
        st.dataframe(z_df)
940
+
941
###############################################################################
# PAGE 2: DISTANCE TO DEFAULT
###############################################################################
if page == "Distance-to-Default":

    # Merton-model results are computed elsewhere (sidebar "Run Analysis")
    # and cached in session state; this page only renders them.
    dtd_df = st.session_state.get("dtd_results", None)
    if dtd_df is None or dtd_df.empty:
        # Fixed typo in the user-facing prompt: "paramters" -> "parameters".
        st.info("Select Page, input the parameters and click 'Run Analysis' on the sidebar.")
949
    else:
        # The Merton inversion needs all four outputs per year; drop rows
        # where any of them is missing (e.g. solver failed to converge).
        valid_df = dtd_df.dropna(subset=["chosenDebt", "A_star", "sigmaA_star", "DTD"])
        if valid_df.empty:
            st.warning("No valid rows for Merton calculations in the chosen range.")
        else:
954
            # Static methodology write-up for the Merton structural
            # distance-to-default model. Presentation only.
            with st.expander("Methodology: Merton Distance-to-Default (DTD)", expanded=False):
                st.write(
                    "The **Distance-to-Default (DTD)** is a structural credit risk model based on Merton's (1974) option pricing theory. "
                    "It estimates the likelihood that a firm's asset value will fall below its debt obligations, triggering default."
                )

                # Merton Model Core Equations
                st.latex(r"V_t = S_t + D_t")
                st.write("**Firm Value (Vₜ)**: The total market value of the firm, consisting of equity (Sₜ) and debt (Dₜ).")

                # NOTE(review): this is the leverage-scaled approximation; the
                # full Merton system relates the volatilities via
                # sigma_S * S = N(d1) * sigma_V * V. Confirm the displayed
                # simplification matches the calculation used upstream.
                st.latex(r"\sigma_V = \frac{S_t}{V_t} \sigma_S")
                st.write("**Asset Volatility (σ_V)**: Derived from the observed equity volatility (σ_S), using the Merton model.")

                st.latex(r"d_1 = \frac{\ln{\left(\frac{V_t}{D_t}\right)} + \left( r - \frac{1}{2} \sigma_V^2 \right)T}{\sigma_V \sqrt{T}}")
                st.latex(r"d_2 = d_1 - \sigma_V \sqrt{T}")
                st.write("**Merton's d₁ and d₂**: Standardized metrics capturing the firm's asset dynamics relative to debt.")

                # DTD is identified with d2, consistent with the charts below
                # that label their y-axis "Distance-to-Default (d2)".
                st.latex(r"\text{DTD} = d_2 = \frac{\ln{\left(\frac{V_t}{D_t}\right)} + \left( r - \frac{1}{2} \sigma_V^2 \right)T}{\sigma_V \sqrt{T}}")
                st.write("**Distance-to-Default (DTD)**: Measures how many standard deviations the firm's asset value is from the default threshold (Dₜ).")

                # Academic Justification
                st.write("##### Academic Justification")
                st.write(
                    "Merton's model treats a firm's equity as a **call option** on its assets, where default occurs if asset value (Vₜ) "
                    "falls below debt (Dₜ) at time T. **DTD quantifies this probability** by measuring how far the firm is from this threshold, "
                    "adjusting for volatility. Studies show that **lower DTD values correlate with higher default probabilities**, making it "
                    "a key metric for credit risk analysis in corporate finance and banking."
                )

                # Interpretation
                # The 0 / 1 / 2 bands below are reused verbatim by the dynamic
                # interpretation expanders after each chart.
                st.write("##### Interpretation")
                st.write(
                    "**DTD > 2.0**: Low probability of default (strong financial health). \n"
                    "**1.0 ≤ DTD ≤ 2.0**: Moderate risk—firm is financially stable but should be monitored. \n"
                    "**DTD < 1.0**: High default risk—firm is approaching financial distress. \n"
                    "**DTD < 0.0**: Extreme risk—firm’s asset value is below its debt obligations."
                )

                # Downsides / Limitations
                st.write("##### Limitations")
                st.write(
                    "- **Assumes market efficiency**, meaning it relies heavily on accurate stock price movements.\n"
                    "- **Volatility estimates impact accuracy**, as market fluctuations can distort results.\n"
                    "- **Ignores liquidity constraints**—a firm may default due to cash flow problems, even if assets exceed liabilities.\n"
                    "- **Not designed for financial institutions**, where leverage and risk dynamics differ significantly.\n"
                    "- **Short-term focused**, making it less predictive for long-term financial health."
                )
1001
+
1002
+ #st.subheader("Annual Distance to Default (Merton Model)")
1003
+
1004
+ # Chart 1
1005
+ fig_time = go.Figure()
1006
+ fig_time.add_trace(
1007
+ go.Scatter(
1008
+ x=dtd_df["year"],
1009
+ y=dtd_df["DTD"],
1010
+ mode="lines+markers",
1011
+ name="Distance to Default"
1012
+ )
1013
+ )
1014
+ fig_time.update_layout(
1015
+ title=f"{ticker} Annual Distance to Default (Merton Model)",
1016
+ title_font=dict(size=26, color="white"),
1017
+ xaxis_title="Year",
1018
+ yaxis_title="Distance to Default (d2)",
1019
+ template="plotly_dark",
1020
+ paper_bgcolor="#0e1117",
1021
+ plot_bgcolor="#0e1117",
1022
+ xaxis=dict(
1023
+ showgrid=True,
1024
+ gridcolor="rgba(255, 255, 255, 0.1)"
1025
+ ),
1026
+ yaxis=dict(
1027
+ showgrid=True,
1028
+ gridcolor="rgba(255, 255, 255, 0.1)"
1029
+ )
1030
+ )
1031
+ st.plotly_chart(fig_time, use_container_width=True)
1032
+
1033
            # --- Dynamic Interpretation for Chart 1 (Exact user code) ---
            # Text summary of the DTD time series: trend, extremes, and a
            # zone classification of the latest value using the 0/1/2 bands
            # from the methodology section.
            with st.expander("Interpretation", expanded=False):
                dtd_series = dtd_df["DTD"].dropna()
                # Need at least two points to talk about a trend.
                if len(dtd_series) > 1:
                    first_val = dtd_series.iloc[0]
                    last_val = dtd_series.iloc[-1]
                    trend_str = "increased" if last_val > first_val else "decreased" if last_val < first_val else "remained stable"
                    min_val = dtd_series.min()
                    max_val = dtd_series.max()
                    # Map the extreme values back to their calendar years.
                    min_yr = dtd_df.loc[dtd_series.idxmin(), "year"]
                    max_yr = dtd_df.loc[dtd_series.idxmax(), "year"]

                    st.write("Dynamic Interpretation for Annual Distance to Default:")
                    st.write(f"**1) The time series shows that DTD has {trend_str} from {first_val:.2f} to {last_val:.2f}.**")
                    st.write(f"**2) The lowest DTD was {min_val:.2f} in {min_yr}, and the highest was {max_val:.2f} in {max_yr}.**")
                    if last_val < 0:
                        st.write(" Current DTD is negative. The firm may be in distress territory, implying higher default risk.")
                    elif last_val < 1:
                        st.write(" Current DTD is below 1. This suggests caution, as default risk is higher than comfortable.")
                    elif last_val < 2:
                        st.write(" Current DTD is between 1 and 2. This is moderate territory. Risk is not extreme but warrants monitoring.")
                    else:
                        st.write(" Current DTD is above 2. This generally indicates safer conditions and lower default probability.")
                else:
                    st.write("DTD time series is insufficient for a dynamic interpretation.")
1058
+
1059
            # Chart 2: Distribution
            #st.subheader("Distribution of Simulated Distance-to-Default")
            # Pull the Merton inputs from the most recent valid year; the
            # simulation and sensitivity charts below all reuse these names
            # (A_star, sigmaA_star, D, r, T, dtd_value).
            latest_data = valid_df.iloc[-1]
            A_star = latest_data["A_star"]          # estimated asset value
            sigmaA_star = latest_data["sigmaA_star"]  # estimated asset volatility
            D = latest_data["chosenDebt"]           # default point (debt)
            r = latest_data["rf"]                   # risk-free rate
            T = 1.0                                  # one-year horizon
            dtd_value = latest_data["DTD"]          # point-estimate DTD

            # Monte-Carlo draws of the asset value.
            # NOTE(review): draws are normal with std = sigmaA_star * A_star —
            # a normal approximation of the (lognormal) Merton asset dynamics;
            # confirm this is the intended simplification. The RNG is unseeded,
            # so the histogram changes on every Streamlit rerun.
            num_simulations = 10000
            A_simulated = np.random.normal(A_star, sigmaA_star * A_star, num_simulations)
            # Non-positive draws would break np.log; mark them NaN and drop
            # them afterwards (slightly shrinks the effective sample).
            A_simulated = np.where(A_simulated > 0, A_simulated, np.nan)
            DTD_simulated = (np.log(A_simulated / D) + (r - 0.5 * sigmaA_star**2) * T) / (sigmaA_star * np.sqrt(T))
            DTD_simulated = DTD_simulated[~np.isnan(DTD_simulated)]
1074
+
1075
+ fig_hist = ff.create_distplot(
1076
+ [DTD_simulated],
1077
+ ["Simulated DTD"],
1078
+ show_hist=True,
1079
+ show_rug=False,
1080
+ curve_type='kde'
1081
+ )
1082
+ fig_hist.add_vline(
1083
+ x=dtd_value,
1084
+ line=dict(color="red", dash="dash"),
1085
+ annotation_text=f"Actual DTD = {dtd_value:.2f}"
1086
+ )
1087
+ fig_hist.update_layout(
1088
+ title=f"{ticker} Distribution of Simulated Distance-to-Default (DTD)",
1089
+ title_font=dict(size=26, color="white"),
1090
+ xaxis_title="Distance-to-Default (DTD)",
1091
+ yaxis_title="Frequency",
1092
+ template="plotly_dark",
1093
+ paper_bgcolor="#0e1117",
1094
+ plot_bgcolor="#0e1117",
1095
+ xaxis=dict(
1096
+ showgrid=True,
1097
+ gridcolor="rgba(255, 255, 255, 0.1)"
1098
+ ),
1099
+ yaxis=dict(
1100
+ showgrid=True,
1101
+ gridcolor="rgba(255, 255, 255, 0.1)"
1102
+ )
1103
+ )
1104
+ st.plotly_chart(fig_hist, use_container_width=True)
1105
+
1106
            # --- Dynamic Interpretation for Chart 2 (Exact user code) ---
            # Summarizes the simulated DTD distribution (mean/median, risk
            # band of the mean) and compares the point-estimate DTD against
            # the simulation average.
            with st.expander("Interpretation", expanded=False):
                mean_sim = np.mean(DTD_simulated)
                median_sim = np.median(DTD_simulated)
                st.write("\n--- Dynamic Interpretation for DTD Distribution ---")
                st.write(f"**1) The mean simulated Distance-to-Default (DTD) is {mean_sim:.2f}, while the median is {median_sim:.2f}.**")
                # Same 0/1/2 bands as in the methodology section, applied to
                # the simulation mean.
                if mean_sim < 0:
                    st.write(" On average, the simulations suggest the firm is in distress. A negative mean DTD implies that, in many scenarios, asset value falls below debt obligations.")
                    st.write(" This significantly raises default risk, indicating a high probability of financial distress under typical market conditions.")
                elif mean_sim < 1:
                    st.write(" A large portion of simulations yield a DTD below 1, signaling heightened risk. The firm’s financial cushion against default is thin.")
                    st.write(" Companies in this range often face higher borrowing costs and investor skepticism, as they are perceived as more vulnerable to downturns.")
                elif mean_sim < 2:
                    st.write(" The majority of simulations fall between 1 and 2, meaning the firm is not in immediate danger but isn’t fully secure either.")
                    st.write(" This suggests moderate financial health. While not at crisis levels, management should remain cautious about leverage and volatility.")
                else:
                    st.write(" The distribution is mostly above 2, implying that, under most scenarios, the firm maintains a strong buffer against default.")
                    st.write(" Companies in this range generally enjoy greater financial stability, better credit ratings, and lower risk premiums.")

                # Actual vs. simulated average.
                if dtd_value < mean_sim:
                    st.write(f"**2) The actual DTD ({dtd_value:.2f}) is below the simulation average ({mean_sim:.2f}).**")
                    st.write(" This suggests that the real-world financial position of the company is weaker than the average simulated outcome.")
                    st.write(" It may imply that recent market conditions or company-specific factors have increased risk beyond what the model predicts.")
                    st.write(" Management might need to reinforce liquidity or reassess capital structure to avoid sliding into higher-risk territory.")
                else:
                    st.write(f"2) The actual DTD ({dtd_value:.2f}) is above the simulation average ({mean_sim:.2f}).")
                    st.write(" This is a positive signal, suggesting that real-world financial conditions are better than the typical simulated scenario.")
                    st.write(" The firm may have a stronger-than-expected balance sheet or be benefiting from favorable market conditions.")
                    st.write(" While this is reassuring, it is important to monitor whether this stability is due to structural financial strength or short-term market factors.")
1135
+
1136
            # Chart 3: Sensitivity of DTD to Asset Value
            #st.subheader("Sensitivity of DTD to Asset Value")
            # Hold D, r, sigmaA_star, T fixed and sweep the asset value from
            # the default point D (where ln(A/D)=0) up to 110% of the
            # estimated asset value.
            asset_range = np.linspace(D, 1.1 * A_star, 200)
            dtd_asset = (np.log(asset_range / D) + (r - 0.5 * sigmaA_star**2) * T) / (sigmaA_star * np.sqrt(T))

            fig_asset = go.Figure()
            fig_asset.add_trace(
                go.Scatter(
                    x=asset_range,
                    y=dtd_asset,
                    mode='lines',
                    name="DTD vs. Asset Value",
                    line=dict(color="blue")
                )
            )
            # Mark the current estimated asset value on the curve.
            fig_asset.add_vline(
                x=A_star,
                line=dict(color="red", dash="dash"),
                annotation_text=f"Estimated A = {A_star:,.2f}"
            )
            fig_asset.update_layout(
                title=f"{ticker} Sensitivity of DTD to Variation in Asset Value",
                title_font=dict(size=26, color="white"),
                xaxis_title="Asset Value (A)",
                yaxis_title="Distance-to-Default (d2)",
                xaxis_type="log",  # log scale: asset values span orders of magnitude
                template="plotly_dark",
                paper_bgcolor="#0e1117",
                plot_bgcolor="#0e1117",
                xaxis=dict(
                    showgrid=True,
                    gridcolor="rgba(255, 255, 255, 0.1)"
                ),
                yaxis=dict(
                    showgrid=True,
                    gridcolor="rgba(255, 255, 255, 0.1)"
                )
            )
            st.plotly_chart(fig_asset, use_container_width=True)

            # --- Dynamic Interpretation for Chart 3 (Exact user code) ---
            # Reports the DTD at both ends of the asset sweep.
            with st.expander("Interpretation", expanded=False):
                dtd_lowA = dtd_asset[0]    # DTD at A = D (lower bound)
                dtd_highA = dtd_asset[-1]  # DTD at A = 1.1 * A_star
                st.write("\nDynamic Interpretation for Asset Value Sensitivity:")
                st.write(f"**1) At the lower bound (A = {asset_range[0]:,.2f}), DTD is {dtd_lowA:.2f}.**")
                st.write(f"**2) At the higher bound (A = {asset_range[-1]:,.2f}), DTD rises to {dtd_highA:.2f}.**")
                if dtd_highA > 2:
                    st.write(" If asset value grows, the firm gains a comfortable buffer against default.")
                else:
                    st.write(" Even at higher asset values, default risk remains moderate. Growth alone may not guarantee safety.")
1187
+
1188
            # Chart 4: Sensitivity of DTD to Debt Variation
            #st.subheader("Sensitivity of DTD to Debt Variation")
            # Hold A_star, r, sigmaA_star, T fixed and sweep the default
            # point from 10% of current debt up to 120% of asset value
            # (beyond A_star the log term goes negative => DTD < 0).
            debt_range = np.linspace(0.1 * D, 1.2 * A_star, 300)
            dtd_debt = (np.log(A_star / debt_range) + (r - 0.5 * sigmaA_star**2) * T) / (sigmaA_star * np.sqrt(T))

            fig_debt = go.Figure()
            fig_debt.add_trace(
                go.Scatter(
                    x=debt_range,
                    y=dtd_debt,
                    mode='lines',
                    name="DTD vs. Debt",
                    line=dict(color="green")
                )
            )
            # Mark the firm's actual default point on the curve.
            fig_debt.add_vline(
                x=D,
                line=dict(color="red", dash="dash"),
                annotation_text=f"Estimated D = {D:,.2f}"
            )
            fig_debt.update_layout(
                title=f"{ticker} Sensitivity of DTD to Variation in Debt (Extended Range)",
                title_font=dict(size=26, color="white"),
                xaxis_title="Debt (D)",
                yaxis_title="Distance-to-Default (d2)",
                xaxis_type="log",  # log scale: the debt sweep spans orders of magnitude
                template="plotly_dark",
                paper_bgcolor="#0e1117",
                plot_bgcolor="#0e1117",
                xaxis=dict(
                    showgrid=True,
                    gridcolor="rgba(255, 255, 255, 0.1)"
                ),
                yaxis=dict(
                    showgrid=True,
                    gridcolor="rgba(255, 255, 255, 0.1)"
                )
            )
            st.plotly_chart(fig_debt, use_container_width=True)

            # --- Dynamic Interpretation for Chart 4 (Exact user code) ---
            # Reports DTD at both ends of the debt sweep and classifies each
            # endpoint with the same 0/1/2 risk bands used elsewhere.
            with st.expander("Interpretation", expanded=False):
                dtd_lowD = dtd_debt[0]    # DTD at D = 10% of current debt
                dtd_highD = dtd_debt[-1]  # DTD at D = 120% of asset value
                st.write("\n--- Dynamic Interpretation for Debt Variation ---")
                st.write(f"**1) At lower debt levels (D ≈ {debt_range[0]:,.2f}), the estimated Distance-to-Default (DTD) is {dtd_lowD:.2f}.**")
                st.write(f"**2) At higher debt levels (D ≈ {debt_range[-1]:,.2f}), the estimated DTD drops to {dtd_highD:.2f}.**")

                # Low-debt endpoint classification.
                if dtd_lowD > 2:
                    st.write(" With lower debt, the firm has a strong financial cushion. A DTD above 2 typically indicates low default risk.")
                    st.write(" This suggests the company could sustain economic downturns or earnings declines without significantly increasing its probability of distress.")
                    st.write(" In this range, the firm may enjoy better credit ratings, lower borrowing costs, and greater investor confidence.")
                elif 1 < dtd_lowD <= 2:
                    st.write(" Even with reduced debt, the firm remains in a moderate risk zone. While the probability of default is not alarming, it isn't fully secure.")
                    st.write(" This suggests that other financial pressures—such as earnings volatility or low asset returns—might be limiting the risk buffer.")
                    st.write(" Maintaining a balanced capital structure with prudent debt management will be key to ensuring financial stability.")
                else:
                    st.write(" Despite lowering debt, the firm remains in a high-risk category. This indicates that other financial weaknesses, such as low asset returns or high volatility, are still dominant.")
                    st.write(" The company may need a more aggressive strategy to strengthen its financial position, such as improving earnings stability or reducing operational risks.")

                # High-debt endpoint classification.
                if dtd_highD < 0:
                    st.write(" At significantly higher debt levels, the model suggests a **negative DTD**, which signals extreme financial distress.")
                    st.write(" This implies that, under this scenario, the company's total asset value would likely fall below its debt obligations.")
                    st.write(" If this situation were to materialize, the company would be seen as highly vulnerable, potentially leading to credit downgrades or refinancing difficulties.")
                elif 0 <= dtd_highD < 1:
                    st.write(" With higher debt, DTD drops to below 1, meaning the firm is dangerously close to default.")
                    st.write(" A DTD below 1 indicates that even small negative shocks to asset value could push the firm into financial distress.")
                    st.write(" This could lead to increased borrowing costs, investor concerns, and potential restrictions on raising further capital.")
                elif 1 <= dtd_highD < 2:
                    st.write(" The firm’s risk profile worsens with higher debt, but it remains in the moderate zone. The probability of distress increases but is not immediately alarming.")
                    st.write(" Companies in this range often need to manage debt maturities carefully and ensure steady cash flow generation to avoid further deterioration.")
                else:
                    st.write(" Even at a higher debt level, the firm maintains a strong buffer (DTD > 2).")
                    st.write(" This suggests the company has **enough asset value or earnings strength to comfortably manage the additional leverage**.")
                    st.write(" However, increasing debt too aggressively, even in a safe zone, could reduce financial flexibility in downturns.")
1263
+
1264
+ # Chart 5: Asset Value vs. Debt
1265
+ #st.subheader("Asset Value vs. Default Point (Debt)")
1266
+ fig_bar = go.Figure()
1267
+ fig_bar.add_trace(
1268
+ go.Bar(
1269
+ x=["Asset Value (A)", "Debt (D)"],
1270
+ y=[A_star, D],
1271
+ marker=dict(color=["blue", "orange"])
1272
+ )
1273
+ )
1274
+ fig_bar.update_layout(
1275
+ title=f"{ticker} Asset Value vs. Default Point",
1276
+ title_font=dict(size=26, color="white"),
1277
+ yaxis_title="Value (USD)",
1278
+ template="plotly_dark",
1279
+ paper_bgcolor="#0e1117",
1280
+ plot_bgcolor="#0e1117",
1281
+ xaxis=dict(
1282
+ showgrid=True,
1283
+ gridcolor="rgba(255, 255, 255, 0.1)"
1284
+ ),
1285
+ yaxis=dict(
1286
+ showgrid=True,
1287
+ gridcolor="rgba(255, 255, 255, 0.1)"
1288
+ )
1289
+ )
1290
+ st.plotly_chart(fig_bar, use_container_width=True)
1291
+
1292
# --- Dynamic Interpretation for Chart 5 ---
# Narrative commentary on the asset-value-vs-debt bar chart above.
# Reads A_star (estimated asset value) and D (default point / total debt)
# computed earlier in the script.
# Fix: the "comfortable margin" line previously nested ** inside an outer
# **...** pair, which breaks Markdown bold rendering in st.write.
with st.expander("Interpretation", expanded=False):
    st.write("\n--- Dynamic Interpretation for Asset Value vs. Debt ---")
    if A_star > D:
        st.write(f"**1) The estimated asset value ({A_star:,.2f}) exceeds total debt ({D:,.2f}), providing a financial buffer.**")
        # Coverage ratio thresholds (>2 strong, 1.5-2 moderate, 1-1.5 thin)
        # drive the qualitative commentary below.
        asset_debt_ratio = A_star / D
        if asset_debt_ratio > 2:
            st.write("   The asset-to-debt ratio is above 2, meaning the firm holds **more than double the assets compared to its debt obligations**.")
            st.write("   This implies a highly secure financial position, with a strong ability to absorb economic downturns or revenue declines.")
        elif 1.5 <= asset_debt_ratio <= 2:
            st.write("   The asset-to-debt ratio is between 1.5 and 2, which is considered **moderately strong**.")
            st.write("   While there is a solid financial cushion, **prudent debt management is still necessary** to maintain stability.")
        else:
            st.write("   The asset-to-debt ratio is between 1 and 1.5, meaning the firm has a **narrower but still positive buffer.**")
            st.write("   This level is acceptable, but **a small decline in asset value could quickly increase financial risk.**")
    else:
        st.write(f"1) The estimated asset value ({A_star:,.2f}) is **less than or close to total debt** ({D:,.2f}).")
        st.write("   This signals **a limited financial cushion**, increasing the probability of distress in unfavorable conditions.")
        if A_star < D:
            st.write("   **Warning:** The company’s total assets are lower than its total debt.")
            st.write("   This implies that if the company were to liquidate its assets today, it would still **not be able to fully cover its obligations**.")
            st.write("   Such a position increases the likelihood of credit downgrades and difficulty in securing additional financing.")
        elif A_star / D < 1.1:
            # Reached only when A_star == D (ratio 1.0) or marginally above.
            st.write("   The asset buffer is extremely thin. A minor shock in earnings or asset valuation could put the firm in distress.")
            st.write("   The company should consider **reducing leverage or improving asset utilization** to reinforce financial stability.")

    # Absolute cushion between assets and debt, bucketed relative to D
    # (>50% comfortable, 20-50% moderate, otherwise very narrow).
    gap = A_star - D
    if gap > 0.5 * D:
        st.write("2) The firm has a **comfortable margin** between assets and debt. Even with some decline in asset value, financial stability is not immediately at risk.")
    elif 0.2 * D < gap <= 0.5 * D:
        st.write("2) The firm has **a moderate cushion**, but there is some vulnerability to financial shocks.")
        st.write("   If debt levels increase or asset values decline, risk could rise quickly.")
    else:
        st.write("2) The asset buffer is **very narrow**, making the firm susceptible to external risks such as declining revenues, rising interest rates, or asset write-downs.")
        st.write("   A **small misstep in financial strategy could significantly increase default probability.**")
1327
+
1328
# Collapsible view of the underlying distance-to-default dataframe.
raw_dtd_section = st.expander("Raw Distance-to-Default Data", expanded=False)
with raw_dtd_section:
    st.dataframe(dtd_df)
1330
+
1331
+
1332
# Hide default Streamlit style
# Inject CSS that hides the hamburger menu and the "Made with Streamlit"
# footer; unsafe_allow_html is required for raw <style> injection.
_HIDE_STREAMLIT_CHROME = """
    <style>
    #MainMenu {visibility: hidden;}
    footer {visibility: hidden;}
    </style>
    """
st.markdown(_HIDE_STREAMLIT_CHROME, unsafe_allow_html=True)