AdithyaSNair committed on
Commit ce04c1a · verified · 1 Parent(s): d6a0cb3

Update app.py

Files changed (1)
  1. app.py +160 -725
app.py CHANGED
@@ -1,5 +1,3 @@
1
- # app.py
2
-
3
  import streamlit as st
4
  import requests
5
  from langchain_groq import ChatGroq
@@ -14,6 +12,7 @@ from streamlit_option_menu import option_menu
14
  import fitz # PyMuPDF
15
  from bs4 import BeautifulSoup
16
 
 
17
  GROQ_API_KEY = st.secrets["GROQ_API_KEY"]
18
  RAPIDAPI_KEY = st.secrets["RAPIDAPI_KEY"]
19
  YOUTUBE_API_KEY = st.secrets["YOUTUBE_API_KEY"]
@@ -203,49 +202,6 @@ def suggest_keywords(resume_text, job_description=None):
203
  st.error(f"Error suggesting keywords: {e}")
204
  return []
205
 
206
- @st.cache_data(ttl=3600)
207
- def get_job_recommendations(job_title, location="India"):
208
- """
209
- Fetches salary estimates using the Job Salary Data API based on the job title and location.
210
- """
211
- url = "https://job-salary-data.p.rapidapi.com/job-salary"
212
- querystring = {
213
- "job_title": job_title.strip(),
214
- "location": location.strip(),
215
- "radius": "100" # Adjust radius as needed
216
- }
217
-
218
- headers = {
219
- "x-rapidapi-key": RAPIDAPI_KEY, # Securely access the API key
220
- "x-rapidapi-host": "job-salary-data.p.rapidapi.com"
221
- }
222
-
223
- try:
224
- response = requests.get(url, headers=headers, params=querystring)
225
- response.raise_for_status()
226
- salary_data = response.json()
227
-
228
- # Adjust the keys based on the API's response structure
229
- min_salary = salary_data.get("min_salary")
230
- avg_salary = salary_data.get("avg_salary")
231
- max_salary = salary_data.get("max_salary")
232
-
233
- if not all([min_salary, avg_salary, max_salary]):
234
- st.error("Incomplete salary data received from the API.")
235
- return {}
236
-
237
- return {
238
- "min_salary": min_salary,
239
- "avg_salary": avg_salary,
240
- "max_salary": max_salary
241
- }
242
- except requests.exceptions.HTTPError as http_err:
243
- st.error(f"HTTP error occurred: {http_err}")
244
- return {}
245
- except Exception as err:
246
- st.error(f"An error occurred: {err}")
247
- return {}
248
-
249
  def create_skill_distribution_chart(skills):
250
  """
251
  Creates a bar chart showing the distribution of skills.
@@ -394,94 +350,110 @@ def fetch_muse_jobs_api(job_title, location=None, category=None, max_results=50)
394
 
395
  # Adzuna API Integration
396
  @st.cache_data(ttl=86400) # Cache results for 1 day
397
- def fetch_adzuna_jobs_api(job_title, location="us", category=None, max_results=50):
398
  """
399
  Fetches job listings from Adzuna API based on user preferences.
400
 
401
  Args:
402
- job_title (str): The job title to search for (e.g., "Front end developer").
403
- location (str, optional): The country code for the job location (e.g., "us" for USA). Defaults to "us".
404
  category (str, optional): The job category. Defaults to None.
405
  max_results (int, optional): Maximum number of jobs to fetch. Defaults to 50.
406
 
407
  Returns:
408
  list: A list of job dictionaries.
409
  """
410
- # Mapping common location inputs to Adzuna country codes
411
- location_mapping = {
412
- "usa": "us",
413
- "united states": "us",
414
- "us": "us",
415
- "india": "in",
416
- "uk": "gb",
417
- "united kingdom": "gb",
418
- "canada": "ca",
419
- # Add more mappings as needed
420
- }
421
-
422
- # Normalize and map the location input
423
- normalized_location = location.strip().lower()
424
- country_code = location_mapping.get(normalized_location, normalized_location)
425
-
426
- # Construct the API endpoint
427
- base_url = f"https://api.adzuna.com/v1/api/jobs/{country_code}/search/1"
428
-
429
- # Prepare query parameters
430
  params = {
431
- "app_id": st.secrets["ADZUNA_APP_ID"], # Ensure ADZUNA_APP_ID is correctly set in secrets.toml
432
- "app_key": st.secrets["ADZUNA_APP_KEY"], # Ensure ADZUNA_APP_KEY is correctly set in secrets.toml
433
- "what": job_title.strip(),
434
  "results_per_page": max_results,
435
  "content-type": "application/json"
436
  }
437
-
438
  if category:
439
  params["category"] = category
440
-
441
  try:
442
  response = requests.get(base_url, params=params)
443
- response.raise_for_status() # Raises HTTPError for bad responses (4xx or 5xx)
444
  jobs = response.json().get("results", [])
445
-
446
- if not jobs:
447
- st.info("ℹ️ No job listings found for the specified criteria.")
448
-
449
  return jobs
450
-
451
  except requests.exceptions.HTTPError as http_err:
452
- if response.status_code == 403:
453
- st.error("❌ Access Forbidden: Check your API credentials and permissions.")
 
 
454
  elif response.status_code == 404:
455
  st.error("❌ Resource Not Found: Verify the endpoint and parameters.")
456
  else:
457
  st.error(f"❌ HTTP error occurred: {http_err}")
458
  return []
459
-
460
  except requests.exceptions.RequestException as req_err:
461
  st.error(f"❌ Request Exception: {req_err}")
462
  return []
463
-
464
  except Exception as e:
465
  st.error(f"❌ An unexpected error occurred: {e}")
466
  return []
467
-
468
- def recommend_jobs(user_skills, user_preferences):
469
  """
470
- Recommends jobs based on user skills and preferences from Remotive API.
471
-
472
  Args:
473
  user_skills (list): List of user's skills.
474
  user_preferences (dict): User preferences like job title, location, category.
475
-
476
  Returns:
477
  list: Recommended job listings.
478
  """
479
  job_title = user_preferences.get("job_title", "")
480
- location = user_preferences.get("location")
481
  category = user_preferences.get("category")
482
-
483
- jobs = fetch_remotive_jobs_api(job_title, location, category)
484
-
485
  # Simple matching based on skills appearing in job description
486
  recommended_jobs = []
487
  for job in jobs:
@@ -489,74 +461,47 @@ def recommend_jobs(user_skills, user_preferences):
489
  match_score = sum(skill.lower() in job_description for skill in user_skills)
490
  if match_score > 0:
491
  recommended_jobs.append((match_score, job))
492
-
493
- # Sort jobs based on match_score
494
- recommended_jobs.sort(reverse=True, key=lambda x: x[0])
495
-
496
- # Return only the job dictionaries
497
- return [job for score, job in recommended_jobs[:10]] # Top 10 recommendations
498
 
499
- def recommend_muse_jobs(user_skills, user_preferences):
500
- """
501
- Recommends jobs from The Muse API based on user skills and preferences.
502
-
503
- Args:
504
- user_skills (list): List of user's skills.
505
- user_preferences (dict): User preferences like job title, location, category.
506
-
507
- Returns:
508
- list: Recommended job listings.
509
- """
510
- job_title = user_preferences.get("job_title", "")
511
- location = user_preferences.get("location")
512
- category = user_preferences.get("category")
513
-
514
- jobs = fetch_muse_jobs_api(job_title, location, category)
515
-
516
- # Simple matching based on skills appearing in job description
517
- recommended_jobs = []
518
- for job in jobs:
519
- job_description = job.get("contents", "").lower()
520
- match_score = sum(skill.lower() in job_description for skill in user_skills)
521
- if match_score > 0:
522
- recommended_jobs.append((match_score, job))
523
-
524
  # Sort jobs based on match_score
525
  recommended_jobs.sort(reverse=True, key=lambda x: x[0])
526
-
527
  # Return only the job dictionaries
528
  return [job for score, job in recommended_jobs[:10]] # Top 10 recommendations
529
 
530
- def recommend_adzuna_jobs(user_skills, user_preferences):
531
  """
532
- Recommends jobs from Adzuna API based on user skills and preferences.
533
-
534
  Args:
535
  user_skills (list): List of user's skills.
536
  user_preferences (dict): User preferences like job title, location, category.
537
-
538
  Returns:
539
  list: Recommended job listings.
540
  """
541
- job_title = user_preferences.get("job_title", "")
542
- location = user_preferences.get("location", "india")
543
- category = user_preferences.get("category")
544
-
545
- jobs = fetch_adzuna_jobs_api(job_title, location, category)
546
-
547
- # Simple matching based on skills appearing in job description
548
- recommended_jobs = []
549
- for job in jobs:
550
- job_description = job.get("description", "").lower()
551
- match_score = sum(skill.lower() in job_description for skill in user_skills)
552
- if match_score > 0:
553
- recommended_jobs.append((match_score, job))
554
-
555
- # Sort jobs based on match_score
556
- recommended_jobs.sort(reverse=True, key=lambda x: x[0])
557
-
558
- # Return only the job dictionaries
559
- return [job for score, job in recommended_jobs[:10]] # Top 10 recommendations
560
 
561
  # -------------------------------
562
  # BLS API Integration and Display
@@ -566,12 +511,12 @@ def recommend_adzuna_jobs(user_skills, user_preferences):
566
  def fetch_bls_data(series_ids, start_year=2020, end_year=datetime.now().year):
567
  """
568
  Fetches labor market data from the BLS API.
569
-
570
  Args:
571
  series_ids (list): List of BLS series IDs.
572
  start_year (int, optional): Start year for data. Defaults to 2020.
573
  end_year (int, optional): End year for data. Defaults to current year.
574
-
575
  Returns:
576
  dict: BLS data response.
577
  """
@@ -600,7 +545,7 @@ def fetch_bls_data(series_ids, start_year=2020, end_year=datetime.now().year):
600
  def display_bls_data(series_id, title):
601
  """
602
  Processes and displays BLS data with visualizations.
603
-
604
  Args:
605
  series_id (str): BLS series ID.
606
  title (str): Title for the visualization.
@@ -609,20 +554,20 @@ def display_bls_data(series_id, title):
609
  if not data:
610
  st.info("No data available.")
611
  return
612
-
613
  series_data = data.get("series", [])[0]
614
  series_title = series_data.get("title", title)
615
  observations = series_data.get("data", [])
616
-
617
  # Extract year and value
618
  years = [int(obs["year"]) for obs in observations]
619
  values = [float(obs["value"].replace(',', '')) for obs in observations]
620
-
621
  df = pd.DataFrame({
622
  "Year": years,
623
  "Value": values
624
  }).sort_values("Year")
625
-
626
  st.markdown(f"### {series_title}")
627
  fig = px.line(df, x="Year", y="Value", title=series_title, markers=True)
628
  st.plotly_chart(fig, use_container_width=True)
@@ -728,298 +673,68 @@ def generate_learning_path(career_goal, current_skills):
728
  {career_goal}
729
 
730
  **Current Skills:**
731
- {current_skills}
732
-
733
- **Learning Path:**
734
- """
735
-
736
- try:
737
- response = llm.invoke(prompt)
738
- learning_path = response.content.strip()
739
- return learning_path
740
- except Exception as e:
741
- st.error(f"Error generating learning path: {e}")
742
- return ""
743
-
744
- # -------------------------------
745
- # Visual Resume Analytics Functions
746
- # -------------------------------
747
-
748
- def create_skill_distribution_chart(skills):
749
- """
750
- Creates a bar chart showing the distribution of skills.
751
- """
752
- skill_counts = {}
753
- for skill in skills:
754
- skill_counts[skill] = skill_counts.get(skill, 0) + 1
755
- df = pd.DataFrame(list(skill_counts.items()), columns=['Skill', 'Count'])
756
- fig = px.bar(df, x='Skill', y='Count', title='Skill Distribution')
757
- return fig
758
-
759
- def create_experience_timeline(resume_text):
760
- """
761
- Creates an experience timeline from the resume text.
762
- """
763
- # Extract work experience details using Groq
764
- prompt = f"""
765
- From the following resume text, extract the job titles, companies, and durations of employment. Provide the information in a table format with columns: Job Title, Company, Duration (in years).
766
-
767
- Resume Text:
768
- {resume_text}
769
-
770
- Table:
771
- """
772
-
773
- try:
774
- response = llm.invoke(prompt)
775
- table_text = response.content.strip()
776
- # Parse the table_text to create a DataFrame
777
- data = []
778
- for line in table_text.split('\n'):
779
- if line.strip() and not line.lower().startswith("job title"):
780
- parts = line.split('|')
781
- if len(parts) == 3:
782
- job_title = parts[0].strip()
783
- company = parts[1].strip()
784
- duration = parts[2].strip()
785
- # Convert duration to a float representing years
786
- duration_years = parse_duration(duration)
787
- data.append({"Job Title": job_title, "Company": company, "Duration (years)": duration_years})
788
- df = pd.DataFrame(data)
789
- if not df.empty:
790
- # Create a cumulative duration for timeline
791
- df['Start Year'] = df['Duration (years)'].cumsum() - df['Duration (years)']
792
- df['End Year'] = df['Duration (years)'].cumsum()
793
- fig = px.timeline(df, x_start="Start Year", x_end="End Year", y="Job Title", color="Company", title="Experience Timeline")
794
- fig.update_yaxes(categoryorder="total ascending")
795
- return fig
796
- else:
797
- return None
798
- except Exception as e:
799
- st.error(f"Error creating experience timeline: {e}")
800
- return None
801
-
802
- def parse_duration(duration_str):
803
- """
804
- Parses duration strings like '2 years' or '6 months' into float years.
805
- """
806
- try:
807
- if 'year' in duration_str.lower():
808
- years = float(re.findall(r'\d+\.?\d*', duration_str)[0])
809
- return years
810
- elif 'month' in duration_str.lower():
811
- months = float(re.findall(r'\d+\.?\d*', duration_str)[0])
812
- return months / 12
813
- else:
814
- return 0
815
- except:
816
- return 0
817
-
818
- # -------------------------------
819
- # Job Recommendations and BLS Integration
820
- # -------------------------------
821
-
822
- # Remotive API Integration
823
- @st.cache_data(ttl=86400) # Cache results for 1 day
824
- def fetch_remotive_jobs_api(job_title, location=None, category=None, remote=True, max_results=50):
825
- """
826
- Fetches job listings from Remotive API based on user preferences.
827
-
828
- Args:
829
- job_title (str): The job title to search for.
830
- location (str, optional): The job location. Defaults to None.
831
- category (str, optional): The job category. Defaults to None.
832
- remote (bool, optional): Whether to fetch remote jobs. Defaults to True.
833
- max_results (int, optional): Maximum number of jobs to fetch. Defaults to 50.
834
-
835
- Returns:
836
- list: A list of job dictionaries.
837
- """
838
- base_url = "https://remotive.com/api/remote-jobs"
839
- params = {
840
- "search": job_title,
841
- "limit": max_results
842
- }
843
- if category:
844
- params["category"] = category
845
- try:
846
- response = requests.get(base_url, params=params)
847
- response.raise_for_status()
848
- jobs = response.json().get("jobs", [])
849
- if remote:
850
- # Filter for remote jobs if not already
851
- jobs = [job for job in jobs if job.get("candidate_required_location") == "Worldwide" or job.get("remote") == True]
852
- return jobs
853
- except requests.exceptions.RequestException as e:
854
- st.error(f"Error fetching jobs from Remotive: {e}")
855
- return []
856
-
857
- # The Muse API Integration
858
- @st.cache_data(ttl=86400) # Cache results for 1 day
859
- def fetch_muse_jobs_api(job_title, location=None, category=None, max_results=50):
860
- """
861
- Fetches job listings from The Muse API based on user preferences.
862
-
863
- Args:
864
- job_title (str): The job title to search for.
865
- location (str, optional): The job location. Defaults to None.
866
- category (str, optional): The job category. Defaults to None.
867
- max_results (int, optional): Maximum number of jobs to fetch. Defaults to 50.
868
-
869
- Returns:
870
- list: A list of job dictionaries.
871
- """
872
- base_url = "https://www.themuse.com/api/public/jobs"
873
- headers = {
874
- "Content-Type": "application/json"
875
- }
876
- params = {
877
- "page": 1,
878
- "per_page": max_results,
879
- "category": category,
880
- "location": location,
881
- "company": None # Can be extended based on needs
882
- }
883
- try:
884
- response = requests.get(base_url, params=params, headers=headers)
885
- response.raise_for_status()
886
- jobs = response.json().get("results", [])
887
- # Filter based on job title
888
- filtered_jobs = [job for job in jobs if job_title.lower() in job.get("name", "").lower()]
889
- return filtered_jobs
890
- except requests.exceptions.RequestException as e:
891
- st.error(f"Error fetching jobs from The Muse: {e}")
892
- return []
893
-
894
- # Adzuna API Integration
895
- @st.cache_data(ttl=86400) # Cache results for 1 day
896
- def fetch_adzuna_jobs_api(job_title, location="india", category=None, max_results=50):
897
- """
898
- Fetches job listings from Adzuna API based on user preferences.
899
-
900
- Args:
901
- job_title (str): The job title to search for.
902
- location (str, optional): The job location. Defaults to "india".
903
- category (str, optional): The job category. Defaults to None.
904
- max_results (int, optional): Maximum number of jobs to fetch. Defaults to 50.
905
-
906
- Returns:
907
- list: A list of job dictionaries.
908
- """
909
- base_url = f"https://api.adzuna.com/v1/api/jobs/{location}/search/1"
910
- params = {
911
- "app_id": ADZUNA_APP_ID,
912
- "app_key": ADZUNA_APP_KEY,
913
- "what": job_title,
914
- "results_per_page": max_results,
915
- "content-type": "application/json"
916
- }
917
- if category:
918
- params["category"] = category
919
- try:
920
- response = requests.get(base_url, params=params)
921
- response.raise_for_status()
922
- jobs = response.json().get("results", [])
923
- return jobs
924
- except requests.exceptions.RequestException as e:
925
- st.error(f"Error fetching jobs from Adzuna: {e}")
926
- return []
927
-
928
- def recommend_jobs(user_skills, user_preferences):
929
- """
930
- Recommends jobs based on user skills and preferences from Remotive API.
931
-
932
- Args:
933
- user_skills (list): List of user's skills.
934
- user_preferences (dict): User preferences like job title, location, category.
935
-
936
- Returns:
937
- list: Recommended job listings.
938
- """
939
- job_title = user_preferences.get("job_title", "")
940
- location = user_preferences.get("location")
941
- category = user_preferences.get("category")
942
-
943
- jobs = fetch_remotive_jobs_api(job_title, location, category)
944
-
945
- # Simple matching based on skills appearing in job description
946
- recommended_jobs = []
947
- for job in jobs:
948
- job_description = job.get("description", "").lower()
949
- match_score = sum(skill.lower() in job_description for skill in user_skills)
950
- if match_score > 0:
951
- recommended_jobs.append((match_score, job))
952
-
953
- # Sort jobs based on match_score
954
- recommended_jobs.sort(reverse=True, key=lambda x: x[0])
955
-
956
- # Return only the job dictionaries
957
- return [job for score, job in recommended_jobs[:10]] # Top 10 recommendations
958
 
959
- def recommend_muse_jobs(user_skills, user_preferences):
 
960
  """
961
- Recommends jobs from The Muse API based on user skills and preferences.
962
 
963
  Args:
964
- user_skills (list): List of user's skills.
965
- user_preferences (dict): User preferences like job title, location, category.
 
966
 
967
  Returns:
968
- list: Recommended job listings.
969
  """
970
- job_title = user_preferences.get("job_title", "")
971
- location = user_preferences.get("location")
972
- category = user_preferences.get("category")
973
-
974
- jobs = fetch_muse_jobs_api(job_title, location, category)
975
-
976
- # Simple matching based on skills appearing in job description
977
- recommended_jobs = []
978
- for job in jobs:
979
- job_description = job.get("contents", "").lower()
980
- match_score = sum(skill.lower() in job_description for skill in user_skills)
981
- if match_score > 0:
982
- recommended_jobs.append((match_score, job))
983
-
984
- # Sort jobs based on match_score
985
- recommended_jobs.sort(reverse=True, key=lambda x: x[0])
986
-
987
- # Return only the job dictionaries
988
- return [job for score, job in recommended_jobs[:10]] # Top 10 recommendations
989
 
990
- def recommend_adzuna_jobs(user_skills, user_preferences):
991
  """
992
- Recommends jobs from Adzuna API based on user skills and preferences.
993
 
994
  Args:
995
- user_skills (list): List of user's skills.
996
- user_preferences (dict): User preferences like job title, location, category.
997
-
998
- Returns:
999
- list: Recommended job listings.
1000
  """
1001
- job_title = user_preferences.get("job_title", "")
1002
- location = user_preferences.get("location", "india")
1003
- category = user_preferences.get("category")
1004
-
1005
- jobs = fetch_adzuna_jobs_api(job_title, location, category)
1006
-
1007
- # Simple matching based on skills appearing in job description
1008
- recommended_jobs = []
1009
- for job in jobs:
1010
- job_description = job.get("description", "").lower()
1011
- match_score = sum(skill.lower() in job_description for skill in user_skills)
1012
- if match_score > 0:
1013
- recommended_jobs.append((match_score, job))
1014
-
1015
- # Sort jobs based on match_score
1016
- recommended_jobs.sort(reverse=True, key=lambda x: x[0])
1017
-
1018
- # Return only the job dictionaries
1019
- return [job for score, job in recommended_jobs[:10]] # Top 10 recommendations
1020
 
1021
  # -------------------------------
1022
- # Labor Market Insights Module
1023
  # -------------------------------
1024
 
1025
  def labor_market_insights_module():
@@ -1049,8 +764,6 @@ def labor_market_insights_module():
1049
  - **Geographical Demand:** Some regions may have higher demand for certain roles, guiding your location preferences.
1050
  """)
1051
 
1052
- # Fetch and display more BLS data as needed
1053
-
1054
  # -------------------------------
1055
  # Page Functions
1056
  # -------------------------------
@@ -1403,7 +1116,7 @@ def job_recommendations_module():
1403
  if not job_title or not user_skills_input:
1404
  st.error("❌ Please enter both job title and your skills.")
1405
  return
1406
-
1407
  user_skills = [skill.strip() for skill in user_skills_input.split(",") if skill.strip()]
1408
  user_preferences = {
1409
  "job_title": job_title,
@@ -1412,35 +1125,22 @@ def job_recommendations_module():
1412
  }
1413
 
1414
  with st.spinner("🔄 Fetching job recommendations..."):
1415
- # Fetch recommendations from Remotive
1416
- remotive_jobs = recommend_jobs(user_skills, user_preferences)
1417
- # Fetch recommendations from The Muse
1418
- muse_jobs = recommend_muse_jobs(user_skills, user_preferences)
1419
- # Fetch recommendations from Adzuna
1420
- adzuna_jobs = recommend_adzuna_jobs(user_skills, user_preferences)
1421
-
1422
- # Combine all job listings
1423
- combined_jobs = remotive_jobs + muse_jobs + adzuna_jobs
1424
- # Remove duplicates based on job URL
1425
- unique_jobs = {}
1426
- for job in combined_jobs:
1427
- url = job.get("url") or job.get("redirect_url") or job.get("url_standard")
1428
- if url and url not in unique_jobs:
1429
- unique_jobs[url] = job
1430
-
1431
- if unique_jobs:
1432
  st.subheader("💼 Recommended Jobs:")
1433
- for idx, job in enumerate(unique_jobs.values(), 1):
1434
- job_title_display = job.get("title") or job.get("name")
1435
- company_display = job.get("company", {}).get("name") or job.get("company_name")
1436
- location_display = job.get("candidate_required_location") or job.get("location")
1437
- salary_display = f"${job.get('salary_min', 'N/A')} - ${job.get('salary_max', 'N/A')}" if job.get('salary_min') or job.get('salary_max') else "N/A"
 
1438
  job_url = job.get("url") or job.get("redirect_url") or job.get("url_standard")
1439
 
1440
  st.markdown(f"### {idx}. {job_title_display}")
1441
  st.markdown(f"**🏢 Company:** {company_display}")
1442
  st.markdown(f"**📍 Location:** {location_display}")
1443
- st.markdown(f"**💰 Salary:** {salary_display}")
1444
  st.markdown(f"**🔗 Job URL:** [Apply Here]({job_url})")
1445
  st.write("---")
1446
  else:
@@ -1565,65 +1265,6 @@ def networking_opportunities_module():
1565
  except Exception as e:
1566
  st.error(f"❌ Error fetching networking opportunities: {e}")
1567
 
1568
- def salary_estimation_module():
1569
- st.header("💵 Salary Estimation and Negotiation Tips")
1570
-
1571
- st.write("""
1572
- Understand the salary expectations for your desired roles and learn effective negotiation strategies.
1573
- """)
1574
-
1575
- # Create two columns for input fields
1576
- col1, col2 = st.columns(2)
1577
- with col1:
1578
- job_title = st.text_input("🔍 Enter the job title:")
1579
- with col2:
1580
- location = st.text_input("📍 Enter the location (e.g., New York, NY, USA):")
1581
-
1582
- if st.button("💰 Get Salary Estimate"):
1583
- if not job_title or not location:
1584
- st.error("❌ Please enter both job title and location.")
1585
- return
1586
- with st.spinner("🔄 Fetching salary data..."):
1587
- # Job Salary Data API Integration
1588
- salary_data = get_job_recommendations(job_title, location)
1589
- if salary_data:
1590
- min_salary = salary_data.get("min_salary")
1591
- avg_salary = salary_data.get("avg_salary")
1592
- max_salary = salary_data.get("max_salary")
1593
-
1594
- if min_salary and avg_salary and max_salary:
1595
- st.subheader("💲 Salary Estimate:")
1596
- st.write(f"**Minimum Salary:** ${min_salary:,}")
1597
- st.write(f"**Average Salary:** ${avg_salary:,}")
1598
- st.write(f"**Maximum Salary:** ${max_salary:,}")
1599
-
1600
- # Visualization
1601
- salary_df = pd.DataFrame({
1602
- "Salary Range": ["Minimum", "Average", "Maximum"],
1603
- "Amount": [min_salary, avg_salary, max_salary]
1604
- })
1605
-
1606
- fig = px.bar(salary_df, x="Salary Range", y="Amount",
1607
- title=f"Salary Estimates for {job_title} in {location}",
1608
- labels={"Amount": "Salary (USD)"},
1609
- text_auto=True)
1610
- st.plotly_chart(fig, use_container_width=True)
1611
- else:
1612
- st.error("❌ Salary data not available for the provided job title and location.")
1613
-
1614
- # Generate negotiation tips using Groq
1615
- tips_prompt = f"""
1616
- Provide a list of 5 effective tips for negotiating a salary for a {job_title} position in {location}.
1617
- """
1618
- try:
1619
- tips = llm.invoke(tips_prompt).content.strip()
1620
- st.subheader("📝 Negotiation Tips:")
1621
- st.write(tips)
1622
- except Exception as e:
1623
- st.error(f"❌ Error generating negotiation tips: {e}")
1624
- else:
1625
- st.error("❌ Failed to retrieve salary data.")
1626
-
1627
  def feedback_and_improvement_module():
1628
  st.header("🗣️ Feedback and Continuous Improvement")
1629
 
@@ -1825,209 +1466,6 @@ def help_page():
1825
  Navigate to the **Feedback and Continuous Improvement** section, fill out the form, and submit your feedback.
1826
  """)
1827
 
1828
- # -------------------------------
1829
- # YouTube Video Search and Embed Functions
1830
- # -------------------------------
1831
-
1832
- @st.cache_data(ttl=86400) # Cache results for 1 day
1833
- def search_youtube_videos(query, max_results=2, video_duration="long"):
1834
- """
1835
- Searches YouTube for videos matching the query and returns video URLs.
1836
-
1837
- Args:
1838
- query (str): Search query.
1839
- max_results (int, optional): Number of videos to return. Defaults to 2.
1840
- video_duration (str, optional): Duration filter ('any', 'short', 'medium', 'long'). Defaults to "long".
1841
-
1842
- Returns:
1843
- list: List of YouTube video URLs.
1844
- """
1845
- search_url = "https://www.googleapis.com/youtube/v3/search"
1846
- params = {
1847
- "part": "snippet",
1848
- "q": query,
1849
- "type": "video",
1850
- "maxResults": max_results,
1851
- "videoDuration": video_duration,
1852
- "key": YOUTUBE_API_KEY
1853
- }
1854
- try:
1855
- response = requests.get(search_url, params=params)
1856
- response.raise_for_status()
1857
- results = response.json().get("items", [])
1858
- video_urls = [f"https://www.youtube.com/watch?v={item['id']['videoId']}" for item in results]
1859
- return video_urls
1860
- except requests.exceptions.RequestException as e:
1861
- st.error(f"❌ Error fetching YouTube videos: {e}")
1862
- return []
1863
-
1864
- def embed_youtube_videos(video_urls, module_name):
1865
- """
1866
- Embeds YouTube videos in the Streamlit app.
1867
-
1868
- Args:
1869
- video_urls (list): List of YouTube video URLs.
1870
- module_name (str): Name of the module for context.
1871
- """
1872
- for url in video_urls:
1873
- st.video(url)
1874
-
1875
- def fetch_jsearch_estimated_salary(job_title, location, radius=100):
1876
- """
1877
- Fetches estimated salary data for a given job title and location using the JSearch API.
1878
-
1879
- Args:
1880
- job_title (str): The job title to search for (e.g., "NodeJS Developer").
1881
- location (str): The job location (e.g., "New York, NY, USA").
1882
- radius (int, optional): Search radius in miles. Defaults to 100.
1883
-
1884
- Returns:
1885
- dict: A dictionary containing minimum, average, and maximum salary estimates.
1886
- """
1887
- url = "https://jsearch.p.rapidapi.com/estimated-salary"
1888
-
1889
- # Encode job title and location to handle spaces and special characters
1890
- encoded_job_title = quote(job_title.strip())
1891
- encoded_location = quote(location.strip())
1892
-
1893
- querystring = {
1894
- "job_title": encoded_job_title,
1895
- "location": encoded_location,
1896
- "radius": str(radius)
1897
- }
1898
-
1899
- headers = {
1900
- "x-rapidapi-key": st.secrets["RAPIDAPI_KEY"], # Ensure RAPIDAPI_KEY is correctly set in secrets.toml
1901
- "x-rapidapi-host": "jsearch.p.rapidapi.com"
1902
- }
1903
-
1904
- try:
1905
- response = requests.get(url, headers=headers, params=querystring)
1906
- response.raise_for_status() # Raises HTTPError for bad responses (4xx or 5xx)
1907
- data = response.json()
1908
-
1909
- # Extract salary estimates
1910
- min_salary = data.get("min_salary")
1911
- avg_salary = data.get("avg_salary")
1912
- max_salary = data.get("max_salary")
1913
-
1914
- if not all([min_salary, avg_salary, max_salary]):
1915
- st.error("❌ Incomplete salary data received from the API.")
1916
- return {}
1917
-
1918
- return {
1919
- "min_salary": min_salary,
1920
- "avg_salary": avg_salary,
1921
- "max_salary": max_salary
1922
- }
1923
-
1924
- except requests.exceptions.HTTPError as http_err:
1925
- if response.status_code == 403:
1926
- st.error("❌ Access Forbidden: Check your API key and permissions.")
1927
- elif response.status_code == 404:
1928
- st.error("❌ Resource Not Found: Verify the endpoint and parameters.")
1929
- else:
1930
- st.error(f"❌ HTTP error occurred: {http_err}")
1931
- return {}
1932
-
1933
- except requests.exceptions.RequestException as req_err:
1934
- st.error(f"❌ Request Exception: {req_err}")
1935
- return {}
1936
-
1937
- except Exception as e:
1938
- st.error(f"❌ An unexpected error occurred: {e}")
1939
- return {}
1940
-
1941
- def job_recommendations_module():
1942
- st.header("🔍 Job Matching & Recommendations")
1943
-
1944
- st.write("""
1945
- Discover job opportunities tailored to your skills and preferences. Get personalized recommendations from multiple job platforms.
1946
- """)
1947
-
1948
- # User Preferences Form
1949
- st.subheader("🎯 Set Your Preferences")
1950
- with st.form("preferences_form"):
1951
- job_title = st.text_input("🔍 Desired Job Title", placeholder="e.g., Data Scientist, Backend Developer")
1952
- location = st.text_input("📍 Preferred Location", placeholder="e.g., New York, NY, USA or Remote")
1953
- category = st.selectbox("📂 Job Category", ["", "Engineering", "Marketing", "Design", "Sales", "Finance", "Healthcare", "Education", "Other"])
1954
- user_skills_input = st.text_input("💡 Your Skills (comma-separated)", placeholder="e.g., Python, Machine Learning, SQL")
1955
- submitted = st.form_submit_button("🚀 Get Recommendations")
1956
-
1957
- if submitted:
1958
- if not job_title or not user_skills_input:
1959
- st.error("❌ Please enter both job title and your skills.")
1960
- return
1961
-
1962
- user_skills = [skill.strip() for skill in user_skills_input.split(",") if skill.strip()]
1963
- user_preferences = {
1964
- "job_title": job_title,
1965
- "location": location,
1966
- "category": category
1967
- }
1968
-
1969
- with st.spinner("🔄 Fetching job recommendations..."):
1970
- # Fetch recommendations from Remotive
1971
- remotive_jobs = recommend_jobs(user_skills, user_preferences)
1972
- # Fetch recommendations from The Muse
1973
- muse_jobs = recommend_muse_jobs(user_skills, user_preferences)
1974
- # Fetch recommendations from Adzuna
1975
- adzuna_jobs = recommend_adzuna_jobs(user_skills, user_preferences)
1976
-
1977
- # Combine all job listings
1978
- combined_jobs = remotive_jobs + muse_jobs + adzuna_jobs
1979
- # Remove duplicates based on job URL
1980
- unique_jobs = {}
1981
- for job in combined_jobs:
1982
- url = job.get("url") or job.get("redirect_url") or job.get("url_standard")
1983
- if url and url not in unique_jobs:
1984
- unique_jobs[url] = job
1985
-
1986
- if unique_jobs:
1987
- st.subheader("💼 Recommended Jobs:")
1988
- for idx, job in enumerate(unique_jobs.values(), 1):
1989
- job_title_display = job.get("title") or job.get("name")
1990
- company_display = job.get("company", {}).get("name") or job.get("company_name")
1991
- location_display = job.get("candidate_required_location") or job.get("location")
1992
- salary_display = f"${job.get('salary_min', 'N/A')} - ${job.get('salary_max', 'N/A')}" if job.get('salary_min') or job.get('salary_max') else "N/A"
1993
- job_url = job.get("url") or job.get("redirect_url") or job.get("url_standard")
1994
-
1995
- st.markdown(f"### {idx}. {job_title_display}")
1996
- st.markdown(f"**🏢 Company:** {company_display}")
1997
- st.markdown(f"**📍 Location:** {location_display}")
1998
- st.markdown(f"**💰 Salary:** {salary_display}")
1999
- st.markdown(f"**🔗 Job URL:** [Apply Here]({job_url})")
2000
- st.write("---")
2001
- else:
2002
- st.info("ℹ️ No job recommendations found based on your criteria.")
2003
-
2004
- def labor_market_insights_module():
2005
- st.header("📈 Labor Market Insights")
2006
-
2007
- st.write("""
2008
- Gain valuable insights into the current labor market trends, employment rates, and industry growth to make informed career decisions.
2009
- """)
2010
-
2011
- # Define BLS Series IDs based on desired data
2012
- # Example: Unemployment rate (Series ID: LNS14000000)
2013
- # Reference: https://www.bls.gov/web/laus/laumstrk.htm
2014
- unemployment_series_id = "LNS14000000" # Unemployment Rate
2015
- employment_series_id = "CEU0000000001" # Total Employment
2016
-
2017
- # Display Unemployment Rate
2018
- display_bls_data(unemployment_series_id, "Unemployment Rate (%)")
2019
-
2020
- # Display Total Employment
2021
- display_bls_data(employment_series_id, "Total Employment")
2022
-
2023
- # Additional Insights
2024
- st.subheader("💡 Additional Insights")
2025
- st.write("""
2026
- - **Industry Growth:** Understanding which industries are growing can help you target your job search effectively.
2027
- - **Salary Trends:** Keeping an eye on salary trends ensures that you negotiate effectively and align your expectations.
2028
- - **Geographical Demand:** Some regions may have higher demand for certain roles, guiding your location preferences.
2029
- """)
2030
-
2031
  # -------------------------------
2032
  # Main App Function
2033
  # -------------------------------
@@ -2055,11 +1493,11 @@ def main_app():
2055
  menu_title="📂 Main Menu",
2056
  options=["Email Generator", "Cover Letter Generator", "Resume Analysis", "Application Tracking",
2057
  "Job Recommendations", "Labor Market Insights", "Interview Preparation", "Personalized Learning Paths",
2058
- "Networking Opportunities", "Salary Estimation", "Feedback", "Gamification", "Resource Library",
2059
  "Success Stories", "Chatbot Support", "Help"],
2060
  icons=["envelope", "file-earmark-text", "file-person", "briefcase",
2061
  "search", "bar-chart-line", "microphone", "book",
2062
- "people", "currency-dollar", "chat-left-text", "trophy", "collection",
2063
  "star", "robot", "question-circle"],
2064
  menu_icon="cast",
2065
  default_index=0,
@@ -2090,8 +1528,6 @@ def main_app():
2090
  personalized_learning_paths_module()
2091
  elif selected == "Networking Opportunities":
2092
  networking_opportunities_module()
2093
- elif selected == "Salary Estimation":
2094
- salary_estimation_module()
2095
  elif selected == "Feedback":
2096
  feedback_and_improvement_module()
2097
  elif selected == "Gamification":
@@ -2105,6 +1541,5 @@ def main_app():
2105
  elif selected == "Help":
2106
  help_page()
2107
 
2108
-
2109
  if __name__ == "__main__":
2110
  main_app()
 
 
 
1
  import streamlit as st
2
  import requests
3
  from langchain_groq import ChatGroq
 
12
  import fitz # PyMuPDF
13
  from bs4 import BeautifulSoup
14
 
15
+
16
  GROQ_API_KEY = st.secrets["GROQ_API_KEY"]
17
  RAPIDAPI_KEY = st.secrets["RAPIDAPI_KEY"]
18
  YOUTUBE_API_KEY = st.secrets["YOUTUBE_API_KEY"]
 
202
  st.error(f"Error suggesting keywords: {e}")
203
  return []
204
205
  def create_skill_distribution_chart(skills):
206
  """
207
  Creates a bar chart showing the distribution of skills.
 
350
 
351
  # Adzuna API Integration
352
  @st.cache_data(ttl=86400) # Cache results for 1 day
353
+ def fetch_adzuna_jobs_api(job_title, location="india", category=None, max_results=50):
354
  """
355
  Fetches job listings from Adzuna API based on user preferences.
356
 
357
  Args:
358
+ job_title (str): The job title to search for.
359
+ location (str, optional): The job location. Defaults to "india".
360
  category (str, optional): The job category. Defaults to None.
361
  max_results (int, optional): Maximum number of jobs to fetch. Defaults to 50.
362
 
363
  Returns:
364
  list: A list of job dictionaries.
365
  """
366
+ base_url = f"https://api.adzuna.com/v1/api/jobs/{location}/search/1"
367
  params = {
368
+ "app_id": ADZUNA_APP_ID,
369
+ "app_key": ADZUNA_APP_KEY,
370
+ "what": job_title,
371
  "results_per_page": max_results,
372
  "content-type": "application/json"
373
  }
 
374
  if category:
375
  params["category"] = category
 
376
  try:
377
  response = requests.get(base_url, params=params)
378
+ response.raise_for_status()
379
  jobs = response.json().get("results", [])
380
  return jobs
381
+ except requests.exceptions.RequestException as e:
382
+ st.error(f"Error fetching jobs from Adzuna: {e}")
383
+ return []
384
+
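A minimal standalone sketch of the Adzuna request issued by fetch_adzuna_jobs_api above; the credentials here are read from the environment as placeholders, and the two-letter country code (e.g. "in", "us", "gb") is what Adzuna expects in the URL path.

import os
import requests

ADZUNA_APP_ID = os.environ.get("ADZUNA_APP_ID", "<app_id>")     # placeholder
ADZUNA_APP_KEY = os.environ.get("ADZUNA_APP_KEY", "<app_key>")  # placeholder

def adzuna_search(what, country="in", results_per_page=10):
    # Same endpoint shape as fetch_adzuna_jobs_api.
    url = f"https://api.adzuna.com/v1/api/jobs/{country}/search/1"
    params = {
        "app_id": ADZUNA_APP_ID,
        "app_key": ADZUNA_APP_KEY,
        "what": what,
        "results_per_page": results_per_page,
        "content-type": "application/json",
    }
    resp = requests.get(url, params=params, timeout=30)
    resp.raise_for_status()
    return resp.json().get("results", [])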
385
+ # Indeed API Integration
386
+ @st.cache_data(ttl=86400) # Cache results for 1 day
387
+ def fetch_indeed_jobs_api(job_title, country="CA", sort="-1", page_size=50):
388
+ """
389
+ Fetches job listings from Indeed API based on user preferences.
390
+
391
+ Args:
392
+ job_title (str): The job title to search for (e.g., "Front end developer").
393
+ country (str, optional): The country code (e.g., "CA" for Canada). Defaults to "CA".
394
+ sort (str, optional): Sorting parameter (e.g., "-1" for relevance). Defaults to "-1".
395
+ page_size (int, optional): Number of results per page. Defaults to 50.
396
+
397
+ Returns:
398
+ list: A list of job dictionaries.
399
+ """
400
+ url = "https://indeed46.p.rapidapi.com/job"
401
+
402
+ # Encode job title to handle spaces and special characters
403
+ encoded_job_title = re.sub(r'\s+', '+', job_title.strip())
404
+
405
+ querystring = {
406
+ "country": country,
407
+ "sort": sort,
408
+ "page_size": str(page_size),
409
+ "title": encoded_job_title # Assuming the API accepts a 'title' parameter for job titles
410
+ }
411
+
412
+ headers = {
413
+ "x-rapidapi-key": RAPIDAPI_KEY, # Ensure RAPIDAPI_KEY is set in .streamlit/secrets.toml
414
+ "x-rapidapi-host": "indeed46.p.rapidapi.com"
415
+ }
416
+
417
+ try:
418
+ response = requests.get(url, headers=headers, params=querystring)
419
+ response.raise_for_status() # Raises HTTPError for bad responses (4xx or 5xx)
420
+ data = response.json()
421
+ jobs = data.get("data", [])
422
+ return jobs
423
  except requests.exceptions.HTTPError as http_err:
424
+ if response.status_code == 400:
425
+ st.error("❌ Bad Request: Please check the parameters you're sending.")
426
+ elif response.status_code == 403:
427
+ st.error("❌ Access Forbidden: Check your API key and permissions.")
428
  elif response.status_code == 404:
429
  st.error("❌ Resource Not Found: Verify the endpoint and parameters.")
430
  else:
431
  st.error(f"❌ HTTP error occurred: {http_err}")
432
  return []
 
433
  except requests.exceptions.RequestException as req_err:
434
  st.error(f"❌ Request Exception: {req_err}")
435
  return []
 
436
  except Exception as e:
437
  st.error(f"❌ An unexpected error occurred: {e}")
438
  return []
439
+
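A standalone sketch of the RapidAPI call made by fetch_indeed_jobs_api above; the endpoint, headers, and query parameters are copied from the helper, and, as its own comment notes, the 'title' parameter is an assumption about this API.

import os
import re
import requests

RAPIDAPI_KEY = os.environ.get("RAPIDAPI_KEY", "<rapidapi_key>")  # placeholder

def indeed_search(job_title, country="CA", page_size=10):
    # Mirrors fetch_indeed_jobs_api; 'title' is the same assumed parameter.
    querystring = {
        "country": country,
        "sort": "-1",
        "page_size": str(page_size),
        "title": re.sub(r"\s+", "+", job_title.strip()),
    }
    headers = {
        "x-rapidapi-key": RAPIDAPI_KEY,
        "x-rapidapi-host": "indeed46.p.rapidapi.com",
    }
    resp = requests.get("https://indeed46.p.rapidapi.com/job",
                        headers=headers, params=querystring, timeout=30)
    resp.raise_for_status()
    return resp.json().get("data", [])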
440
+ def recommend_indeed_jobs(user_skills, user_preferences):
441
  """
442
+ Recommends jobs from Indeed API based on user skills and preferences.
443
+
444
  Args:
445
  user_skills (list): List of user's skills.
446
  user_preferences (dict): User preferences like job title, location, category.
447
+
448
  Returns:
449
  list: Recommended job listings.
450
  """
451
  job_title = user_preferences.get("job_title", "")
452
+ country = user_preferences.get("location_country", "CA")
453
  category = user_preferences.get("category")
454
+
455
+ jobs = fetch_indeed_jobs_api(job_title, country, sort="-1", page_size=50)
456
+
457
  # Simple matching based on skills appearing in job description
458
  recommended_jobs = []
459
  for job in jobs:
 
461
  match_score = sum(skill.lower() in job_description for skill in user_skills)
462
  if match_score > 0:
463
  recommended_jobs.append((match_score, job))
464
465
  # Sort jobs based on match_score
466
  recommended_jobs.sort(reverse=True, key=lambda x: x[0])
467
+
468
  # Return only the job dictionaries
469
  return [job for score, job in recommended_jobs[:10]] # Top 10 recommendations
470
 
471
+ def recommend_jobs(user_skills, user_preferences):
472
  """
473
+ Recommends jobs based on user skills and preferences from Remotive, The Muse, Adzuna, and Indeed APIs.
474
+
475
  Args:
476
  user_skills (list): List of user's skills.
477
  user_preferences (dict): User preferences like job title, location, category.
478
+
479
  Returns:
480
  list: Recommended job listings.
481
  """
482
+ # Fetch from Remotive
483
+ remotive_jobs = fetch_remotive_jobs_api(user_preferences.get("job_title", ""), user_preferences.get("location"), user_preferences.get("category"))
484
+
485
+ # Fetch from The Muse
486
+ muse_jobs = fetch_muse_jobs_api(user_preferences.get("job_title", ""), user_preferences.get("location"), user_preferences.get("category"))
487
+
488
+ # Fetch from Adzuna
489
+ adzuna_jobs = fetch_adzuna_jobs_api(user_preferences.get("job_title", ""), user_preferences.get("location", "india"), user_preferences.get("category"))
490
+
491
+ # Fetch from Indeed
492
+ indeed_jobs = recommend_indeed_jobs(user_skills, user_preferences)
493
+
494
+ # Combine all job listings
495
+ combined_jobs = remotive_jobs + muse_jobs + adzuna_jobs + indeed_jobs
496
+
497
+ # Remove duplicates based on job URL
498
+ unique_jobs = {}
499
+ for job in combined_jobs:
500
+ url = job.get("url") or job.get("redirect_url") or job.get("url_standard")
501
+ if url and url not in unique_jobs:
502
+ unique_jobs[url] = job
503
+
504
+ return list(unique_jobs.values())
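The matching, ranking, and URL de-duplication heuristics used by the recommenders above, condensed into one self-contained sketch with made-up sample data:

def score_and_dedupe(jobs, user_skills, top_n=10):
    # Count skill mentions in the description text, keep the highest-scoring
    # jobs, then de-duplicate on the job URL.
    scored = []
    for job in jobs:
        text = (job.get("description") or job.get("contents") or "").lower()
        score = sum(skill.lower() in text for skill in user_skills)
        if score > 0:
            scored.append((score, job))
    scored.sort(reverse=True, key=lambda x: x[0])

    unique = {}
    for _, job in scored[:top_n]:
        url = job.get("url") or job.get("redirect_url") or job.get("url_standard")
        if url and url not in unique:
            unique[url] = job
    return list(unique.values())

sample = [{"description": "Python and SQL required", "url": "https://example.com/job/1"}]
print(score_and_dedupe(sample, ["Python", "SQL"]))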
505
 
506
  # -------------------------------
507
  # BLS API Integration and Display
 
511
  def fetch_bls_data(series_ids, start_year=2020, end_year=datetime.now().year):
512
  """
513
  Fetches labor market data from the BLS API.
514
+
515
  Args:
516
  series_ids (list): List of BLS series IDs.
517
  start_year (int, optional): Start year for data. Defaults to 2020.
518
  end_year (int, optional): End year for data. Defaults to current year.
519
+
520
  Returns:
521
  dict: BLS data response.
522
  """
 
545
  def display_bls_data(series_id, title):
546
  """
547
  Processes and displays BLS data with visualizations.
548
+
549
  Args:
550
  series_id (str): BLS series ID.
551
  title (str): Title for the visualization.
 
554
  if not data:
555
  st.info("No data available.")
556
  return
557
+
558
  series_data = data.get("series", [])[0]
559
  series_title = series_data.get("title", title)
560
  observations = series_data.get("data", [])
561
+
562
  # Extract year and value
563
  years = [int(obs["year"]) for obs in observations]
564
  values = [float(obs["value"].replace(',', '')) for obs in observations]
565
+
566
  df = pd.DataFrame({
567
  "Year": years,
568
  "Value": values
569
  }).sort_values("Year")
570
+
571
  st.markdown(f"### {series_title}")
572
  fig = px.line(df, x="Year", y="Value", title=series_title, markers=True)
573
  st.plotly_chart(fig, use_container_width=True)
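For the data munging in display_bls_data, a self-contained sketch with sample (invented) observation values in the shape the BLS response provides:

import pandas as pd
import plotly.express as px

observations = [
    {"year": "2022", "value": "3.6"},
    {"year": "2023", "value": "3.7"},
    {"year": "2024", "value": "4.1"},
]
df = pd.DataFrame({
    "Year": [int(o["year"]) for o in observations],
    "Value": [float(o["value"].replace(",", "")) for o in observations],
}).sort_values("Year")
fig = px.line(df, x="Year", y="Value", title="Unemployment Rate (%)", markers=True)
fig.show()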
 
673
  {career_goal}
674
 
675
  **Current Skills:**
676
+ {current_skills}
677
+
678
+ **Learning Path:**
679
+ """
680
+
681
+ try:
682
+ response = llm.invoke(prompt)
683
+ learning_path = response.content.strip()
684
+ return learning_path
685
+ except Exception as e:
686
+ st.error(f"Error generating learning path: {e}")
687
+ return ""
688
+
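The prompt-to-text pattern used by generate_learning_path above boils down to the following sketch; the model name and key handling are illustrative assumptions, since the app constructs its own llm from GROQ_API_KEY in st.secrets.

import os
from langchain_groq import ChatGroq

# Model name is an assumption for illustration.
llm = ChatGroq(model="llama3-8b-8192", groq_api_key=os.environ["GROQ_API_KEY"])

prompt = "Suggest a four-step learning path for someone who knows Python and wants to become a data analyst."
print(llm.invoke(prompt).content.strip())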
689
+ # -------------------------------
690
+ # YouTube Video Search and Embed Functions
691
+ # -------------------------------
692
 
693
+ @st.cache_data(ttl=86400) # Cache results for 1 day
694
+ def search_youtube_videos(query, max_results=2, video_duration="long"):
695
  """
696
+ Searches YouTube for videos matching the query and returns video URLs.
697
 
698
  Args:
699
+ query (str): Search query.
700
+ max_results (int, optional): Number of videos to return. Defaults to 2.
701
+ video_duration (str, optional): Duration filter ('any', 'short', 'medium', 'long'). Defaults to "long".
702
 
703
  Returns:
704
+ list: List of YouTube video URLs.
705
  """
706
+ search_url = "https://www.googleapis.com/youtube/v3/search"
707
+ params = {
708
+ "part": "snippet",
709
+ "q": query,
710
+ "type": "video",
711
+ "maxResults": max_results,
712
+ "videoDuration": video_duration,
713
+ "key": YOUTUBE_API_KEY
714
+ }
715
+ try:
716
+ response = requests.get(search_url, params=params)
717
+ response.raise_for_status()
718
+ results = response.json().get("items", [])
719
+ video_urls = [f"https://www.youtube.com/watch?v={item['id']['videoId']}" for item in results]
720
+ return video_urls
721
+ except requests.exceptions.RequestException as e:
722
+ st.error(f"❌ Error fetching YouTube videos: {e}")
723
+ return []
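A standalone sketch of the YouTube Data API v3 search request made by search_youtube_videos above; the key is read from the environment as a placeholder.

import os
import requests

YOUTUBE_API_KEY = os.environ.get("YOUTUBE_API_KEY", "<api_key>")  # placeholder

def youtube_search(query, max_results=2, video_duration="long"):
    # Same request shape as search_youtube_videos.
    params = {
        "part": "snippet",
        "q": query,
        "type": "video",
        "maxResults": max_results,
        "videoDuration": video_duration,
        "key": YOUTUBE_API_KEY,
    }
    resp = requests.get("https://www.googleapis.com/youtube/v3/search",
                        params=params, timeout=30)
    resp.raise_for_status()
    items = resp.json().get("items", [])
    return [f"https://www.youtube.com/watch?v={i['id']['videoId']}" for i in items]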
 
724
 
725
+ def embed_youtube_videos(video_urls, module_name):
726
  """
727
+ Embeds YouTube videos in the Streamlit app.
728
 
729
  Args:
730
+ video_urls (list): List of YouTube video URLs.
731
+ module_name (str): Name of the module for context.
 
 
 
732
  """
733
+ for url in video_urls:
734
+ st.video(url)
735
 
736
  # -------------------------------
737
+ # Job Recommendations and BLS Integration
738
  # -------------------------------
739
 
740
  def labor_market_insights_module():
 
764
  - **Geographical Demand:** Some regions may have higher demand for certain roles, guiding your location preferences.
765
  """)
766
 
 
 
767
  # -------------------------------
768
  # Page Functions
769
  # -------------------------------
 
1116
  if not job_title or not user_skills_input:
1117
  st.error("❌ Please enter both job title and your skills.")
1118
  return
1119
+
1120
  user_skills = [skill.strip() for skill in user_skills_input.split(",") if skill.strip()]
1121
  user_preferences = {
1122
  "job_title": job_title,
 
1125
  }
1126
 
1127
  with st.spinner("🔄 Fetching job recommendations..."):
1128
+ # Fetch recommendations from all APIs
1129
+ recommended_jobs = recommend_jobs(user_skills, user_preferences)
1130
+
1131
+ if recommended_jobs:
1132
  st.subheader("💼 Recommended Jobs:")
1133
+ for idx, job in enumerate(recommended_jobs, 1):
1134
+ # Depending on the API, job data structure might differ
1135
+ job_title_display = job.get("title") or job.get("name") or job.get("jobTitle")
1136
+ company_display = job.get("company", {}).get("name") or job.get("company_name") or job.get("employer", {}).get("name")
1137
+ location_display = job.get("candidate_required_location") or job.get("location") or job.get("country")
1138
+ salary_display = "N/A" # Salary is removed
1139
  job_url = job.get("url") or job.get("redirect_url") or job.get("url_standard")
1140
 
1141
  st.markdown(f"### {idx}. {job_title_display}")
1142
  st.markdown(f"**🏢 Company:** {company_display}")
1143
  st.markdown(f"**📍 Location:** {location_display}")
 
1144
  st.markdown(f"**🔗 Job URL:** [Apply Here]({job_url})")
1145
  st.write("---")
1146
  else:
 
1265
  except Exception as e:
1266
  st.error(f"❌ Error fetching networking opportunities: {e}")
1267
1268
  def feedback_and_improvement_module():
1269
  st.header("🗣️ Feedback and Continuous Improvement")
1270
 
 
1466
  Navigate to the **Feedback and Continuous Improvement** section, fill out the form, and submit your feedback.
1467
  """)
1468
 
 
 
 
 
 
 
1469
  # -------------------------------
1470
  # Main App Function
1471
  # -------------------------------
 
1493
  menu_title="📂 Main Menu",
1494
  options=["Email Generator", "Cover Letter Generator", "Resume Analysis", "Application Tracking",
1495
  "Job Recommendations", "Labor Market Insights", "Interview Preparation", "Personalized Learning Paths",
1496
+ "Networking Opportunities", "Feedback", "Gamification", "Resource Library",
1497
  "Success Stories", "Chatbot Support", "Help"],
1498
  icons=["envelope", "file-earmark-text", "file-person", "briefcase",
1499
  "search", "bar-chart-line", "microphone", "book",
1500
+ "people", "chat-left-text", "trophy", "collection",
1501
  "star", "robot", "question-circle"],
1502
  menu_icon="cast",
1503
  default_index=0,
 
1528
  personalized_learning_paths_module()
1529
  elif selected == "Networking Opportunities":
1530
  networking_opportunities_module()
 
 
1531
  elif selected == "Feedback":
1532
  feedback_and_improvement_module()
1533
  elif selected == "Gamification":
 
1541
  elif selected == "Help":
1542
  help_page()
1543
 
 
1544
  if __name__ == "__main__":
1545
  main_app()