# Vicuna / app.py — Hugging Face Space by skoneru
# Last change: "Update app.py", commit 77edf2f (verified)
import os
import wget
import sys
import bs4
import json
import pandas as pd
from huggingface_hub import InferenceClient
import urllib.request
import gradio as gr
def get_menu():
    """Scrape today's menu from the KIT Mensa (Adenauerring) website.

    Returns:
        A formatted string listing, per serving line, each dish with its
        price and energy ("Calories") information, ready to show in chat.

    Raises:
        urllib.error.URLError: if the menu page cannot be fetched.
        AttributeError: if the page layout changes and the expected
            ``canteen_day_1`` div or its spans are missing.
    """
    # Fetch the page; the context manager closes the connection (the
    # original left the urlopen handle open).
    with urllib.request.urlopen(
        "https://www.sw-ka.de/en/hochschulgastronomie/speiseplan/mensa_adenauerring/"
    ) as fp:
        html_content = fp.read().decode("utf8")

    soup = bs4.BeautifulSoup(html_content, 'html.parser')
    # 'canteen_day_1' holds today's menu (looked up once; the original
    # recomputed it three times).
    canteen_div = soup.find('div', id='canteen_day_1')

    foods = []
    prices = []
    nutri = []
    line_names = []

    # Pass 1: dish name, price and energy value per menu row.  Only the
    # first table is read — the original broke out of the loop after one
    # table; presumably the day's menu lives in a single table (TODO
    # confirm against the live page).
    first_table = canteen_div.find('table')
    menu_items = first_table.find_all(
        'tr',
        class_=lambda class_name: class_name and class_name.startswith('mt-'))
    for item in menu_items:
        food_name = item.find('span', class_='bg').text.strip()
        price = item.find('span', class_='bgp price_1').text.strip()

        # Gather the nutrition facts into a dict keyed by the German label
        # shown on the page (e.g. 'Energie').
        nutritional_info = {}
        nutritional_data = item.find('div', class_='nutrition_facts')
        if nutritional_data:
            for element in nutritional_data.find_all(
                    'div', class_=['energie', 'proteine', 'kohlenhydrate',
                                   'zucker', 'fett', 'gesaettigt', 'salz']):
                key = element.find('div').text.strip()
                value = element.find_all('div')[1].text.strip()
                nutritional_info[key] = value

        foods.append(food_name)
        prices.append(price)
        # Only the energy value is surfaced; a dish without it gets "".
        # (Narrowed from a bare `except:` — only KeyError can occur here.)
        try:
            nutri.append(json.dumps(nutritional_info['Energie'], indent=4))
        except KeyError:
            nutri.append("")

    # Pass 2: one serving-line name per dish, in the same document order,
    # so it can be zipped with the lists collected above.
    for table in canteen_div.find_all('table'):
        for row in table.find_all('tr', class_='mensatype_rows'):
            row_name = row.find('div').get_text(strip=True)
            for _menu_title in row.find_all('td', class_='menu-title'):
                line_names.append(row_name)

    # Build the human-readable menu, grouped by serving line in document
    # order (sort=False keeps the page's ordering).
    df = pd.DataFrame(zip(line_names, foods, prices, nutri),
                      columns=['line', 'food', 'price', 'nutri'])
    parts = []
    for line, df_group in df.groupby('line', sort=False):
        parts.append("Line Name: " + line + "\n")
        for _idx, row in df_group.iterrows():
            parts.append(row['food'] + "\n")
            parts.append("Price: " + row['price'] + "\n")
            parts.append("Calories: " + row['nutri'] + "\n")
    return "".join(parts)
def reply_bot(message, history):
    """Gradio chat handler: stream a text-generation reply for *message*.

    Args:
        message: The user's latest chat message, used as the raw prompt.
        history: Chat history supplied by gr.ChatInterface (unused here).

    Yields:
        The partial answer so far, growing token by token so the Gradio
        UI can render a streaming response.
    """
    client = InferenceClient(model="https://hardy-casual-adder.ngrok-free.app")
    curr_prompt = message
    try:
        print(curr_prompt)
        answer = ""
        for token in client.text_generation(prompt=curr_prompt,
                                            max_new_tokens=512, stream=True):
            answer += token
            yield answer
    except Exception:
        # BUG FIX: this function is a generator, so `return <value>` only
        # sets StopIteration.value and the message never reached the UI.
        # The fallback text must be yielded for Gradio to display it.
        yield "Clear History or ask FR to increase Context Window. Current capacity only 4k tokens"
# Wire the streaming chat handler into a Gradio chat UI and start serving.
chat_ui = gr.ChatInterface(reply_bot)
chat_ui.launch()