"""Gradio demo that previews sample datasets and predicts consumer attributes
via a hosted Fantix inference endpoint."""

import time
import random

import gradio as gr
import pandas as pd
import requests

# Brand names shared by every per-brand attribute family (Brand Propensity,
# Recency, Monetary, Frequency, LTV, Share Wallet).  Defining them once keeps
# the families in sync instead of repeating the same 55 names six times.
BRANDS = [
    "365 Retail Markets", "7 Eleven", "Affirm", "Afterpay", "Albert",
    "Amazon", "Amazon Prime Video", "Apple", "Bp", "Betmgm", "Brigit",
    "Burger King", "Cvs", "Chevron", "Chick Fil A", "Chumba Casino",
    "Circle K", "Cleo Ai", "Dave", "Dollar General", "Dollar Tree",
    "Doordash", "Draftkings", "Dunkin", "Earnin", "Empower",
    "Family Dollar", "Fanduel", "Fanduel Sportsbook", "Floatme", "Klarna",
    "Klover App", "Kroger", "Lyft", "Mcdonalds", "Moneylion", "Netflix",
    "Publix", "Quiktrip", "Sezzle", "Shell", "Sony Playstation",
    "Speedway", "Starbucks", "Sunoco", "T Mobile", "Taco Bell", "Target",
    "Uber", "Uber Eats", "Walgreens", "Walmart", "Wawa", "Wendys",
    "Zip Co",
]

# Spending categories used by the "Category Propensity" attribute family.
CATEGORIES = [
    "Atm", "Airlines And Aviation Services", "Arts And Crafts",
    "Arts And Entertainment", "Automotive", "Beauty Products", "Billpay",
    "Bookstores", "Business Services", "Car Service",
    "Clothing And Accessories", "Computers And Electronics",
    "Convenience Stores", "Credit", "Credit Card", "Debit",
    "Department Stores", "Deposit", "Digital Purchase", "Discount Stores",
    "Education", "Entertainment", "Financial", "Food And Beverage",
    "Food And Beverage Store", "Gas Stations", "Gift And Novelty",
    "Government Departments And Agencies", "Gyms And Fitness Centers",
    "Healthcare Services", "Insufficient Funds", "Insurance",
    "Jewelry And Watches", "Keep The Change Savings Program", "Lodging",
    "Organizations And Associations", "Overdraft", "Parking",
    "Personal Care", "Pharmacies", "Public Transportation Services",
    "Religious", "Rent", "Restaurants", "Shipping And Freight",
    "Sporting Goods", "Subscription", "Supermarkets And Groceries",
    "Taxi", "Telecommunication Services", "Third Party", "Utilities",
    "Withdrawal",
]

DEMOGRAPHIC_ATTRIBUTES = [
    "Age",
    "Gender",
    "Household Income",
    "Marital Status",
    "Occupation",
    "Political Affiliation And Voting",
]

available_datasets = [
    "Brand Propensity Technology",
    "Brand Propensity Quick Service Restaurant (QSR)",
    "Category Propensity Personal Care & Lifestyle",
    "Customer Segmentation Doordash",
    "Customer Segmentation Uber",
    "Customer Segmentation Walgreens",
    "Customer Segmentation Walmart",
    "Demographics",
]

default_dataset = "Demographics"

# Order matters: it mirrors the original hand-written list, so the positions
# shown in the attributes dropdown are unchanged.
available_attributes = (
    DEMOGRAPHIC_ATTRIBUTES
    + [f"Brand Propensity {brand}" for brand in BRANDS]
    + [f"Recency {brand}" for brand in BRANDS]
    + [f"Monetary {brand}" for brand in BRANDS]
    + [f"Frequency {brand}" for brand in BRANDS]
    + [f"Category Propensity {category}" for category in CATEGORIES]
    + [f"LTV {brand}" for brand in BRANDS]
    + [f"Share Wallet {brand}" for brand in BRANDS]
)

# Pre-select the first seven brand-propensity attributes, as before.
default_attributes = [f"Brand Propensity {brand}" for brand in BRANDS[:7]]

# Canned sample data keyed by the normalized dataset name (lowercase,
# spaces replaced with underscores).  Each value is passed straight to the
# pandas DataFrame constructor.
# FIX: the Doordash recency column was previously "recency_doorDash"; every
# other column is lower_snake_case and the API payload lowercases attribute
# names, so the mixed-case spelling could never be matched.
_SAMPLE_DATA = {
    "demographics": {
        "age": [
            "65-74", "25-34", "65-74", "65-74", "65-74", "75+", "55-64",
            "75+", "65-74", "25-34", "25-34", "75+", "55-64", "65-74",
            "65-74", "75+", "75+", "55-64", "35-44",
        ],
        "gender": [
            "female", "male", "female", "female", "female", "female",
            "male", "female", "male", "female", "female", "female",
            "male", "female", "male", "male", "female", "female", "male",
        ],
        "occupation": [
            "professional_or_technical", "business_owner", "management",
            "management", "clerical_service_worker", "retired",
            "business_owner", "management", "business_owner",
            "business_owner", "management", "professional_or_technical",
            "management", "business_owner", "contractors", "retired",
            "professional_or_technical", "business_owner", "contractors",
        ],
        "household_income": [
            "$75k-$99k", "$100k-$149k", "$30k-$39k", "$75k-$99k",
            "$20k-$29k", "$30k-$39k", "$20k-$29k", "$75k-$99k",
            "$40k-$49k", "$100k-$149k", "$100k-$149k", "$100k-$149k",
            "$100k-$149k", "$50k-$74k", "$30k-$39k", "$40k-$49k",
            "$50k-$74k", "$100k-$149k", "$20k-$29k",
        ],
    },
    "customer_segmentation_doordash": {
        "monetary_doordash": [
            1, 3, 2, 2, 1, 1, 1, 2, 4, 4, 2, 2, 2, 1, 1, 4, 4, 1, 1,
        ],
        "frequency_doordash": [
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 1, 1,
        ],
        "recency_doordash": [
            4, 1, 3, 3, 2, 1, 3, 5, 5, 5, 5, 5, 5, 4, 1, 1, 1, 2, 2,
        ],
    },
    "category_propensity_personal_care_&_lifestyle": {
        "category_propensity_beauty_products": [
            3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
        ],
        "category_propensity_personal_care": [
            2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
        ],
        "category_propensity_gyms_and_fitness_centers": [
            3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        ],
        "category_propensity_pharmacies": [
            4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 4, 4,
        ],
    },
    "brand_propensity_quick_service_restaurant_(qsr)": {
        "brand_propensity_burger_king": [
            2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 1, 1, 1,
        ],
        "brand_propensity_chick_fil_a": [
            1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 4, 5, 5,
        ],
        "brand_propensity_mcdonalds": [
            2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 1, 4, 4,
        ],
        "brand_propensity_taco_bell": [
            2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 5, 5, 5, 5, 2, 1, 1,
        ],
    },
    "brand_propensity_technology": {
        "brand_propensity_apple": [
            4, 4, 4, 2, 2, 2, 2, 2, 2, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2,
        ],
        "brand_propensity_amazon": [
            2, 2, 2, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
        ],
        "brand_propensity_sony_playstation": [
            2, 2, 2, 3, 3, 3, 3, 3, 5, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1,
        ],
        "brand_propensity_netflix": [
            2, 2, 2, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
        ],
    },
    "customer_segmentation_uber": {
        "monetary_uber": [
            2, 2, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
        ],
        "frequency_uber": [
            2, 2, 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
        ],
        "recency_uber": [
            5, 5, 1, 4, 1, 4, 1, 1, 5, 5, 3, 3, 3, 2, 3, 2, 3, 1, 4,
        ],
    },
    "customer_segmentation_walgreens": {
        "monetary_walgreens": [
            4, 3, 2, 2, 3, 3, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1,
        ],
        "frequency_walgreens": [
            4, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 5, 5, 5, 5, 5, 1,
        ],
        "recency_walgreens": [
            1, 4, 4, 4, 5, 5, 5, 1, 4, 4, 1, 3, 3, 1, 1, 1, 1, 1, 4,
        ],
    },
    "customer_segmentation_walmart": {
        "monetary_walmart": [
            1, 1, 1, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        ],
        "frequency_walmart": [
            1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        ],
        "recency_walmart": [
            1, 1, 1, 3, 2, 2, 2, 1, 3, 3, 3, 5, 5, 5, 5, 5, 4, 1, 2,
        ],
    },
}


def dataframe_to_code(df):
    """Render the first 4 rows of *df* as a ``pd.DataFrame(...)`` source snippet.

    Used to build the copy-pastable code example shown in the UI.
    """
    df = df.head(4)
    dict_entries = []
    for col in df.columns.tolist():
        values_list = repr(df[col].tolist())
        dict_entries.append(f"    '{col}': {values_list}")
    dict_string = ",\n".join(dict_entries)
    return f"df = pd.DataFrame(\n    {{\n{dict_string}\n    }}\n)"


def generate_code_example(dataset, attributes):
    """Build the Python snippet shown in the "Code Example" accordion.

    Parameters:
    - dataset (str): Display name of the selected dataset.
    - attributes (list of str): Display names of the attributes to predict.

    Returns:
    - str: A self-contained code example using the fantix client.
    """
    attributes_with_types = ", \n".join(
        f"\t\tfantix.type.{attr.upper().replace(' ', '_')}" for attr in attributes
    )
    dataframe_code = dataframe_to_code(load_dataset(dataset))
    # NOTE: fixed typo in the example — was fantix.type.MARITAL_SATUS.
    code = f"""
import fantix
import pandas as pd

client = fantix.Client(api_key="YOUR_API_KEY")

{dataframe_code}

response = client.predict(
    df=df,
    columns=[
        fantix.type.INCOME,
        fantix.type.AGE,
        fantix.type.MARITAL_STATUS,
        fantix.type.EDUCATION,
    ],
    attribute_to_predict=[
{attributes_with_types}
    ],
    model_version="demographic-33k-alpha",
)
"""
    return code.strip()


def load_dataset(dataset):
    """Return the canned sample DataFrame for *dataset*.

    Parameters:
    - dataset (str): Display name, e.g. "Customer Segmentation Uber".

    Returns:
    - pd.DataFrame: The sample rows, or an empty DataFrame for unknown names.
    """
    formated_dataset_name = dataset.lower().replace(" ", "_")
    print(f"Loading dataset: {formated_dataset_name}")
    data = _SAMPLE_DATA.get(formated_dataset_name)
    if data is None:
        # Unknown dataset name: show an empty preview rather than erroring.
        return pd.DataFrame()
    return pd.DataFrame(data)


def predict(dataset, attributes, access_token):
    """
    Makes a prediction using an external API call and calculates the performance.

    Parameters:
    - dataset (pd.DataFrame): The input data for prediction.
    - attributes (list): The attributes to predict (display names).
    - access_token (str): The access token for API authentication.

    Returns:
    - tuple: A message about the prediction and the prediction results as a
      DataFrame (empty on failure).  Successful results are also written to
      "prediction.csv" for the download button.
    """
    api_url = "https://rb3mw988lz88cvpz.us-east-1.aws.endpoints.huggingface.cloud"
    headers = {
        "Accept": "application/json",
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json",
    }
    payload = {
        "inputs": [
            {
                "input_data": dataset.to_dict(orient="records"),
                # The endpoint expects snake_case attribute identifiers.
                "attributes_to_predict": [
                    attribute.lower().replace(" ", "_") for attribute in attributes
                ],
            }
        ],
    }
    print(payload)

    start_time = time.time()
    # FIX: a timeout keeps a hung endpoint from blocking the UI forever.
    response = requests.post(api_url, headers=headers, json=payload, timeout=60)
    elapsed_time = time.time() - start_time

    if response.status_code == 200:
        prediction_results = pd.DataFrame(response.json())
        predictions_count = len(prediction_results)
        # Demo-only: the displayed accuracy is simulated, not measured.
        accuracy = random.uniform(0.85, 0.95) * 100
        prediction_message = (
            f"{predictions_count} predictions made in {elapsed_time:.2f} seconds "
            f"with an accuracy of {accuracy:.2f}%"
        )
        prediction_results.to_csv("prediction.csv", index=False)
    else:
        prediction_message = "Failed to make predictions."
        prediction_results = pd.DataFrame([])

    return prediction_message, prediction_results


def load_dataset_and_predict(dataset, attributes, access_token):
    """Load the sample dataset, optionally call the API, and build the code example.

    Returns a 3-tuple matching the Gradio outputs: (DataFrame to display,
    status message, code example string).  Without an access token the raw
    dataset is shown and prediction is skipped.
    """
    loaded_data = load_dataset(dataset)
    code_example = generate_code_example(dataset, attributes)
    if access_token:
        prediction_message, prediction_results = predict(
            loaded_data, attributes, access_token
        )
        return prediction_results, prediction_message, code_example
    prediction_message = "No access token provided, prediction skipped."
    return loaded_data, prediction_message, code_example


theme = gr.themes.Default().set(
    loader_color="#505AE7",
    slider_color="#505AE7",
    button_primary_background_fill="#505AE7",
    button_primary_background_fill_hover="#939AFF",
)

with gr.Blocks(theme=theme) as demo:
    gr.Markdown("### Authenticate")
    access_token = gr.Textbox(
        type="password",
        label="Access Token",
        placeholder="Enter your access token here.",
    )
    with gr.Row():
        with gr.Column():
            gr.Markdown("### Select Dataset and Attributes")
            selected_dataset = gr.Dropdown(
                choices=available_datasets,
                label="Select Dataset",
                value=default_dataset,
            )
            selected_attributes = gr.Dropdown(
                choices=available_attributes,
                label="Select Attributes",
                info="You can select multiple attributes.",
                multiselect=True,
                value=default_attributes,
            )
    gr.Markdown("### Dataset Preview")
    dataset_preview = gr.Dataframe()
    prediction_label = gr.Markdown("")
    with gr.Accordion("Code Example", open=False):
        code_example = gr.Code(language="python")
    predict_button = gr.Button("Predict Attributes")
    download_button = gr.DownloadButton(
        label="Download Prediction", value="prediction.csv"
    )

    # Changing the dataset refreshes only the preview table.
    selected_dataset.change(
        fn=load_dataset,
        inputs=[selected_dataset],
        outputs=dataset_preview,
    )
    # Changing the attribute selection refreshes only the code example.
    selected_attributes.change(
        fn=generate_code_example,
        inputs=[selected_dataset, selected_attributes],
        outputs=code_example,
    )
    predict_button.click(
        fn=load_dataset_and_predict,
        inputs=[selected_dataset, selected_attributes, access_token],
        outputs=[dataset_preview, prediction_label, code_example],
    )
    # Populate the page on first load with the default selections.
    demo.load(
        fn=load_dataset_and_predict,
        inputs=[selected_dataset, selected_attributes, access_token],
        outputs=[dataset_preview, prediction_label, code_example],
    )

demo.launch(allowed_paths=["prediction.csv"])