# -*- coding: utf-8 -*-
"""Fake_News Generator_Detector.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1C8ZWvJVZzTCngwpz788EpcT2lrbBWhYa

### 📰 **Project Description: Fake News Generator & Detector using Generative AI and NLP**

This project is an interactive AI-powered tool that lets users generate realistic-looking fake news articles and detect whether a given news text is fake or real using pretrained transformer models. It highlights how language models can both create and combat misinformation.

🧰 **The application is built using:**

1. **Transformers Library (by Hugging Face):** For loading and using the pretrained models: GPT-2 for fake news generation and a fine-tuned BERT model for fake news detection.
2. **Gradio:** For a simple, interactive web-based user interface with tabbed sections for generation and detection.
3. **Google Colab / Python:** For backend development, prototyping, and running the application in a cloud-based notebook environment.

🎯 **Project Objectives**

**1. Generate Fake News Text**
Use GPT-2 to simulate fake news articles from user-provided prompts, for awareness and experimentation.

**2. Detect Fake or Real News**
Use a fine-tuned BERT model (Pulk17/Fake-News-Detection) to classify news content as fake or real.

**3. Provide an Interactive Interface**
Use Gradio to build a user-friendly web interface.

**4. Demonstrate the Dual Use of AI in Misinformation**
Showcase how AI can both create and detect fake news, promoting awareness and responsible AI usage.

**Step 2: Import Required Modules**

Import libraries for model handling, tokenization, and interface building.
"""

# app.py
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import gradio as gr

# Device setup: use the GPU if available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load GPT-2 for fake news generation
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2").to(device)

# Load fine-tuned BERT model for fake news detection
bert_tokenizer = AutoTokenizer.from_pretrained("Pulk17/Fake-News-Detection")
bert_model = AutoModelForSequenceClassification.from_pretrained(
    "Pulk17/Fake-News-Detection"
).to(device)


# Function to generate fake news from a prompt
def generate_fake_news(prompt):
    inputs = gpt2_tokenizer.encode(prompt, return_tensors="pt").to(device)
    outputs = gpt2_model.generate(
        inputs,
        max_length=200,                 # total length (prompt + continuation) in tokens
        num_return_sequences=1,
        no_repeat_ngram_size=2,         # avoid repeating any 2-gram verbatim
        do_sample=True,                 # sample rather than decode greedily
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        pad_token_id=gpt2_tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS to silence the warning
    )
    generated_text = gpt2_tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text


# Function to detect if news is fake or real
def detect_news(text):
    inputs = bert_tokenizer(text, return_tensors="pt", truncation=True, padding=True).to(device)
    with torch.no_grad():
        outputs = bert_model(**inputs)
        logits = outputs.logits
        predicted_class = torch.argmax(logits, dim=1).item()
        confidence = torch.softmax(logits, dim=1)[0][predicted_class].item()
    label = "🟥 Fake News" if predicted_class == 0 else "🟩 Real News"
    return f"{label} (Confidence: {confidence:.2f})"


# Gradio Interface: two tabs, one for generation and one for detection
with gr.Blocks() as demo:
    gr.Markdown("## 📰 Fake News Generator & Detector (GPT-2 + BERT)")

    with gr.Tab("🛠️ Generate Fake News"):
        with gr.Row():
            input_text = gr.Textbox(
                label="Enter a News Headline or Prompt",
                placeholder="e.g. Scientists discover a talking dolphin species near Japan...",
                lines=2,
            )
        generate_btn = gr.Button("Generate")
        output_text = gr.Textbox(label="Generated News Article")
        generate_btn.click(generate_fake_news, inputs=input_text, outputs=output_text)

    with gr.Tab("🔍 Detect Fake or Real"):
        with gr.Row():
            detect_input = gr.Textbox(
                label="Enter a News Article or Statement",
                placeholder="Paste a paragraph to detect if it's fake or real...",
                lines=5,
            )
        detect_btn = gr.Button("Detect")
        detect_output = gr.Textbox(label="Detection Result")
        detect_btn.click(detect_news, inputs=detect_input, outputs=detect_output)

# Launch the app
demo.launch()
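
"""**Optional: Quick Programmatic Check**

A minimal sketch of calling the two pipeline functions directly, without going through
the Gradio UI; useful for confirming that both models load and run on the selected
device. The `sample_prompt` value is purely illustrative. Note that in a plain script
(outside a notebook) `demo.launch()` blocks, so this check would need to run before it.
"""

sample_prompt = "Scientists discover a talking dolphin species near Japan"
generated_article = generate_fake_news(sample_prompt)  # GPT-2 continuation of the prompt
print(generated_article)
print(detect_news(generated_article))                  # predicted label plus softmax confidence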