import csv
import json
import os
import random
import re
import sys

import praw

with open(".env", "r", encoding="utf-8") as env_file:
    env = json.load(env_file)

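# Read-only PRAW client; the .env JSON is expected to hold app_id, secret, and user_agent.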
reddit = praw.Reddit(
    client_id=env["app_id"],
    client_secret=env["secret"],
    user_agent=env["user_agent"],
)


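# Build one dataset row from a single "which outfit" post: flair, title, selftext,
# the outfits the top comments favour, and a couple of distractor images.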
def main(post_id):
    post = reddit.submission(id=post_id)
    # Resolve "load more comments" stubs, then rank top-level comments by score.
    post.comments.replace_more(limit=0)
    sorted_comments = sorted(post.comments, key=lambda c: c.score, reverse=True)

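    # Map the post flair to a one-letter gender code; any other flair is rejected.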
    if post.link_flair_text is None:
        raise Exception("Must be a gender or non-binary fashion flair post")
    elif "Men's" in post.link_flair_text:
        gender = "M"
    elif "Women's" in post.link_flair_text or "Ladies'" in post.link_flair_text:
        gender = "F"
    elif "Non Binary" in post.link_flair_text:
        gender = "X"
    else:
        raise Exception("Must be a gender or non-binary fashion flair post")

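    # One CSV row per post; "images" is filled in below, chosen outfits first.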
    data = {
        "post_id": post_id,
        "gender_flair": gender,
        "title": post.title,
        "selftext": post.selftext,
        "images": [],
    }

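    # Collect the source ('s') URL of every gallery image; a post without a
    # gallery cannot yield a multi-image comparison, so it is rejected.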
    image_urls = []
    # Non-gallery submissions may not expose is_gallery, so default to False.
    if getattr(post, "is_gallery", False):
        metadata = post.media_metadata
        for item in post.gallery_data['items']:
            # Gallery URLs arrive HTML-escaped; turn &amp; back into &.
            url = metadata[item['media_id']]['s']['u'].replace("&amp;", "&")
            image_urls.append(url)
    else:
        raise Exception("Need multiple images for there to be a multimodal eval question")

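    # Walk the comments from most to least upvoted, reading each as a vote for a
    # single outfit number, until two distinct leading choices emerge.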
    lead_choices = {}
    first_choice = None
    second_choice = None
    for c in sorted_comments:
        # Normalise ordinal words so "second" and "last" read as digits.
        text = (
            c.body.strip().lower()
            .replace('first', '1st')
            .replace('second', '2nd')
            .replace('third', '3rd')
            .replace('fourth', '4th')
            .replace('fifth', '5th')
        )
        text = text.replace('last', str(len(image_urls)))
        selections = re.findall(r'\d', text)
        if len(selections) == 0:
            print("Skipped comment without digit")
            continue
        elif len(set(selections)) > 1:
            print(text)
            raise Exception("Encountered a top comment with multiple digits, complex")
        selection = int(selections[0])
        if selection not in lead_choices:
            print(f"Found comment with {c.score} votes: {text}")
            lead_choices[selection] = c.score
            data["images"].append(image_urls[selection - 1])
            if first_choice is None:
                first_choice = selection
            elif second_choice is None:
                second_choice = selection
                break
        else:
            print(f"Found repeat comment with {c.score} votes")
            lead_choices[selection] += c.score
            if lead_choices[selection] > 100:
                print("Overwhelming support")
                # A runaway favourite: use an adjacent outfit as the second option.
                if selection == 1:
                    second_choice = 2
                else:
                    second_choice = selection - 1
                lead_choices[second_choice] = 1
                data["images"].append(image_urls[second_choice - 1])
                break
    if len(lead_choices) != 2:
        raise Exception("Did not find two distinct comments with single outfit suggestions")

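    # Record vote totals, then pad the image list with up to two unchosen gallery
    # images to serve as distractors.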
    data["firstChoiceVotes"] = lead_choices[first_choice]
    data["secondChoiceVotes"] = lead_choices[second_choice]
    if len(image_urls) > 2:
        print("We are assuming that there is one image of each outfit")
        used_set = set(image_urls)
        remaining = [img for img in used_set if img not in data["images"]]
        extra_images = random.sample(remaining, min(len(remaining), 2))
        data["images"] += extra_images

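    # Column layout for dataset.csv; rows are appended and the header is written
    # only when the file does not exist yet.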
    fieldnames = [
        "post_id",
        "gender_flair",
        "title",
        "selftext",
        "images",
        "firstChoiceVotes",
        "secondChoiceVotes",
    ]

    filename = "dataset.csv"
    file_exists = os.path.isfile(filename)
    with open(filename, mode="a", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if not file_exists:
            writer.writeheader()
        row = data.copy()
        # Flatten the image URL list into a single comma-separated cell.
        row["images"] = ",".join(data["images"])
        writer.writerow(row)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: python outfit_scraper.py <reddit_post_id>")
        sys.exit(1)
    main(sys.argv[1])