|
import requests |
|
from bs4 import BeautifulSoup |
|
import pandas as pd |
|
import time |
|
import json |
|
import re |
|
import logging |
|
from urllib.parse import urlparse |
|
import fastavro |
|
import msgpack |
|
import os |
|
from datasets import Dataset |
|
|
|
|
|
def scrape_data(url): |
|
response = requests.get(url) |
|
soup = BeautifulSoup(response.text, 'html.parser') |
|
|
|
|
|
data = [] |
|
for item in soup.find_all('div', class_='gpu-item'): |
|
gpu_info = { |
|
'gpu_name': item.find('h2').text, |
|
'architecture': item.find('span', class_='architecture').text, |
|
'memory_size': item.find('span', class_='memory-size').text, |
|
|
|
} |
|
data.append(gpu_info) |
|
|
|
|
|
return Dataset.from_list(data) |
|
|
|
|
|
import pyarrow as pa |
|
import pyarrow.parquet as pq |
|
import fastavro |
|
import h5py |
|
import sqlite3 |
|
import xml.etree.ElementTree as ET |
|
import yaml |
|
import pickle |
|
from scipy.io import savemat |
|
|
|
|
|
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') |
|
logger = logging.getLogger(__name__) |
|
|
|
|
|
try: |
|
from selenium import webdriver |
|
from selenium.webdriver.chrome.options import Options |
|
from selenium.webdriver.chrome.service import Service |
|
from selenium.webdriver.common.by import By |
|
from selenium.webdriver.support.ui import WebDriverWait |
|
from selenium.webdriver.support import expected_conditions as EC |
|
from webdriver_manager.chrome import ChromeDriverManager |
|
SELENIUM_AVAILABLE = True |
|
logger.info("Selenium is available and will be used for JavaScript-heavy sites") |
|
except ImportError: |
|
SELENIUM_AVAILABLE = False |
|
logger.warning("Selenium not available. Install with: pip install selenium webdriver-manager") |
|
|
|
class NvidiaGpuScraper: |
|
def __init__(self, use_selenium=True): |
|
self.use_selenium = use_selenium and SELENIUM_AVAILABLE |
|
self.driver = self._setup_driver() if self.use_selenium else None |
|
|
|
def _setup_driver(self): |
|
"""Set up and return a Selenium WebDriver if available""" |
|
if not SELENIUM_AVAILABLE: |
|
return None |
|
|
|
try: |
|
options = Options() |
|
options.add_argument('--headless') |
|
options.add_argument('--no-sandbox') |
|
options.add_argument('--disable-dev-shm-usage') |
|
options.add_argument('--disable-gpu') |
|
options.add_argument("--window-size=1920,1080") |
|
options.add_argument("--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36") |
|
|
|
service = Service(ChromeDriverManager().install()) |
|
driver = webdriver.Chrome(service=service, options=options) |
|
return driver |
|
except Exception as e: |
|
logger.error(f"Failed to initialize Selenium: {e}") |
|
return None |
|
|
|
def _fetch_with_selenium(self, url): |
|
"""Fetch page content using Selenium for JavaScript-heavy sites""" |
|
if self.driver is None: |
|
return None |
|
|
|
try: |
|
logger.info(f"Fetching with Selenium: {url}") |
|
self.driver.get(url) |
|
|
|
WebDriverWait(self.driver, 20).until( |
|
EC.presence_of_element_located((By.TAG_NAME, "body")) |
|
) |
|
|
|
|
|
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight/2);") |
|
time.sleep(1) |
|
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);") |
|
time.sleep(2) |
|
|
|
|
|
try: |
|
see_more_buttons = self.driver.find_elements(By.XPATH, |
|
"//button[contains(text(), 'See more') or contains(text(), 'specifications') or contains(text(), 'specs')]") |
|
for button in see_more_buttons: |
|
self.driver.execute_script("arguments[0].click();", button) |
|
time.sleep(1) |
|
except Exception as e: |
|
logger.warning(f"Could not expand specification sections: {e}") |
|
|
|
|
|
page_source = self.driver.page_source |
|
return BeautifulSoup(page_source, 'html.parser') |
|
except Exception as e: |
|
logger.error(f"Selenium error for {url}: {e}") |
|
return None |
|
|
|
def _fetch_with_requests(self, url): |
|
"""Fetch page content using requests library""" |
|
headers = { |
|
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36', |
|
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', |
|
'Accept-Language': 'en-US,en;q=0.5', |
|
'Referer': 'https://www.google.com/', |
|
'DNT': '1', |
|
'Connection': 'keep-alive', |
|
'Upgrade-Insecure-Requests': '1', |
|
'Cache-Control': 'max-age=0', |
|
} |
|
|
|
for attempt in range(3): |
|
try: |
|
logger.info(f"Fetching with requests: {url}") |
|
response = requests.get(url, timeout=30, headers=headers) |
|
response.raise_for_status() |
|
return BeautifulSoup(response.content, 'html.parser') |
|
except requests.exceptions.RequestException as e: |
|
wait_time = 2 ** attempt |
|
logger.warning(f"Request error for {url}: {e}. Retrying in {wait_time} seconds...") |
|
time.sleep(wait_time) |
|
|
|
return None |
|
|
|
def fetch_page(self, url): |
|
if not url: |
|
raise ValueError("The URL provided is empty.") |
|
"""Fetch page content, trying Selenium first if available""" |
|
if self.use_selenium: |
|
soup = self._fetch_with_selenium(url) |
|
if soup: |
|
return soup |
|
|
|
|
|
return self._fetch_with_requests(url) |
|
|
|
def extract_gpu_specs(self, soup, url): |
|
"""Extract GPU specifications from NVIDIA product pages""" |
|
specs = { |
|
'model': 'N/A', |
|
'gpu_name': 'N/A', |
|
'architecture': 'N/A', |
|
'boost_clock': 'N/A', |
|
'memory_size': 'N/A', |
|
'memory_type': 'N/A', |
|
'memory_interface': 'N/A', |
|
'tdp': 'N/A', |
|
'cuda_cores': 'N/A', |
|
'tensor_cores': 'N/A', |
|
'rt_cores': 'N/A', |
|
'process_node': 'N/A', |
|
'transistor_count': 'N/A', |
|
'price': 'N/A', |
|
'release_date': 'N/A', |
|
'url': url, |
|
} |
|
|
|
try: |
|
|
|
for selector in ['h1', '.product-title', '.product-name', '.prod-title']: |
|
title_element = soup.select_one(selector) |
|
if title_element and title_element.text.strip(): |
|
specs['model'] = title_element.text.strip() |
|
|
|
gpu_match = re.search(r'(GTX|RTX|RTX\s+SUPER|GTX\s+SUPER)\s+(\d{4}\s*(?:Ti|SUPER)?)', |
|
specs['model'], re.IGNORECASE) |
|
if gpu_match: |
|
specs['gpu_name'] = f"{gpu_match.group(1)} {gpu_match.group(2)}".strip() |
|
break |
|
|
|
|
|
field_mappings = { |
|
'architecture': ['gpu architecture', 'architecture', 'nvidia architecture'], |
|
'boost_clock': ['boost clock', 'gpu boost clock', 'clock speed', 'boost'], |
|
'memory_size': ['memory size', 'standard memory config', 'memory configuration', 'video memory'], |
|
'memory_type': ['memory type', 'memory spec', 'standard memory'], |
|
'memory_interface': ['memory interface', 'memory bus', 'interface width', 'bit width'], |
|
'tdp': ['graphics card power', 'tdp', 'total graphics power', 'power consumption', 'tgp', 'maximum power'], |
|
'cuda_cores': ['cuda cores', 'cuda', 'nvidia cuda cores'], |
|
'tensor_cores': ['tensor cores', 'tensor', 'ai cores'], |
|
'rt_cores': ['rt cores', 'ray tracing cores', 'rt'], |
|
'process_node': ['process', 'fabrication process', 'manufacturing process', 'fab'], |
|
'transistor_count': ['transistor', 'transistor count', 'number of transistors'], |
|
'price': ['price', 'msrp', 'suggested price', 'starting at'], |
|
'release_date': ['release date', 'availability', 'launch date', 'available'] |
|
} |
|
|
|
|
|
spec_sections = soup.select('.specs-section, .tech-specs, .product-specs, .specs, .spec-table, .spec, [class*="spec"]') |
|
|
|
|
|
if not spec_sections: |
|
spec_sections = [soup] |
|
|
|
for section in spec_sections: |
|
|
|
self._extract_from_tables_and_pairs(section, specs, field_mappings) |
|
|
|
|
|
self._extract_from_text_patterns(section, specs) |
|
|
|
|
|
self._extract_from_spec_headings(soup, specs, field_mappings) |
|
|
|
|
|
self._extract_from_json_ld(soup, specs) |
|
|
|
|
|
self._clean_specs(specs) |
|
|
|
logger.info(f"Extracted NVIDIA GPU specs: {specs}") |
|
return specs |
|
|
|
except Exception as e: |
|
logger.error(f"Error extracting GPU specs: {e}") |
|
return specs |
|
|
|
def _extract_from_tables_and_pairs(self, section, specs, field_mappings): |
|
"""Extract specs from table-like structures or label-value pairs""" |
|
|
|
rows = section.select('tr, .spec-row, .specs-row, [class*="row"]') |
|
for row in rows: |
|
cells = row.select('th, td, .spec-label, .spec-value, .specs-label, .specs-value') |
|
if len(cells) >= 2: |
|
header = cells[0].text.strip().lower() |
|
value = cells[1].text.strip() |
|
|
|
|
|
for field, possible_headers in field_mappings.items(): |
|
if any(h in header for h in possible_headers): |
|
specs[field] = value |
|
|
|
|
|
terms = section.select('dt, .term, .specs-term') |
|
for term in terms: |
|
header = term.text.strip().lower() |
|
value_el = term.find_next_sibling(['dd', '.definition', '.specs-definition']) |
|
if value_el: |
|
value = value_el.text.strip() |
|
|
|
|
|
for field, possible_headers in field_mappings.items(): |
|
if any(h in header for h in possible_headers): |
|
specs[field] = value |
|
|
|
|
|
labels = section.select('.specs-label, .spec-label, .specs-name, .label, [class*="label"]') |
|
for label in labels: |
|
header = label.text.strip().lower() |
|
|
|
value_el = label.find_next_sibling('.specs-value, .spec-value, .specs-data, .value, [class*="value"]') |
|
if value_el: |
|
value = value_el.text.strip() |
|
|
|
|
|
for field, possible_headers in field_mappings.items(): |
|
if any(h in header for h in possible_headers): |
|
specs[field] = value |
|
|
|
def _extract_from_text_patterns(self, section, specs): |
|
"""Extract specs using regex patterns in the page text""" |
|
text = section.get_text(' ', strip=True) |
|
|
|
|
|
cuda_matches = re.search(r'(\d[\d,]+)\s*(?:nvidia)?\s*cuda\s*cores', text, re.IGNORECASE) |
|
if cuda_matches and specs['cuda_cores'] == 'N/A': |
|
specs['cuda_cores'] = cuda_matches.group(1) |
|
|
|
|
|
tensor_matches = re.search(r'(\d+)\s*(?:nvidia)?\s*tensor\s*cores', text, re.IGNORECASE) |
|
if tensor_matches and specs['tensor_cores'] == 'N/A': |
|
specs['tensor_cores'] = tensor_matches.group(1) |
|
|
|
|
|
rt_matches = re.search(r'(\d+)\s*(?:nvidia)?\s*rt\s*cores', text, re.IGNORECASE) |
|
if rt_matches and specs['rt_cores'] == 'N/A': |
|
specs['rt_cores'] = rt_matches.group(1) |
|
|
|
|
|
mem_matches = re.search(r'(\d+)\s*GB\s*(?:G?DDR\d+[X]?)', text, re.IGNORECASE) |
|
if mem_matches and specs['memory_size'] == 'N/A': |
|
specs['memory_size'] = f"{mem_matches.group(1)} GB" |
|
if specs['memory_type'] == 'N/A': |
|
specs['memory_type'] = mem_matches.group(2) |
|
|
|
|
|
clock_matches = re.search(r'boost\s*clock\s*(?:up\s*to)?\s*:?\s*([\d.]+)\s*(?:MHz|GHz)', text, re.IGNORECASE) |
|
if clock_matches and specs['boost_clock'] == 'N/A': |
|
value = clock_matches.group(1) |
|
unit = 'GHz' if float(value) < 100 else 'MHz' |
|
specs['boost_clock'] = f"{value} {unit}" |
|
|
|
|
|
interface_matches = re.search(r'(\d+)[\s-]*bit(?:\s*memory)?\s*(?:interface|bus)', text, re.IGNORECASE) |
|
if interface_matches and specs['memory_interface'] == 'N/A': |
|
specs['memory_interface'] = f"{interface_matches.group(1)}-bit" |
|
|
|
def _extract_from_spec_headings(self, soup, specs, field_mappings): |
|
"""Extract specs from headings and their adjacent content""" |
|
for field, terms in field_mappings.items(): |
|
if specs[field] != 'N/A': |
|
continue |
|
|
|
for term in terms: |
|
|
|
headers = soup.select(f'h1:contains("{term}"), h2:contains("{term}"), h3:contains("{term}"), h4:contains("{term}"), h5:contains("{term}")') |
|
|
|
for header in headers: |
|
|
|
value_el = header.find_next() |
|
if value_el: |
|
specs[field] = value_el.text.strip() |
|
break |
|
|
|
def _extract_from_json_ld(self, soup, specs): |
|
"""Extract specs from JSON-LD structured data if available""" |
|
for script in soup.select('script[type="application/ld+json"]'): |
|
try: |
|
data = json.loads(script.string) |
|
|
|
|
|
if 'name' in data and specs['model'] == 'N/A': |
|
specs['model'] = data['name'] |
|
|
|
|
|
if 'additionalProperty' in data: |
|
for prop in data['additionalProperty']: |
|
name = prop.get('name', '').lower() |
|
value = prop.get('value', '') |
|
|
|
if 'cuda' in name and specs['cuda_cores'] == 'N/A': |
|
specs['cuda_cores'] = value |
|
elif 'clock' in name and 'boost' in name and specs['boost_clock'] == 'N/A': |
|
specs['boost_clock'] = value |
|
elif 'memory' in name and 'size' in name and specs['memory_size'] == 'N/A': |
|
specs['memory_size'] = value |
|
|
|
|
|
|
|
if 'offers' in data and specs['price'] == 'N/A': |
|
if isinstance(data['offers'], list) and len(data['offers']) > 0: |
|
specs['price'] = data['offers'][0].get('price', 'N/A') |
|
elif isinstance(data['offers'], dict): |
|
specs['price'] = data['offers'].get('price', 'N/A') |
|
except: |
|
pass |
|
|
|
def _clean_specs(self, specs): |
|
"""Clean and standardize the extracted specs""" |
|
|
|
if specs['cuda_cores'] != 'N/A': |
|
specs['cuda_cores'] = specs['cuda_cores'].replace(',', '') |
|
|
|
|
|
if specs['memory_size'] != 'N/A' and 'GB' not in specs['memory_size']: |
|
if specs['memory_size'].isdigit(): |
|
specs['memory_size'] = f"{specs['memory_size']} GB" |
|
|
|
|
|
if specs['boost_clock'] != 'N/A': |
|
|
|
if re.match(r'^\d+(\.\d+)?$', specs['boost_clock']): |
|
value = float(specs['boost_clock']) |
|
if value > 100: |
|
specs['boost_clock'] = f"{value} MHz" |
|
else: |
|
specs['boost_clock'] = f"{value} GHz" |
|
|
|
def scrape_gpu(self, url): |
|
if not url: |
|
raise ValueError("The URL provided is empty.") |
|
"""Scrape a single GPU product page""" |
|
soup = self.fetch_page(url) |
|
if not soup: |
|
return { |
|
'model': 'Failed to fetch', |
|
'url': url |
|
} |
|
|
|
return self.extract_gpu_specs(soup, url) |
|
|
|
def scrape_multiple_gpus(self, urls): |
|
if not urls: |
|
raise ValueError("The list of URLs is empty.") |
|
"""Scrape multiple GPU product pages""" |
|
results = [] |
|
|
|
for url in urls: |
|
try: |
|
specs = self.scrape_gpu(url) |
|
results.append(specs) |
|
|
|
time.sleep(2) |
|
except Exception as e: |
|
logger.error(f"Error processing {url}: {e}") |
|
results.append({ |
|
'model': f"Error: {str(e)[:50]}", |
|
'url': url |
|
}) |
|
|
|
return results |
|
|
|
def cleanup(self): |
|
"""Clean up resources""" |
|
if self.driver: |
|
self.driver.quit() |
|
|
|
|
|
def main(): |
|
|
|
nvidia_urls = [ |
|
"https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/", |
|
"https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/", |
|
"https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/", |
|
"https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/", |
|
"https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/", |
|
"https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/", |
|
"https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/", |
|
] |
|
|
|
|
|
scraper = NvidiaGpuScraper(use_selenium=SELENIUM_AVAILABLE) |
|
|
|
try: |
|
|
|
results = scraper.scrape_multiple_gpus(nvidia_urls) |
|
|
|
|
|
df = pd.DataFrame(results) |
|
df.to_csv('nvidia_gpus.csv', index=False) |
|
df.to_json('nvidia_gpus.json', orient='records', lines=True) |
|
df.to_excel('nvidia_gpus.xlsx', index=False) |
|
|
|
|
|
try: |
|
df.to_parquet('nvidia_gpus.parquet') |
|
except Exception as e: |
|
logger.warning(f"Failed to save as Parquet: {e}") |
|
|
|
try: |
|
|
|
records = df.to_dict(orient='records') |
|
|
|
schema = { |
|
'type': 'record', |
|
'name': 'GPU', |
|
'fields': [ |
|
{'name': col, 'type': ['string', 'null']} for col in df.columns |
|
] |
|
} |
|
|
|
with open('nvidia_gpus.avro', 'wb') as avro_file: |
|
fastavro.writer(avro_file, schema, records) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as Avro: {e}") |
|
|
|
try: |
|
df.to_orc('nvidia_gpus.orc') |
|
except Exception as e: |
|
logger.warning(f"Failed to save as ORC: {e}") |
|
|
|
try: |
|
df.to_hdf('nvidia_gpus.h5', key='df', mode='w') |
|
except Exception as e: |
|
logger.warning(f"Failed to save as HDF5: {e}") |
|
|
|
try: |
|
with sqlite3.connect('nvidia_gpus.db') as conn: |
|
df.to_sql('gpus', conn, if_exists='replace', index=False) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as SQLite: {e}") |
|
|
|
try: |
|
df.to_xml('nvidia_gpus.xml') |
|
except Exception as e: |
|
logger.warning(f"Failed to save as XML: {e}") |
|
|
|
try: |
|
with open('nvidia_gpus.yaml', 'w') as yaml_file: |
|
yaml.dump(df.to_dict(orient='records'), yaml_file) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as YAML: {e}") |
|
|
|
try: |
|
with open('nvidia_gpus.pkl', 'wb') as pickle_file: |
|
pickle.dump(df, pickle_file) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as Pickle: {e}") |
|
|
|
try: |
|
savemat('nvidia_gpus.mat', {'gpus': df.to_dict(orient='records')}) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as MAT: {e}") |
|
|
|
try: |
|
df.to_csv('nvidia_gpus.tsv', sep='\t', index=False) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as TSV: {e}") |
|
|
|
try: |
|
df.to_json('nvidia_gpus.ndjson', orient='records', lines=True) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as NDJSON: {e}") |
|
|
|
try: |
|
df.to_csv('nvidia_gpus.arff', index=False) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as ARFF: {e}") |
|
|
|
try: |
|
|
|
data = df.to_dict(orient='records') |
|
|
|
with open('nvidia_gpus.msgpack', 'wb') as msgpack_file: |
|
msgpack.pack(data, msgpack_file) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as MessagePack: {e}") |
|
|
|
try: |
|
df.to_pickle('nvidia_gpus.protobuf') |
|
except Exception as e: |
|
logger.warning(f"Failed to save as ProtoBuf: {e}") |
|
|
|
try: |
|
df.to_csv('nvidia_gpus.dta', index=False) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as DTA: {e}") |
|
|
|
try: |
|
df.to_csv('nvidia_gpus.sas', index=False) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as SAS: {e}") |
|
|
|
try: |
|
df.to_csv('nvidia_gpus.spss', index=False) |
|
except Exception as e: |
|
logger.warning(f"Failed to save as SPSS: {e}") |
|
|
|
print("\nResults:") |
|
print("\nResults:") |
|
print(df) |
|
|
|
|
|
successful = sum(1 for spec in results if spec.get('model') not in ['N/A', 'Failed to fetch']) |
|
print(f"\nSummary: Successfully scraped {successful} out of {len(results)} NVIDIA GPUs") |
|
|
|
return df |
|
|
|
finally: |
|
|
|
scraper.cleanup() |
|
|
|
if __name__ == "__main__": |
|
main() |
|
|