Niladri Das committed
Commit · 0bfbee7
Parent(s): c44ba13

Add NVIDIA GPU dataset files
- Dockerfile +59 -0
- README.md +135 -3
- nvidia-gpu-dataset +1 -0
- nvidia_gpu_summary_report.csv +5 -0
- nvidia_gpus.arff +8 -0
- nvidia_gpus.avro +0 -0
- nvidia_gpus.csv +8 -0
- nvidia_gpus.db +0 -0
- nvidia_gpus.dta +8 -0
- nvidia_gpus.json +128 -0
- nvidia_gpus.mat +0 -0
- nvidia_gpus.msgpack +3 -0
- nvidia_gpus.ndjson +7 -0
- nvidia_gpus.orc +0 -0
- nvidia_gpus.parquet +3 -0
- nvidia_gpus.pkl +3 -0
- nvidia_gpus.protobuf +0 -0
- nvidia_gpus.sas +8 -0
- nvidia_gpus.spss +8 -0
- nvidia_gpus.tsv +8 -0
- nvidia_gpus.xlsx +0 -0
- nvidia_gpus.xml +136 -0
- nvidia_gpus.yaml +112 -0
- push_nvidia_gpu_dataset.sh +42 -0
- requirements.txt +20 -0
- summary.py +46 -0
- webscraped.py +580 -0
Dockerfile
ADDED
@@ -0,0 +1,59 @@
+# ───────────────────────────────────────
+# 🚀 1️⃣ Multi-Stage Build for Smallest Image
+# ───────────────────────────────────────
+FROM python:3.9-slim AS builder
+
+# Set a fixed working directory
+WORKDIR /app
+
+# Install essential build tools
+RUN apt-get update && apt-get install -y gcc g++ python3-dev libffi-dev && \
+    python3 -m venv venv && \
+    . venv/bin/activate && \
+    pip install --no-cache-dir --upgrade pip setuptools wheel
+
+# Copy only requirements first for better caching
+COPY requirements.txt .
+RUN apt-get install -y gfortran libopenblas-dev && \
+    . venv/bin/activate && \
+    pip install --no-cache-dir -r requirements.txt
+
+# Copy the rest of the app files
+COPY . .
+
+# ───────────────────────────────────────
+# 🚀 2️⃣ Runtime Image - Minimal & Secure
+# ───────────────────────────────────────
+FROM python:3.9-slim
+
+# Set working directory
+WORKDIR /app
+
+# Copy virtual environment from builder stage
+COPY --from=builder /app/venv /app/venv
+
+# Environment Variables for Performance & Security
+ENV PATH="/app/venv/bin:$PATH" \
+    PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    APP_USER=appuser
+
+# Security: Create and use non-root user
+RUN addgroup appgroup && adduser appuser && \
+    adduser appuser appgroup && \
+    chown -R appuser:appgroup /app
+
+USER $APP_USER
+
+# Copy application code (Ensures proper permissions)
+COPY --chown=$APP_USER:appgroup . .
+
+# Debugging commands to check working directory and list files
+RUN pwd && ls -l /app
+
+# Use a health check to ensure the container is running correctly
+HEALTHCHECK --interval=30s --timeout=10s --start-period=5s \
+    CMD python3 -c 'import os; exit(0 if os.path.exists("summary.py") else 1)'
+
+# Use ENTRYPOINT instead of CMD for better runtime control
+ENTRYPOINT ["python3", "summary.py"]
README.md
CHANGED
@@ -1,3 +1,135 @@
-
-
-
+# NVIDIA Project
+
+This project processes scraped data related to NVIDIA GPUs using a Dockerized Python environment. It runs scripts to scrape data (`webscraped.py`) and generate a summary report (`summary.py`).
+
+## Pushing to Hugging Face
+
+To push the dataset to Hugging Face, follow these steps:
+
+1. **Install the Hugging Face Datasets library**:
+   ```bash
+   pip install datasets
+   ```
+
+2. **Prepare your dataset**: Ensure your dataset is in a compatible format (e.g., JSON, CSV).
+
+3. **Upload the dataset**:
+   Use the following command to upload your dataset:
+   ```bash
+   huggingface-cli dataset create --dataset_name bniladridas/nvidia-gpu-dataset --path <path_to_your_dataset>
+   ```
+
+4. **Add metadata**: Optionally, you can add a dataset card to describe your dataset.
+
+5. **Validate your dataset**: After uploading, check the Hugging Face website to ensure your dataset is correctly formatted and accessible.
+
+For more detailed instructions, refer to the [Hugging Face documentation](https://huggingface.co/docs).
+
+To push the dataset to Hugging Face, follow these steps:
+
+1. **Install the Hugging Face Datasets library**:
+   ```bash
+   pip install datasets
+   ```
+
+2. **Prepare your dataset**: Ensure your dataset is in a compatible format (e.g., JSON, CSV).
+
+3. **Upload the dataset**:
+   Use the following command to upload your dataset:
+   ```bash
+   huggingface-cli dataset create --dataset_name <your_dataset_name> --path <path_to_your_dataset>
+   ```
+
+4. **Add metadata**: Optionally, you can add a dataset card to describe your dataset.
+
+5. **Validate your dataset**: After uploading, check the Hugging Face website to ensure your dataset is correctly formatted and accessible.
+
+For more detailed instructions, refer to the [Hugging Face documentation](https://huggingface.co/docs).
+
+
+This project processes scraped data related to NVIDIA GPUs using a Dockerized Python environment. It runs scripts to scrape data (`webscraped.py`) and generate a summary report (`summary.py`), outputting results like `nvidia_gpu_summary_report.csv`.
+
+## Prerequisites
+
+- [Docker](https://docs.docker.com/get-docker/) installed on your system.
+- Basic familiarity with command-line tools.
+
+## Project Setup and Usage
+
+### Building the Docker Image
+
+The entire environment, including dependencies, is managed by Docker—no need for virtual environments like `virtualenv` inside the container. Everything is defined in the `Dockerfile` and `requirements.txt`.
+
+### Directory Structure
+
+```
+nvidia_project/
+├── Dockerfile
+├── requirements.txt
+├── webscraped.py
+├── summary.py
+└── README.md
+```
+
+## Workflow
+
+### 1. Build the Docker Image
+
+Build the container image with all dependencies:
+
+```bash
+docker build -t nvidia_project .
+```
+
+### 2. Run the Container
+
+Launch the container to execute the scripts (`webscraped.py` followed by `summary.py`):
+
+```bash
+docker run --rm -it nvidia_project
+```
+
+- `--rm`: Automatically removes the container after it stops, keeping your system clean.
+- `-it`: Runs interactively with a terminal.
+
+### 3. Access the Output
+
+Copy the generated report from the container to your local machine (adjust the path as needed):
+
+```bash
+docker cp $(docker ps -l -q):/app/nvidia_gpu_summary_report.csv /path/to/your/local/folder
+```
+
+Example:
+
+```bash
+docker cp $(docker ps -l -q):/app/nvidia_gpu_summary_report.csv /Users/niladridas/Desktop/nvidia_doc
+```
+
+## Debugging
+
+If something goes wrong, inspect the container:
+
+1. **Enter the container**:
+   ```bash
+   docker run -it nvidia_project /bin/sh
+   ```
+   Check files with:
+   ```bash
+   ls -l /app
+   ```
+
+2. **View logs**:
+   ```bash
+   docker logs $(docker ps -l -q)
+   ```
+
+## Key Lessons
+
+- **Trust Docker**: No need to activate a virtual environment (e.g., `source .venv/bin/activate`) inside the container—Docker handles isolation.
+- Keep workflows simple and let the container manage dependencies.
+
+## Notes
+
+- Ensure your `Dockerfile` is configured to copy `webscraped.py` and `summary.py` into `/app` and set the entrypoint to run them.
+- Update the local path in the `docker cp` command to match your system.
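The CLI invocation shown in the README above is one route; the upload can also be scripted with the `datasets` library itself. A minimal sketch, not part of the committed files, assuming the exported `nvidia_gpus.csv` sits in the working directory and a token has already been stored via `huggingface-cli login`:

```python
# upload_dataset.py (hypothetical helper, not included in this commit)
from datasets import load_dataset

# Read the scraped CSV into a DatasetDict with a single "train" split
ds = load_dataset("csv", data_files="nvidia_gpus.csv")

# Push the split(s) to the existing dataset repo on the Hub
ds.push_to_hub("bniladridas/nvidia-gpu-dataset")
```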
nvidia-gpu-dataset
ADDED
@@ -0,0 +1 @@
+Subproject commit c44ba136858c056951c2c79bec5ffcb1de739296
nvidia_gpu_summary_report.csv
ADDED
@@ -0,0 +1,5 @@
+model,gpu_name,architecture,boost_clock,memory_size,memory_type,memory_interface,tdp,cuda_cores,tensor_cores,rt_cores,process_node,transistor_count,price,release_date,url
+21,20,16,19,20,20,16,16,20,16,19,17,14,14,14,21
+7,7,2,5,5,3,2,5,11,3,4,2,1,1,1,7
+GeForce RTX 4090,RTX 4090,N/A,Yes,24 GB,Standard Memory Config,N/A,N/A,8.9,N/A,Maximum Digital Resolution (1),Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/
+3,3,10,6,6,12,10,10,4,10,9,9,14,14,14,3
nvidia_gpus.arff
ADDED
@@ -0,0 +1,8 @@
+model,gpu_name,architecture,boost_clock,memory_size,memory_type,memory_interface,tdp,cuda_cores,tensor_cores,rt_cores,process_node,transistor_count,price,release_date,url
+GeForce RTX 4090,RTX 4090,N/A,N/A,24 GB,Standard Memory Config,N/A,N/A,4090,N/A,Maximum Resolution & Refresh Rate (1),Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/
+GeForce RTX 4080 Family,RTX 4080,Ada Lovelace,Yes,16 GB GDDR6X,16 GB GDDR6X,256-bit,320,8.9,4th Generation836 AI TOPS,,Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/
+Game Over,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,Advertising Cookies,N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/
+GeForce RTX 4070 Family,RTX 4070,Ada Lovelace,Yes,16 GB GDDR6X,16 GB GDDR6X,256-bit,285,8.9,4th Generation 706 AI TOPS,,Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/
+GeForce RTX 3090 Family,RTX 3090,N/A,1.86 GHz,24 GB,Standard Memory Config,N/A,N/A,3090,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/
+GeForce RTX 3080 Family,RTX 3080,N/A,1.67 GHz,12 GB,Standard Memory Config,N/A,N/A,3080,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/
+GeForce RTX 3070 Family,RTX 3070,N/A,1.77 GHz,8 GB,Standard Memory Config,N/A,N/A,3070,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/
nvidia_gpus.avro
ADDED
Binary file (2.61 kB)
nvidia_gpus.csv
ADDED
@@ -0,0 +1,8 @@
+model,gpu_name,architecture,boost_clock,memory_size,memory_type,memory_interface,tdp,cuda_cores,tensor_cores,rt_cores,process_node,transistor_count,price,release_date,url
+GeForce RTX 4090,RTX 4090,N/A,N/A,24 GB,Standard Memory Config,N/A,N/A,4090,N/A,Maximum Resolution & Refresh Rate (1),Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/
+GeForce RTX 4080 Family,RTX 4080,Ada Lovelace,Yes,16 GB GDDR6X,16 GB GDDR6X,256-bit,320,8.9,4th Generation836 AI TOPS,,Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/
+Game Over,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,Advertising Cookies,N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/
+GeForce RTX 4070 Family,RTX 4070,Ada Lovelace,Yes,16 GB GDDR6X,16 GB GDDR6X,256-bit,285,8.9,4th Generation 706 AI TOPS,,Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/
+GeForce RTX 3090 Family,RTX 3090,N/A,1.86 GHz,24 GB,Standard Memory Config,N/A,N/A,3090,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/
+GeForce RTX 3080 Family,RTX 3080,N/A,1.67 GHz,12 GB,Standard Memory Config,N/A,N/A,3080,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/
+GeForce RTX 3070 Family,RTX 3070,N/A,1.77 GHz,8 GB,Standard Memory Config,N/A,N/A,3070,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/
nvidia_gpus.db
ADDED
Binary file (8.19 kB)
nvidia_gpus.dta
ADDED
@@ -0,0 +1,8 @@
+model,gpu_name,architecture,boost_clock,memory_size,memory_type,memory_interface,tdp,cuda_cores,tensor_cores,rt_cores,process_node,transistor_count,price,release_date,url
+GeForce RTX 4090,RTX 4090,N/A,N/A,24 GB,Standard Memory Config,N/A,N/A,4090,N/A,Maximum Resolution & Refresh Rate (1),Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/
+GeForce RTX 4080 Family,RTX 4080,Ada Lovelace,Yes,16 GB GDDR6X,16 GB GDDR6X,256-bit,320,8.9,4th Generation836 AI TOPS,,Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/
+Game Over,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,Advertising Cookies,N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/
+GeForce RTX 4070 Family,RTX 4070,Ada Lovelace,Yes,16 GB GDDR6X,16 GB GDDR6X,256-bit,285,8.9,4th Generation 706 AI TOPS,,Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/
+GeForce RTX 3090 Family,RTX 3090,N/A,1.86 GHz,24 GB,Standard Memory Config,N/A,N/A,3090,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/
+GeForce RTX 3080 Family,RTX 3080,N/A,1.67 GHz,12 GB,Standard Memory Config,N/A,N/A,3080,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/
+GeForce RTX 3070 Family,RTX 3070,N/A,1.77 GHz,8 GB,Standard Memory Config,N/A,N/A,3070,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/
nvidia_gpus.json
ADDED
@@ -0,0 +1,128 @@
+[
+    {
+        "architecture": "Ada Lovelace",
+        "boost_clock": "Yes",
+        "cuda_cores": "16",
+        "gpu_name": "RTX 4090",
+        "memory_interface": "384-bit",
+        "memory_size": "24 GB GDDR6X",
+        "memory_type": "GDDR6X",
+        "model": "GeForce RTX 4090",
+        "price": "N/A",
+        "process_node": "4nm",
+        "release_date": "September 2022",
+        "rt_cores": "3rd Generation",
+        "tdp": "450",
+        "tensor_cores": "4th Generation",
+        "transistor_count": "76.3 million",
+        "url": "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/"
+    },
+    {
+        "architecture": "Ada Lovelace",
+        "boost_clock": "2.51 GHz",
+        "cuda_cores": "76",
+        "gpu_name": "RTX 4080",
+        "memory_interface": "256-bit",
+        "memory_size": "16 GB GDDR6X",
+        "memory_type": "GDDR6X",
+        "model": "GeForce RTX 4080",
+        "price": "N/A",
+        "process_node": "4nm",
+        "release_date": "September 2022",
+        "rt_cores": "3rd Generation",
+        "tdp": "320",
+        "tensor_cores": "4th Generation",
+        "transistor_count": "45.9 million",
+        "url": "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/"
+    },
+    {
+        "architecture": "N/A",
+        "boost_clock": "N/A",
+        "cuda_cores": "N/A",
+        "gpu_name": "N/A",
+        "memory_interface": "N/A",
+        "memory_size": "N/A",
+        "memory_type": "N/A",
+        "model": "Game Over",
+        "price": "N/A",
+        "process_node": "N/A",
+        "release_date": "N/A",
+        "rt_cores": "N/A",
+        "tdp": "N/A",
+        "tensor_cores": "N/A",
+        "transistor_count": "N/A",
+        "url": "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/"
+    },
+    {
+        "architecture": "Ada Lovelace",
+        "boost_clock": "Yes",
+        "cuda_cores": "8.9",
+        "gpu_name": "RTX 4070",
+        "memory_interface": "256-bit",
+        "memory_size": "16 GB GDDR6X",
+        "memory_type": "16 GB GDDR6X",
+        "model": "GeForce RTX 4070 Family",
+        "price": "N/A",
+        "process_node": "Up to 2X performance and power efficiency",
+        "release_date": "N/A",
+        "rt_cores": "",
+        "tdp": "285",
+        "tensor_cores": "4th Generation 706 AI TOPS",
+        "transistor_count": "N/A",
+        "url": "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/"
+    },
+    {
+        "architecture": "N/A",
+        "boost_clock": "1.86 GHz",
+        "cuda_cores": "3090",
+        "gpu_name": "RTX 3090",
+        "memory_interface": "N/A",
+        "memory_size": "24 GB",
+        "memory_type": "Standard Memory Config",
+        "model": "GeForce RTX 3090 Family",
+        "price": "N/A",
+        "process_node": "N/A",
+        "release_date": "N/A",
+        "rt_cores": "N/A",
+        "tdp": "N/A",
+        "tensor_cores": "N/A",
+        "transistor_count": "N/A",
+        "url": "https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/"
+    },
+    {
+        "architecture": "N/A",
+        "boost_clock": "1.67 GHz",
+        "cuda_cores": "3080",
+        "gpu_name": "RTX 3080",
+        "memory_interface": "N/A",
+        "memory_size": "12 GB",
+        "memory_type": "Standard Memory Config",
+        "model": "GeForce RTX 3080 Family",
+        "price": "N/A",
+        "process_node": "N/A",
+        "release_date": "N/A",
+        "rt_cores": "Maximum Digital Resolution (1)",
+        "tdp": "N/A",
+        "tensor_cores": "N/A",
+        "transistor_count": "N/A",
+        "url": "https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/"
+    },
+    {
+        "architecture": "N/A",
+        "boost_clock": "1.77 GHz",
+        "cuda_cores": "3070",
+        "gpu_name": "RTX 3070",
+        "memory_interface": "N/A",
+        "memory_size": "8 GB",
+        "memory_type": "Standard Memory Config",
+        "model": "GeForce RTX 3070 Family",
+        "price": "N/A",
+        "process_node": "N/A",
+        "release_date": "N/A",
+        "rt_cores": "Maximum Digital Resolution (1)",
+        "tdp": "N/A",
+        "tensor_cores": "N/A",
+        "transistor_count": "N/A",
+        "url": "https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/"
+    }
+]
nvidia_gpus.mat
ADDED
Binary file (10.2 kB)
nvidia_gpus.msgpack
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a49947159c09d323389dbd4c51435985fa411c105fb5984c45b69eef1dbe57f0
+size 2781
nvidia_gpus.ndjson
ADDED
@@ -0,0 +1,7 @@
+{"model":"GeForce RTX 4090","gpu_name":"RTX 4090","architecture":"N\/A","boost_clock":"N\/A","memory_size":"24 GB","memory_type":"Standard Memory Config","memory_interface":"N\/A","tdp":"N\/A","cuda_cores":"4090","tensor_cores":"N\/A","rt_cores":"Maximum Resolution & Refresh Rate (1)","process_node":"Up to 2X performance and power efficiency","transistor_count":"N\/A","price":"N\/A","release_date":"N\/A","url":"https:\/\/www.nvidia.com\/en-us\/geforce\/graphics-cards\/40-series\/rtx-4090\/"}
+{"model":"GeForce RTX 4080 Family","gpu_name":"RTX 4080","architecture":"Ada Lovelace","boost_clock":"Yes","memory_size":"16 GB GDDR6X","memory_type":"16 GB GDDR6X","memory_interface":"256-bit","tdp":"320","cuda_cores":"8.9","tensor_cores":"4th Generation836 AI TOPS","rt_cores":"","process_node":"Up to 2X performance and power efficiency","transistor_count":"N\/A","price":"N\/A","release_date":"N\/A","url":"https:\/\/www.nvidia.com\/en-us\/geforce\/graphics-cards\/40-series\/rtx-4080\/"}
+{"model":"Game Over","gpu_name":"N\/A","architecture":"N\/A","boost_clock":"N\/A","memory_size":"N\/A","memory_type":"N\/A","memory_interface":"N\/A","tdp":"N\/A","cuda_cores":"N\/A","tensor_cores":"N\/A","rt_cores":"Advertising Cookies","process_node":"N\/A","transistor_count":"N\/A","price":"N\/A","release_date":"N\/A","url":"https:\/\/www.nvidia.com\/en-us\/geforce\/graphics-cards\/40-series\/rtx-4070-ti-super\/"}
+{"model":"GeForce RTX 4070 Family","gpu_name":"RTX 4070","architecture":"Ada Lovelace","boost_clock":"Yes","memory_size":"16 GB GDDR6X","memory_type":"16 GB GDDR6X","memory_interface":"256-bit","tdp":"285","cuda_cores":"8.9","tensor_cores":"4th Generation 706 AI TOPS","rt_cores":"","process_node":"Up to 2X performance and power efficiency","transistor_count":"N\/A","price":"N\/A","release_date":"N\/A","url":"https:\/\/www.nvidia.com\/en-us\/geforce\/graphics-cards\/40-series\/rtx-4070\/"}
+{"model":"GeForce RTX 3090 Family","gpu_name":"RTX 3090","architecture":"N\/A","boost_clock":"1.86 GHz","memory_size":"24 GB","memory_type":"Standard Memory Config","memory_interface":"N\/A","tdp":"N\/A","cuda_cores":"3090","tensor_cores":"N\/A","rt_cores":"Maximum Digital Resolution (1)","process_node":"N\/A","transistor_count":"N\/A","price":"N\/A","release_date":"N\/A","url":"https:\/\/www.nvidia.com\/en-us\/geforce\/graphics-cards\/30-series\/rtx-3090-3090ti\/"}
+{"model":"GeForce RTX 3080 Family","gpu_name":"RTX 3080","architecture":"N\/A","boost_clock":"1.67 GHz","memory_size":"12 GB","memory_type":"Standard Memory Config","memory_interface":"N\/A","tdp":"N\/A","cuda_cores":"3080","tensor_cores":"N\/A","rt_cores":"Maximum Digital Resolution (1)","process_node":"N\/A","transistor_count":"N\/A","price":"N\/A","release_date":"N\/A","url":"https:\/\/www.nvidia.com\/en-us\/geforce\/graphics-cards\/30-series\/rtx-3080-3080ti\/"}
+{"model":"GeForce RTX 3070 Family","gpu_name":"RTX 3070","architecture":"N\/A","boost_clock":"1.77 GHz","memory_size":"8 GB","memory_type":"Standard Memory Config","memory_interface":"N\/A","tdp":"N\/A","cuda_cores":"3070","tensor_cores":"N\/A","rt_cores":"Maximum Digital Resolution (1)","process_node":"N\/A","transistor_count":"N\/A","price":"N\/A","release_date":"N\/A","url":"https:\/\/www.nvidia.com\/en-us\/geforce\/graphics-cards\/30-series\/rtx-3070-3070ti\/"}
nvidia_gpus.orc
ADDED
Binary file (4.52 kB)
nvidia_gpus.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0a8b01c3b2366a2c8eece8e793a689e12ff8a820c3fb902245e1b216c5861fa
+size 10409
nvidia_gpus.pkl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5bac6c9c822cd7ba1f8a18ad0ad0894e858dcb2d69fea682f588007d42acae28
+size 2367
nvidia_gpus.protobuf
ADDED
Binary file (2.37 kB)
nvidia_gpus.sas
ADDED
@@ -0,0 +1,8 @@
+model,gpu_name,architecture,boost_clock,memory_size,memory_type,memory_interface,tdp,cuda_cores,tensor_cores,rt_cores,process_node,transistor_count,price,release_date,url
+GeForce RTX 4090,RTX 4090,N/A,N/A,24 GB,Standard Memory Config,N/A,N/A,4090,N/A,Maximum Resolution & Refresh Rate (1),Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/
+GeForce RTX 4080 Family,RTX 4080,Ada Lovelace,Yes,16 GB GDDR6X,16 GB GDDR6X,256-bit,320,8.9,4th Generation836 AI TOPS,,Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/
+Game Over,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,Advertising Cookies,N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/
+GeForce RTX 4070 Family,RTX 4070,Ada Lovelace,Yes,16 GB GDDR6X,16 GB GDDR6X,256-bit,285,8.9,4th Generation 706 AI TOPS,,Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/
+GeForce RTX 3090 Family,RTX 3090,N/A,1.86 GHz,24 GB,Standard Memory Config,N/A,N/A,3090,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/
+GeForce RTX 3080 Family,RTX 3080,N/A,1.67 GHz,12 GB,Standard Memory Config,N/A,N/A,3080,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/
+GeForce RTX 3070 Family,RTX 3070,N/A,1.77 GHz,8 GB,Standard Memory Config,N/A,N/A,3070,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/
nvidia_gpus.spss
ADDED
@@ -0,0 +1,8 @@
+model,gpu_name,architecture,boost_clock,memory_size,memory_type,memory_interface,tdp,cuda_cores,tensor_cores,rt_cores,process_node,transistor_count,price,release_date,url
+GeForce RTX 4090,RTX 4090,N/A,N/A,24 GB,Standard Memory Config,N/A,N/A,4090,N/A,Maximum Resolution & Refresh Rate (1),Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/
+GeForce RTX 4080 Family,RTX 4080,Ada Lovelace,Yes,16 GB GDDR6X,16 GB GDDR6X,256-bit,320,8.9,4th Generation836 AI TOPS,,Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/
+Game Over,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,N/A,Advertising Cookies,N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/
+GeForce RTX 4070 Family,RTX 4070,Ada Lovelace,Yes,16 GB GDDR6X,16 GB GDDR6X,256-bit,285,8.9,4th Generation 706 AI TOPS,,Up to 2X performance and power efficiency,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/
+GeForce RTX 3090 Family,RTX 3090,N/A,1.86 GHz,24 GB,Standard Memory Config,N/A,N/A,3090,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/
+GeForce RTX 3080 Family,RTX 3080,N/A,1.67 GHz,12 GB,Standard Memory Config,N/A,N/A,3080,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/
+GeForce RTX 3070 Family,RTX 3070,N/A,1.77 GHz,8 GB,Standard Memory Config,N/A,N/A,3070,N/A,Maximum Digital Resolution (1),N/A,N/A,N/A,N/A,https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/
nvidia_gpus.tsv
ADDED
@@ -0,0 +1,8 @@
+model	gpu_name	architecture	boost_clock	memory_size	memory_type	memory_interface	tdp	cuda_cores	tensor_cores	rt_cores	process_node	transistor_count	price	release_date	url
+GeForce RTX 4090	RTX 4090	N/A	N/A	24 GB	Standard Memory Config	N/A	N/A	4090	N/A	Maximum Resolution & Refresh Rate (1)	Up to 2X performance and power efficiency	N/A	N/A	N/A	https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/
+GeForce RTX 4080 Family	RTX 4080	Ada Lovelace	Yes	16 GB GDDR6X	16 GB GDDR6X	256-bit	320	8.9	4th Generation836 AI TOPS		Up to 2X performance and power efficiency	N/A	N/A	N/A	https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/
+Game Over	N/A	N/A	N/A	N/A	N/A	N/A	N/A	N/A	N/A	Advertising Cookies	N/A	N/A	N/A	N/A	https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/
+GeForce RTX 4070 Family	RTX 4070	Ada Lovelace	Yes	16 GB GDDR6X	16 GB GDDR6X	256-bit	285	8.9	4th Generation 706 AI TOPS		Up to 2X performance and power efficiency	N/A	N/A	N/A	https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/
+GeForce RTX 3090 Family	RTX 3090	N/A	1.86 GHz	24 GB	Standard Memory Config	N/A	N/A	3090	N/A	Maximum Digital Resolution (1)	N/A	N/A	N/A	N/A	https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/
+GeForce RTX 3080 Family	RTX 3080	N/A	1.67 GHz	12 GB	Standard Memory Config	N/A	N/A	3080	N/A	Maximum Digital Resolution (1)	N/A	N/A	N/A	N/A	https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/
+GeForce RTX 3070 Family	RTX 3070	N/A	1.77 GHz	8 GB	Standard Memory Config	N/A	N/A	3070	N/A	Maximum Digital Resolution (1)	N/A	N/A	N/A	N/A	https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/
nvidia_gpus.xlsx
ADDED
Binary file (5.85 kB)
nvidia_gpus.xml
ADDED
@@ -0,0 +1,136 @@
+<?xml version='1.0' encoding='utf-8'?>
+<data>
+  <row>
+    <index>0</index>
+    <model>GeForce RTX 4090</model>
+    <gpu_name>RTX 4090</gpu_name>
+    <architecture>N/A</architecture>
+    <boost_clock>N/A</boost_clock>
+    <memory_size>24 GB</memory_size>
+    <memory_type>Standard Memory Config</memory_type>
+    <memory_interface>N/A</memory_interface>
+    <tdp>N/A</tdp>
+    <cuda_cores>4090</cuda_cores>
+    <tensor_cores>N/A</tensor_cores>
+    <rt_cores>Maximum Resolution &amp; Refresh Rate (1)</rt_cores>
+    <process_node>Up to 2X performance and power efficiency</process_node>
+    <transistor_count>N/A</transistor_count>
+    <price>N/A</price>
+    <release_date>N/A</release_date>
+    <url>https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/</url>
+  </row>
+  <row>
+    <index>1</index>
+    <model>GeForce RTX 4080 Family</model>
+    <gpu_name>RTX 4080</gpu_name>
+    <architecture>Ada Lovelace</architecture>
+    <boost_clock>Yes</boost_clock>
+    <memory_size>16 GB GDDR6X</memory_size>
+    <memory_type>16 GB GDDR6X</memory_type>
+    <memory_interface>256-bit</memory_interface>
+    <tdp>320</tdp>
+    <cuda_cores>8.9</cuda_cores>
+    <tensor_cores>4th Generation836 AI TOPS</tensor_cores>
+    <rt_cores/>
+    <process_node>Up to 2X performance and power efficiency</process_node>
+    <transistor_count>N/A</transistor_count>
+    <price>N/A</price>
+    <release_date>N/A</release_date>
+    <url>https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/</url>
+  </row>
+  <row>
+    <index>2</index>
+    <model>Game Over</model>
+    <gpu_name>N/A</gpu_name>
+    <architecture>N/A</architecture>
+    <boost_clock>N/A</boost_clock>
+    <memory_size>N/A</memory_size>
+    <memory_type>N/A</memory_type>
+    <memory_interface>N/A</memory_interface>
+    <tdp>N/A</tdp>
+    <cuda_cores>N/A</cuda_cores>
+    <tensor_cores>N/A</tensor_cores>
+    <rt_cores>Advertising Cookies</rt_cores>
+    <process_node>N/A</process_node>
+    <transistor_count>N/A</transistor_count>
+    <price>N/A</price>
+    <release_date>N/A</release_date>
+    <url>https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/</url>
+  </row>
+  <row>
+    <index>3</index>
+    <model>GeForce RTX 4070 Family</model>
+    <gpu_name>RTX 4070</gpu_name>
+    <architecture>Ada Lovelace</architecture>
+    <boost_clock>Yes</boost_clock>
+    <memory_size>16 GB GDDR6X</memory_size>
+    <memory_type>16 GB GDDR6X</memory_type>
+    <memory_interface>256-bit</memory_interface>
+    <tdp>285</tdp>
+    <cuda_cores>8.9</cuda_cores>
+    <tensor_cores>4th Generation 706 AI TOPS</tensor_cores>
+    <rt_cores/>
+    <process_node>Up to 2X performance and power efficiency</process_node>
+    <transistor_count>N/A</transistor_count>
+    <price>N/A</price>
+    <release_date>N/A</release_date>
+    <url>https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/</url>
+  </row>
+  <row>
+    <index>4</index>
+    <model>GeForce RTX 3090 Family</model>
+    <gpu_name>RTX 3090</gpu_name>
+    <architecture>N/A</architecture>
+    <boost_clock>1.86 GHz</boost_clock>
+    <memory_size>24 GB</memory_size>
+    <memory_type>Standard Memory Config</memory_type>
+    <memory_interface>N/A</memory_interface>
+    <tdp>N/A</tdp>
+    <cuda_cores>3090</cuda_cores>
+    <tensor_cores>N/A</tensor_cores>
+    <rt_cores>Maximum Digital Resolution (1)</rt_cores>
+    <process_node>N/A</process_node>
+    <transistor_count>N/A</transistor_count>
+    <price>N/A</price>
+    <release_date>N/A</release_date>
+    <url>https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/</url>
+  </row>
+  <row>
+    <index>5</index>
+    <model>GeForce RTX 3080 Family</model>
+    <gpu_name>RTX 3080</gpu_name>
+    <architecture>N/A</architecture>
+    <boost_clock>1.67 GHz</boost_clock>
+    <memory_size>12 GB</memory_size>
+    <memory_type>Standard Memory Config</memory_type>
+    <memory_interface>N/A</memory_interface>
+    <tdp>N/A</tdp>
+    <cuda_cores>3080</cuda_cores>
+    <tensor_cores>N/A</tensor_cores>
+    <rt_cores>Maximum Digital Resolution (1)</rt_cores>
+    <process_node>N/A</process_node>
+    <transistor_count>N/A</transistor_count>
+    <price>N/A</price>
+    <release_date>N/A</release_date>
+    <url>https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/</url>
+  </row>
+  <row>
+    <index>6</index>
+    <model>GeForce RTX 3070 Family</model>
+    <gpu_name>RTX 3070</gpu_name>
+    <architecture>N/A</architecture>
+    <boost_clock>1.77 GHz</boost_clock>
+    <memory_size>8 GB</memory_size>
+    <memory_type>Standard Memory Config</memory_type>
+    <memory_interface>N/A</memory_interface>
+    <tdp>N/A</tdp>
+    <cuda_cores>3070</cuda_cores>
+    <tensor_cores>N/A</tensor_cores>
+    <rt_cores>Maximum Digital Resolution (1)</rt_cores>
+    <process_node>N/A</process_node>
+    <transistor_count>N/A</transistor_count>
+    <price>N/A</price>
+    <release_date>N/A</release_date>
+    <url>https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/</url>
+  </row>
+</data>
nvidia_gpus.yaml
ADDED
@@ -0,0 +1,112 @@
+- architecture: Ada Lovelace
+  boost_clock: 'Yes'
+  cuda_cores: '16'
+  gpu_name: RTX 4090
+  memory_interface: 384-bit
+  memory_size: 24 GB GDDR6X
+  memory_type: GDDR6X
+  model: GeForce RTX 4090
+  price: N/A
+  process_node: 4nm
+  release_date: September 2022
+  rt_cores: 3rd Generation
+  tdp: '450'
+  tensor_cores: 4th Generation
+  transistor_count: 76.3 million
+  url: https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/
+- architecture: Ada Lovelace
+  boost_clock: '2.51 GHz'
+  cuda_cores: '76'
+  gpu_name: RTX 4080
+  memory_interface: 256-bit
+  memory_size: 16 GB GDDR6X
+  memory_type: GDDR6X
+  model: GeForce RTX 4080
+  price: N/A
+  process_node: 4nm
+  release_date: September 2022
+  rt_cores: 3rd Generation
+  tdp: '320'
+  tensor_cores: 4th Generation
+  transistor_count: 45.9 million
+  url: https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/
+- architecture: N/A
+  boost_clock: N/A
+  cuda_cores: N/A
+  gpu_name: N/A
+  memory_interface: N/A
+  memory_size: N/A
+  memory_type: N/A
+  model: Game Over
+  price: N/A
+  process_node: N/A
+  release_date: N/A
+  rt_cores: Advertising Cookies
+  tdp: N/A
+  tensor_cores: N/A
+  transistor_count: N/A
+  url: https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/
+- architecture: Ada Lovelace
+  boost_clock: 'Yes'
+  cuda_cores: '8.9'
+  gpu_name: RTX 4070
+  memory_interface: 256-bit
+  memory_size: 16 GB GDDR6X
+  memory_type: 16 GB GDDR6X
+  model: GeForce RTX 4070 Family
+  price: N/A
+  process_node: Up to 2X performance and power efficiency
+  release_date: N/A
+  rt_cores: ''
+  tdp: '285'
+  tensor_cores: 4th Generation 706 AI TOPS
+  transistor_count: N/A
+  url: https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/
+- architecture: N/A
+  boost_clock: 1.86 GHz
+  cuda_cores: '3090'
+  gpu_name: RTX 3090
+  memory_interface: N/A
+  memory_size: 24 GB
+  memory_type: Standard Memory Config
+  model: GeForce RTX 3090 Family
+  price: N/A
+  process_node: N/A
+  release_date: N/A
+  rt_cores: Maximum Digital Resolution (1)
+  tdp: N/A
+  tensor_cores: N/A
+  transistor_count: N/A
+  url: https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/
+- architecture: N/A
+  boost_clock: 1.67 GHz
+  cuda_cores: '3080'
+  gpu_name: RTX 3080
+  memory_interface: N/A
+  memory_size: 12 GB
+  memory_type: Standard Memory Config
+  model: GeForce RTX 3080 Family
+  price: N/A
+  process_node: N/A
+  release_date: N/A
+  rt_cores: Maximum Digital Resolution (1)
+  tdp: N/A
+  tensor_cores: N/A
+  transistor_count: N/A
+  url: https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/
+- architecture: N/A
+  boost_clock: 1.77 GHz
+  cuda_cores: '3070'
+  gpu_name: RTX 3070
+  memory_interface: N/A
+  memory_size: 8 GB
+  memory_type: Standard Memory Config
+  model: GeForce RTX 3070 Family
+  price: N/A
+  process_node: N/A
+  release_date: N/A
+  rt_cores: Maximum Digital Resolution (1)
+  tdp: N/A
+  tensor_cores: N/A
+  transistor_count: N/A
+  url: https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/
push_nvidia_gpu_dataset.sh
ADDED
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Check if Git LFS is installed
+if ! command -v git-lfs &> /dev/null
+then
+    echo "Git LFS is not installed. Installing..."
+    brew install git-lfs
+    git lfs install
+fi
+
+# Check if the repository already exists
+if [ -d "nvidia-gpu-dataset" ]; then
+    echo "Removing existing nvidia-gpu-dataset directory..."
+    rm -rf nvidia-gpu-dataset
+fi
+
+# Clone the Hugging Face dataset repository
+git clone https://huggingface.co/datasets/bniladridas/nvidia-gpu-dataset
+
+# Check if the clone was successful
+if [ ! -d "nvidia-gpu-dataset" ]; then
+    echo "Failed to clone the repository."
+    exit 1
+fi
+
+# Copy dataset files into the cloned repository
+cp -r /Users/niladridas/Desktop/nvidia_doc/* nvidia-gpu-dataset/
+
+# Change directory to the cloned repository
+cd nvidia-gpu-dataset/
+
+# Log in to Hugging Face
+huggingface-cli login
+
+# Add files to git
+git add .
+
+# Commit the changes
+git commit -m "Add NVIDIA GPU dataset files"
+
+# Push the changes to Hugging Face
+git push
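The script above relies on `git`, `git-lfs`, and Homebrew. Where those are unavailable, roughly the same push can be done with the `huggingface_hub` Python client; a hedged sketch under the assumption that the dataset repo already exists and the export files are in the current directory:

```python
# push_via_api.py (hypothetical alternative to push_nvidia_gpu_dataset.sh)
from huggingface_hub import HfApi

api = HfApi()  # reuses the token saved by `huggingface-cli login`

# Upload everything in the working directory to the dataset repo in one commit
api.upload_folder(
    folder_path=".",
    repo_id="bniladridas/nvidia-gpu-dataset",
    repo_type="dataset",
    commit_message="Add NVIDIA GPU dataset files",
)
```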
requirements.txt
ADDED
@@ -0,0 +1,20 @@
+requests
+beautifulsoup4
+pandas
+selenium
+webdriver-manager
+pyarrow
+fastavro
+h5py
+lxml
+pyyaml
+scipy
+msgpack
+openpyxl
+fastavro
+msgpack
+pyarrow
+h5py
+scipy
+openpyxl
+pandas
summary.py
ADDED
@@ -0,0 +1,46 @@
+import pandas as pd
+import json
+import os
+from datasets import Dataset, DatasetDict
+
+# Function to read CSV file
+def read_csv(file_path):
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(f"The file {file_path} does not exist.")
+    return pd.read_csv(file_path)
+
+# Function to read JSON file
+def read_json(file_path):
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(f"The file {file_path} does not exist.")
+    with open(file_path, 'r') as file:
+        return json.load(file)  # the file holds one JSON array, not one object per line
+
+# Function to read NDJSON file
+def read_ndjson(file_path):
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(f"The file {file_path} does not exist.")
+    with open(file_path, 'r') as file:
+        return [json.loads(line) for line in file]
+
+# Consolidate data from different formats
+def consolidate_data():
+    csv_data = read_csv('nvidia_gpus.csv')
+    json_data = read_json('nvidia_gpus.json')
+    ndjson_data = read_ndjson('nvidia_gpus.ndjson')
+
+    # Combine all data into a single DataFrame
+    combined_data = pd.concat([csv_data, pd.DataFrame(json_data), pd.DataFrame(ndjson_data)], ignore_index=True)
+    return combined_data
+
+# Generate summary report
+def generate_summary():
+    output_file = 'nvidia_gpu_summary_report.csv'
+    data = consolidate_data()
+    summary = data.describe(include='all')
+    summary.to_csv(output_file, index=False)
+    print(f"Summary report generated: {output_file}")
+
+if __name__ == "__main__":
+    generate_summary()
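For a quick check outside Docker, the consolidation step in `summary.py` can be driven directly from a Python shell; a small sketch, assuming `nvidia_gpus.csv`, `nvidia_gpus.json`, and `nvidia_gpus.ndjson` are in the working directory:

```python
# Inspect the combined rows before writing the describe() report
from summary import consolidate_data, generate_summary

combined = consolidate_data()
print(combined.shape)                 # (number of rows, 16 columns)
print(combined["gpu_name"].unique())  # e.g. RTX 4090, RTX 4080, ...

# Writes nvidia_gpu_summary_report.csv, same as the container entrypoint
generate_summary()
```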
webscraped.py
ADDED
@@ -0,0 +1,580 @@
1 |
+
import requests
|
2 |
+
from bs4 import BeautifulSoup
|
3 |
+
import pandas as pd
|
4 |
+
import time
|
5 |
+
import json
|
6 |
+
import re
|
7 |
+
import logging
|
8 |
+
from urllib.parse import urlparse
|
9 |
+
import fastavro
|
10 |
+
import msgpack
|
11 |
+
import os
|
12 |
+
from datasets import Dataset
|
13 |
+
|
14 |
+
# Function to scrape data and return as a Hugging Face Dataset
|
15 |
+
def scrape_data(url):
|
16 |
+
response = requests.get(url)
|
17 |
+
soup = BeautifulSoup(response.text, 'html.parser')
|
18 |
+
|
19 |
+
# Example scraping logic (to be customized)
|
20 |
+
data = []
|
21 |
+
for item in soup.find_all('div', class_='gpu-item'):
|
22 |
+
gpu_info = {
|
23 |
+
'gpu_name': item.find('h2').text,
|
24 |
+
'architecture': item.find('span', class_='architecture').text,
|
25 |
+
'memory_size': item.find('span', class_='memory-size').text,
|
26 |
+
# Add more fields as necessary
|
27 |
+
}
|
28 |
+
data.append(gpu_info)
|
29 |
+
|
30 |
+
# Convert to Hugging Face Dataset
|
31 |
+
return Dataset.from_list(data)
|
32 |
+
|
33 |
+
# Import additional libraries for new formats
|
34 |
+
import pyarrow as pa
|
35 |
+
import pyarrow.parquet as pq
|
36 |
+
import fastavro
|
37 |
+
import h5py
|
38 |
+
import sqlite3
|
39 |
+
import xml.etree.ElementTree as ET
|
40 |
+
import yaml
|
41 |
+
import pickle
|
42 |
+
from scipy.io import savemat
|
43 |
+
|
44 |
+
# Set up logging
|
45 |
+
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
46 |
+
logger = logging.getLogger(__name__)
|
47 |
+
|
48 |
+
# Try to import Selenium components - they'll be used if available
|
49 |
+
try:
|
50 |
+
from selenium import webdriver
|
51 |
+
from selenium.webdriver.chrome.options import Options
|
52 |
+
from selenium.webdriver.chrome.service import Service
|
53 |
+
from selenium.webdriver.common.by import By
|
54 |
+
from selenium.webdriver.support.ui import WebDriverWait
|
55 |
+
from selenium.webdriver.support import expected_conditions as EC
|
56 |
+
from webdriver_manager.chrome import ChromeDriverManager
|
57 |
+
SELENIUM_AVAILABLE = True
|
58 |
+
logger.info("Selenium is available and will be used for JavaScript-heavy sites")
|
59 |
+
except ImportError:
|
60 |
+
SELENIUM_AVAILABLE = False
|
61 |
+
logger.warning("Selenium not available. Install with: pip install selenium webdriver-manager")
|
62 |
+
|
63 |
+
class NvidiaGpuScraper:
|
64 |
+
def __init__(self, use_selenium=True):
|
65 |
+
self.use_selenium = use_selenium and SELENIUM_AVAILABLE
|
66 |
+
self.driver = self._setup_driver() if self.use_selenium else None
|
67 |
+
|
68 |
+
def _setup_driver(self):
|
69 |
+
"""Set up and return a Selenium WebDriver if available"""
|
70 |
+
if not SELENIUM_AVAILABLE:
|
71 |
+
return None
|
72 |
+
|
73 |
+
try:
|
74 |
+
options = Options()
|
75 |
+
options.add_argument('--headless')
|
76 |
+
options.add_argument('--no-sandbox')
|
77 |
+
options.add_argument('--disable-dev-shm-usage')
|
78 |
+
options.add_argument('--disable-gpu')
|
79 |
+
options.add_argument("--window-size=1920,1080")
|
80 |
+
options.add_argument("--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36")
|
81 |
+
|
82 |
+
service = Service(ChromeDriverManager().install())
|
83 |
+
driver = webdriver.Chrome(service=service, options=options)
|
84 |
+
return driver
|
85 |
+
except Exception as e:
|
86 |
+
logger.error(f"Failed to initialize Selenium: {e}")
|
87 |
+
return None
|
88 |
+
|
89 |
+
def _fetch_with_selenium(self, url):
|
90 |
+
"""Fetch page content using Selenium for JavaScript-heavy sites"""
|
91 |
+
if self.driver is None:
|
92 |
+
return None
|
93 |
+
|
94 |
+
try:
|
95 |
+
logger.info(f"Fetching with Selenium: {url}")
|
96 |
+
self.driver.get(url)
|
97 |
+
# Wait for the page to load completely
|
98 |
+
WebDriverWait(self.driver, 20).until(
|
99 |
+
EC.presence_of_element_located((By.TAG_NAME, "body"))
|
100 |
+
)
|
101 |
+
|
102 |
+
# Scroll down to load lazy content
|
103 |
+
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight/2);")
|
104 |
+
time.sleep(1)
|
105 |
+
self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
|
106 |
+
time.sleep(2) # Additional wait for dynamic content
|
107 |
+
|
108 |
+
# Expand any "See more specifications" buttons if they exist
|
109 |
+
try:
|
110 |
+
see_more_buttons = self.driver.find_elements(By.XPATH,
|
111 |
+
"//button[contains(text(), 'See more') or contains(text(), 'specifications') or contains(text(), 'specs')]")
|
112 |
+
for button in see_more_buttons:
|
113 |
+
self.driver.execute_script("arguments[0].click();", button)
|
114 |
+
time.sleep(1)
|
115 |
+
except Exception as e:
|
116 |
+
logger.warning(f"Could not expand specification sections: {e}")
|
117 |
+
|
118 |
+
# Get the page source after JavaScript execution
|
119 |
+
page_source = self.driver.page_source
|
120 |
+
return BeautifulSoup(page_source, 'html.parser')
|
121 |
+
except Exception as e:
|
122 |
+
logger.error(f"Selenium error for {url}: {e}")
|
123 |
+
return None
|
124 |
+
|
125 |
+
def _fetch_with_requests(self, url):
|
126 |
+
"""Fetch page content using requests library"""
|
127 |
+
headers = {
|
128 |
+
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36',
|
129 |
+
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
|
130 |
+
'Accept-Language': 'en-US,en;q=0.5',
|
131 |
+
'Referer': 'https://www.google.com/',
|
132 |
+
'DNT': '1',
|
133 |
+
'Connection': 'keep-alive',
|
134 |
+
'Upgrade-Insecure-Requests': '1',
|
135 |
+
'Cache-Control': 'max-age=0',
|
136 |
+
}
|
137 |
+
|
138 |
+
for attempt in range(3):
|
139 |
+
try:
|
140 |
+
logger.info(f"Fetching with requests: {url}")
|
141 |
+
response = requests.get(url, timeout=30, headers=headers)
|
142 |
+
response.raise_for_status()
|
143 |
+
return BeautifulSoup(response.content, 'html.parser')
|
144 |
+
except requests.exceptions.RequestException as e:
|
145 |
+
wait_time = 2 ** attempt
|
146 |
+
logger.warning(f"Request error for {url}: {e}. Retrying in {wait_time} seconds...")
|
147 |
+
time.sleep(wait_time)
|
148 |
+
|
149 |
+
return None
|
150 |
+
|
151 |
+
def fetch_page(self, url):
|
152 |
+
if not url:
|
153 |
+
raise ValueError("The URL provided is empty.")
|
154 |
+
"""Fetch page content, trying Selenium first if available"""
|
155 |
+
if self.use_selenium:
|
156 |
+
soup = self._fetch_with_selenium(url)
|
157 |
+
if soup:
|
158 |
+
return soup
|
159 |
+
|
160 |
+
# Fall back to requests if Selenium failed or isn't available
|
161 |
+
return self._fetch_with_requests(url)
|
162 |
+
|
    def extract_gpu_specs(self, soup, url):
        """Extract GPU specifications from NVIDIA product pages."""
        specs = {
            'model': 'N/A',
            'gpu_name': 'N/A',
            'architecture': 'N/A',
            'boost_clock': 'N/A',
            'memory_size': 'N/A',
            'memory_type': 'N/A',
            'memory_interface': 'N/A',
            'tdp': 'N/A',
            'cuda_cores': 'N/A',
            'tensor_cores': 'N/A',
            'rt_cores': 'N/A',
            'process_node': 'N/A',
            'transistor_count': 'N/A',
            'price': 'N/A',
            'release_date': 'N/A',
            'url': url,
        }

        try:
            # Extract model name
            for selector in ['h1', '.product-title', '.product-name', '.prod-title']:
                title_element = soup.select_one(selector)
                if title_element and title_element.text.strip():
                    specs['model'] = title_element.text.strip()
                    # Try to extract GPU name (e.g., RTX 4090)
                    gpu_match = re.search(r'(GTX|RTX|RTX\s+SUPER|GTX\s+SUPER)\s+(\d{4}\s*(?:Ti|SUPER)?)',
                                          specs['model'], re.IGNORECASE)
                    if gpu_match:
                        specs['gpu_name'] = f"{gpu_match.group(1)} {gpu_match.group(2)}".strip()
                    break

            # Field mapping dictionary - different ways NVIDIA might label each spec
            field_mappings = {
                'architecture': ['gpu architecture', 'architecture', 'nvidia architecture'],
                'boost_clock': ['boost clock', 'gpu boost clock', 'clock speed', 'boost'],
                'memory_size': ['memory size', 'standard memory config', 'memory configuration', 'video memory'],
                'memory_type': ['memory type', 'memory spec', 'standard memory'],
                'memory_interface': ['memory interface', 'memory bus', 'interface width', 'bit width'],
                'tdp': ['graphics card power', 'tdp', 'total graphics power', 'power consumption', 'tgp', 'maximum power'],
                'cuda_cores': ['cuda cores', 'cuda', 'nvidia cuda cores'],
                'tensor_cores': ['tensor cores', 'tensor', 'ai cores'],
                'rt_cores': ['rt cores', 'ray tracing cores', 'rt'],
                'process_node': ['process', 'fabrication process', 'manufacturing process', 'fab'],
                'transistor_count': ['transistor', 'transistor count', 'number of transistors'],
                'price': ['price', 'msrp', 'suggested price', 'starting at'],
                'release_date': ['release date', 'availability', 'launch date', 'available']
            }
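            # For illustration: a spec row labelled "Boost Clock (GHz)" lower-cases to
            # "boost clock (ghz)", which contains the 'boost clock' entry above, so its value
            # is stored under specs['boost_clock'] by the extraction helpers below.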

            # Look for various specs sections
            spec_sections = soup.select('.specs-section, .tech-specs, .product-specs, .specs, .spec-table, .spec, [class*="spec"]')

            # If no dedicated sections found, look through the entire page
            if not spec_sections:
                spec_sections = [soup]

            for section in spec_sections:
                # Method 1: Look for labeled pairs or tables
                self._extract_from_tables_and_pairs(section, specs, field_mappings)

                # Method 2: Look for text patterns throughout the page
                self._extract_from_text_patterns(section, specs)

            # Extract from specification headings and adjacent elements
            self._extract_from_spec_headings(soup, specs, field_mappings)

            # Try to find any JSON-LD or structured data with specs
            self._extract_from_json_ld(soup, specs)

            # Clean and standardize specs
            self._clean_specs(specs)

            logger.info(f"Extracted NVIDIA GPU specs: {specs}")
            return specs

        except Exception as e:
            logger.error(f"Error extracting GPU specs: {e}")
            return specs

    def _extract_from_tables_and_pairs(self, section, specs, field_mappings):
        """Extract specs from table-like structures or label-value pairs"""
        # Check for table rows
        rows = section.select('tr, .spec-row, .specs-row, [class*="row"]')
        for row in rows:
            cells = row.select('th, td, .spec-label, .spec-value, .specs-label, .specs-value')
            if len(cells) >= 2:
                header = cells[0].text.strip().lower()
                value = cells[1].text.strip()

                # Match header to our fields
                for field, possible_headers in field_mappings.items():
                    if any(h in header for h in possible_headers):
                        specs[field] = value

        # Check for definition lists
        terms = section.select('dt, .term, .specs-term')
        for term in terms:
            header = term.text.strip().lower()
            # find_next_sibling() takes tag names or attribute filters, not CSS selectors
            value_el = term.find_next_sibling('dd') or term.find_next_sibling(class_=['definition', 'specs-definition'])
            if value_el:
                value = value_el.text.strip()

                # Match header to our fields
                for field, possible_headers in field_mappings.items():
                    if any(h in header for h in possible_headers):
                        specs[field] = value

        # Check for labeled pairs (common in NVIDIA's newer layout)
        labels = section.select('.specs-label, .spec-label, .specs-name, .label, [class*="label"]')
        for label in labels:
            header = label.text.strip().lower()
            # Try to find the adjacent value element (filter on class names rather than a CSS selector string)
            value_el = label.find_next_sibling(
                lambda tag: any('value' in c or c == 'specs-data' for c in (tag.get('class') or []))
            )
            if value_el:
                value = value_el.text.strip()

                # Match header to our fields
                for field, possible_headers in field_mappings.items():
                    if any(h in header for h in possible_headers):
                        specs[field] = value

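    # For reference, a hypothetical snippet _extract_from_tables_and_pairs() above is meant to handle:
    #   <div class="specs-row"><span class="specs-label">Memory Size</span>
    #   <span class="specs-value">24 GB GDDR6X</span></div>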
    def _extract_from_text_patterns(self, section, specs):
        """Extract specs using regex patterns in the page text"""
        text = section.get_text(' ', strip=True)

        # Extract CUDA cores
        cuda_matches = re.search(r'(\d[\d,]+)\s*(?:nvidia)?\s*cuda\s*cores', text, re.IGNORECASE)
        if cuda_matches and specs['cuda_cores'] == 'N/A':
            specs['cuda_cores'] = cuda_matches.group(1)

        # Extract Tensor cores
        tensor_matches = re.search(r'(\d+)\s*(?:nvidia)?\s*tensor\s*cores', text, re.IGNORECASE)
        if tensor_matches and specs['tensor_cores'] == 'N/A':
            specs['tensor_cores'] = tensor_matches.group(1)

        # Extract RT cores
        rt_matches = re.search(r'(\d+)\s*(?:nvidia)?\s*rt\s*cores', text, re.IGNORECASE)
        if rt_matches and specs['rt_cores'] == 'N/A':
            specs['rt_cores'] = rt_matches.group(1)

        # Extract memory size and type (the type must be a capturing group so it can be reused below)
        mem_matches = re.search(r'(\d+)\s*GB\s*(G?DDR\d+X?)', text, re.IGNORECASE)
        if mem_matches and specs['memory_size'] == 'N/A':
            specs['memory_size'] = f"{mem_matches.group(1)} GB"
            if specs['memory_type'] == 'N/A':
                specs['memory_type'] = mem_matches.group(2)

        # Extract boost clock
        clock_matches = re.search(r'boost\s*clock\s*(?:up\s*to)?\s*:?\s*([\d.]+)\s*(MHz|GHz)', text, re.IGNORECASE)
        if clock_matches and specs['boost_clock'] == 'N/A':
            value = clock_matches.group(1)
            unit = 'MHz' if clock_matches.group(2).lower() == 'mhz' else 'GHz'  # Use the unit captured from the page text
            specs['boost_clock'] = f"{value} {unit}"

        # Extract memory interface
        interface_matches = re.search(r'(\d+)[\s-]*bit(?:\s*memory)?\s*(?:interface|bus)', text, re.IGNORECASE)
        if interface_matches and specs['memory_interface'] == 'N/A':
            specs['memory_interface'] = f"{interface_matches.group(1)}-bit"

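    # Illustrative page text the patterns above are meant to match:
    #   "16,384 NVIDIA CUDA Cores", "24 GB GDDR6X", "Boost Clock up to 2.52 GHz", "384-bit memory interface"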
    def _extract_from_spec_headings(self, soup, specs, field_mappings):
        """Extract specs from headings and their adjacent content"""
        for field, terms in field_mappings.items():
            if specs[field] != 'N/A':  # Skip if already found
                continue

            for term in terms:
                # Look for headings containing the term (":contains" is soupsieve's non-standard pseudo-class)
                headers = soup.select(f'h1:contains("{term}"), h2:contains("{term}"), h3:contains("{term}"), h4:contains("{term}"), h5:contains("{term}")')

                for header in headers:
                    # Look at next sibling or child for the value
                    value_el = header.find_next()
                    if value_el:
                        specs[field] = value_el.text.strip()
                        break

    def _extract_from_json_ld(self, soup, specs):
        """Extract specs from JSON-LD structured data if available"""
        for script in soup.select('script[type="application/ld+json"]'):
            try:
                data = json.loads(script.string)

                # Look for product data
                if 'name' in data and specs['model'] == 'N/A':
                    specs['model'] = data['name']

                # Check for specs in properties
                if 'additionalProperty' in data:
                    for prop in data['additionalProperty']:
                        name = prop.get('name', '').lower()
                        value = prop.get('value', '')

                        if 'cuda' in name and specs['cuda_cores'] == 'N/A':
                            specs['cuda_cores'] = value
                        elif 'clock' in name and 'boost' in name and specs['boost_clock'] == 'N/A':
                            specs['boost_clock'] = value
                        elif 'memory' in name and 'size' in name and specs['memory_size'] == 'N/A':
                            specs['memory_size'] = value
                        # Add other mappings as needed

                # Check for offer data
                if 'offers' in data and specs['price'] == 'N/A':
                    if isinstance(data['offers'], list) and len(data['offers']) > 0:
                        specs['price'] = data['offers'][0].get('price', 'N/A')
                    elif isinstance(data['offers'], dict):
                        specs['price'] = data['offers'].get('price', 'N/A')
            except (json.JSONDecodeError, TypeError, AttributeError, KeyError):
                # Skip malformed or unexpected JSON-LD blocks instead of swallowing every exception
                pass

+
def _clean_specs(self, specs):
|
375 |
+
"""Clean and standardize the extracted specs"""
|
376 |
+
# Clean CUDA cores (remove commas)
|
377 |
+
if specs['cuda_cores'] != 'N/A':
|
378 |
+
specs['cuda_cores'] = specs['cuda_cores'].replace(',', '')
|
379 |
+
|
380 |
+
# Standardize memory size format
|
381 |
+
if specs['memory_size'] != 'N/A' and 'GB' not in specs['memory_size']:
|
382 |
+
if specs['memory_size'].isdigit():
|
383 |
+
specs['memory_size'] = f"{specs['memory_size']} GB"
|
384 |
+
|
385 |
+
# Standardize boost clock format
|
386 |
+
if specs['boost_clock'] != 'N/A':
|
387 |
+
# If it's just a number, add units
|
388 |
+
if re.match(r'^\d+(\.\d+)?$', specs['boost_clock']):
|
389 |
+
value = float(specs['boost_clock'])
|
390 |
+
if value > 100: # Likely MHz
|
391 |
+
specs['boost_clock'] = f"{value} MHz"
|
392 |
+
else: # Likely GHz
|
393 |
+
specs['boost_clock'] = f"{value} GHz"
|
394 |
+
|
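    # Examples of what _clean_specs() above produces (illustrative):
    #   '16,384' -> '16384', '24' -> '24 GB', '2520' -> '2520.0 MHz', '2.52' -> '2.52 GHz'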
    def scrape_gpu(self, url):
        """Scrape a single GPU product page"""
        if not url:
            raise ValueError("The URL provided is empty.")
        soup = self.fetch_page(url)
        if not soup:
            return {
                'model': 'Failed to fetch',
                'url': url
            }

        return self.extract_gpu_specs(soup, url)

    def scrape_multiple_gpus(self, urls):
        """Scrape multiple GPU product pages"""
        if not urls:
            raise ValueError("The list of URLs is empty.")
        results = []

        for url in urls:
            try:
                specs = self.scrape_gpu(url)
                results.append(specs)
                # Be polite with a delay between requests
                time.sleep(2)
            except Exception as e:
                logger.error(f"Error processing {url}: {e}")
                results.append({
                    'model': f"Error: {str(e)[:50]}",
                    'url': url
                })

        return results

    def cleanup(self):
        """Clean up resources"""
        if self.driver:
            self.driver.quit()

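# Illustrative standalone use of the scraper class above (assumes the imports at the top of this file):
#   scraper = NvidiaGpuScraper(use_selenium=False)
#   specs = scraper.scrape_gpu("https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/")
#   scraper.cleanup()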
# Main execution function
def main():
    # NVIDIA GPU product URLs - focused on specific product pages
    nvidia_urls = [
        "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4080/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070-ti-super/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4070/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3090-3090ti/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3080-3080ti/",
        "https://www.nvidia.com/en-us/geforce/graphics-cards/30-series/rtx-3070-3070ti/",
    ]

    # Create the scraper and run
    scraper = NvidiaGpuScraper(use_selenium=SELENIUM_AVAILABLE)

    try:
        # Scrape the GPUs
        results = scraper.scrape_multiple_gpus(nvidia_urls)

        # Create and save DataFrame
        df = pd.DataFrame(results)
        df.to_csv('nvidia_gpus.csv', index=False)
        df.to_json('nvidia_gpus.json', orient='records', lines=True)
        df.to_excel('nvidia_gpus.xlsx', index=False)

        # Save DataFrame in various formats
        try:
            df.to_parquet('nvidia_gpus.parquet')
        except Exception as e:
            logger.warning(f"Failed to save as Parquet: {e}")

        try:
            # Convert DataFrame to list of dictionaries
            records = df.to_dict(orient='records')
            # Define Avro schema (every column typed as a nullable string)
            schema = {
                'type': 'record',
                'name': 'GPU',
                'fields': [
                    {'name': col, 'type': ['string', 'null']} for col in df.columns
                ]
            }
            # Write to Avro file
            with open('nvidia_gpus.avro', 'wb') as avro_file:
                fastavro.writer(avro_file, schema, records)
        except Exception as e:
            logger.warning(f"Failed to save as Avro: {e}")

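        # Illustrative read-back of the Avro file written above (assuming fastavro, as used above):
        #   with open('nvidia_gpus.avro', 'rb') as f:
        #       rows = list(fastavro.reader(f))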
        try:
            df.to_orc('nvidia_gpus.orc')
        except Exception as e:
            logger.warning(f"Failed to save as ORC: {e}")

        try:
            df.to_hdf('nvidia_gpus.h5', key='df', mode='w')
        except Exception as e:
            logger.warning(f"Failed to save as HDF5: {e}")

        try:
            with sqlite3.connect('nvidia_gpus.db') as conn:
                df.to_sql('gpus', conn, if_exists='replace', index=False)
        except Exception as e:
            logger.warning(f"Failed to save as SQLite: {e}")

        try:
            df.to_xml('nvidia_gpus.xml')
        except Exception as e:
            logger.warning(f"Failed to save as XML: {e}")

        try:
            with open('nvidia_gpus.yaml', 'w') as yaml_file:
                yaml.dump(df.to_dict(orient='records'), yaml_file)
        except Exception as e:
            logger.warning(f"Failed to save as YAML: {e}")

        try:
            with open('nvidia_gpus.pkl', 'wb') as pickle_file:
                pickle.dump(df, pickle_file)
        except Exception as e:
            logger.warning(f"Failed to save as Pickle: {e}")

        try:
            savemat('nvidia_gpus.mat', {'gpus': df.to_dict(orient='records')})
        except Exception as e:
            logger.warning(f"Failed to save as MAT: {e}")

        try:
            df.to_csv('nvidia_gpus.tsv', sep='\t', index=False)
        except Exception as e:
            logger.warning(f"Failed to save as TSV: {e}")

        try:
            df.to_json('nvidia_gpus.ndjson', orient='records', lines=True)
        except Exception as e:
            logger.warning(f"Failed to save as NDJSON: {e}")

        try:
            df.to_csv('nvidia_gpus.arff', index=False)
        except Exception as e:
            logger.warning(f"Failed to save as ARFF: {e}")

        try:
            # Convert DataFrame to dictionary
            data = df.to_dict(orient='records')
            # Write to MessagePack file
            with open('nvidia_gpus.msgpack', 'wb') as msgpack_file:
                msgpack.pack(data, msgpack_file)
        except Exception as e:
            logger.warning(f"Failed to save as MessagePack: {e}")

        try:
            df.to_pickle('nvidia_gpus.protobuf')
        except Exception as e:
            logger.warning(f"Failed to save as ProtoBuf: {e}")

        try:
            df.to_csv('nvidia_gpus.dta', index=False)
        except Exception as e:
            logger.warning(f"Failed to save as DTA: {e}")

        try:
            df.to_csv('nvidia_gpus.sas', index=False)
        except Exception as e:
            logger.warning(f"Failed to save as SAS: {e}")

        try:
            df.to_csv('nvidia_gpus.spss', index=False)
        except Exception as e:
            logger.warning(f"Failed to save as SPSS: {e}")

        print("\nResults:")
        print(df)

        # Print summary (error rows and failed fetches don't count as successes)
        successful = sum(
            1 for spec in results
            if spec.get('model') not in ['N/A', 'Failed to fetch']
            and not str(spec.get('model', '')).startswith('Error:')
        )
        print(f"\nSummary: Successfully scraped {successful} out of {len(results)} NVIDIA GPUs")

        return df

    finally:
        # Always clean up resources
        scraper.cleanup()

if __name__ == "__main__":
    main()
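# Illustrative post-run check (hypothetical): load the CSV produced above
#   import pandas as pd
#   print(pd.read_csv('nvidia_gpus.csv').head())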