Commit 8e1fc1a
Parent(s): e530b85
updated streamlit file
.ipynb_checkpoints/Dockerfile-checkpoint CHANGED
@@ -14,6 +14,8 @@ RUN pip install --no-cache-dir -r requirements.txt
 
 RUN pip install --no-cache-dir torch
 
+RUN pip install --no-cache-dir --upgrade transformers
+
 # Install supervisor to manage multiple processes
 RUN apt-get update && apt-get install -y supervisor \
     && apt-get clean && rm -rf /var/lib/apt/lists/*
.ipynb_checkpoints/streamlit_app-checkpoint.py CHANGED
@@ -16,6 +16,7 @@ def load_model():
     return model, processor
 
 model, processor = load_model()
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 st.title("Image Captioning with Fine-Tuned BLiPv2 Model")
 
Dockerfile CHANGED
@@ -14,6 +14,8 @@ RUN pip install --no-cache-dir -r requirements.txt
 
 RUN pip install --no-cache-dir torch
 
+RUN pip install --no-cache-dir --upgrade transformers
+
 # Install supervisor to manage multiple processes
 RUN apt-get update && apt-get install -y supervisor \
     && apt-get clean && rm -rf /var/lib/apt/lists/*
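Note on the Dockerfile change: the commit adds an unpinned pip install --no-cache-dir --upgrade transformers layer, presumably so the image picks up a transformers release that ships the BLIP-2 classes used by the app. A hypothetical sanity check (not part of this commit) that could be run inside the built image to confirm the upgrade took effect:

# Hypothetical check, not part of the commit: verify the installed transformers
# release exposes the BLIP-2 classes the Streamlit app depends on.
import transformers
from transformers import Blip2ForConditionalGeneration, Blip2Processor  # ImportError on pre-BLIP-2 releases

print("transformers version:", transformers.__version__)

Pinning an exact version instead of using --upgrade would make the image build reproducible, since an unpinned upgrade can pull a different release on each rebuild.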
streamlit_app.py CHANGED
@@ -16,6 +16,7 @@ def load_model():
     return model, processor
 
 model, processor = load_model()
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 st.title("Image Captioning with Fine-Tuned BLiPv2 Model")
 
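Note on the streamlit_app.py change: this hunk only introduces the device variable; how it is consumed is not shown. A minimal sketch of how such a variable is typically wired into a BLIP-2 captioning app follows. The model name "Salesforce/blip2-opt-2.7b", the uploader widget, and the generation settings are illustrative assumptions, not the Space's actual fine-tuned checkpoint or code.

# Minimal sketch only -- model name, widgets, and generation settings are
# assumptions, not the Space's actual source.
import streamlit as st
import torch
from PIL import Image
from transformers import Blip2ForConditionalGeneration, Blip2Processor

@st.cache_resource
def load_model():
    processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")
    return model, processor

model, processor = load_model()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)  # move the weights once, at startup

st.title("Image Captioning with Fine-Tuned BLiPv2 Model")

uploaded = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
if uploaded is not None:
    image = Image.open(uploaded).convert("RGB")
    # Keep the pixel tensors on the same device as the model before generating.
    inputs = processor(images=image, return_tensors="pt").to(device)
    with torch.no_grad():
        generated_ids = model.generate(**inputs, max_new_tokens=30)
    caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
    st.image(image, caption=caption)

The practical point of the added line is that the model and the processed inputs end up on the same device, which avoids the CPU/GPU tensor-mismatch errors that occur when only one of them is moved to CUDA.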