Anushree1 committed
Commit b26f451 · verified
1 Parent(s): 7c3b7ed

Update app.py

Files changed (1)
  1. app.py +43 -23
app.py CHANGED
@@ -1,24 +1,44 @@
+import cv2
 import gradio as gr
-import torch
-from transformers import AutoModelForImageClassification, AutoFeatureExtractor
-
-# Load model and feature extractor (adjust these according to your model)
-model_name = "sign_language_model(1).h5" # Replace with your model path or Hugging Face model
-model = AutoModelForImageClassification.from_pretrained(model_name)
-feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
-
-# Define prediction function
-def predict(image):
-    inputs = feature_extractor(images=image, return_tensors="pt")
-    with torch.no_grad():
-        logits = model(**inputs).logits
-    predicted_class = logits.argmax(-1).item()
-    return predicted_class
-
-# Set up the Gradio interface
-interface = gr.Interface(fn=predict,
-                         inputs=gr.Image(type="pil"),
-                         outputs=gr.Text(label="Predicted Sign Language"))
-
-if __name__ == "__main__":
-    interface.launch()
+import numpy as np
+from transformers import pipeline
+
+# Load the hand detection model from Hugging Face
+gesture_pipeline = pipeline("image-classification", model="google/vit-base-patch16-224-in21k")
+
+# Function to process the video stream
+def process_frame(frame):
+    # Convert the frame to RGB for the Hugging Face model
+    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+    # You can apply hand gesture recognition logic here (e.g., hand landmarks tracking)
+    gesture = gesture_pipeline(rgb_frame)
+
+    # Output gesture recognition results
+    gesture_name = gesture[0]["label"]
+    gesture_confidence = gesture[0]["score"]
+
+    # Display gesture on the screen (in this case, we'll move the elements or give a thumbs up)
+    if "Thumbs up" in gesture_name:
+        print("Gesture recognized: Thumbs Up!")
+    if "Heart" in gesture_name:
+        print("Gesture recognized: Heart!")
+
+    # Update the frame with the recognized gesture
+    cv2.putText(frame, f"Gesture: {gesture_name}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+    return frame
+
+# Gradio interface function
+def video_input(video):
+    # Process the video frame by frame
+    while True:
+        ret, frame = video.read()
+        if not ret:
+            break
+        processed_frame = process_frame(frame)
+        yield processed_frame
+
+# Set up the Gradio interface with the webcam
+iface = gr.Interface(fn=video_input, inputs=gr.Video(source="webcam"), outputs="video", live=True)
+
+iface.launch()
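For reference, the transformers image-classification pipeline returns a list of {"label", "score"} dicts sorted by score (top 5 by default), which is what the new code indexes with gesture[0]["label"] and gesture[0]["score"]. In the transformers versions I am aware of, it expects a PIL image, file path, or URL rather than a raw OpenCV array, so passing the bare numpy frame may be rejected. A minimal single-image sketch, assuming a hypothetical local test image hand.jpg:

from PIL import Image
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224-in21k")

# "hand.jpg" is a hypothetical local test image
predictions = classifier(Image.open("hand.jpg"))

# predictions is a list like [{"label": "...", "score": 0.42}, ...], highest score first
print(predictions[0]["label"], predictions[0]["score"])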
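As committed, video_input calls .read() on the value Gradio passes in, but gr.Video hands the callback a file path, and a "video" output does not consume a generator of raw frames, so the interface may not run as written. Below is a minimal runnable sketch under those assumptions: it opens the clip with OpenCV, classifies a sample of frames, and returns the top labels as text. The model name is kept from the commit; it is a general ImageNet-21k backbone, so its labels will not be gestures such as "Thumbs up" unless a fine-tuned gesture checkpoint is substituted.

import cv2
import gradio as gr
from PIL import Image
from transformers import pipeline

# Backbone kept from the commit; swap in a gesture-classification checkpoint for meaningful labels.
gesture_pipeline = pipeline("image-classification", model="google/vit-base-patch16-224-in21k")

FRAME_STEP = 30  # classify every 30th frame to keep the demo responsive

def classify_video(video_path):
    # gr.Video passes the recorded/uploaded clip to the callback as a file path
    capture = cv2.VideoCapture(video_path)
    results = []
    index = 0
    while True:
        ret, frame = capture.read()
        if not ret:
            break
        if index % FRAME_STEP == 0:
            # Convert BGR -> RGB and wrap in a PIL image for the pipeline
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            top = gesture_pipeline(Image.fromarray(rgb))[0]
            results.append(f"frame {index}: {top['label']} ({top['score']:.2f})")
        index += 1
    capture.release()
    return "\n".join(results) or "No frames decoded"

# Webcam capture can be enabled through the Video component's source/sources argument,
# whose name differs between Gradio 3.x and 4.x.
iface = gr.Interface(fn=classify_video, inputs=gr.Video(), outputs=gr.Text(label="Per-frame predictions"))

if __name__ == "__main__":
    iface.launch()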