rohanjain2312 committed on
Commit
aeab86d
·
1 Parent(s): f60bc48

Optimize requirements for Hugging Face Space build

Browse files
Files changed (2) hide show
  1. app.py +21 -19
  2. requirements.txt +11 -3
app.py CHANGED
@@ -13,25 +13,23 @@ with open("class_names.txt", "r") as f:
13
 
14
  ### 2. Model and transforms preparation ###
15
 
16
- # Create EffNetB2 model
17
  effnetb4, effnetb4_transforms = create_effnetb4_model(
18
- num_classes=101, # len(class_names) would also work
19
  )
20
 
21
  # Load saved weights
22
  effnetb4.load_state_dict(
23
  torch.load(
24
  f="model_weights.pth",
25
- map_location=torch.device("cpu"), # load to CPU
26
  )
27
  )
28
 
29
  ### 3. Predict function ###
30
 
31
- # Create predict function
32
  def predict(img) -> Tuple[Dict, float]:
33
- """Transforms and performs a prediction on img and returns prediction and time taken.
34
- """
35
  # Start the timer
36
  start_time = timer()
37
 
@@ -44,7 +42,7 @@ def predict(img) -> Tuple[Dict, float]:
44
  # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
45
  pred_probs = torch.softmax(effnetb4(img), dim=1)
46
 
47
- # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter)
48
  pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
49
 
50
  # Calculate the prediction time
@@ -56,23 +54,27 @@ def predict(img) -> Tuple[Dict, float]:
56
  ### 4. Gradio app ###
57
 
58
  # Create title, description and article strings
59
- title = "FoodVision"
60
- description="An EfficientNet-B4 model trained on **Food-101** to classify food images into 101 categories.\n\nCreated by [Rohan Jain](https://www.linkedin.com/in/jaroh23/)"
61
  article = "Created by Rohan Jain - https://www.linkedin.com/in/jaroh23/"
62
 
63
  # Create examples list from "examples/" directory
64
  example_list = [["examples/" + example] for example in os.listdir("examples")]
65
 
66
  # Create the Gradio demo
67
- demo = gr.Interface(fn=predict, # mapping function from input to output
68
- inputs=gr.Image(type="pil"), # what are the inputs?
69
- outputs=[gr.Label(num_top_classes=3, label="Predictions"), # what are the outputs?
70
- gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs
71
- # Create examples list from "examples/" directory
72
- examples=example_list,
73
- title=title,
74
- description=description,
75
- article=article)
 
 
 
76
 
77
  # Launch the demo
78
- demo.launch(share=True)
 
 
13
 
14
  ### 2. Model and transforms preparation ###
15
 
16
+ # Create EffNetB4 model
17
  effnetb4, effnetb4_transforms = create_effnetb4_model(
18
+ num_classes=101,
19
  )
20
 
21
  # Load saved weights
22
  effnetb4.load_state_dict(
23
  torch.load(
24
  f="model_weights.pth",
25
+ map_location=torch.device("cpu"),
26
  )
27
  )
28
 
29
  ### 3. Predict function ###
30
 
 
31
  def predict(img) -> Tuple[Dict, float]:
32
+ """Transforms and performs a prediction on img and returns prediction and time taken."""
 
33
  # Start the timer
34
  start_time = timer()
35
 
 
42
  # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
43
  pred_probs = torch.softmax(effnetb4(img), dim=1)
44
 
45
+ # Create a prediction label and prediction probability dictionary for each prediction class
46
  pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
47
 
48
  # Calculate the prediction time
 
54
  ### 4. Gradio app ###
55
 
56
  # Create title, description and article strings
57
+ title = "FoodVision Big"
58
+ description = "An EfficientNet-B4 model trained on Food-101 to classify food images into 101 categories."
59
  article = "Created by Rohan Jain - https://www.linkedin.com/in/jaroh23/"
60
 
61
  # Create examples list from "examples/" directory
62
  example_list = [["examples/" + example] for example in os.listdir("examples")]
63
 
64
  # Create the Gradio demo
65
+ demo = gr.Interface(
66
+ fn=predict,
67
+ inputs=gr.Image(type="pil"),
68
+ outputs=[
69
+ gr.Label(num_top_classes=5, label="Top 5 Predictions"),
70
+ gr.Number(label="Prediction time (s)")
71
+ ],
72
+ examples=example_list,
73
+ title=title,
74
+ description=description,
75
+ article=article
76
+ )
77
 
78
  # Launch the demo
79
+ if __name__ == "__main__":
80
+ demo.launch()
requirements.txt CHANGED
@@ -1,3 +1,11 @@
1
- torch==2.9.0
2
- torchvision==0.24.0
3
- gradio==4.36.0
 
 
 
 
 
 
 
 
 
1
+ # Core dependencies
2
+ torch==2.2.2
3
+ torchvision==0.17.2
4
+ gradio==3.50.2
5
+
6
+ # Optional performance & stability dependencies
7
+ numpy>=1.26.0
8
+ pillow>=9.0.0
9
+
10
+ # (Optional) for fetching remote files over HTTP (e.g. example images) — not required by torchvision transforms
11
+ requests>=2.31.0