import streamlit as st

def run_app2():
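    """Render the manual-entry LLM advisor page.

    The user types CPU/GPU specs; the app maps the CPU category to a
    performance tier and task-specific, Ollama-compatible model suggestions.
    """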
    st.title(" LLM Compatibility Advisor (Manual Spec Entry)")
    st.markdown("Enter your CPU and GPU specs to get local LLM suggestions (Ollama-compatible)")

    st.markdown("### ๐Ÿ”ง How to Find Your Device Configuration")
    st.markdown("""
    - **๐ŸชŸ Windows**: Press `Win + R`, type `dxdiag`, and press Enter to view CPU, GPU, and RAM details.
    - **๐ŸŽ macOS**: Click Apple ๏ฃฟ menu โ†’ `About This Mac` โ†’ `More Info...`
    - **๐Ÿง Linux**: Use terminal commands like `lscpu`, `free -h`, or `neofetch`
    """)

    st.markdown("#### ๐Ÿ’ก Tip: You can use these tools to help find your processor and GPU names.")

    cpu_name = st.text_input("๐Ÿ–ฅ๏ธ Enter your Processor Name", placeholder="e.g., Intel Core i5-10300H")
    st.caption("โ„น๏ธ Enter your exact CPU name to help identify its capability (found in system settings).")

    with st.expander("💡 Suggested Processor Examples"):
        st.markdown("""
        - **Intel i3**: i3-10100U, i3-1115G4, i3-N305  
        - **Intel i5**: i5-10300H, i5-1240P, i5-13400  
        - **Intel i7**: i7-9750H, i7-11800H, i7-13700K  
        - **Apple M Series**: M1, M1 Pro, M2, M3  
        - **AMD Ryzen**: Ryzen 5 5600H, Ryzen 7 5800X, Ryzen 9 7945HX  
        - **Snapdragon**: Snapdragon 8 Gen 1, Snapdragon 7c Gen 2
        """)

    cpu_category = st.selectbox(
        "⚙️ Select your CPU Category",
        ["Intel i3", "Intel i5", "Intel i7", "Apple M Series", "AMD Ryzen", "Qualcomm Snapdragon", "Other/Unknown"]
    )
    st.caption("ℹ️ If unsure, choose the closest matching category from the dropdown.")

    gpu_name = st.text_input("🎮 GPU (Optional)", placeholder="e.g., NVIDIA GTX 1650 / None")
    st.caption("ℹ️ A dedicated GPU speeds up model inference. If you're unsure or running CPU-only, leave this blank or type 'None'.")

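    # (label, note) per CPU category; displayed once a processor name is entered.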
    performance_score = {
        "Intel i3": ("🔴 Low-end", "May only support 4-bit quantized models."),
        "Intel i5": ("🟡 Moderate", "Can run most 4-bit and some 8-bit models."),
        "Intel i7": ("🟢 High-end", "Handles 8-bit and some full FP16 models."),
        "Apple M Series": ("🟢 High-end", "Great efficiency for quantized models."),
        "AMD Ryzen": ("🟢 High-end", "Multi-core power suitable for larger models."),
        "Qualcomm Snapdragon": ("🔴 Low-end", "Best for the smallest on-device models."),
        "Other/Unknown": ("🟡 Average", "Limited info; may vary by chip.")
    }

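    # Model suggestions per CPU category, grouped by task; "Other/Unknown" doubles as the fallback.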
    llm_recommendations = {
        "Intel i3": {
            "Coding": ["Code Llama (7B - quantized)", "Phi-2"],
            "Math & Logic": ["Mistral (7B - quantized)", "Gemma 2B"],
            "General": ["Phi-2", "TinyLlama"]
        },
        "Intel i5": {
            "Coding": ["Code Llama (7B)", "Deepseek Coder (6.7B)"],
            "Math & Logic": ["Mistral 7B", "Gemma 7B"],
            "General": ["Phi-2", "Mistral", "LLaMA 2 (7B)"]
        },
        "Intel i7": {
            "Coding": ["Code Llama (13B - Q4)", "Deepseek Coder 6.7B"],
            "Math & Logic": ["Mistral 7B", "LLaMA 2 13B (quantized)"],
            "General": ["LLaMA 2 (13B)", "OpenChat 3.5"]
        },
        "Apple M Series": {
            "Coding": ["Code Llama 7B (Q4)", "Phi-2"],
            "Math & Logic": ["Gemma 7B", "Mistral (quantized)"],
            "General": ["Mistral", "LLaMA 2 7B", "Phi-2"]
        },
        "AMD Ryzen": {
            "Coding": ["Deepseek Coder", "Code Llama"],
            "Math & Logic": ["Mistral", "LLaMA 2"],
            "General": ["Phi-2", "Mistral", "LLaMA 2"]
        },
        "Qualcomm Snapdragon": {
            "Coding": ["Phi-2 (on-device)"],
            "Math & Logic": ["TinyLlama", "Phi-2"],
            "General": ["TinyLlama", "Gemma 2B"]
        },
        "Other/Unknown": {
            "Coding": ["Phi-2", "TinyLlama"],
            "Math & Logic": ["Gemma 2B", "TinyLlama"],
            "General": ["Phi-2", "TinyLlama"]
        }
    }

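    # Approximate download sizes at common quantization levels (FP16 / 8-bit / 4-bit).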
    quantized_sizes = {
        "TinyLlama": "FP16: 0.6GB, 8-bit: 0.3GB, 4-bit: 0.15GB",
        "Phi-2": "FP16: 5.2GB, 8-bit: 2.6GB, 4-bit: 1.3GB",
        "Mistral": "FP16: 13GB, 8-bit: 7GB, 4-bit: 3.5GB",
        "Gemma 2B": "FP16: 4.2GB, 8-bit: 2.1GB, 4-bit: 1.1GB",
        "Gemma 7B": "FP16: 13GB, 8-bit: 6.5GB, 4-bit: 3.2GB",
        "Code Llama": "7B: FP16: 13GB, 8-bit: 6.5GB, 4-bit: 3.3GB | 13B: FP16: 26GB, 8-bit: 13GB, 4-bit: 6.5GB",
        "Deepseek Coder": "6.7B: FP16: 12.8GB, 8-bit: 6.4GB, 4-bit: 3.2GB",
        "LLaMA 2": "7B: FP16: 13GB, 8-bit: 6.7GB, 4-bit: 3.5GB | 13B: FP16: 26GB, 8-bit: 13GB, 4-bit: 6.5GB",
        "OpenChat 3.5": "FP16: 7.1GB, 8-bit: 3.6GB, 4-bit: 1.8GB"
    }
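
    # Best-effort mapping from display names to Ollama library tags, used to
    # build `ollama pull` commands below. These tags are assumptions; verify
    # them against https://ollama.com/library before relying on them.
    ollama_names = {
        "TinyLlama": "tinyllama",
        "Phi-2": "phi",
        "Mistral": "mistral",
        "Gemma 2B": "gemma:2b",
        "Gemma 7B": "gemma:7b",
        "Code Llama": "codellama",
        "Deepseek Coder": "deepseek-coder",
        "LLaMA 2": "llama2",
        "OpenChat 3.5": "openchat",
    }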

    if cpu_name:
        st.markdown("---")
        st.subheader(" Your Hardware Configuration")
        st.write(f"**Processor Name:** {cpu_name}")
        st.write(f"**CPU Category:** {cpu_category}")
        st.write(f"**GPU:** {gpu_name or 'Not specified'}")

        score_label, score_note = performance_score.get(cpu_category, ("🟡 Unknown", "Estimate based on general category."))
        st.success(f"💡 Performance Score: {score_label}")
        st.caption(score_note)

        st.markdown("---")
        st.subheader("๐Ÿ“‹ Recommended LLMs for Local Use (Ollama Compatible)")

        recommendations_text = f"Processor: {cpu_name} ({cpu_category})\\nGPU: {gpu_name or 'None'}\\nPerformance Score: {score_label}\\n\\nRecommended Models:\\n"

        recs = llm_recommendations.get(cpu_category, llm_recommendations["Other/Unknown"])
        for task in ["Coding", "Math & Logic", "General"]:
            st.markdown(f"### ๐Ÿ”น {task}")
            recommendations_text += f"\\n{task}:\\n"
            for model in recs[task]:
                st.markdown(f"- โœ… **{model}**")
                recommendations_text += f"- {model}"
                for key in quantized_sizes:
                    if key.lower() in model.lower():
                        st.caption(f"๐Ÿ’พ {quantized_sizes[key]}")
                        recommendations_text += f" ({quantized_sizes[key]})"
                        break
                # Build the pull command: prefer a known Ollama tag, falling
                # back to a normalized form of the display name.
                cmd = next(
                    (tag for name, tag in ollama_names.items() if name.lower() in model.lower()),
                    model.split("(")[0].strip().lower().replace(" ", "").replace("-", ""),
                )
                st.code(f"ollama pull {cmd}", language="bash")
                recommendations_text += f"\n  Command: ollama pull {cmd}\n"

        st.markdown("---")
        st.markdown("๐Ÿ’ก _Tip: Run these models using `ollama run <model>` or download with `ollama pull <model>`._")

        st.download_button("📥 Download These Recommendations", recommendations_text, file_name="llm_suggestions.txt")
    else:
        st.info("Enter your processor details to see LLM recommendations.")