Julian Bilcke committed
Commit 65214e5 · 1 Parent(s): 422262e

k, seems to work

assets/config/curated_models.yaml CHANGED
@@ -45,42 +45,48 @@ models:
   # display_name: Qwen3 4B
   # num_of_parameters: 4B
 
-  - model_id: deepseek-ai/DeepSeek-R1-0528-Qwen3-8B
-    display_name: DeepSeek R1 Qwen3 8B (0528)
-    num_of_parameters: 8B
+  # this LLM pattern format is not supported by Tikslop yet
+  #- model_id: deepseek-ai/DeepSeek-R1-0528-Qwen3-8B
+  # display_name: DeepSeek R1 Qwen3 8B (0528)
+  # num_of_parameters: 8B
 
+  # decent performance when using Groq
   - model_id: google/gemma-2-9b-it
     display_name: Gemma 2 9B
     num_of_parameters: 9B
 
-  - model_id: meta-llama/Llama-3.3-70B-Instruct
-    display_name: Llama 3.3 70B
-    num_of_parameters: 70B
-
   - model_id: meta-llama/Llama-4-Scout-17B-16E-Instruct
     display_name: Llama 4 Scout 17B
     num_of_parameters: 17B
+
+  - model_id: meta-llama/Llama-3.3-70B-Instruct
+    display_name: Llama 3.3 70B
+    num_of_parameters: 70B
 
   # no providers found - bug?
   #- model_id: mistralai/Mistral-Small-3.2-24B-Instruct-2506
   # display_name: Mistral Small 3.2 24B
   # num_of_parameters: 24B
 
-  - model_id: Qwen/Qwen3-32B
-    display_name: Qwen3 32B
-    num_of_parameters: 32B
+  # too big/slow?
+  #- model_id: Qwen/Qwen3-32B
+  # display_name: Qwen3 32B
+  # num_of_parameters: 32B
 
-  - model_id: Qwen/Qwen3-235B-A22B-Instruct-2507
-    display_name: Qwen3 235B A22B
-    num_of_parameters: 235B
+  # too big/slow?
+  #- model_id: Qwen/Qwen3-235B-A22B-Instruct-2507
+  # display_name: Qwen3 235B A22B
+  # num_of_parameters: 235B
 
+  # this LLM pattern format is not supported by Tikslop yet
   #- model_id: deepseek-ai/DeepSeek-V3-0324
   # display_name: DeepSeek V3
   # num_of_parameters: 685B
 
-  - model_id: moonshotai/Kimi-K2-Instruct
-    display_name: Kimi K2
-    num_of_parameters: 1000B
+  # too big/slow?
+  #- model_id: moonshotai/Kimi-K2-Instruct
+  # display_name: Kimi K2
+  # num_of_parameters: 1000B
 
   # Gemma 3n models are not available on the Inference Providers yet
   #- model_id: google/gemma-3n-E2B-it
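Each active entry above follows the same three-key shape (model_id, display_name, num_of_parameters), and commented-out entries simply never reach the app because the YAML parser drops comments. Below is a minimal sketch of how such a bundled file could be read on the Flutter side; the CuratedModel class, the loadCuratedModels helper, and the asset path are illustrative assumptions, not code from this commit.

import 'package:flutter/services.dart' show rootBundle;
import 'package:yaml/yaml.dart';

// Illustrative (not part of this commit): one curated model entry,
// mirroring the three keys used in curated_models.yaml.
class CuratedModel {
  const CuratedModel({
    required this.modelId,
    required this.displayName,
    required this.numOfParameters,
  });

  final String modelId;
  final String displayName;
  final String numOfParameters;
}

// Hypothetical loader: reads the bundled YAML and returns the active models.
Future<List<CuratedModel>> loadCuratedModels() async {
  final raw = await rootBundle.loadString('assets/config/curated_models.yaml');
  final doc = loadYaml(raw) as YamlMap;
  final entries = (doc['models'] as List?) ?? const [];
  return entries
      .map((entry) => CuratedModel(
            modelId: entry['model_id'] as String,
            displayName: entry['display_name'] as String,
            numOfParameters: entry['num_of_parameters'] as String,
          ))
      .toList();
}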
build/web/assets/assets/config/curated_models.yaml CHANGED
The changes to this generated copy are identical to those in assets/config/curated_models.yaml above (same hunk, @@ -45,42 +45,48 @@ models:).
build/web/flutter_bootstrap.js CHANGED
@@ -38,6 +38,6 @@ _flutter.buildConfig = {"engineRevision":"1c9c20e7c3dd48c66f400a24d48ea806b4ab31
 
 _flutter.loader.load({
   serviceWorkerSettings: {
-    serviceWorkerVersion: "3912302714"
+    serviceWorkerVersion: "3948338569"
   }
 });
build/web/flutter_service_worker.js CHANGED
@@ -3,12 +3,12 @@ const MANIFEST = 'flutter-app-manifest';
 const TEMP = 'flutter-temp-cache';
 const CACHE_NAME = 'flutter-app-cache';
 
-const RESOURCES = {"flutter_bootstrap.js": "f833cb89d68c8ddba5bc70cec281205c",
+const RESOURCES = {"flutter_bootstrap.js": "b98de8eaf32c5bc183c9879c24e42172",
 "version.json": "68350cac7987de2728345c72918dd067",
 "tikslop.png": "570e1db759046e2d224fef729983634e",
 "index.html": "3a7029b3672560e7938aab6fa4d30a46",
 "/": "3a7029b3672560e7938aab6fa4d30a46",
-"main.dart.js": "e97ab1118ef80fcb0e3cbe51e5eae8ec",
+"main.dart.js": "a897b61edcb30264f6ad2d89a6931afc",
 "tikslop.svg": "26140ba0d153b213b122bc6ebcc17f6c",
 "flutter.js": "888483df48293866f9f41d3d9274a779",
 "favicon.png": "c8a183c516004e648a7bac7497c89b97",
@@ -28,7 +28,7 @@ const RESOURCES = {"flutter_bootstrap.js": "f833cb89d68c8ddba5bc70cec281205c",
 "assets/assets/ads/smolagents.gif": "45338af5a4d440b707d02f364be8195c",
 "assets/assets/ads/README.md": "1959fb6b85a966348396f2f0f9c3f32a",
 "assets/assets/ads/lerobot.gif": "0f90b2fc4d15eefb5572363724d6d925",
-"assets/assets/config/curated_models.yaml": "94e54843953b4f90c454cd8e5a3176fb",
+"assets/assets/config/curated_models.yaml": "d196fd3efc99169e1855088153af48c8",
 "assets/assets/config/README.md": "07a87720dd00dd1ca98c9d6884440e31",
 "assets/assets/config/custom.yaml": "52bd30aa4d8b980626a5eb02d0871c01",
 "assets/assets/config/default.yaml": "9ca1d05d06721c2b6f6382a1ba40af48",
build/web/index.html CHANGED
@@ -156,7 +156,7 @@
   </script>
 
   <!-- Add version parameter for cache busting -->
-  <script src="flutter_bootstrap.js?v=1753281547" async></script>
+  <script src="flutter_bootstrap.js?v=1753289445" async></script>
 
   <!-- Add cache busting script -->
   <script>
build/web/main.dart.js CHANGED
The diff for this file is too large to render. See raw diff
 
lib/screens/settings_screen.dart CHANGED
@@ -188,7 +188,7 @@ class _SettingsScreenState extends State<SettingsScreen> {
               crossAxisAlignment: CrossAxisAlignment.start,
               children: [
                 const Text(
-                  'LLM Configuration',
+                  'LLM (used for search and story generation)',
                   style: TextStyle(
                     color: TikSlopColors.onBackground,
                     fontSize: 20,
@@ -283,8 +283,7 @@
                         ? 'Enter HF API key to select models'
                         : _isCheckingAvailability
                             ? 'Checking model availability...'
-                            : 'Select a curated model optimized for #tikslop',
-                    helperMaxLines: 2,
+                            : 'Tikslop works best with a fast model (recommended: Gemma 2 9B)',
                     suffixIcon: _isCheckingAvailability || _isLoadingModels
                         ? const SizedBox(
                             width: 16,
@@ -498,7 +497,7 @@
                         ? 'Enter HF API key to unlock providers'
                         : _isCheckingAvailability
                             ? 'Checking model availability...'
-                            : 'Select from available providers for this model',
+                            : 'Tikslop works best with a fast provider (eg. Groq)',
                     helperMaxLines: 2,
                   ),
                   value: _selectedLlmProvider == 'built-in' ? null : _selectedLlmProvider,
@@ -598,7 +597,7 @@
               crossAxisAlignment: CrossAxisAlignment.start,
               children: [
                 const Text(
-                  'Video Generation',
+                  'Infinite video generator',
                   style: TextStyle(
                     color: TikSlopColors.onBackground,
                     fontSize: 20,
@@ -629,6 +628,29 @@
                     _settingsService.setNegativeVideoPrompt(value);
                   },
                 ),
+
+                const SizedBox(height: 16),
+                DropdownButtonFormField<String>(
+                  decoration: const InputDecoration(
+                    labelText: 'Video Generation Model (cannot be changed yet)',
+                  ),
+                  initialValue: 'ltx-video-2b-0.9.8',
+                  onChanged: null, // Disabled
+                  items: const [
+                    DropdownMenuItem(
+                      value: 'ltx-video-2b-0.9.8',
+                      child: Text('LTX-Video 2B 0.9.8 (distilled)'),
+                    ),
+                  ],
+                ),
+                const SizedBox(height: 8),
+                const Text(
+                  'Interested in using custom Hugging Face models? If you already have trained a LoRA model based on LTX-Video 2B 0.9.8 (distilled), please open a thread in the Community forum and I\'ll see for a way to allow for custom models.',
+                  style: TextStyle(
+                    fontSize: 12,
+                    color: Colors.grey,
+                  ),
+                ),
               ],
             ),
           ),
@@ -642,7 +664,7 @@
               crossAxisAlignment: CrossAxisAlignment.start,
               children: [
                 const Text(
-                  'Display Options',
+                  'Developer Tools (beta)',
                   style: TextStyle(
                     color: TikSlopColors.onBackground,
                     fontSize: 20,
@@ -676,48 +698,6 @@
                 ),
               ),
             ),
-            const SizedBox(height: 16),
-            // Custom Video Model Card
-            Card(
-              child: Padding(
-                padding: const EdgeInsets.all(16),
-                child: Column(
-                  crossAxisAlignment: CrossAxisAlignment.start,
-                  children: [
-                    const Text(
-                      'Custom Video Model',
-                      style: TextStyle(
-                        color: TikSlopColors.onBackground,
-                        fontSize: 20,
-                        fontWeight: FontWeight.bold,
-                      ),
-                    ),
-                    const SizedBox(height: 16),
-                    DropdownButtonFormField<String>(
-                      decoration: const InputDecoration(
-                        labelText: 'Video Generation Model',
-                      ),
-                      initialValue: 'ltx-video-2b-0.9.8',
-                      onChanged: null, // Disabled
-                      items: const [
-                        DropdownMenuItem(
-                          value: 'ltx-video-2b-0.9.8',
-                          child: Text('LTX-Video 2B 0.9.8 (distilled)'),
-                        ),
-                      ],
-                    ),
-                    const SizedBox(height: 8),
-                    const Text(
-                      'Interested in using custom Hugging Face models? If you already have trained a LoRA model based on LTX-Video 2B 0.9.8 (distilled), please open a thread in the Community forum and I\'ll see for a way to allow for custom models.',
-                      style: TextStyle(
-                        fontSize: 12,
-                        color: Colors.grey,
-                      ),
-                    ),
-                  ],
-                ),
-              ),
-            ),
             ],
           ),
         );
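For readers skimming the Dart diff: the video model selector that moved into the 'Infinite video generator' card is intentionally inert, since only one model is offered. The widget below restates that pattern as a self-contained sketch (it assumes a recent Flutter SDK where DropdownButtonFormField exposes initialValue, as the code in this commit does); passing onChanged: null is what makes Flutter render the field in its disabled state.

import 'package:flutter/material.dart';

// Self-contained sketch of the locked selector used in settings_screen.dart.
// With onChanged: null the field is disabled but still displays its value.
class VideoModelSelector extends StatelessWidget {
  const VideoModelSelector({super.key});

  @override
  Widget build(BuildContext context) {
    return DropdownButtonFormField<String>(
      decoration: const InputDecoration(
        labelText: 'Video Generation Model (cannot be changed yet)',
      ),
      initialValue: 'ltx-video-2b-0.9.8',
      onChanged: null, // disabled until more video models are supported
      items: const [
        DropdownMenuItem(
          value: 'ltx-video-2b-0.9.8',
          child: Text('LTX-Video 2B 0.9.8 (distilled)'),
        ),
      ],
    );
  }
}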