a-r-r-o-w HF staff commited on
Commit
bdf5ac0
·
verified ·
1 Parent(s): 0992999

Create README.md

Browse files
Files changed (1) hide show
  1. README.md +41 -0
README.md ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ A tiny random pipeline for testing purposes based on [THUDM/CogView4-6B](https://huggingface.co/THUDM/CogView4-6B).
2
+
3
+ ```python
4
from transformers import AutoTokenizer, GlmConfig, GlmModel
from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler

# Build a dummy CogView4 pipeline: the tokenizer is the real GLM-4 chat
# tokenizer, while every model component is randomly initialized at toy
# sizes so the resulting pipeline stays tiny and fast to load.
tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat", trust_remote_code=True)

# Tiny GLM text encoder (random weights, toy dimensions).
text_encoder = GlmModel(
    GlmConfig(
        hidden_size=32,
        intermediate_size=8,
        num_hidden_layers=2,
        num_attention_heads=4,
        head_dim=8,
    )
)

# Toy-sized CogView4 denoising transformer; text_embed_dim matches the
# text encoder's hidden_size above.
transformer = CogView4Transformer2DModel(
    patch_size=2,
    in_channels=4,
    num_layers=2,
    attention_head_dim=4,
    num_attention_heads=4,
    out_channels=4,
    text_embed_dim=32,
    time_embed_dim=8,
    condition_dim=4,
)

# Small VAE whose latent_channels match the transformer's in/out channels.
vae = AutoencoderKL(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=4,
    sample_size=128,
)

scheduler = FlowMatchEulerDiscreteScheduler()

# Assemble the full pipeline and serialize it for reuse in tests.
pipe = CogView4Pipeline(
    tokenizer=tokenizer,
    text_encoder=text_encoder,
    transformer=transformer,
    vae=vae,
    scheduler=scheduler,
)
pipe.save_pretrained("./dump-cogview4-dummy-pipe")
40
+
41
+ ```