yzhang511 committed on
Commit
abafd4e
1 Parent(s): 8bf982d

Upload config.yaml

Files changed (1)
  1. config.yaml +61 -0
config.yaml ADDED
@@ -0,0 +1,61 @@
+ model_class: STPatch # NDT2 is a sub-class of STPatch
+
+
+ encoder:
+
+   stitching: false
+   from_pt: null
+   embed_region: false
+
+   masker:
+     force_active: true
+     mode: random_token
+     ratio: 0.3 # ratio of data to predict
+     zero_ratio: 1.0 # of the data to predict, ratio that is zeroed out
+     random_ratio: 1.0 # of the non-zeroed data, ratio that is randomly replaced
+     expand_prob: 0.0 # probability of expanding the mask in "temporal" mode
+     max_timespan: 1 # max span of mask if expanded
+     channels: null # neurons to mask in "co-smoothing" mode
+     timesteps: null # time steps to mask in "forward-pred" mode
+     mask_regions: ['all'] # brain regions to mask in "inter-region" mode
+     target_regions: ['all'] # brain regions to predict in "intra-region" mode
+     n_mask_regions: 1 # number of regions to choose from mask_regions or target_regions
+
+   patcher:
+     active: true
+     time_stride: 0
+
+   # context available for each timestep
+   context:
+     forward: -1
+     backward: -1
+
+   embedder:
+     n_neurons: 1280
+     n_timesteps: 100
+     max_time_F: 1
+     max_space_F: 128
+     max_spikes: 0 # max number of spikes in a single time bin
+     mode: linear # linear/embed/identity
+     mult: 2 # embedding multiplier; hidden_size = n_channels * mult
+     act: softsign # activation for the embedding layers
+     scale: 1 # multiply the embedding by this number
+     bias: true # use bias in the embedding layer
+     dropout: 0.2 # dropout in embedding layer
+     use_prompt: false
+     use_session: true
+
+   transformer:
+     n_layers: 5 # number of transformer layers
+     hidden_size: 128 # hidden size of the transformer
+     n_heads: 8 # number of attention heads
+     attention_bias: true # learn bias in the attention layers
+     act: gelu # activation function in mlp layers
+     inter_size: 512 # intermediate dimension in the mlp layers
+     mlp_bias: true # learn bias in the mlp layers
+     dropout: 0.4 # dropout in transformer layers
+     fixup_init: true # modify weight initialization
+
+
+ decoder:
+   from_pt: null
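
For reference, a minimal sketch of how a config like this might be loaded in Python, assuming PyYAML is available; the file path and the accessed keys are illustrative, not prescribed by this repository:

import yaml

# Load the uploaded config (path is an assumption for illustration)
with open("config.yaml") as f:
    cfg = yaml.safe_load(f)

# Top-level keys mirror the file: model_class, encoder, decoder
print(cfg["model_class"])                         # STPatch
print(cfg["encoder"]["masker"]["ratio"])          # 0.3, fraction of tokens to predict
print(cfg["encoder"]["transformer"]["n_layers"])  # 5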