Ricercar committed
Commit bac893c
1 parent: 2794bf0

testings for gallery 2.0

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Archive/agraphTest.py ADDED
@@ -0,0 +1,170 @@
+ import os
+
+ import streamlit as st
+ import torch
+ import pandas as pd
+ import numpy as np
+
+ from datasets import load_dataset, Dataset, load_from_disk
+ from huggingface_hub import login
+ from streamlit_agraph import agraph, Node, Edge, Config
+ from sklearn.manifold import TSNE
+
+
+ @st.cache_data
+ def load_hf_dataset():
+     # log in to huggingface
+     login(token=os.environ.get("HF_TOKEN"))
+
+     # load from huggingface
+     roster = pd.DataFrame(load_dataset('MAPS-research/GEMRec-Roster', split='train'))
+     promptBook = pd.DataFrame(load_dataset('MAPS-research/GEMRec-Metadata', split='train'))
+
+     # process dataset
+     roster = roster[['model_id', 'model_name', 'modelVersion_id', 'modelVersion_name',
+                      'model_download_count']].drop_duplicates().reset_index(drop=True)
+
+     # add 'weighted_score_sum' column to promptBook if it does not exist
+     if 'weighted_score_sum' not in promptBook.columns:
+         promptBook.loc[:, 'weighted_score_sum'] = 0
+
+     # merge roster and promptbook
+     promptBook = promptBook.merge(roster[['model_id', 'model_name', 'modelVersion_id', 'modelVersion_name', 'model_download_count']],
+                                   on=['model_id', 'modelVersion_id'], how='left')
+
+     # add column to record current row index
+     promptBook.loc[:, 'row_idx'] = promptBook.index
+
+     return roster, promptBook
+
+
+ @st.cache_data
+ def calc_tsne(prompt_id):
+     # load every per-prompt feature file dumped to ../data/feats
+     print('==> loading feats')
+     feats = {}
+     for pt in os.listdir('../data/feats'):
+         if pt.split('.')[-1] == 'pt' and pt.split('.')[0].isdigit():
+             feats[pt.split('.')[0]] = torch.load(os.path.join('../data/feats', pt))
+
+     print('==> applying t-SNE')
+     # apply t-SNE to the selected prompt's features to get 2D coordinates
+     tsne = TSNE(n_components=2, random_state=0)
+     # for k, v in tqdm(feats.items()):
+     #     feats[k]['tsne'] = tsne.fit_transform(v['all'].numpy())
+     feats[prompt_id]['tsne'] = tsne.fit_transform(feats[prompt_id]['all'].numpy())
+
+     feats_df = pd.DataFrame(feats[prompt_id]['tsne'], columns=['x', 'y'])
+     feats_df['prompt_id'] = prompt_id
+
+     # every key other than 'all'/'tsne' is a scalar tensor holding a modelVersion_id
+     keys = []
+     for k in feats[prompt_id].keys():
+         if k != 'all' and k != 'tsne':
+             keys.append(int(k.item()))
+
+     feats_df['modelVersion_id'] = keys
+
+     return feats_df
+
+
+ if __name__ == '__main__':
+     st.set_page_config(layout="wide")
+
+     # load dataset
+     roster, promptBook = load_hf_dataset()
+
+     with st.sidebar:
+         st.write('## Select Prompt')
+         prompts = promptBook['prompt_id'].unique().tolist()
+         # sort prompts by prompt_id
+         prompts.sort()
+         prompt_id = st.selectbox('Select Prompt', prompts, index=0)
+         physics = st.checkbox('Enable Physics')
+
+     feats_df = calc_tsne(str(prompt_id))
+
+     # build (x, y, image_url) tuples, one per model version
+     data = []
+     for idx in feats_df.index:
+         modelVersion_id = feats_df.loc[idx, 'modelVersion_id']
+         image_id = promptBook[(promptBook['modelVersion_id'] == modelVersion_id) & (
+             promptBook['prompt_id'] == int(prompt_id))].reset_index(drop=True).loc[0, 'image_id']
+         image_url = f"https://modelcofferbucket.s3-accelerate.amazonaws.com/{image_id}.png"
+         scale = 50
+         data.append((feats_df.loc[idx, 'x'] * scale, feats_df.loc[idx, 'y'] * scale, image_url))
+
+     # pixel size of the last image (not used further in this script)
+     image_size = promptBook[(promptBook['image_id'] == image_id)].reset_index(drop=True).loc[0, 'size'].split('x')
+
+     nodes = []
+     edges = []
+
+     for d in data:
+         nodes.append(Node(id=d[2],
+                           # label=str(items.loc[idx, 'model_name']),
+                           size=20,
+                           shape="image",
+                           image=d[2],
+                           x=d[0],
+                           y=d[1],
+                           fixed=not physics,  # pin nodes in place unless physics is enabled
+                           color={'background': '#000000', 'border': '#ffffff'},
+                           shadow={'enabled': True, 'color': 'rgba(0,0,0,0.4)', 'size': 10, 'x': 1, 'y': 1},
+                           # borderWidth=1,
+                           # shapeProperties={'useBorderWithImage': True},
+                           )
+                      )
+
+     # leftover example nodes/edges from the streamlit_agraph docs:
+     # nodes.append(Node(id="Spiderman",
+     #                   label="Peter Parker",
+     #                   size=25,
+     #                   shape="circularImage",
+     #                   image="http://marvel-force-chart.surge.sh/marvel_force_chart_img/top_spiderman.png"))
+     # nodes.append(Node(id="Captain_Marvel",
+     #                   label="Carol Danvers",
+     #                   fixed=True,
+     #                   size=25,
+     #                   shape="circularImage",
+     #                   image="http://marvel-force-chart.surge.sh/marvel_force_chart_img/top_captainmarvel.png"))
+     # edges.append(Edge(source="Captain_Marvel",
+     #                   label="friend_of",
+     #                   target="Spiderman",
+     #                   length=200))
+
+     config = Config(width='100%',
+                     height=800,
+                     directed=True,
+                     physics=physics,
+                     hierarchical=False,
+                     )
+
+     cols = st.columns([3, 1], gap='large')
+
+     with cols[0]:
+         return_value = agraph(nodes=nodes,
+                               edges=edges,
+                               config=config)
+
+     with cols[1]:
+         # agraph returns the id (here, the image URL) of the clicked node
+         try:
+             st.image(return_value, use_column_width=True)
+         except Exception:
+             st.write('No image selected')
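
For reference, the heart of calc_tsne above is a plain t-SNE projection of one prompt's stacked image features. A minimal standalone sketch, with a random matrix standing in for the real feats[prompt_id]['all'] tensor (its exact shape is an assumption; the script only requires one feature row per model version):

    # Sketch of the t-SNE step in agraphTest.py; the random tensor is a stand-in.
    import torch
    from sklearn.manifold import TSNE

    feats_all = torch.randn(100, 768)               # assumed: 100 model versions x 768-dim features
    tsne = TSNE(n_components=2, random_state=0)
    coords = tsne.fit_transform(feats_all.numpy())  # -> (100, 2) array of plot coordinates
    print(coords.shape)

Each row of coords then becomes one image node in the agraph scatter, scaled by 50 to spread out the layout.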
Archive/bokehTest.py ADDED
@@ -0,0 +1,182 @@
+ import os
+
+ import streamlit as st
+ import torch
+ import pandas as pd
+ import numpy as np
+ import requests
+
+ from bokeh.plotting import figure, show
+ from bokeh.models import HoverTool, ColumnDataSource, CustomJSHover
+ from bokeh.embed import file_html
+ from bokeh.resources import CDN
+ from datasets import load_dataset, Dataset, load_from_disk
+ from huggingface_hub import login
+ from sklearn.manifold import TSNE
+ from tqdm import tqdm
+
+
+ @st.cache_data
+ def load_hf_dataset():
+     # log in to huggingface
+     login(token=os.environ.get("HF_TOKEN"))
+
+     # load from huggingface
+     roster = pd.DataFrame(load_dataset('MAPS-research/GEMRec-Roster', split='train'))
+     promptBook = pd.DataFrame(load_dataset('MAPS-research/GEMRec-Metadata', split='train'))
+
+     # process dataset
+     roster = roster[['model_id', 'model_name', 'modelVersion_id', 'modelVersion_name',
+                      'model_download_count']].drop_duplicates().reset_index(drop=True)
+
+     # add 'weighted_score_sum' column to promptBook if it does not exist
+     if 'weighted_score_sum' not in promptBook.columns:
+         promptBook.loc[:, 'weighted_score_sum'] = 0
+
+     # merge roster and promptbook
+     promptBook = promptBook.merge(roster[['model_id', 'model_name', 'modelVersion_id', 'modelVersion_name', 'model_download_count']],
+                                   on=['model_id', 'modelVersion_id'], how='left')
+
+     # add column to record current row index
+     promptBook.loc[:, 'row_idx'] = promptBook.index
+
+     return roster, promptBook
+
+
+ def show_with_bokeh(data, streamlit=False):
+     # Extract x, y coordinates and image URLs
+     x_coords, y_coords, image_urls = zip(*data)
+
+     # Create a ColumnDataSource
+     source = ColumnDataSource(data=dict(x=x_coords, y=y_coords, image=image_urls))
+
+     # Create a figure
+     p = figure(width=800, height=600)
+
+     # Add scatter plot
+     scatter = p.scatter(x='x', y='y', size=20, source=source)
+
+     # Hover tool that renders the hovered point's image in the tooltip
+     hover = HoverTool()
+     # an earlier iframe-based variant:
+     # hover.tooltips = """
+     # <div>
+     #     <iframe src="@image" width="512" height="512"></iframe>
+     # </div>
+     # """
+     hover.tooltips = """
+     <div>
+         <img src="@image" style="object-fit: contain; height: 100%">
+     </div>
+     """
+     hover.formatters = {'@image': CustomJSHover(code="""
+         const index = cb_data.index;
+         const url = cb_data.source.data['image'][index];
+         return '<img src="' + url + '">';
+     """)}
+
+     p.add_tools(hover)
+
+     # Generate standalone HTML with the plot
+     html = file_html(p, CDN, "Interactive Scatter Plot with Hover Images")
+     # with open("scatter_plot_with_hover_images.html", "w") as f:
+     #     f.write(html)
+
+     if streamlit:
+         st.bokeh_chart(p, use_container_width=True)
+     else:
+         show(p)
+
+
+ def show_with_bokeh_2(data, image_size=(40, 40), streamlit=False):
+     # Extract x, y coordinates and image URLs
+     x_coords, y_coords, image_urls = zip(*data)
+
+     # Create a ColumnDataSource
+     source = ColumnDataSource(data=dict(x=x_coords, y=y_coords, image=image_urls))
+
+     # Create a figure
+     p = figure(width=800, height=600, aspect_ratio=1.0)
+
+     # Draw the images themselves as glyphs, scaled down from pixel size
+     scale = 0.1
+     image_size = [int(image_size[0]) * scale, int(image_size[1]) * scale]
+     p.image_url(url='image', x='x', y='y', source=source, w=image_size[0], h=image_size[1], anchor="center")
+
+     # Hover tool that shows a larger copy of the image
+     hover = HoverTool()
+     hover.tooltips = """
+     <div>
+         <img src="@image" style="object-fit: contain; height: 100%">
+     </div>
+     """
+     p.add_tools(hover)
+
+     # Generate standalone HTML with the plot
+     html = file_html(p, CDN, "Scatter Plot with Images")
+     # with open("scatter_plot_with_images.html", "w") as f:
+     #     f.write(html)
+
+     if streamlit:
+         st.bokeh_chart(p, use_container_width=True)
+     else:
+         show(p)
+
+
+ if __name__ == '__main__':
+     # load dataset
+     roster, promptBook = load_hf_dataset()
+
+     print('==> loading feats')
+     feats = {}
+     for pt in os.listdir('../data/feats'):
+         if pt.split('.')[-1] == 'pt' and pt.split('.')[0].isdigit():
+             feats[pt.split('.')[0]] = torch.load(os.path.join('../data/feats', pt))
+
+     print('==> applying t-SNE')
+     # apply t-SNE to the selected prompt's features to get 2D coordinates
+     tsne = TSNE(n_components=2, random_state=0)
+     # for k, v in tqdm(feats.items()):
+     #     feats[k]['tsne'] = tsne.fit_transform(v['all'].numpy())
+     prompt_id = '49'
+     feats[prompt_id]['tsne'] = tsne.fit_transform(feats[prompt_id]['all'].numpy())
+
+     print(feats[prompt_id]['tsne'])
+
+     # every key other than 'all'/'tsne' is a scalar tensor holding a modelVersion_id
+     keys = []
+     for k in feats[prompt_id].keys():
+         if k != 'all' and k != 'tsne':
+             keys.append(int(k.item()))
+
+     print(keys)
+
+     # build (x, y, image_url) tuples, one per model version
+     data = []
+     for idx in range(len(keys)):
+         modelVersion_id = keys[idx]
+         image_id = promptBook[(promptBook['modelVersion_id'] == modelVersion_id) & (promptBook['prompt_id'] == int(prompt_id))].reset_index(drop=True).loc[0, 'image_id']
+         image_url = f"https://modelcofferbucket.s3-accelerate.amazonaws.com/{image_id}.png"
+         scale = 50
+         data.append((feats[prompt_id]['tsne'][idx][0] * scale, feats[prompt_id]['tsne'][idx][1] * scale, image_url))
+
+     # pixel size of the last image, e.g. ['512', '512']
+     image_size = promptBook[(promptBook['image_id'] == image_id)].reset_index(drop=True).loc[0, 'size'].split('x')
+
+     # show_with_bokeh(data, streamlit=True)
+     show_with_bokeh_2(data, image_size=image_size, streamlit=True)
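
The hover-image trick in both helpers is standard Bokeh: put the URLs into a ColumnDataSource column and reference it as @image inside the tooltip HTML. A self-contained sketch with placeholder coordinates and URLs:

    # Minimal sketch of Bokeh's image-in-tooltip pattern; the data is made up.
    from bokeh.plotting import figure, show
    from bokeh.models import HoverTool, ColumnDataSource

    source = ColumnDataSource(data=dict(
        x=[1, 2, 3],
        y=[3, 1, 2],
        image=['https://example.com/a.png',
               'https://example.com/b.png',
               'https://example.com/c.png'],
    ))
    p = figure(width=400, height=300)
    p.scatter(x='x', y='y', size=20, source=source)
    # @image is substituted per point from the data source when the tooltip renders
    p.add_tools(HoverTool(tooltips='<img src="@image" width="128">'))
    show(p)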
Archive/test.py DELETED
@@ -1,124 +0,0 @@
- import streamlit as st
- from streamlit_sortables import sort_items
- from torchvision import transforms
- from transformers import CLIPProcessor, CLIPModel
- from torchmetrics.multimodal import CLIPScore
- import torch
- import numpy as np
- import pandas as pd
- from tqdm import tqdm
- from datasets import load_dataset, Dataset, load_from_disk
- import os
-
- import clip
-
- def compute_clip_score(promptbook, device, drop_negative=False):
-     # if 'clip_score' in promptbook.columns:
-     #     print('==> Skipping CLIP-Score computation')
-     #     return
-     print('==> CLIP-Score computation started')
-     clip_scores = []
-     to_tensor = transforms.ToTensor()
-     # metric = CLIPScore(model_name_or_path='openai/clip-vit-base-patch16').to(DEVICE)
-     metric = CLIPScore(model_name_or_path='openai/clip-vit-large-patch14').to(device)
-     for i in tqdm(range(0, len(promptbook), BATCH_SIZE)):
-         images = []
-         prompts = list(promptbook.prompt.values[i:i+BATCH_SIZE])
-         for image in promptbook.image.values[i:i+BATCH_SIZE]:
-             images.append(to_tensor(image))
-         with torch.no_grad():
-             x = metric.processor(text=prompts, images=images, return_tensors='pt', padding=True)
-             img_features = metric.model.get_image_features(x['pixel_values'].to(device))
-             img_features = img_features / img_features.norm(p=2, dim=-1, keepdim=True)
-             txt_features = metric.model.get_text_features(x['input_ids'].to(device), x['attention_mask'].to(device))
-             txt_features = txt_features / txt_features.norm(p=2, dim=-1, keepdim=True)
-             scores = 100 * (img_features * txt_features).sum(axis=-1).detach().cpu()
-         if drop_negative:
-             scores = torch.max(scores, torch.zeros_like(scores))
-         clip_scores += [round(s.item(), 4) for s in scores]
-     promptbook['clip_score'] = np.asarray(clip_scores)
-     print('==> CLIP-Score computation completed')
-     return promptbook
-
-
- def compute_clip_score_hmd(promptbook):
-     metric_cpu = CLIPScore(model_name_or_path="openai/clip-vit-large-patch14").to('cpu')
-     metric_gpu = CLIPScore(model_name_or_path="openai/clip-vit-large-patch14").to('mps')
-
-     for idx in promptbook.index:
-         clip_score_hm = promptbook.loc[idx, 'clip_score']
-
-         with torch.no_grad():
-             image = promptbook.loc[idx, 'image']
-             image.save(f"./tmp/{promptbook.loc[idx, 'image_id']}.png")
-             image = transforms.ToTensor()(image)
-             image_cpu = torch.unsqueeze(image, dim=0).to('cpu')
-             image_gpu = torch.unsqueeze(image, dim=0).to('mps')
-
-             prompts = [promptbook.loc[idx, 'prompt']]
-             clip_score_cpu = metric_cpu(image_cpu, prompts)
-             clip_score_gpu = metric_gpu(image_gpu, prompts)
-
-         print(
-             f'==> clip_score_hm: {clip_score_hm:.4f}, clip_score_cpu: {clip_score_cpu:.4f}, clip_score_gpu: {clip_score_gpu:.4f}')
-
-
- def compute_clip_score_transformers(promptbook, device='cpu'):
-     model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
-     processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
-
-     with torch.no_grad():
-         inputs = processor(text=promptbook.prompt.tolist(), images=promptbook.image.tolist(), return_tensors="pt", padding=True)
-         outputs = model(**inputs)
-         logits_per_image = outputs.logits_per_image
-
-     promptbook.loc[:, 'clip_score'] = logits_per_image[:, 0].tolist()
-     return promptbook
-
-
- def compute_clip_score_clip(promptbook, device='cpu'):
-     # NOTE: this function was left unfinished; logits_per_image below is never
-     # computed, so calling it raises a NameError
-     model, preprocess = clip.load("ViT-B/32", device=device)
-     with torch.no_grad():
-         for idx in promptbook.index:
-             # image_input = preprocess(promptbook.loc[idx, 'image']).unsqueeze(0).to(device)
-             image_inputs = preprocess(promptbook.image.tolist()).to(device)
-             text_inputs = torch.cat([clip.tokenize(promptbook.prompt.tolist()).to(device)]).to(device)
-
-             image_features = model.encode_image(image_inputs)
-             text_features = model.encode_text(text_inputs)
-
-     probs = logits_per_image.softmax(dim=-1).cpu().numpy()
-     promptbook.loc[:, 'clip_score'] = probs[:, 0].tolist()
-     return promptbook
-
-
- if __name__ == "__main__":
-     BATCH_SIZE = 200
-     # DEVICE = 'mps' if torch.has_mps else 'cpu'
-
-     print(torch.__version__)
-
-     images_ds = load_from_disk(os.path.join(os.pardir, 'data', 'promptbook'))
-     images_ds = images_ds.sort(['prompt_id', 'modelVersion_id'])
-     print(images_ds)
-     print(type(images_ds[0]['image']))
-     promptbook_hmd = pd.DataFrame(images_ds[:20])
-     promptbook_new = promptbook_hmd.drop(columns=['clip_score'])
-     promptbook_cpu = compute_clip_score(promptbook_new.copy(deep=True), device='cpu')
-     promptbook_mps = compute_clip_score(promptbook_new.copy(deep=True), device='mps')
-     promptbook_tra_cpu = compute_clip_score_transformers(promptbook_new.copy(deep=True))
-     promptbook_tra_mps = compute_clip_score_transformers(promptbook_new.copy(deep=True), device='mps')
-
-     # compare the implementations image by image
-     for idx in promptbook_mps.index:
-         print(
-             'image id: ', promptbook_mps['image_id'][idx],
-             'mps: ', promptbook_mps['clip_score'][idx],
-             'cpu: ', promptbook_cpu['clip_score'][idx],
-             'tra cpu: ', promptbook_tra_cpu['clip_score'][idx],
-             'tra mps: ', promptbook_tra_mps['clip_score'][idx],
-             'hmd: ', promptbook_hmd['clip_score'][idx]
-         )
-
-     # compute_clip_score_hmd(promptbook_hmd)
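
The deleted test above cross-checked several CLIP-Score implementations against each other. The torchmetrics baseline it revolves around reduces to a few lines; a minimal sketch, with a random tensor standing in for a real generated image:

    # Sketch of the torchmetrics CLIPScore call the deleted test was comparing;
    # the image tensor is a stand-in with values in 0..255.
    import torch
    from torchmetrics.multimodal import CLIPScore

    metric = CLIPScore(model_name_or_path='openai/clip-vit-large-patch14')
    image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8)
    score = metric(image, 'a photo of a cat')
    print(float(score))  # image-text cosine similarity, scaled to 0..100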
Archive/test_form.py DELETED
@@ -1,39 +0,0 @@
- import streamlit as st
-
-
- def grid(col=3, row=4, name='grid1'):
-     cols = st.columns(col)
-     for i in range(row):
-         for j in range(col):
-             with cols[j]:
-                 # restore the saved value so checks survive switching grids
-                 value = st.session_state.checked_dic[name].get(f"{name}_{i*col+j}", False)
-
-                 check = st.checkbox(f"{i*col+j}", key=f"{name}_{i*col+j}", value=value)
-                 if check:
-                     st.session_state.checked_dic[name][f"{name}_{i*col+j}"] = True
-                 else:
-                     st.session_state.checked_dic[name][f"{name}_{i*col+j}"] = False
-
-
- def on_click():
-     for key in st.session_state:
-         if st.session_state[key] and key[-1].isdigit():
-             st.write(key)
-     # for key in st.session_state.checked_dic[name]:
-     #     if st.session_state.checked_dic[name][key]:
-     #         st.write(key)
-
-
- if __name__ == "__main__":
-     if 'checked_dic' not in st.session_state:
-         st.session_state.checked_dic = {'grid1': {}, 'grid2': {}}
-
-     name = st.selectbox('Select a grid', ['grid1', 'grid2'])
-
-     with st.form(f"{name}_form"):
-         grid(name=name)
-         submit_button = st.form_submit_button("Submit", on_click=on_click)
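
test_form.py exercised a common Streamlit wrinkle: a widget's state is discarded whenever that widget is not rendered on a rerun, so switching between grids would lose the checks unless each value is mirrored into st.session_state by hand. The pattern in miniature (names are illustrative):

    # Sketch of the mirror-into-session-state pattern from the deleted file.
    import streamlit as st

    if 'saved' not in st.session_state:
        st.session_state.saved = {}

    # seed the checkbox from the saved copy, then write the result back
    checked = st.checkbox('option A', value=st.session_state.saved.get('option A', False))
    st.session_state.saved['option A'] = checked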
data/feats/1.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:211566fa05419c50f9713be092f343a4518a68a0ecdec783f957412d8924cda1
+ size 639065
data/feats/10.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ddfed333b3a19193d15c23f29d61187e5becc3aaf115c7ad14cbd860c69efcf
+ size 639068
data/feats/11.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa36101aba0c01e1e8a4ad47c506847d5540db36217ab31de59155f605713e70
+ size 639068
data/feats/12.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bc9518b972441e3e0d5144025112ad20e0a501914b68806d4ff1ef6e7e7b58a
+ size 639068
data/feats/13.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:378aca12dff37e85f2204043bde53c015e85835bee288c317d809abf425b8789
+ size 639068
data/feats/14.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30b57b3a83a9ba6b78e161f266ab672e054e7772d763c43c1bac80aecd6c8541
+ size 639068
data/feats/15.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8b0115e79c8a4e6a6b9ea5c4985fb63d6241a571a64d752db64ac640f74ca34
+ size 639068
data/feats/16.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2a6bd80f3f5dd395a8b0d03e5b39e2e5c17d80f882289a51613a425d09a1ead
+ size 639068
data/feats/17.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79dab0b91e00271acb800da17e2d74eebb034cb8a95e14517e4716d5e45c6ad0
+ size 639068
data/feats/18.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fe951fd6f45722d23dc67f1680651978e04e0a2ac922bf32c089a70cfb87c42
+ size 639068
data/feats/19.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee7b8f1b35d51c87b76e315fd70f6eae65bd3f0722a09ccf8f9bedbe6c964ad8
+ size 639068
data/feats/2.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51df1dd4539225460d88d237881af030e0a8919ead4840ee12649902d2cb9e27
+ size 639065
data/feats/20.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18c90e42f00fc0fab451ad6abf1a1ebac69effeddb3996c53d9cb7f42c54ddac
+ size 639068
data/feats/21.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2291f89dbd9db4a440027b70f32007dfe7a30b71b7b724c8cd5227297b309e8
+ size 639068
data/feats/22.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a5b588af6935a96915b4aededf0aab030d5af2a4c90e82ac0f7f287c5676229
+ size 639068
data/feats/23.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3829f5cfbeab2191b8732bbb9c0b2588236884d40463d2d9d9a0355f13739c5b
+ size 639068
data/feats/24.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e9e05493e450501aea9aa7b2f16fdce7618198790ea55d0f270272f10aabf94
+ size 639068
data/feats/25.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9691048bfd0d0bc6e741564f1d33cb45054105e43b709072b645c650fdbe88e0
+ size 639068
data/feats/26.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9f2bdf9eec0114be38e77cc9bdf2fe181501069e26a51ba8cc39081182c806a
+ size 639068
data/feats/27.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efc8182b73dcfa2fcc0383fadf7c99f1e8841f21a4d356a4d57a4dbeb74188ce
+ size 639068
data/feats/28.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c781fc06cdc504773822725a0cc0c833e752130a6ce5a52e0e7092f98f36db1a
+ size 639068
data/feats/29.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac185e131d3430267c3bf68b097f771ca09736e35120654d2a0734d1e4a93464
+ size 639068
data/feats/3.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d05afa604a44460a6c6ad0ec33a5f8eeea2c344984de35efd2055956a0598389
+ size 639065
data/feats/30.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:579cf8d4bb7bf2acce917b5cc109afc5511feb1ff7830a27373a0507001e5329
+ size 639068
data/feats/31.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7815692babecedb80682afe8f8c6f6f4f377b8862490c82b9202680e10973caf
+ size 639068
data/feats/32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7a2d4ffb0aa5019d6bb44e7921154e6db20510ac74c1bd45a429ef4a8b744e1
+ size 639068
data/feats/33.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5d6465a5f845a6d00661c2f27da81e37761ea322e8da4813c8342272620c9319
+ size 639068
data/feats/34.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5964f36395a4fbac147126adf4795fecd8eea24604df529b746f2115be51cfc7
+ size 639068
data/feats/35.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bafebef939cb64ff136e928d98ea6efd03d16396f9fedffae74c7fbcfc92f19
+ size 639068
data/feats/36.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55eabc3ef67a155f4722ad3064dfa65ad2d5b34b8e84084aa4a37215bc6173a4
+ size 639068
data/feats/37.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d81924e4761c5b2ea4d7984fa0536ca3da1c97ff696bab4435cb95bb3a18db4e
+ size 639068
data/feats/38.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4b01d882d79d3de213abe45a441f3f5bb059d07bc725e63c5461ec6d5eef430
+ size 639068
data/feats/39.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:110d36710a033a95e52dd7dabb150a41a3c793855d5dbfd6016d62f4b5fc43cd
+ size 639068
data/feats/4.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c31158adb537f11bb73e7036be5df482e804f3198d74c9a84ba184f08f2acd1e
+ size 639065
data/feats/40.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:366b353476790e157edba90bedb93649d145357477e8e1a42b661fec9c4f9f7f
+ size 639068
data/feats/41.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e7749780caaabf088b04388af24cf99391f8e4c9dd0a112456a518d74643827
+ size 639068
data/feats/42.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5db2fa43372581382aea1b8f8eb0642e3e18c32a431b85732a5021e6868ef8b
+ size 639068
data/feats/43.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:405249437414400b5717b3ffb0e572aad382fd888a6f2964d75991a91a6157e5
+ size 639068
data/feats/44.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd7ef122fc0fdd1c6df65b9052b75bdf52cb9457673476aca80df5750cfd02e7
+ size 639068
data/feats/45.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:860c9a0fdc60a18d7d54d31c98204fee976f6464a83bffd6d6acd5b6d2c45f4b
+ size 639068
data/feats/46.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9df2d104b98d7d8467dfa3e1c58a067bc0d8abb4dd8b61cb0ac1d70c614d965c
+ size 639068
data/feats/47.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:294149f4bdeda849bb80ff2acf1976c04c2b73225dbd42a27521b7569374ccfc
+ size 639068
data/feats/48.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a3bb1dd5c42af317e85f42d848f32d23c3f5315865b48e4b9ceddebf1e36ab5
+ size 639068
data/feats/49.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9920294263b7860c8428503b36e46adbd90fb1f3c6c04b61c36bf84e1a6edc4
+ size 639068
data/feats/5.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:178b391bad95d3f103bd0c9bc91e335c3a6e160332c6c2621e68331860b1a1e3
+ size 639065
data/feats/50.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9075f134133e24420eb65e08b607907965bc32b86466288f0e4ba5e3c189aa60
+ size 639068
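
The data/feats/*.pt entries above are Git LFS pointer files, not the tensors themselves: each records only the pointer spec version, the sha256 oid of the payload, and its size in bytes. After a `git lfs pull`, each file is assumed (as the scripts in this commit do) to load as a dict with an 'all' feature matrix plus one scalar-tensor key per modelVersion_id:

    # Sketch of consuming one LFS-tracked feature file; requires `git lfs pull`
    # so the ~639 KB payloads replace these three-line pointers.
    import torch

    feats = torch.load('data/feats/1.pt')
    print(feats['all'].shape)  # stacked per-model-version features for prompt 1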