"""
TODO: save the data with different config
TODO: get stats for the frequency based selection
"""
import json
from itertools import product
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from datasets import Dataset
sns.set_theme(style="whitegrid")
# load filtered data
tmp = []
splits = []
for s in ['train', 'validation', 'test']:
    with open(f"data/t_rex.filter.{s}.jsonl") as f:
        _tmp = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
    tmp += _tmp
    splits += [s] * len(_tmp)
data = Dataset.from_list(tmp)
df_main = data.to_pandas()
df_main['split'] = splits


def is_entity(token):
    # heuristic: treat any token containing an uppercase character as an entity
    return any(i.isupper() for i in token)
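# illustrative behavior of the heuristic (hypothetical tokens):
#   is_entity("Paris")   -> True   (contains an uppercase character)
#   is_entity("capital") -> False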


def filtering(row, min_freq: int = 3, target: str = "subject"):
    # non-entities always pass; entities must occur at least min_freq times
    # in the given role ("subject" or "object")
    if not row['is_entity']:
        return True
    return row[target] >= min_freq


def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 3, random_sampling: bool = True):
    df = df_main.copy()

    # entity frequency filter
    c_sub = df.groupby("subject")['title'].count()
    c_obj = df.groupby("object")['title'].count()
    key = set(list(c_sub.index) + list(c_obj.index))
    count = pd.DataFrame([{
        'entity': k,
        "subject": c_sub[k] if k in c_sub else 0,
        "object": c_obj[k] if k in c_obj else 0
    } for k in key])
    count.index = count.pop('entity')
    count['is_entity'] = [is_entity(i) for i in count.index]
    count['sum'] = count['subject'] + count['object']
    count_filter_sub = count[count.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='subject'), axis=1)]['subject']
    count_filter_obj = count[count.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='object'), axis=1)]['object']
    vocab_sub = set(count_filter_sub.index)
    vocab_obj = set(count_filter_obj.index)
    df['flag_subject'] = [i in vocab_sub for i in df['subject']]
    df['flag_object'] = [i in vocab_obj for i in df['object']]
    df['flag'] = df['flag_subject'] & df['flag_object']
    # copy the slice so the column edits below do not raise SettingWithCopyWarning
    df_filter = df[df['flag']].copy()
    df_filter.pop("flag")
    df_filter.pop("flag_subject")
    df_filter.pop("flag_object")
    df_filter['count_subject'] = [count_filter_sub.loc[i] for i in df_filter['subject']]
    df_filter['count_object'] = [count_filter_obj.loc[i] for i in df_filter['object']]
    df_filter['count_sum'] = df_filter['count_subject'] + df_filter['count_object']
    # predicate frequency filter: drop rare predicates and cap frequent ones
    if random_sampling:
        # cap each predicate at max_pairs_predicate triples by random sampling
        df_balanced = pd.concat([
            g if len(g) <= max_pairs_predicate else g.sample(max_pairs_predicate, random_state=0)
            for _, g in df_filter.groupby("predicate") if len(g) >= min_pairs_predicate])
    else:
        # keep the triples whose entities have the highest combined frequency
        df_balanced = pd.concat([
            g if len(g) <= max_pairs_predicate else g.sort_values(by='count_sum', ascending=False).head(max_pairs_predicate)
            for _, g in df_filter.groupby("predicate") if len(g) >= min_pairs_predicate])
    df_balanced.pop("count_subject")
    df_balanced.pop("count_object")
    df_balanced.pop("count_sum")
    target_data = [i.to_dict() for _, i in df_balanced.iterrows()]

    # return the distributions alongside the cleaned records
    # (entity_count renamed from the original `count` to avoid shadowing the
    # frequency DataFrame above)
    predicate_dist = df_balanced.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
    entity, entity_count = np.unique(df_balanced['object'].tolist() + df_balanced['subject'].tolist(), return_counts=True)
    entity_dist = dict(zip(entity.tolist(), entity_count.tolist()))
    return predicate_dist, entity_dist, len(df_balanced), target_data
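
# a minimal usage sketch (hypothetical config values; random_sampling defaults to True):
#   p_dist, e_dist, size, records = main(min_entity_freq=2, max_pairs_predicate=50)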


if __name__ == '__main__':
    p_dist_full = []
    e_dist_full = []
    data_size_full = []
    config = []
    candidates = list(product([1, 2, 3, 4], [100, 50, 25, 10]))

    # run filtering with different configs
    for min_e_freq, max_p_freq in candidates:
        p_dist, e_dist, data_size, new_data = main(
            min_entity_freq=min_e_freq, max_pairs_predicate=max_p_freq, random_sampling=False)
        p_dist_full.append(p_dist)
        e_dist_full.append(e_dist)
        data_size_full.append(data_size)
        config.append([min_e_freq, max_p_freq])

        # save data (one file per split and per config)
        for s in ['train', 'validation', 'test']:
            new_data_s = [i for i in new_data if i['split'] == s]
            for i in new_data_s:
                i.pop('split')
            with open(f"data/t_rex.clean.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.{s}.jsonl", 'w') as f:
                f.write('\n'.join([json.dumps(i) for i in new_data_s]))
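        # e.g. for min_e_freq=2, max_p_freq=50 the train split lands in
        # data/t_rex.clean.min_entity_2_max_predicate_50.train.jsonl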
    # check statistics
    print("- Data Size")
    df_size = pd.DataFrame([{"min entity": mef, "max predicate": mpf, "freq": x}
                            for x, (mef, mpf) in zip(data_size_full, candidates)])
    df_size = df_size.pivot(index="min entity", columns="max predicate", values="freq")
    df_size.index.name = "min entity / max predicate"
    df_size_p = pd.DataFrame([{"min entity": mef, "max predicate": mpf, "freq": len(x)}
                              for x, (mef, mpf) in zip(p_dist_full, candidates)])
    df_size_p = df_size_p.pivot(index="max predicate", columns="min entity", values="freq")
    # the number of surviving predicates depends on the entity filter and
    # min_pairs_predicate, not on the cap, so any single row of the pivot
    # (here max predicate = 10) gives the unique-predicate count per min-entity setting
    df_size['predicate'] = df_size_p.loc[10]
    df_size.to_csv("data/stats.data_size.csv")
    print(df_size.to_markdown())
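    # the printed markdown table has one row per min-entity threshold, one column per
    # max-predicate cap, and a trailing column with the unique-predicate count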

    # plot predicate distribution
    df_p = pd.DataFrame([dict(enumerate(sorted(p.values(), reverse=True))) for p in p_dist_full]).T
    df_p.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates]
    fig, axes = plt.subplots(2, 2, constrained_layout=True)
    fig.suptitle('Predicate Distribution over Different Configurations')
    for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], [100, 50, 25, 10]):
        _df = df_p[[f"min entity: {mef}, max predicate: {mpf}" for mef in [1, 2, 3, 4]]]
        _df.columns = [f"min entity: {mef}" for mef in [1, 2, 3, 4]]
        ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
        if mpf != 100:
            # keep the legend on the first panel only
            ax.get_legend().remove()
        axes[x, y].set_title(f'max predicate: {mpf}')
    fig.supxlabel('unique predicates sorted by frequency')
    fig.supylabel('number of triples')
    fig.savefig("data/stats.predicate_distribution.png", bbox_inches='tight')
    fig.clf()

    # plot entity distribution (with a log-scaled x axis)
    df_e = pd.DataFrame([dict(enumerate(sorted(e.values(), reverse=True))) for e in e_dist_full]).T
    df_e.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates]
    fig, axes = plt.subplots(2, 2, constrained_layout=True)
    fig.suptitle('Entity Distribution over Different Configurations')
    for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], [100, 50, 25, 10]):
        _df = df_e[[f"min entity: {mef}, max predicate: {mpf}" for mef in [1, 2, 3, 4]]]
        _df.columns = [f"min entity: {mef}" for mef in [1, 2, 3, 4]]
        ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
        ax.set(xscale='log')
        if mpf != 100:
            ax.get_legend().remove()
        axes[x, y].set_title(f'max predicate: {mpf}')
    fig.supxlabel('unique entities sorted by frequency')
    fig.supylabel('number of triples')
    fig.savefig("data/stats.entity_distribution.png", bbox_inches='tight')
    fig.clf()