""" TODO: save the data with different config TODO: get stats for the frequency based selection """ import json from itertools import product import numpy as np import pandas as pd import seaborn as sns from matplotlib import pyplot as plt from datasets import Dataset sns.set_theme(style="whitegrid") # load filtered data with open(f"data/t_rex.filter.jsonl") as f: _tmp = [json.loads(i) for i in f.read().split('\n') if len(i) > 0] tmp += _tmp splits += [s] * len(_tmp) data = Dataset.from_list(tmp) df_main = data.to_pandas() df_main['split'] = splits def is_entity(token): return any(i.isupper() for i in token) def filtering(row, min_freq: int = 3, target: str = "subject"): if not row['is_entity']: return True return row[target] >= min_freq def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 3, random_sampling: bool = True): df = df_main.copy() # entity frequency filter c_sub = df.groupby("subject")['title'].count() c_obj = df.groupby("object")['title'].count() key = set(list(c_sub.index) + list(c_obj.index)) count = pd.DataFrame([{'entity': k, "subject": c_sub[k] if k in c_sub else 0, "object": c_obj[k] if k in c_obj else 0} for k in key]) count.index = count.pop('entity') count['is_entity'] = [is_entity(i) for i in count.index] count['sum'] = count['subject'] + count['object'] count_filter_sub = count[count.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='subject'), axis=1)]['subject'] count_filter_obj = count[count.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='object'), axis=1)]['object'] vocab_sub = set(count_filter_sub.index) vocab_obj = set(count_filter_obj.index) df['flag_subject'] = [i in vocab_sub for i in df['subject']] df['flag_object'] = [i in vocab_obj for i in df['object']] df['flag'] = df['flag_subject'] & df['flag_object'] df_filter = df[df['flag']] df_filter.pop("flag") df_filter.pop("flag_subject") df_filter.pop("flag_object") df_filter['count_subject'] = [count_filter_sub.loc[i] for i in df_filter['subject']] df_filter['count_object'] = [count_filter_obj.loc[i] for i in df_filter['object']] df_filter['count_sum'] = df_filter['count_subject'] + df_filter['count_object'] # predicate frequency filter if random_sampling: df_balanced = pd.concat( [g if len(g) <= max_pairs_predicate else g.sample(max_pairs_predicate, random_state=0) for _, g in df_filter.groupby("predicate") if len(g) >= min_pairs_predicate]) else: df_balanced = pd.concat( [g if len(g) <= max_pairs_predicate else g.sort_values(by='count_sum', ascending=False).head(max_pairs_predicate) for _, g in df_filter.groupby("predicate") if len(g) >= min_pairs_predicate]) df_balanced.pop("count_subject") df_balanced.pop("count_object") df_balanced.pop("count_sum") target_data = [i.to_dict() for _, i in df_balanced.iterrows()] # return distribution predicate_dist = df_balanced.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict() entity, count = np.unique(df_balanced['object'].tolist() + df_balanced['subject'].tolist(), return_counts=True) entity_dist = dict(list(zip(entity.tolist(), count.tolist()))) return predicate_dist, entity_dist, len(df_balanced), target_data if __name__ == '__main__': p_dist_full = [] e_dist_full = [] data_size_full = [] config = [] candidates = list(product([4, 8, 12, 16], [100, 50, 25, 10])) # run filtering with different configs for min_e_freq, max_p_freq in candidates: p_dist, e_dist, data_size, new_data = main(min_entity_freq=min_e_freq, max_pairs_predicate=max_p_freq, random_sampling=False) p_dist_full.append(p_dist) 
        e_dist_full.append(e_dist)
        data_size_full.append(data_size)
        config.append([min_e_freq, max_p_freq])

        # save data: write the filtered triples back out per split
        out = {}
        for s in ['train', 'validation', 'test']:
            out[s] = [i for i in new_data if i['split'] == s]
        for s, v in out.items():
            for i in v:
                i.pop('split')
            with open(f"data/t_rex.clean.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.{s}.jsonl", 'w') as f:
                f.write('\n'.join([json.dumps(i) for i in v]))

    # check statistics
    print("- Data Size")
    df_size = pd.DataFrame([
        {"min entity": mef, "max predicate": mpf, "freq": x}
        for x, (mef, mpf) in zip(data_size_full, candidates)])
    df_size = df_size.pivot(index="min entity", columns="max predicate", values="freq")
    df_size.index.name = "min entity / max predicate"
    df_size_p = pd.DataFrame([
        {"min entity": mef, "max predicate": mpf, "freq": len(x)}
        for x, (mef, mpf) in zip(p_dist_full, candidates)])
    df_size_p = df_size_p.pivot(index="max predicate", columns="min entity", values="freq")
    # the unique-predicate count is unaffected by the per-predicate cap, so any
    # max-predicate row gives the same numbers; take the row for 10
    df_size['predicate'] = df_size_p.loc[10]
    df_size.to_csv("data/stats.data_size.csv")
    print(df_size.to_markdown())

    # plot predicate distribution
    df_p = pd.DataFrame([dict(enumerate(sorted(p.values(), reverse=True))) for p in p_dist_full]).T
    df_p.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates]
    fig, axes = plt.subplots(2, 2, constrained_layout=True)
    fig.suptitle('Predicate Distribution over Different Configurations')
    for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], [100, 50, 25, 10]):
        # column keys must match the candidate grid [4, 8, 12, 16]
        _df = df_p[[f"min entity: {mef}, max predicate: {mpf}" for mef in [4, 8, 12, 16]]]
        _df.columns = [f"min entity: {mef}" for mef in [4, 8, 12, 16]]
        ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
        if mpf != 100:
            ax.legend_.remove()
        axes[x, y].set_title(f'max predicate: {mpf}')
    fig.supxlabel('unique predicates sorted by frequency')
    fig.supylabel('number of triples')
    fig.savefig("data/stats.predicate_distribution.png", bbox_inches='tight')
    fig.clf()

    # plot entity distribution
    df_e = pd.DataFrame([dict(enumerate(sorted(e.values(), reverse=True))) for e in e_dist_full]).T
    df_e.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates]
    fig, axes = plt.subplots(2, 2, constrained_layout=True)
    fig.suptitle('Entity Distribution over Different Configurations')
    for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], [100, 50, 25, 10]):
        _df = df_e[[f"min entity: {mef}, max predicate: {mpf}" for mef in [4, 8, 12, 16]]]
        _df.columns = [f"min entity: {mef}" for mef in [4, 8, 12, 16]]
        ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
        ax.set(xscale='log')
        if mpf != 100:
            ax.legend_.remove()
        axes[x, y].set_title(f'max predicate: {mpf}')
    fig.supxlabel('unique entities sorted by frequency')
    fig.supylabel('number of triples')
    fig.savefig("data/stats.entity_distribution.png", bbox_inches='tight')
    fig.clf()
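
# Expected artifacts after a full run (a sketch of the output layout, assuming the
# per-split input dumps data/t_rex.filter.{train,validation,test}.jsonl exist):
#   data/t_rex.clean.min_entity_{k}_max_predicate_{m}.{train,validation,test}.jsonl
#     (one set per configuration, k in {4, 8, 12, 16}, m in {100, 50, 25, 10})
#   data/stats.data_size.csv
#   data/stats.predicate_distribution.png
#   data/stats.entity_distribution.png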