import json from itertools import product import numpy as np import pandas as pd import seaborn as sns from matplotlib import pyplot as plt from datasets import Dataset sns.set_theme(style="whitegrid") # load filtered data tmp = [] for s in ['train', 'validation', 'test']: with open(f"data/t_rex.filter.{s}.jsonl") as f: tmp += [json.loads(i) for i in f.read().split('\n') if len(i) > 0] data = Dataset.from_list(tmp) df_main = data.to_pandas() def is_entity(token): return any(i.isupper() for i in token) def filtering(row, min_freq: int = 3, target: str = "subject"): if not row['is_entity']: return True return row[target] >= min_freq def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 1, return_stats: bool = True, random_sampling: bool = True): df = df_main.copy() # entity frequency filter c_sub = df.groupby("subject")['title'].count() c_obj = df.groupby("object")['title'].count() key = set(list(c_sub.index) + list(c_obj.index)) count = pd.DataFrame([{'entity': k, "subject": c_sub[k] if k in c_sub else 0, "object": c_obj[k] if k in c_obj else 0} for k in key]) count.index = count.pop('entity') count['is_entity'] = [is_entity(i) for i in count.index] count['sum'] = count['subject'] + count['object'] count_filter_sub = count[count.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='subject'), axis=1)]['subject'] count_filter_obj = count[count.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='object'), axis=1)]['object'] vocab_sub = set(count_filter_sub.index) vocab_obj = set(count_filter_obj.index) df['flag_subject'] = [i in vocab_sub for i in df['subject']] df['flag_object'] = [i in vocab_obj for i in df['object']] df['flag'] = df['flag_subject'] & df['flag_object'] df_filter = df[df['flag']] df_filter.pop("flag") df_filter.pop("flag_subject") df_filter.pop("flag_object") df_filter['count_subject'] = [count_filter_sub.loc[i] for i in df_filter['subject']] df_filter['count_object'] = [count_filter_obj.loc[i] for i in df_filter['object']] df_filter['count_sum'] = df_filter['count_subject'] + df_filter['count_object'] # predicate frequency filter if random_sampling: df_balanced = pd.concat( [g if len(g) <= max_pairs_predicate else g.sample(max_pairs_predicate, random_state=0) for _, g in df_filter.groupby("predicate") if len(g) >= min_pairs_predicate]) else: df_balanced = pd.concat( [g if len(g) <= max_pairs_predicate else g.sort_values(by='count_sum', ascending=False).head(max_pairs_predicate) for _, g in df_filter.groupby("predicate") if len(g) >= min_pairs_predicate]) if not return_stats: df_balanced.pop("count_subject") df_balanced.pop("count_object") df_balanced.pop("count_sum") return [i.to_dict() for _, i in df_balanced] # return distribution predicate_dist = df_balanced.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict() entity, count = np.unique(df_balanced['object'].tolist() + df_balanced['subject'].tolist(), return_counts=True) entity_dist = dict(list(zip(entity.tolist(), count.tolist()))) return predicate_dist, entity_dist, len(df_balanced) if __name__ == '__main__': p_dist_full = [] e_dist_full = [] data_size_full = [] config = [] candidates = list(product([1, 2, 3, 4], [100, 50, 25, 10])) # run filtering with different configs for min_e_freq, max_p_freq in candidates: p_dist, e_dist, data_size = main(min_entity_freq=min_e_freq, max_pairs_predicate=max_p_freq) p_dist_full.append(p_dist) e_dist_full.append(e_dist) data_size_full.append(data_size) config.append([min_e_freq, max_p_freq]) # check statistics print("- Data Size") df_size = pd.DataFrame([{"min entity": mef, "max predicate": mpf, "freq": x} for x, (mef, mpf) in zip(data_size_full, candidates)]) df_size = df_size.pivot(index="min entity", columns="max predicate", values="freq") df_size.index.name = "min entity / max predicate" df_size.to_csv("data/stats.data_size.csv") print(df_size.to_markdown()) df_size = pd.DataFrame( [{"min entity": mef, "max predicate": mpf, "freq": len(x)} for x, (mef, mpf) in zip(p_dist_full, candidates)]) df_size = df_size.pivot(index="min entity", columns="max predicate", values="freq") df_size.index.name = "min entity / max predicate" df_size.to_csv("data/stats.predicate_size.csv") print(df_size.to_markdown()) # plot predicate distribution df_p = pd.DataFrame([dict(enumerate(sorted(p.values(), reverse=True))) for p in p_dist_full]).T df_p.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates] fig = plt.figure() _df_p = df_p[[f"min entity: {mef}, max predicate: 10" for mef in [1, 2, 3, 4]]] _df_p.columns = [f"min entity: {mef}" for mef in [1, 2, 3, 4]] ax = sns.lineplot(data=_df_p, linewidth=2.5) ax.set(xlabel='unique predicates sorted by frequency', ylabel='number of triples', title='Predicate Distribution (max predicate: 10)') ax.get_figure().savefig("data/stats.predicate_distribution.png", bbox_inches='tight') ax.get_figure().clf() # plot entity distribution df_e = pd.DataFrame([dict(enumerate(sorted(e.values(), reverse=True))) for e in e_dist_full]).T df_e.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates] fig, axes = plt.subplots(2, 2, constrained_layout=True) fig.suptitle('Entity Distribution over Different Configurations') for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], [100, 50, 25, 10]): _df = df_e[[f"min entity: {mef}, max predicate: {mpf}" for mef in [1, 2, 3, 4]]] _df.columns = [f"min entity: {mef}" for mef in [1, 2, 3, 4]] ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1.5) ax.set(xscale='log') if mpf != 100: ax.legend_.remove() axes[x, y].set_title(f'max predicate: {mpf}') fig.supxlabel('unique entities sorted by frequency') fig.supylabel('number of triples') fig.savefig("data/stats.entity_distribution.png", bbox_inches='tight')