"""
TODO: save the data with different config
TODO: get stats for the frequency based selection
"""
import json
from itertools import product
from random import shuffle, seed

import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt

from datasets import Dataset

# grid of entity-frequency thresholds and per-predicate caps to sweep over
parameters_min_e_freq = [4, 8, 12, 16]
parameters_max_p_freq = [100, 50, 25, 10]
# the 2x2 plot grids below assume exactly four values per parameter
assert len(parameters_min_e_freq) == 4
assert len(parameters_max_p_freq) == 4
sns.set_theme(style="whitegrid")

# load filtered data
with open(f"data/t_rex.filter_unified.jsonl") as f:
    data = Dataset.from_list([json.loads(i) for i in f.read().split('\n') if len(i) > 0])
    df_main = data.to_pandas()


def is_entity(token):
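    """Heuristic: a token counts as a named entity if it contains any uppercase character."""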
    return any(i.isupper() for i in token)


def filtering(row, min_freq: int = 3, target: str = "subject"):
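    """Keep non-entity rows unconditionally; keep entity rows only when their frequency reaches min_freq."""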
    if not row['is_entity']:
        return True
    return row[target] >= min_freq


def create_split(_data):
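    """Split triples so that test predicates are disjoint from train: predicates covering
    ~20% of triples go to test, and 10% of the remaining triples are held out for validation."""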
    tmp_df = pd.DataFrame(_data)
    predicates_count = tmp_df.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
    total_num = sum(predicates_count.values())
    pre_k = list(predicates_count.keys())
    seed(42)
    shuffle(pre_k)
    predicates_train = []
    # greedily add shuffled predicates until train covers ~80% of all triples
    for k in pre_k:
        predicates_train.append(k)
        if sum(predicates_count[i] for i in predicates_train) > total_num * 0.8:
            break
    predicates_test = sorted([i for i in pre_k if i not in predicates_train])
    test_data = [i for i in _data if i['predicate'] in predicates_test]
    train_data = [i for i in _data if i['predicate'] in predicates_train]
    shuffle(train_data)
    validation_data = train_data[:int(len(train_data) * 0.1)]
    train_data = train_data[int(len(train_data) * 0.1):]
    return train_data, validation_data, test_data


def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 3, random_sampling: bool = True):
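    """Apply the entity- and predicate-frequency filters; return the predicate distribution,
    entity distribution, final dataset size, and the filtered records."""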

    df = df_main.copy()

    # entity frequency filter
    c_sub = df.groupby("subject")['title'].count()
    c_obj = df.groupby("object")['title'].count()
    key = set(list(c_sub.index) + list(c_obj.index))
    count = pd.DataFrame([
        {'entity': k, "subject": c_sub.get(k, 0), "object": c_obj.get(k, 0)} for k in key])
    count.index = count.pop('entity')
    count['is_entity'] = [is_entity(i) for i in count.index]
    count['sum'] = count['subject'] + count['object']
    count_filter_sub = count[count.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='subject'), axis=1)]['subject']
    count_filter_obj = count[count.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='object'), axis=1)]['object']
    vocab_sub = set(count_filter_sub.index)
    vocab_obj = set(count_filter_obj.index)
    df['flag_subject'] = [i in vocab_sub for i in df['subject']]
    df['flag_object'] = [i in vocab_obj for i in df['object']]
    df['flag'] = df['flag_subject'] & df['flag_object']
    df_filter = df[df['flag']].copy()  # copy so the column edits below do not raise SettingWithCopyWarning
    df_filter.pop("flag")
    df_filter.pop("flag_subject")
    df_filter.pop("flag_object")
    df_filter['count_subject'] = [count_filter_sub.loc[i] for i in df_filter['subject']]
    df_filter['count_object'] = [count_filter_obj.loc[i] for i in df_filter['object']]
    df_filter['count_sum'] = df_filter['count_subject'] + df_filter['count_object']

    # predicate frequency filter: cap each predicate at max_pairs_predicate triples,
    # either by random sampling or by keeping the pairs with the most frequent entities
    if random_sampling:
        df_balanced = pd.concat(
            [g if len(g) <= max_pairs_predicate else g.sample(max_pairs_predicate, random_state=0) for _, g in
             df_filter.groupby("predicate") if len(g) >= min_pairs_predicate])
    else:
        df_balanced = pd.concat(
            [g if len(g) <= max_pairs_predicate else g.sort_values(by='count_sum', ascending=False).head(max_pairs_predicate) for _, g in
             df_filter.groupby("predicate") if len(g) >= min_pairs_predicate])

    df_balanced.pop("count_subject")
    df_balanced.pop("count_object")
    df_balanced.pop("count_sum")
    target_data = [i.to_dict() for _, i in df_balanced.iterrows()]

    # return distribution
    predicate_dist = df_balanced.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
    entity, counts = np.unique(df_balanced['object'].tolist() + df_balanced['subject'].tolist(), return_counts=True)
    entity_dist = dict(zip(entity.tolist(), counts.tolist()))
    return predicate_dist, entity_dist, len(df_balanced), target_data


if __name__ == '__main__':
    p_dist_full = []
    e_dist_full = []
    data_size_full = []
    config = []
    candidates = list(product(parameters_min_e_freq, parameters_max_p_freq))

    # run filtering with different configs
    for min_e_freq, max_p_freq in candidates:
        p_dist, e_dist, data_size, new_data = main(
            min_entity_freq=min_e_freq, max_pairs_predicate=max_p_freq, random_sampling=False)
        p_dist_full.append(p_dist)
        e_dist_full.append(e_dist)
        data_size_full.append(data_size)
        config.append([min_e_freq, max_p_freq])
        # save data
        train, validation, test = create_split(new_data)
        with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.train.jsonl", 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in train]))
        with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.validation.jsonl", 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in validation]))
        with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.test.jsonl", 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in test]))

    # check statistics
    print("- Data Size")
    df_size = pd.DataFrame([{"min entity": mef, "max predicate": mpf, "freq": x} for x, (mef, mpf) in zip(data_size_full, candidates)])
    df_size = df_size.pivot(index="min entity", columns="max predicate", values="freq")
    df_size.index.name = "min entity / max predicate"
    df_size.to_csv("data/stats.data_size.csv")
    print(df_size.to_markdown())
    df_size_p = pd.DataFrame(
        [{"min entity": mef, "max predicate": mpf, "freq": len(x)} for x, (mef, mpf) in zip(p_dist_full, candidates)])
    df_size_p = df_size_p.pivot(index="max predicate", columns="min entity", values="freq")
    # the predicate count does not depend on the max-predicate cap, so one row of the pivot suffices
    df_size_p = df_size_p.loc[10]
    df_size_p.to_csv("data/stats.predicate_size.csv")
    print(df_size_p.to_markdown())

    # plot predicate distribution
    df_p = pd.DataFrame([dict(enumerate(sorted(p.values(), reverse=True))) for p in p_dist_full]).T
    df_p.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates]
    fig, axes = plt.subplots(2, 2, constrained_layout=True)
    fig.suptitle('Predicate Distribution over Different Configurations')
    for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], parameters_max_p_freq):
        _df = df_p[[f"min entity: {mef}, max predicate: {mpf}" for mef in parameters_min_e_freq]]
        _df.columns = [f"min entity: {mef}" for mef in parameters_min_e_freq]
        ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
        if mpf != 100:  # show the legend only on the first panel
            ax.legend_.remove()
        axes[x, y].set_title(f'max predicate: {mpf}')
    fig.supxlabel('unique predicates sorted by frequency')
    fig.supylabel('number of triples')
    fig.savefig("data/stats.predicate_distribution.png", bbox_inches='tight')
    fig.clf()

    # plot entity distribution
    df_e = pd.DataFrame([dict(enumerate(sorted(e.values(), reverse=True))) for e in e_dist_full]).T
    df_e.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates]
    fig, axes = plt.subplots(2, 2, constrained_layout=True)
    fig.suptitle('Entity Distribution over Different Configurations')
    for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], parameters_max_p_freq):
        _df = df_e[[f"min entity: {mef}, max predicate: {mpf}" for mef in parameters_min_e_freq]]
        _df.columns = [f"min entity: {mef}" for mef in parameters_min_e_freq]
        ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
        ax.set(xscale='log')
        if mpf != 100:  # show the legend only on the first panel
            ax.legend_.remove()
        axes[x, y].set_title(f'max predicate: {mpf}')
    fig.supxlabel('unique entities sorted by frequency')
    fig.supylabel('number of triples')
    fig.savefig("data/stats.entity_distribution.png", bbox_inches='tight')
    fig.clf()