patriziobellan committed on
Commit 28456d8
1 Parent(s): d0040a3

Upload PET.py

Files changed (1)
  1. PET.py +130 -84
PET.py CHANGED
@@ -1,4 +1,13 @@
- # TO CREATE dataset_infos.json use: datasets-cli test PET --save_infos --all_configs
+ # https://huggingface.co/docs/datasets/v1.2.1/add_dataset.html
+ #
+ # TO CREATE dataset_infos.json use:
+ #   $ datasets-cli test PET --save_infos --all_configs
+ #
+ # TO set the dataset preview on the web interface:
+ #   $ huggingface-cli login
+ # then, in python:
+ #   dataset.push_to_hub("patriziobellan/PET")
+ #
#
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
@@ -55,7 +64,35 @@ _URL = "https://pdi.fbk.eu/pet/PETHuggingFace/"
# _TRAINING_FILE = "train.json"
# _DEV_FILE = "dev.json"
_TEST_FILE = "test.json"
-
+ _TEST_FILE_RELATIONS = 'PETrelations.json'
+
+ _NER = 'token-classification'
+ _RELATIONS_EXTRACTION = 'relations-extraction'
+ _NER_TAGS = ["O",
+              "B-Actor",
+              "I-Actor",
+              "B-Activity",
+              "I-Activity",
+              "B-Activity Data",
+              "I-Activity Data",
+              "B-Further Specification",
+              "I-Further Specification",
+              "B-XOR Gateway",
+              "I-XOR Gateway",
+              "B-Condition Specification",
+              "I-Condition Specification",
+              "B-AND Gateway",
+              "I-AND Gateway"]
+
+ _STR_PET = """\n
+ _______ _ _ _______ _____ _______ _______ ______ _______ _______ _______ _______ _______ _______
+ | |_____| |______ |_____] |______ | | \ |_____| | |_____| |______ |______ |
+ | | | |______ | |______ | |_____/ | | | | | ______| |______ |
+
+ \n\n\n
+
+ Discover more at: [https://pdi.fbk.eu/pet-dataset/]
+ """


class PETConfig(datasets.BuilderConfig):
@@ -70,105 +107,114 @@ class PETConfig(datasets.BuilderConfig):

class PET(datasets.GeneratorBasedBuilder):
    """PET DATASET."""
-     BUILDER_CONFIGS = [
-         PETConfig(
-             name="PET", version=datasets.Version("1.0.0"), description="The PET Dataset"
-         ),
-     ]
+
+     features_ner = {
+         "document name": datasets.Value("string"),
+         "sentence-ID": datasets.Value("int8"),
+         "tokens": datasets.Sequence(datasets.Value("string")),
+         "ner-tags": datasets.Sequence(datasets.features.ClassLabel(names=_NER_TAGS)),
+     }
+
+     features_relations = datasets.Sequence(
+         datasets.Features(
+
+             {
+                 'source-head-sentence-ID': datasets.Value("int8"),
+                 'source-head-word-ID': datasets.Value("int8"),
+                 'relation-type': datasets.Value("string"),
+                 'target-head-sentence-ID': datasets.Value("int8"),
+                 'target-head-word-ID': datasets.Value("int8"),
+             }
+         ))
+     BUILDER_CONFIGS = [PETConfig(
+                            name=_NER,
+                            version=datasets.Version("1.0.1"),
+                            description="The PET Dataset for Token Classification"
+                        ),
+                        PETConfig(
+                            name=_RELATIONS_EXTRACTION,
+                            version=datasets.Version("1.0.1"),
+                            description="The PET Dataset for Relation Extraction"
+                        ),
+                        ]
+
+     DEFAULT_CONFIG_NAME = _RELATIONS_EXTRACTION

    def _info(self):
-         features = datasets.Features(
+         print(_STR_PET)
+         if self.config.name == _NER:
+             features = datasets.Features(self.features_ner)
+         else:
+             features = datasets.Features(
            {
-                 "document name": datasets.Value("string"),
-                 "sentence-ID": datasets.Value("int8"),
-                 "tokens": datasets.Sequence(datasets.Value("string")),
-                 "ner-tags": datasets.Sequence(
-                     datasets.features.ClassLabel(
-                         names=[
-                             "O",
-                             "B-Actor",
-                             "I-Actor",
-                             "B-Activity",
-                             "I-Activity",
-                             "B-Activity Data",
-                             "I-Activity Data",
-                             "B-Further Specification",
-                             "I-Further Specification",
-                             "B-XOR Gateway",
-                             "I-XOR Gateway",
-                             "B-Condition Specification",
-                             "I-Condition Specification",
-                             "B-AND Gateway",
-                             "I-AND Gateway",
-                         ]
-                     )
-                 ),
+                 "document name": datasets.Value("string"),
+                 'tokens': datasets.Sequence(datasets.Value("string")),
+                 'tokens-IDs': datasets.Sequence(datasets.Value("int8")),
+                 'ner_tags': datasets.Sequence(datasets.Value("string")),
+                 'sentence-IDs': datasets.Sequence(datasets.Value("int8")),
+                 "relations": self.features_relations
            }
        )
-
+         # print(features)
        return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-             # Homepage of the dataset for documentation
+             features=datasets.Features(features),
            homepage=_HOMEPAGE,
-             # License for the dataset if available
            license=_LICENSE,
-             # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
-         urls_to_download = {
-             # "train": f"{_URL}{_TRAINING_FILE}",
-             # "dev": f"{_URL}{_DEV_FILE}",
-             "test": f"{_URL}{_TEST_FILE}",
-         }
-         downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-         return [
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.TRAIN,
-             #     # These kwargs will be passed to _generate_examples
-             #     gen_kwargs={
-             #         "filepath": downloaded_files["train"],
-             #         "split": "train",
-             #     },
-             # ),
-             datasets.SplitGenerator(
+         if self.config.name == _NER:
+             urls_to_download = {
+                 # "train": f"{_URL}{_TRAINING_FILE}",
+                 # "dev": f"{_URL}{_DEV_FILE}",
+                 "test": f"{_URL}{_TEST_FILE}",
+             }
+             downloaded_files = dl_manager.download_and_extract(urls_to_download)
+             return [datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files["test"],
+                     "split": "test"
+                 },
+             )]
+         else:
+             urls_to_download = {
+                 # "train": f"{_URL}{_TRAINING_FILE}",
+                 # "dev": f"{_URL}{_DEV_FILE}",
+                 "test": f"{_URL}{_TEST_FILE_RELATIONS}",
+             }
+             downloaded_files = dl_manager.download_and_extract(urls_to_download)
+             return [datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": downloaded_files["test"],
                    "split": "test"
                },
-             ),
-             #
-             # datasets.SplitGenerator(
-             #     name=datasets.Split.VALIDATION,
-             #     # These kwargs will be passed to _generate_examples
-             #     gen_kwargs={
-             #         "filepath": os.path.join(data_dir, "dev.jsonl"),
-             #         "split": "dev",
-             #     },
-             # ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+             )]
+
    def _generate_examples(self, filepath, split):
-         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-
-         with open(filepath, encoding="utf-8", mode='r') as f:
-             for key, row in enumerate(f):
-                 row = json.loads(row)
-                 yield key, {
-                     "document name": row["document name"],
-                     "sentence-ID": row["sentence-ID"],
-                     "tokens": row["tokens"],
-                     "ner-tags": row["ner-tags"]
-                 }
+         if self.config.name == _NER:
+             with open(filepath, encoding="utf-8", mode='r') as f:
+                 for key, row in enumerate(f):
+                     row = json.loads(row)
+                     yield key, {
+                         "document name": row["document name"],
+                         "sentence-ID": row["sentence-ID"],
+                         "tokens": row["tokens"],
+                         "ner-tags": row["ner-tags"]
+                     }
+         else:
+             with open(filepath, encoding="utf-8", mode='r') as f:
+                 for key, row in enumerate(json.load(f)):
+                     yield key, {"document name": row["document name"],  # datasets.Value("string"),
+                                 'tokens': row["tokens"],  # sentences['tokens'],
+                                 'tokens-IDs': row["tokens-IDs"],
+                                 'ner_tags': row["ner_tags"],
+                                 'sentence-IDs': row["sentence-IDs"],  # sentences['sentence-IDs'],
+
+                                 "relations": row["relations"]
+                                 }
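
Usage note: a minimal sketch of loading the two configurations this commit introduces. It assumes the script is published on the Hub as patriziobellan/PET (the repository id referenced in the header comment) and that only a test split is defined, as in _split_generators above; field names follow the features declared in the diff.

# Minimal sketch, assuming the dataset is reachable on the Hub as "patriziobellan/PET"
# and exposes only a "test" split (see _split_generators above).
from datasets import load_dataset

# Token-classification configuration: one example per sentence,
# with "tokens" and class-label "ner-tags".
ner = load_dataset("patriziobellan/PET", name="token-classification", split="test")
print(ner[0]["tokens"], ner[0]["ner-tags"])

# Relation-extraction configuration: one example per document,
# with token/sentence IDs, string "ner_tags", and a "relations" sequence.
rel = load_dataset("patriziobellan/PET", name="relations-extraction", split="test")
print(rel[0]["document name"], rel[0]["relations"]["relation-type"])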