albertvillanova (HF staff) committed
Commit d480ae0
1 parent: f9bd130

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (b8e9575c4c6d496a2cfa0116deea9d9df5937259)
- Add python data files (53a1654daa0cce03a2b7e52360c30633084447f8)
- Delete loading script (b4f5252df4ed6a1be8149aa57383cec15391e3f4)
- Delete loading script auxiliary file (ad313167d66ed51838695e9b552f2131e2507390)
- Delete loading script auxiliary file (fc6923dd0a4ff0c9866fd079dc7b4156981ed6bc)
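
With the Parquet conversion in place, the dataset loads without executing any repository code. A minimal sketch, assuming the dataset keeps its canonical Hub id code_x_glue_cc_code_completion_line and the two configs declared in the README diff below:

from datasets import load_dataset

# Load the Parquet-backed train split of the python config (10,000 rows per the README).
ds = load_dataset("code_x_glue_cc_code_completion_line", "python", split="train")
print(ds.features)   # expected columns: id (int32), input (string), gt (string)
print(ds[0]["gt"])   # the line to be completed for the first sample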

README.md CHANGED

@@ -20,6 +20,13 @@ task_categories:
 task_ids:
 - slot-filling
 pretty_name: CodeXGlueCcCodeCompletionLine
+config_names:
+- go
+- java
+- javascript
+- php
+- python
+- ruby
 dataset_info:
 - config_name: java
   features:
@@ -31,10 +38,10 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 5454783
+    num_bytes: 5454775
     num_examples: 3000
-  download_size: 5523586
-  dataset_size: 5454783
+  download_size: 1696679
+  dataset_size: 5454775
 - config_name: python
   features:
   - name: id
@@ -45,17 +52,19 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 24021562
+    num_bytes: 24021554
     num_examples: 10000
-  download_size: 24266715
-  dataset_size: 24021562
-config_names:
-- go
-- java
-- javascript
-- php
-- python
-- ruby
+  download_size: 8140670
+  dataset_size: 24021554
+configs:
+- config_name: java
+  data_files:
+  - split: train
+    path: java/train-*
+- config_name: python
+  data_files:
+  - split: train
+    path: python/train-*
 ---
 # Dataset Card for "code_x_glue_cc_code_completion_line"
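
The new configs: block is what lets the Hub resolve each configuration straight to its Parquet shards. A sketch of the equivalent manual load, assuming a local clone of the repository so the glob below resolves on disk:

from datasets import load_dataset

# java/train-*.parquet matches java/train-00000-of-00001.parquet added in this commit.
java_train = load_dataset(
    "parquet",
    data_files={"train": "java/train-*.parquet"},
    split="train",
)
assert java_train.num_rows == 3000  # num_examples declared for the java config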
code_x_glue_cc_code_completion_line.py DELETED
@@ -1,80 +0,0 @@
-import json
-from typing import List
-
-import datasets
-
-from .common import Child
-from .generated_definitions import DEFINITIONS
-
-
-_DESCRIPTION = """Complete the unfinished line given previous context. Models are evaluated by exact match and edit similarity.
-We propose line completion task to test model's ability to autocomplete a line. Majority code completion systems behave well in token level completion, but fail in completing an unfinished line like a method call with specific parameters, a function signature, a loop condition, a variable definition and so on. When a software develop finish one or more tokens of the current line, the line level completion model is expected to generate the entire line of syntactically correct code.
-Line level code completion task shares the train/dev dataset with token level completion. After training a model on CodeCompletion-token, you could directly use it to test on line-level completion."""
-
-_CITATION = """@article{raychev2016probabilistic,
-  title={Probabilistic Model for Code with Decision Trees},
-  author={Raychev, Veselin and Bielik, Pavol and Vechev, Martin},
-  journal={ACM SIGPLAN Notices},
-  pages={731--747},
-  year={2016},
-  publisher={ACM New York, NY, USA}
-}
-@inproceedings{allamanis2013mining,
-  title={Mining Source Code Repositories at Massive Scale using Language Modeling},
-  author={Allamanis, Miltiadis and Sutton, Charles},
-  booktitle={2013 10th Working Conference on Mining Software Repositories (MSR)},
-  pages={207--216},
-  year={2013},
-  organization={IEEE}
-}"""
-
-
-class CodeXGlueCcCodeCompletionLineImpl(Child):
-    _DESCRIPTION = _DESCRIPTION
-    _CITATION = _CITATION
-
-    _FEATURES = {
-        "id": datasets.Value("int32"),  # Index of the sample
-        "input": datasets.Value("string"),  # Input code string
-        "gt": datasets.Value("string"),  # Code string to be predicted
-    }
-
-    _SUPERVISED_KEYS = ["gt"]
-
-    def generate_urls(self, split_name):
-        yield "data", "test.json"
-
-    def _generate_examples(self, split_name, file_paths):
-        with open(file_paths["data"], encoding="utf-8") as f:
-            for idx, line in enumerate(f):
-                entry = json.loads(line)
-                entry["id"] = idx
-                yield idx, entry
-
-
-CLASS_MAPPING = {
-    "CodeXGlueCcCodeCompletionLine": CodeXGlueCcCodeCompletionLineImpl,
-}
-
-
-class CodeXGlueCcCodeCompletionLine(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIG_CLASS = datasets.BuilderConfig
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name=name, description=info["description"]) for name, info in DEFINITIONS.items()
-    ]
-
-    def _info(self):
-        name = self.config.name
-        info = DEFINITIONS[name]
-        if info["class_name"] in CLASS_MAPPING:
-            self.child = CLASS_MAPPING[info["class_name"]](info)
-        else:
-            raise RuntimeError(f"Unknown python class for dataset configuration {name}")
-        ret = self.child._info()
-        return ret
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        return self.child._split_generators(dl_manager=dl_manager)
-
-    def _generate_examples(self, split_name, file_paths):
-        return self.child._generate_examples(split_name, file_paths)
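
The deleted builder did little more than read a JSON-lines file and attach a running index. A standalone sketch of that read path, assuming a locally downloaded test.json (the name yielded by generate_urls above; the local path is hypothetical):

import json

def iter_line_completion_examples(path="test.json"):  # hypothetical local path
    """Mirror of the deleted _generate_examples: one JSON object per line."""
    with open(path, encoding="utf-8") as f:
        for idx, line in enumerate(f):
            entry = json.loads(line)  # expected keys: "input" and "gt"
            entry["id"] = idx         # index of the sample, as in _FEATURES
            yield idx, entry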
common.py DELETED
@@ -1,75 +0,0 @@
-from typing import List
-
-import datasets
-
-
-# Citation, taken from https://github.com/microsoft/CodeXGLUE
-_DEFAULT_CITATION = """@article{CodeXGLUE,
-  title={CodeXGLUE: A Benchmark Dataset and Open Challenge for Code Intelligence},
-  year={2020},}"""
-
-
-class Child:
-    _DESCRIPTION = None
-    _FEATURES = None
-    _CITATION = None
-    SPLITS = {"train": datasets.Split.TRAIN}
-    _SUPERVISED_KEYS = None
-
-    def __init__(self, info):
-        self.info = info
-
-    def homepage(self):
-        return self.info["project_url"]
-
-    def _info(self):
-        # This is the description that will appear on the datasets page.
-        return datasets.DatasetInfo(
-            description=self.info["description"] + "\n\n" + self._DESCRIPTION,
-            features=datasets.Features(self._FEATURES),
-            homepage=self.homepage(),
-            citation=self._CITATION or _DEFAULT_CITATION,
-            supervised_keys=self._SUPERVISED_KEYS,
-        )
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
-        SPLITS = self.SPLITS
-        _URL = self.info["raw_url"]
-        urls_to_download = {}
-        for split in SPLITS:
-            if split not in urls_to_download:
-                urls_to_download[split] = {}
-
-            for key, url in self.generate_urls(split):
-                if not url.startswith("http"):
-                    url = _URL + "/" + url
-                urls_to_download[split][key] = url
-
-        downloaded_files = {}
-        for k, v in urls_to_download.items():
-            downloaded_files[k] = dl_manager.download_and_extract(v)
-
-        return [
-            datasets.SplitGenerator(
-                name=SPLITS[k],
-                gen_kwargs={"split_name": k, "file_paths": downloaded_files[k]},
-            )
-            for k in SPLITS
-        ]
-
-    def check_empty(self, entries):
-        all_empty = all([v == "" for v in entries.values()])
-        all_non_empty = all([v != "" for v in entries.values()])
-
-        if not all_non_empty and not all_empty:
-            raise RuntimeError("Parallel data files should have the same number of lines.")
-
-        return all_empty
-
-
-class TrainValidTestChild(Child):
-    SPLITS = {
-        "train": datasets.Split.TRAIN,
-        "valid": datasets.Split.VALIDATION,
-        "test": datasets.Split.TEST,
-    }
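
The core of Child._split_generators is the URL resolution: every relative name yielded by generate_urls is joined onto the config's raw_url before being handed to the download manager. A small sketch of that step, using the java raw_url from generated_definitions.py below:

JAVA_RAW_URL = (
    "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/"
    "Code-Code/CodeCompletion-line/dataset/javaCorpus/line_completion"
)

def resolve_url(url: str, raw_url: str = JAVA_RAW_URL) -> str:
    # Non-absolute names (e.g. "test.json") are made absolute, as in _split_generators.
    return url if url.startswith("http") else raw_url + "/" + url

print(resolve_url("test.json"))
# .../dataset/javaCorpus/line_completion/test.json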
generated_definitions.py DELETED
@@ -1,24 +0,0 @@
-DEFINITIONS = {
-    "java": {
-        "class_name": "CodeXGlueCcCodeCompletionLine",
-        "dataset_type": "Code-Code",
-        "description": "CodeXGLUE CodeCompletion-line dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/CodeCompletion-line",
-        "dir_name": "CodeCompletion-line",
-        "name": "java",
-        "parameters": {"language": "java", "original_language_name": "javaCorpus"},
-        "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/CodeCompletion-line",
-        "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/CodeCompletion-line/dataset/javaCorpus/line_completion",
-        "sizes": {"train": 3000},
-    },
-    "python": {
-        "class_name": "CodeXGlueCcCodeCompletionLine",
-        "dataset_type": "Code-Code",
-        "description": "CodeXGLUE CodeCompletion-line dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/CodeCompletion-line",
-        "dir_name": "CodeCompletion-line",
-        "name": "python",
-        "parameters": {"language": "python", "original_language_name": "py150"},
-        "project_url": "https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/CodeCompletion-line",
-        "raw_url": "https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/CodeCompletion-line/dataset/py150/line_completion",
-        "sizes": {"train": 10000},
-    },
-}
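
DEFINITIONS acted as the config registry and declared the expected split sizes (3000 for java, 10000 for python). A hedged sanity check that the converted Parquet splits match those counts, again assuming the canonical Hub id:

from datasets import load_dataset

EXPECTED_TRAIN_SIZES = {"java": 3000, "python": 10000}  # DEFINITIONS[...]["sizes"]["train"]

for config, expected in EXPECTED_TRAIN_SIZES.items():
    split = load_dataset("code_x_glue_cc_code_completion_line", config, split="train")
    assert split.num_rows == expected, (config, split.num_rows)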
java/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d4e9b6099aaafbdfc7c6fab65edafcf2e692cb29db15abc49db50635ef32e63d
+size 1696679
python/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e9da7aa684b7ceac8f1736b329329a12d53dcbfc4813f5e9f1244889ea1e085
+size 8140670
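
The two added files are Git LFS pointers; the actual Parquet shards live in LFS storage. A sketch of fetching one shard and reading it directly, assuming access via huggingface_hub and the canonical repo id (adjust if the repo is namespaced); the column names come from the features in the README:

import pandas as pd
from huggingface_hub import hf_hub_download

# Download the java shard referenced by the LFS pointer above, then read it as Parquet.
path = hf_hub_download(
    repo_id="code_x_glue_cc_code_completion_line",
    filename="java/train-00000-of-00001.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
print(list(df.columns))  # expected: ['id', 'input', 'gt']
print(len(df))           # expected: 3000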