# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the Semeru Lab and SEART research group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loading script for the Completeformer Java code completion dataset."""


import csv

import datasets


# TODO: Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

_DESCRIPTION = """\
Java code completion data from the Completeformer project
(semeru/completeformer_java_data). Each example pairs an input code
context with a target completion, and the data ships in four
configurations: long, medium, short, and mix.
"""

_HOMEPAGE = "https://huggingface.co/datasets/semeru/completeformer_java_data"

# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

# The Hugging Face datasets library doesn't host the datasets; it only points
# to the original files. This can be an arbitrary nested dict/list of URLs
# (see below in the `_split_generators` method).
_DATA_URLS = {
    "long": {
        "train": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/long/training_long.csv",
        "valid": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/long/validation_long.csv",
        "test": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/long/test_long.csv",
    },
    "medium": {
        "train": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/medium/training_medium.csv",
        "valid": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/medium/validation_medium.csv",
        "test": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/medium/test_medium.csv",
    },
    "short": {
        "train": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/short/training_short.csv",
        "valid": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/short/validation_short.csv",
        "test": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/short/test_short.csv",
    },
    "mix": {
        "train": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/mix/training_mix.csv",
        "valid": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/mix/validation_mix.csv",
        "test": "https://huggingface.co/datasets/semeru/completeformer_java_data/resolve/main/mix/test_mix.csv",
    },
}
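
# Each CSV row is expected to carry four columns: a leading unnamed index
# column (discarded by `_generate_examples` below), then `idx`, `input`,
# and `target`. This layout is inferred from the row unpacking in this
# script, not from inspecting the files themselves.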
class CompleteformerJavaData(datasets.GeneratorBasedBuilder):
    """Completeformer Java code completion dataset."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="long", version=VERSION, description=""),
        datasets.BuilderConfig(name="medium", version=VERSION, description=""),
        datasets.BuilderConfig(name="short", version=VERSION, description=""),
        datasets.BuilderConfig(name="mix", version=VERSION, description=""),
    ]

    DEFAULT_CONFIG_NAME = "long"

    def _info(self):
        features = datasets.Features(
            {
                "idx": datasets.Value("int32"),
                "input": datasets.Value("string"),
                "target": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _DATA_URLS[self.config.name]
        data_dirs = {split: dl_manager.download_and_extract(url) for split, url in my_urls.items()}
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={"file_path": data_dirs["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"file_path": data_dirs["valid"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"file_path": data_dirs["test"]},
            ),
        ]

    def _generate_examples(self, file_path):
        """Yields examples as (key, example) tuples."""
        # The `key` is here for legacy reasons (tfds) and is not important
        # in itself.
        with open(file_path, encoding="utf-8") as f:
            csv_reader = csv.reader(
                f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            next(csv_reader, None)  # skip the header row
            for row_id, row in enumerate(csv_reader):
                _, idx, input_text, target = row  # drop the leading index column
                yield row_id, {
                    "idx": int(idx),  # cast so the value matches the int32 feature
                    "input": input_text,
                    "target": target,
                }
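
# A minimal usage sketch, assuming this script is saved locally as
# `completeformer_java_data.py` (a hypothetical path) and a `datasets`
# version that still supports local loading scripts.
if __name__ == "__main__":
    ds = datasets.load_dataset("completeformer_java_data.py", "medium")
    print(ds["train"][0])  # {"idx": ..., "input": ..., "target": ...}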