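The script below converts the BUCC2018 shared-task archives (the `training-gold` and `test` tarballs for the de-en, fr-en, ru-en and zh-en pairs) into a single JSONL file. Each output line is one deduplicated sentence record of the form `{"text": ..., "language": ..., "data_source": "bucc2018", "split": ...}`.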
```python
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
from collections import defaultdict
import json
import os
from pathlib import Path
import shutil
import sys
import tarfile
import tempfile

pwd = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(pwd, "../../"))

from datasets import load_dataset, DownloadMode
from tqdm import tqdm

from language_identification import LANGUAGE_MAP
from project_settings import project_path
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_dir",
        default=r"E:\programmer\nlp_datasets\bucc2018",
        type=str
    )
    parser.add_argument(
        "--output_file",
        default=(project_path / "data/bucc2018.jsonl"),
        type=str
    )
    args = parser.parse_args()
    return args

def main():
    args = get_args()

    dataset_dir = Path(args.dataset_dir)

    # extract the archives into a temporary directory
    out_root = Path(tempfile.gettempdir()) / "bucc2018"
    out_root.mkdir(parents=True, exist_ok=True)
    print(out_root.as_posix())

    train_files = [
        "bucc2018-de-en.training-gold.tar.bz2",
        "bucc2018-fr-en.training-gold.tar.bz2",
        "bucc2018-ru-en.training-gold.tar.bz2",
        "bucc2018-zh-en.training-gold.tar.bz2",
        "bucc2018-de-en.test.tar.bz2",
        "bucc2018-fr-en.test.tar.bz2",
        "bucc2018-ru-en.test.tar.bz2",
        "bucc2018-zh-en.test.tar.bz2",
    ]
    for train_file in train_files:
        file_path = dataset_dir / train_file
        with tarfile.open(file_path, "r:bz2") as tar:
            tar.extractall(path=out_root.as_posix())

    # read the extracted files and convert each sentence to a JSONL record
    root_path = out_root / "bucc2018"
    name_list = [
        "de-en", "fr-en", "ru-en", "zh-en"
    ]
    split_map = {
        "training": "train",
    }

    text_set = set()
    counter = defaultdict(int)
    with open(args.output_file, "w", encoding="utf-8") as fout:
        for name in name_list:
            name_path = root_path / name
            for split_ in ["training", "test"]:
                for language in name.split("-"):
                    train_file = name_path / "{}.{}.{}".format(name, split_, language)
                    with open(train_file, "r", encoding="utf-8") as fin:
                        for row in fin:
                            # each line is tab-separated: a sentence id, then the sentence text
                            row = str(row).strip()
                            splits = row.split("\t")
                            if len(splits) != 2:
                                print("skip row: {}".format(row))
                                continue
                            text = splits[1]
                            text = text.strip()
                            # the replacement targets were unreadable in the source;
                            # assuming a non-breaking space and a soft hyphen were intended
                            text = text.replace("\u00a0", " ")
                            text = text.replace("\u00ad", "-")
                            if text in text_set:
                                continue
                            text_set.add(text)

                            if language not in LANGUAGE_MAP.keys():
                                raise AssertionError(language)

                            if split_ in split_map.keys():
                                split = split_map[split_]
                            else:
                                split = split_

                            row = {
                                "text": text,
                                "language": language,
                                "data_source": "bucc2018",
                                "split": split
                            }
                            row = json.dumps(row, ensure_ascii=False)
                            fout.write("{}\n".format(row))
                            counter[split] += 1

    print("counter: {}".format(counter))

    # clean up the temporary extraction directory
    shutil.rmtree(out_root.as_posix())
    return


if __name__ == '__main__':
    main()
```
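After the script has run, the output can be loaded back for a quick sanity check. A minimal sketch using the `datasets` JSON loader, assuming the default `--output_file` location `data/bucc2018.jsonl`:

```python
from datasets import load_dataset

# Each line of the JSONL file is one record with
# "text", "language", "data_source" and "split" fields.
dataset = load_dataset("json", data_files="data/bucc2018.jsonl", split="train")

print(dataset[0])
print(dataset.unique("language"))  # the five languages: de, en, fr, ru, zh
print(dataset.unique("split"))     # "train" and "test"
```

Note that the loader's `split="train"` refers to the single JSON file as a whole; the script's own train/test assignment lives in the `"split"` column.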