Add gqa v0.0.1
Browse files
- README.md +46 -0
- gqa.py +68 -0
- test.jsonl +3 -0
- train.jsonl +3 -0
- validation.jsonl +3 -0

README.md CHANGED
@@ -1,3 +1,49 @@
---
license: mit
---
+
+# GQA: Graph Question Answering
+
+This dataset asks models to make use of an embedded graph for question answering.
+
+An example from the dataset is as follows:
+
+```json
+{
+  "id": "mcwq-176119",
+  "question": "What was executive produced by Scott Spiegel , Boaz Yakin , and Quentin Tarantino , executive produced by My Best Friend's Birthday 's editor and star , and edited by George Folsey",
+  "answers": [
+    "Hostel: Part II"
+  ],
+  "subgraph": {
+    "entities": [
+      "Q1401104",
+      "Q887636",
+      "Q1048645",
+      "Q3772",
+      "Q965826"
+    ],
+    "relations": [
+      "P1431",
+      "P1040"
+    ],
+    "adjacency": [[2, 1, 0],
+                  [2, 0, 3],
+                  [2, 0, 1],
+                  [2, 0, 4]
+    ],
+    "entity_labels": [
+      "george folsey, jr.",
+      "boaz yakin",
+      "hostel: part ii",
+      "quentin jerome tarantino",
+      "scott spiegel"
+    ],
+    "relation_labels": [
+      "showrunner",
+      "film editor"
+    ]
+  },
+  "sparql": "SELECT DISTINCT ?x0 WHERE {\n?x0 wdt:P1040 wd:Q1401104 .\n?x0 wdt:P1431 ?x1 .\n?x0 wdt:P1431 wd:Q3772 .\n?x0 wdt:P1431 wd:Q887636 .\n?x0 wdt:P1431 wd:Q965826 .\nwd:Q1480733 wdt:P161 ?x1 .\nwd:Q1480733 wdt:P1040 ?x1\n}"
+}
+```
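Reading the example above against its SPARQL query, each `adjacency` row appears to be an index triple of the form `[head entity index, relation index, tail entity index]` into the `entities`/`relations` lists (e.g. `[2, 1, 0]` lines up with `?x0 wdt:P1040 wd:Q1401104`). A minimal decoding sketch under that assumption, reusing the labels from the example:

```python
# Hedged sketch: assumes each adjacency row is
# [head entity index, relation index, tail entity index],
# an ordering inferred from the example above, not stated by the dataset card.

subgraph = {
    "entity_labels": [
        "george folsey, jr.",
        "boaz yakin",
        "hostel: part ii",
        "quentin jerome tarantino",
        "scott spiegel",
    ],
    "relation_labels": ["showrunner", "film editor"],
    "adjacency": [[2, 1, 0], [2, 0, 3], [2, 0, 1], [2, 0, 4]],
}


def decode_edges(subgraph):
    """Turn index triples into human-readable (head, relation, tail) edges."""
    ents = subgraph["entity_labels"]
    rels = subgraph["relation_labels"]
    return [(ents[h], rels[r], ents[t]) for h, r, t in subgraph["adjacency"]]


for head, rel, tail in decode_edges(subgraph):
    print(f"{head} --{rel}--> {tail}")
# first edge: hostel: part ii --film editor--> george folsey, jr.
```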

gqa.py ADDED
@@ -0,0 +1,68 @@
+import json
+
+import datasets
+from datasets import Features, Sequence, Array2D, Value
+from datasets.info import DatasetInfo
+
+
+_DESCRIPTION = """\
+GQA is a graph question answering dataset. \
+Each example pairs a natural-language question with an embedded knowledge subgraph, \
+gold answers, and the corresponding SPARQL query.\
+"""
+
+_URLS = {
+    "train": "train.jsonl",
+    "validation": "validation.jsonl",
+    "test": "test.jsonl",
+}
+
+class GQAConfig(datasets.BuilderConfig):
+    """BuilderConfig for GQA."""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig for GQA.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(GQAConfig, self).__init__(**kwargs)
+
+
+class GQA(datasets.GeneratorBasedBuilder):
+    """GQA: A graph question answering dataset."""
+    def _info(self) -> DatasetInfo:
+        return DatasetInfo(
+            description=_DESCRIPTION,
+            features=Features(
+                {
+                    "id": Value("string"),
+                    "question": Value("string"),
+                    "answers": Sequence(Value("string")),
+                    "sparql": Value("string"),
+                    "subgraph":
+                    {
+                        "entities": Sequence(Value("string")),
+                        "relations": Sequence(Value("string")),
+                        "adjacency": Array2D(shape=(None, 3), dtype="int64"),
+                        "entity_labels": Sequence(Value("string")),
+                        "relation_labels": Sequence(Value("string")),
+                    }
+                }
+            )
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager):
+        downloaded_files = dl_manager.download_and_extract(_URLS)
+
+        return [
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["validation"]}),
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+        ]
+
+    def _generate_examples(self, filepath):
+        with open(filepath, encoding="utf-8") as f:
+            for row in f:
+                sample = json.loads(row)
+                id_ = sample["id"]
+                yield id_, sample
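For completeness, a minimal usage sketch of the loading script above. It assumes the script and the three `.jsonl` files sit in the current working directory, and that the installed `datasets` release still supports script-based builders; the local path below is an assumption, not a published repository id.

```python
from datasets import load_dataset

# Hedged sketch: load the script-based builder from a local checkout.
# Requires a `datasets` version that still accepts loading scripts.
dataset = load_dataset("./gqa.py")

print(dataset)  # splits: train, validation, test
example = dataset["train"][0]
print(example["question"])
print(example["subgraph"]["entity_labels"])
```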

test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b83949e5a98ac9798df40563d781ed98de322be0c4d8fe8f6c79dd2f6f6add7
+size 2739517

train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0b67c00a4c9fd210e05966d4ebe3a56e5fcb1f1698a8fabed9ff750d1cb2d37
+size 32298864

validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95944d204912461d032e63a890266dc95c0ba33ad0564f2fa1d7cd3fbf23545f
+size 2010013