autotrain-data-processor commited on
Commit
c4d42c1
·
1 Parent(s): f584829

Processed data from AutoTrain data processor [2022-04-30 19:52]

Browse files
README.md ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ languages:
3
+ - en
4
+
5
+ ---
6
+ # AutoTrain Dataset for project: company
7
+
8
+ ## Dataset Description
9
+
10
+ This dataset has been automatically processed by AutoTrain for project company.
11
+
12
+ ### Languages
13
+
14
+ The BCP-47 code for the dataset's language is en.
15
+
16
+ ## Dataset Structure
17
+
18
+ ### Data Instances
19
+
20
+ A sample from this dataset looks as follows:
21
+
22
+ ```json
23
+ [
24
+ {
25
+ "tokens": [
26
+ "sahil",
27
+ "prasad",
28
+ "president",
29
+ "www",
30
+ "swimcentre",
31
+ "com",
32
+ "banik",
33
+ "baalkrishan",
34
+ "gandhi",
35
+ "com",
36
+ "no",
37
+ "satish",
38
+ "nagar",
39
+ "hisar"
40
+ ],
41
+ "tags": [
42
+ 0,
43
+ 0,
44
+ 0,
45
+ 0,
46
+ 0,
47
+ 0,
48
+ 0,
49
+ 0,
50
+ 0,
51
+ 0,
52
+ 0,
53
+ 0,
54
+ 0,
55
+ 0
56
+ ]
57
+ },
58
+ {
59
+ "tokens": [
60
+ "olivia",
61
+ "wilson",
62
+ "real",
63
+ "estate",
64
+ "agent",
65
+ "reallygreatsite",
66
+ "com",
67
+ "anywhere",
68
+ "st",
69
+ "any",
70
+ "city",
71
+ "st",
72
+ "www",
73
+ "reallygreatsite",
74
+ "com"
75
+ ],
76
+ "tags": [
77
+ 0,
78
+ 0,
79
+ 0,
80
+ 0,
81
+ 0,
82
+ 0,
83
+ 0,
84
+ 0,
85
+ 0,
86
+ 0,
87
+ 0,
88
+ 0,
89
+ 0,
90
+ 0,
91
+ 0
92
+ ]
93
+ }
94
+ ]
95
+ ```
96
+
97
+ ### Dataset Fields
98
+
99
+ The dataset has the following fields (also called "features"):
100
+
101
+ ```json
102
+ {
103
+ "tokens": "Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)",
104
+ "tags": "Sequence(feature=ClassLabel(num_classes=2, names=['0', '9'], id=None), length=-1, id=None)"
105
+ }
106
+ ```
107
+
108
+ ### Dataset Splits
109
+
110
+ This dataset is split into a train and validation split. The split sizes are as follows:
111
+
112
+ | Split name | Num samples |
113
+ | ------------ | ------------------- |
114
+ | train | 999651 |
115
+ | valid | 499630 |
processed/dataset_dict.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"splits": ["train", "valid"]}
processed/train/dataset.arrow ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4516366c9cabbd7ba5086ef842865b8fb1e678bfdeb680de8b3ebda6740441fc
3
+ size 326091104
processed/train/dataset_info.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "builder_name": null,
3
+ "citation": "",
4
+ "config_name": null,
5
+ "dataset_size": null,
6
+ "description": "AutoTrain generated dataset",
7
+ "download_checksums": null,
8
+ "download_size": null,
9
+ "features": {
10
+ "tokens": {
11
+ "feature": {
12
+ "dtype": "string",
13
+ "id": null,
14
+ "_type": "Value"
15
+ },
16
+ "length": -1,
17
+ "id": null,
18
+ "_type": "Sequence"
19
+ },
20
+ "tags": {
21
+ "feature": {
22
+ "num_classes": 2,
23
+ "names": [
24
+ "0",
25
+ "9"
26
+ ],
27
+ "id": null,
28
+ "_type": "ClassLabel"
29
+ },
30
+ "length": -1,
31
+ "id": null,
32
+ "_type": "Sequence"
33
+ }
34
+ },
35
+ "homepage": "",
36
+ "license": "",
37
+ "post_processed": null,
38
+ "post_processing_size": null,
39
+ "size_in_bytes": null,
40
+ "splits": {
41
+ "train": {
42
+ "name": "train",
43
+ "num_bytes": 325760952,
44
+ "num_examples": 999651,
45
+ "dataset_name": null
46
+ }
47
+ },
48
+ "supervised_keys": null,
49
+ "task_templates": null,
50
+ "version": null
51
+ }
processed/train/state.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "dataset.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "342cd16c7fb5db69",
8
+ "_format_columns": [
9
+ "tags",
10
+ "tokens"
11
+ ],
12
+ "_format_kwargs": {},
13
+ "_format_type": null,
14
+ "_indexes": {},
15
+ "_output_all_columns": false,
16
+ "_split": null
17
+ }
processed/valid/dataset.arrow ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d765fe2da43fcbdaa57181e1c871e1cc8607f137c23b53b56835a9e376dde79e
3
+ size 176299816
processed/valid/dataset_info.json ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "builder_name": null,
3
+ "citation": "",
4
+ "config_name": null,
5
+ "dataset_size": null,
6
+ "description": "AutoTrain generated dataset",
7
+ "download_checksums": null,
8
+ "download_size": null,
9
+ "features": {
10
+ "tokens": {
11
+ "feature": {
12
+ "dtype": "string",
13
+ "id": null,
14
+ "_type": "Value"
15
+ },
16
+ "length": -1,
17
+ "id": null,
18
+ "_type": "Sequence"
19
+ },
20
+ "tags": {
21
+ "feature": {
22
+ "num_classes": 2,
23
+ "names": [
24
+ "0",
25
+ "9"
26
+ ],
27
+ "id": null,
28
+ "_type": "ClassLabel"
29
+ },
30
+ "length": -1,
31
+ "id": null,
32
+ "_type": "Sequence"
33
+ }
34
+ },
35
+ "homepage": "",
36
+ "license": "",
37
+ "post_processed": null,
38
+ "post_processing_size": null,
39
+ "size_in_bytes": null,
40
+ "splits": {
41
+ "valid": {
42
+ "name": "valid",
43
+ "num_bytes": 176134456,
44
+ "num_examples": 499630,
45
+ "dataset_name": null
46
+ }
47
+ },
48
+ "supervised_keys": null,
49
+ "task_templates": null,
50
+ "version": null
51
+ }
processed/valid/state.json ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_data_files": [
3
+ {
4
+ "filename": "dataset.arrow"
5
+ }
6
+ ],
7
+ "_fingerprint": "8c1ec0b7f373ef9b",
8
+ "_format_columns": [
9
+ "tags",
10
+ "tokens"
11
+ ],
12
+ "_format_kwargs": {},
13
+ "_format_type": null,
14
+ "_indexes": {},
15
+ "_output_all_columns": false,
16
+ "_split": null
17
+ }