Qrels preview (first 100 rows): each query id `i` is paired with corpus document `c{i}` at relevance score 1.

| query-id (string) | corpus-id (string) | score (int64) |
|---|---|---|
| 1 | c1 | 1 |
| 2 | c2 | 1 |
| 3 | c3 | 1 |
| … | … | … |
| 100 | c100 | 1 |
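For quick inspection outside of MTEB, the qrels can also be loaded directly with the `datasets` library. The snippet below is a minimal sketch; the repository id and split name are placeholders, not values confirmed by this card.

```python
from datasets import load_dataset

# Placeholder repository id and split; substitute the actual Hub id and split
# shown on this dataset's page.
qrels = load_dataset("<org>/<dataset-name>", split="test")

# Each row pairs a query id with a relevant corpus document id and a relevance score.
print(qrels.column_names)  # ['query-id', 'corpus-id', 'score']
print(qrels[0])            # e.g. {'query-id': '1', 'corpus-id': 'c1', 'score': 1}
```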
To evaluate on the MTEB version of this dataset, use the following code:
```python
import logging

import mteb
from mteb import MTEB
from sentence_transformers import SentenceTransformer

logger = logging.getLogger(__name__)

model_name = 'intfloat/e5-base-v2'
model = SentenceTransformer(model_name)

tasks = mteb.get_tasks(
    tasks=[
        "AppsRetrieval",
        "CodeFeedbackMT",
        "CodeFeedbackST",
        "CodeTransOceanContest",
        "CodeTransOceanDL",
        "CosQA",
        "SyntheticText2SQL",
        "StackOverflowQA",
        "COIRCodeSearchNetRetrieval",
        "CodeSearchNetCCRetrieval",
    ]
)

evaluation = MTEB(tasks=tasks)
results = evaluation.run(
    model=model,
    overwrite_results=True,
)
print(results)
```
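Depending on the installed `mteb` version, `evaluation.run` also writes per-task JSON result files to disk; the target directory can be set via its `output_folder` argument.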