Tasks: Image-Text-to-Text
Modalities: Image
Formats: imagefolder
Languages: English
Size: 1K - 10K
ArXiv:
License:
import os
import math
import re
import requests
import numpy as np
import pandas as pd
from ast import literal_eval
from urllib.parse import urlparse
from PIL import Image
from SPARQLWrapper import SPARQLWrapper, JSON
from tqdm import tqdm
tqdm.pandas()
from dotenv import load_dotenv

load_dotenv()
DATA_DIR = os.environ['DATA_DIR']
replacements = {
    "celebs": "the subject of this image",
    "brands": "the brand in this image",
    "landmarks": "the place in this image",
    "paintings": "the painting in this image",
}
def best_obj_type(obj_types):
    # Pick the most informative object type to fill the [obj_type] template slot.
    if type(obj_types) == str:
        obj_types = literal_eval(obj_types)
    prioritized_obj_types = ["city", "capital city", 'metropolis', 'country', 'occupation', 'language', 'type of sport', 'music genre']  # 'cinematic technique', 'team sport'
    for ot in prioritized_obj_types:
        if ot in obj_types:
            return ot
    for ot_ in obj_types:
        if "university" in ot_:
            return "university"
        if "city" in ot_:
            return "city"
    return obj_types[0]
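# A small illustrative check (not part of the original script) of how
# best_obj_type resolves the [obj_type] placeholder; the type lists below are
# made-up examples.
def _demo_best_obj_type():
    assert best_obj_type(["big city", "city", "municipality"]) == "city"   # prioritized exact match
    assert best_obj_type(["public university"]) == "university"            # substring fallback
    assert best_obj_type(["rock band"]) == "rock band"                     # default: first entry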
def replace_for_image(row):
    replace_with = replacements[row['type']]
    return row["template"].replace("[subj]", replace_with)
class SPARQL:
    def __init__(self):
        self.agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
        self.sparql = SPARQLWrapper("https://query.wikidata.org/sparql", agent=self.agent)
        self.sparql.setReturnFormat(JSON)

    def parse_value(self, value):
        # Reduce Wikidata entity URIs to their QID/PID; return plain literals unchanged.
        parsed_uri = urlparse(value)
        if all([parsed_uri.scheme, parsed_uri.netloc]):
            return parsed_uri.path.split('/')[-1]
        return value

    def execute(self, query):
        records = []
        try:
            self.sparql.setQuery(query)
            responses = self.sparql.query().convert()
            for response in responses['results']['bindings']:
                record = {}
                for key in response:
                    record[key] = self.parse_value(response[key]['value'])
                records.append(record)
            if len(records) == 0:
                print("request failed")
        except Exception as e:
            print(e)
        return pd.DataFrame(records)
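# A minimal usage sketch (not part of the original script) of the SPARQL
# helper above; the query and QIDs are illustrative.
def _demo_sparql():
    sparql = SPARQL()
    df = sparql.execute('''
        SELECT ?s_uri ?label WHERE {
            VALUES ?s_uri { wd:Q243 wd:Q90 }
            ?s_uri rdfs:label ?label .
            FILTER(LANG(?label) = "en")
        }
    ''')
    # Expect one row per entity, with the URI reduced to its QID by parse_value
    return df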
def add_aliases(df):
    def _query(uris):
        return f'''
        SELECT ?s_uri ?alias
        WHERE {{
          {{VALUES ?s_uri {{ {" ".join([f"wd:{uri}" for uri in uris])} }} }}
          ?s_uri skos:altLabel ?alias.
          FILTER(LANG(?alias) = "en")
        }}
        '''
    sparql = SPARQL()
    uris = list(set(df["s_uri"].tolist()))
    uri_chunks = [uris[i:i+100] for i in range(0, len(uris), 100)]
    aliases = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(uri_chunks)])
    aliases = aliases.groupby("s_uri")["alias"].agg(list).reset_index(name="aliases")
    res = pd.merge(df, aliases, how='left', on='s_uri')
    res['aliases'] = res['aliases'].fillna('[]')
    return res
def get_aliases(df):
    def _query(uris):
        return f'''
        SELECT ?uri ?alias
        WHERE {{
          {{VALUES ?uri {{ {" ".join([f"wd:{uri}" for uri in uris])} }} }}
          ?uri skos:altLabel ?alias.
          FILTER(LANG(?alias) = "en")
        }}
        '''
    sparql = SPARQL()
    uris = list(set(df["s_uri"].tolist()))  # + df["a_uri"].tolist()))
    uri_chunks = [uris[i:i+100] for i in range(0, len(uris), 100)]
    aliases = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(uri_chunks)])
    aliases = aliases.groupby("uri")["alias"].agg(list).reset_index(name="aliases")
    return aliases
def add_images(df):
    def _query(uris):
        return f'''
        SELECT ?s_uri ?image
        WHERE {{
          {{VALUES ?s_uri {{ {" ".join([f"wd:{uri}" for uri in uris])} }} }}
          ?s_uri wdt:P18 ?image .
        }}
        '''
    sparql = SPARQL()
    uris = list(set(df["s_uri"].tolist()))
    uri_chunks = [uris[i:i+100] for i in range(0, len(uris), 100)]
    images = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(uri_chunks)])
    images['image'] = 'http://commons.wikimedia.org/wiki/Special:FilePath/' + images['image']
    res = pd.merge(df, images, how='inner', on='s_uri')
    return res
def get_attribute(df, attribute_name, attribute_id):
    def _query(uris):
        return f'''
        SELECT ?s_uri ?{attribute_name}
        WHERE {{
          {{VALUES ?s_uri {{ {" ".join([f"wd:{uri}" for uri in uris])} }} }}
          ?s_uri wdt:{attribute_id} ?{attribute_name} .
        }}
        '''
    sparql = SPARQL()
    uris = list(set(df["s_uri"].tolist()))
    uri_chunks = [uris[i:i+100] for i in range(0, len(uris), 100)]
    attributes = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(uri_chunks)])
    attributes = attributes.groupby("s_uri")[attribute_name].agg(list).reset_index(name=attribute_name)
    res = pd.merge(df, attributes, how='inner', on='s_uri')
    return res
def extract_year(timestamp):
    parts = timestamp.split('-')
    neg = False
    if parts[0] == '':
        # A leading '-' (BCE date) leaves an empty first part
        year = parts[1]
        neg = True
    else:
        year = parts[0]
    if year.isdigit():
        return str(-int(year)) if neg else str(int(year))
    return np.nan
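# Illustrative examples (not in the original script) of extract_year applied
# to Wikidata-style timestamps; the inputs are assumed formats.
def _demo_extract_year():
    assert extract_year("1969-07-20T00:00:00Z") == "1969"    # common era date
    assert extract_year("-0500-01-01T00:00:00Z") == "-500"   # BCE dates start with '-'
    assert np.isnan(extract_year("unknown value"))           # non-numeric year -> NaN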
def get_all_properties(df):
    def _query(relation_ids):
        return f'''
        SELECT ?item ?itemLabel ?wd ?wdLabel ?ps_ ?ps_Label WHERE {{
          VALUES ?item {{
            {" ".join([f"wd:{id}" for id in relation_ids])}
          }}
          ?item ?p ?statement .
          ?statement ?ps ?ps_ .
          ?wd wikibase:claim ?p .
          ?wd wikibase:statementProperty ?ps .
          SERVICE wikibase:label {{ bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }}
        }}
        '''
    sparql = SPARQL()
    subjects = df["s_uri"].to_list()
    subject_chunks = [subjects[i:i+20] for i in range(0, len(subjects), 20)]
    df = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(subject_chunks)])
    # Drop noisy administrative/meta properties
    df = df[~df["wdLabel"].str.contains(r"ID|category|template|username|instance of|gallery|article|handle|url|wiki|copyright|classification|website|described|tag|archive|reddit|profile|image|list|file", case=False, na=False)]
    # Date of birth (P569) / inception (P571): keep only the year, marked with the sentinel 'Q000'
    tmp = df[(df['wd'] == 'P569') | (df['wd'] == 'P571')].copy()
    tmp['ps_Label'] = tmp['ps_Label'].apply(extract_year)
    tmp.dropna(subset=['ps_Label'], inplace=True)
    tmp['ps_'] = 'Q000'
    df = df[~((df['wd'] == 'P569') | (df['wd'] == 'P571'))]
    # Keep only entity-valued statements with non-numeric labels
    df = df[~df["ps_Label"].str.contains(r'\d', na=False)]
    df = df[df["ps_"].apply(lambda s: bool(re.fullmatch(r"Q\d+", s)))]
    df = pd.concat([df, tmp])
    df = df[["item", "itemLabel", "wd", "wdLabel", "ps_", "ps_Label"]]
    df = df.rename(
        columns={
            "item": "s_uri",
            "itemLabel": "subject",
            "wd": "r_uri",
            "wdLabel": "relation",
            "ps_": "a_uri",
            "ps_Label": "attribute",
        }
    )
    return df
def attribute_type(df):
    def _query(uris):
        return f'''
        SELECT ?uri ?typeLabel
        WHERE {{
          {{VALUES ?uri {{ {" ".join([f"wd:{uri}" for uri in uris])} }} }}
          ?uri wdt:P31 ?type.
          SERVICE wikibase:label {{ bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }}
        }}
        '''
    sparql = SPARQL()
    uris = df["a_uri"].drop_duplicates().to_list()
    uri_chunks = [uris[i:i+100] for i in range(0, len(uris), 100)]
    a_types = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(uri_chunks)])
    a_types = a_types.groupby("uri")["typeLabel"].agg(list).reset_index(name="a_type")
    a_types['a_type'] = a_types['a_type'].apply(lambda x: x if type(x) == list else [])
    # The synthetic 'Q000' attribute (year values) gets the type 'year'
    a_types = pd.concat([a_types, pd.DataFrame([{'uri': 'Q000', 'a_type': str(['year'])}])])
    return a_types
def get_wikidata_id(name):
    url = "https://www.wikidata.org/w/api.php"
    params = {
        "action": "wbsearchentities",
        "format": "json",
        "language": "en",
        "search": name
    }
    response = requests.get(url, params=params).json()
    if 'search' in response and response['search']:
        return response['search'][0]['id']
    return None

def add_wikidata_ids(df, name_col="subject"):
    df["wikidata_id"] = df[name_col].apply(get_wikidata_id)
    return df
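# A quick illustrative call (not part of the original script) showing how
# get_wikidata_id resolves a free-text name through the wbsearchentities API;
# the example name is arbitrary.
def _demo_get_wikidata_id():
    qid = get_wikidata_id("Eiffel Tower")
    print(qid)  # expected: a QID string such as "Q243", or None if nothing matches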
def add_unesco_question(base_df):
    def _query(qids):
        return f"""
        SELECT ?item ?itemLabel ?startTime WHERE {{
          VALUES ?item {{{' '.join(f'wd:{qid}' for qid in qids)}}}
          ?item p:P1435 ?heritageStatement.
          ?heritageStatement ps:P1435 wd:Q9259.
          OPTIONAL {{
            ?heritageStatement pq:P580 ?startTime.
          }}
          SERVICE wikibase:label {{ bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }}
        }}
        """
    sparql = SPARQL()
    # Landmarks with a UNESCO World Heritage designation (Q9259) and its start time (P580)
    df = base_df[base_df['type'] == 'landmarks']
    subjects = df["s_uri"].to_list()
    subject_chunks = [subjects[i:i+20] for i in range(0, len(subjects), 20)]
    df = pd.concat([sparql.execute(_query(chunk)) for chunk in tqdm(subject_chunks)])
    df.dropna(subset=['startTime'], inplace=True)
    df['startTime'] = df['startTime'].apply(extract_year)
    df = df.rename(
        columns={
            "item": "s_uri",
            "startTime": "attribute",
            "itemLabel": "subject",
        }
    )
    df['possible_answers'] = df['attribute'].apply(lambda x: str([x]))
    df['r_uri'] = 'P580'
    df['relation'] = 'start time'
    df['a_uri'] = 'P580'
    df['a_type'] = str(['year'])
    return df
def aggregate_triplets(base, aliases, relations, attributes, add_unesco=False):
    subjects = base[['s_uri']]
    relations = relations.merge(subjects, on="s_uri")
    # Note: aliases are reloaded from disk here, overriding the `aliases` argument
    aliases = pd.read_csv("data/all_aliases.csv", index_col=0)
    if type(aliases.iloc[0]['aliases']) == str:
        aliases["aliases"] = aliases["aliases"].apply(lambda x: literal_eval(x))
    if type(attributes.iloc[0]['a_type']) == str:
        attributes["a_type"] = attributes["a_type"].apply(lambda x: literal_eval(x))
    relations = relations.merge(aliases, left_on="a_uri", right_on="uri", how="left")
    relations = relations.drop(columns=["uri"])
    relations["possible_answers"] = relations['aliases'].apply(lambda x: x if type(x) == list else [])
    relations["possible_answers"] = relations.progress_apply(lambda x: x["possible_answers"] + [x["attribute"]], axis=1)
    # Collapse duplicate (subject, relation) rows, concatenating their possible answers
    agg_funcs = {col: 'first' for col in relations.columns if col not in ['s_uri', 'r_uri', 'possible_answers']}
    agg_funcs['possible_answers'] = sum
    relations = relations.groupby(['s_uri', 'r_uri'], as_index=False).agg(agg_funcs)
    relations = relations.drop(columns=["aliases"])
    relations = relations.merge(attributes, left_on="a_uri", right_on="uri", how="left")
    relations = relations.drop(columns=["uri"])
    if add_unesco:
        unesco = add_unesco_question(base)
        relations = pd.concat([relations, unesco])
    return relations
def subj_substitute(row):
    if row['type'] == 'brands':
        return f"the brand {row['subject']}"
    if row['type'] == 'paintings':
        return f"the painting {row['subject']}"
    return row['subject']
def build_prompts(base_df, triplets, templates):
    subjects = base_df[["s_uri", "subject"]]
    base_df = base_df[["s_uri", "type"]]
    triplets = triplets.drop("subject", axis=1)
    triplets = triplets.merge(subjects, on=["s_uri"])
    triplets = triplets.merge(base_df, on=["s_uri"], how='left')
    triplets = triplets.merge(templates[["uri", "template"]], left_on="r_uri", right_on="uri")
    triplets = triplets.drop(columns=["uri"])
    triplets = triplets.dropna()
    # Keep only subjects that have more than one distinct relation
    query_counts = triplets.drop_duplicates(["s_uri", "r_uri"]).groupby(["s_uri"])["r_uri"].count().reset_index(name="count")
    triplets = triplets.merge(query_counts[query_counts["count"] > 1][["s_uri"]], on="s_uri")
    # Image-based question: the subject mention is replaced by an image-referring phrase
    triplets["question_for_image"] = triplets.progress_apply(replace_for_image, axis=1)
    triplets["question_for_image"] = triplets.progress_apply(lambda row: row["question_for_image"].replace("[obj_type]", best_obj_type(row["a_type"])) if len(row["a_type"]) > 0 else row["question_for_image"], axis=1)
    # Text-only question: the subject is mentioned by name
    triplets["question"] = triplets.progress_apply(lambda row: row["template"].replace("[subj]", subj_substitute(row)), axis=1)
    triplets["question"] = triplets.progress_apply(lambda row: row["question"].replace("[obj_type]", best_obj_type(row["a_type"])) if len(row["a_type"]) > 0 else row["question"], axis=1)
    triplets = triplets.drop(columns=["template"])
    triplets = triplets[['type', 'subject', 'question_for_image', 'question', 'possible_answers', 'relation', 's_uri', 'r_uri', 'a_uri', 'attribute', 'a_type']]
    return triplets
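# A minimal, hedged sketch (not in the original script) of how the helpers
# above might be chained end to end. It assumes `base` has s_uri/subject/type
# columns and `templates` has uri/template columns whose templates contain the
# [subj] and [obj_type] placeholders; note that aggregate_triplets, as written,
# reloads aliases from data/all_aliases.csv, shadowing the argument passed in.
def _demo_build_dataset(base, templates):
    relations = get_all_properties(base)                          # subject-relation-attribute triples
    a_types = attribute_type(relations)                           # instance-of labels per attribute URI
    attr_aliases = get_aliases(
        relations[["a_uri"]].rename(columns={"a_uri": "s_uri"})   # aliases keyed by attribute URI
    )
    triplets = aggregate_triplets(base, attr_aliases, relations, a_types, add_unesco=True)
    return build_prompts(base, triplets, templates)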
def resize_square(image, size=336, resample=Image.LANCZOS):
    """
    Resize an image so its longer side equals `size` (preserving aspect ratio),
    then pad it with a black background into a square.
    image: a Pillow image instance
    size: an integer, the desired output size (width and height will be the same)
    """
    img_format = image.format
    image = image.copy()
    size = [size, size]
    img_size = image.size
    ratio = min(size[0] / img_size[0], size[1] / img_size[1])
    new_size = [
        int(math.ceil(img_size[0] * ratio)),
        int(math.ceil(img_size[1] * ratio))
    ]
    image = image.resize((new_size[0], new_size[1]), resample)
    # Make the image square by adding black padding
    max_dim = max(image.size)
    new_img = Image.new("RGB", (max_dim, max_dim), (0, 0, 0))
    new_img.paste(image, ((max_dim - image.size[0]) // 2, (max_dim - image.size[1]) // 2))
    # Resize to target size
    # new_img = new_img.resize((size, size), resample)
    new_img.format = img_format
    return new_img
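# Illustrative usage (not part of the original script) of resize_square; the
# file path is a placeholder.
def _demo_resize(path="example.jpg"):
    img = Image.open(path)
    squared = resize_square(img, size=336)
    print(squared.size)  # -> (336, 336): longer side scaled to 336, shorter side padded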