from PIL import Image, ImageOps
from torchvision import transforms

# Scale the longer side of an image to target_size, zero-pad the shorter side
# to target_size, then crop, tensorize, and normalize with ImageNet statistics.
class ScaleAndPadTransform:
    def __init__(self, target_size):
        self.target_size = target_size

    def __call__(self, img):  # __call__ so the instance composes with torchvision transforms
        width, height = img.size
        if width > height:
            # landscape: fit the width, pad top and bottom
            scale = self.target_size / width
            new_height = int(height * scale)
            img = img.resize((self.target_size, new_height))
            padding = (self.target_size - new_height) // 2
            img = ImageOps.expand(img, (0, padding, 0, self.target_size - new_height - padding))
        else:
            # portrait or square: fit the height, pad left and right
            scale = self.target_size / height
            new_width = int(width * scale)
            img = img.resize((new_width, self.target_size))
            padding = (self.target_size - new_width) // 2
            img = ImageOps.expand(img, (padding, 0, self.target_size - new_width - padding, 0))
        IMG_MEAN = [0.485, 0.456, 0.406]  # ImageNet channel means
        IMG_STD = [0.229, 0.224, 0.225]   # ImageNet channel stds
        transform = transforms.Compose([
            transforms.CenterCrop(self.target_size),
            transforms.ToTensor(),
            transforms.Normalize(IMG_MEAN, IMG_STD)
        ])
        img = transform(img)
        return img
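
# Usage sketch (not part of the original Space, illustrative only): run a
# synthetic RGB image through the transform and confirm the output shape.
_demo_transform = ScaleAndPadTransform(224)
_x = _demo_transform(Image.new('RGB', (640, 480)))  # synthetic stand-in image
print(_x.shape)  # torch.Size([3, 224, 224])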
class Body_Figure(object):
    def __init__(self, waist_width, thigh_width, hip_width, head_width, Area, height, shoulder_width):
        self._waist_width = waist_width
        self._thigh_width = thigh_width
        self._hip_width = hip_width
        self._head_width = head_width
        self._Area = Area
        self._height = height
        self._shoulder_width = shoulder_width
        if self._head_width == 0:
            # fallback when no head is detected: assume a third of the hip width
            self._head_width = self._hip_width / 3

    def __str__(self):
        return f"Body Figure Information:\n"\
               f"  - Waist-to-Shoulder Ratio (WSR): {self.WSR()}\n"\
               f"  - Waist-to-Thigh Ratio (WTR): {self.WTR()}\n"\
               f"  - Waist-to-Hip Ratio (WHpR): {self.WHpR()}\n"\
               f"  - Waist-to-Head Ratio (WHdR): {self.WHdR()}\n"\
               f"  - Hip-to-Head Ratio (HpHdR): {self.HpHdR()}\n"\
               f"  - Area: {self.Area()}\n"\
               f"  - Height-to-Waist Ratio (H2W): {self.H2W()}\n"

    def WSR(self):
        return self._waist_width / self._shoulder_width

    def WTR(self):
        return self._waist_width / self._thigh_width

    def WHpR(self):
        return self._waist_width / self._hip_width

    def WHdR(self):
        return self._waist_width / self._head_width

    def HpHdR(self):
        return self._hip_width / self._head_width

    def Area(self):
        return self._Area

    def H2W(self):
        return self._height / self._waist_width
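
# Illustrative instantiation (an assumption, not from the original code): the
# pixel measurements below are arbitrary values chosen to exercise the ratios.
_fig = Body_Figure(waist_width=60, thigh_width=40, hip_width=70,
                   head_width=25, Area=1.1, height=320, shoulder_width=75)
print(_fig)  # prints the seven figure ratios defined above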
import torch
import torch.nn as nn
import torch.optim as optim

def custom_resnet():
    # pretrained ResNet-101 backbone
    resnet_model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet101', pretrained=True)
    resnet_model._modules.pop('fc')  # drop the original 1000-way classification head
    # replace it with a 15-feature bottleneck followed by a single regression output
    resnet_model.fc1 = nn.Linear(2048, 15)
    resnet_model.fc2 = nn.Sequential(
        nn.ReLU(inplace=True),
        nn.Linear(15, 1)
    )

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)  # 2048 x 7 x 7 feature map for a 224x224 input
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = self.fc2(x)
        return x

    # bind the new forward function to this resnet instance as a method
    bound_method = forward.__get__(resnet_model, resnet_model.__class__)
    setattr(resnet_model, 'forward', bound_method)
    return resnet_model

def custom_resnet_optimizer(resnet_model):
    optimizer = optim.Adam(resnet_model.parameters(), lr=0.0001, betas=(0.9, 0.999), weight_decay=0.001)
    return optimizer
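
# Shape sanity check, a minimal sketch (commented out because instantiating
# the model downloads the torchvision resnet101 weights):
# model = custom_resnet()
# out = model(torch.randn(1, 3, 224, 224))
# print(out.shape)  # torch.Size([1, 1]) -- one BMI regression value per image
# optimizer = custom_resnet_optimizer(model)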
# scale the longer side of the image to 224 and pad the shorter side with zeros to reach 224x224
from PIL import Image, ImageOps

def scale_and_pad(img):
    width, height = img.size
    if width > height:
        scale = 224 / width
        new_height = int(height * scale)
        img = img.resize((224, new_height))
        padding = (224 - new_height) // 2
        img = ImageOps.expand(img, (0, padding, 0, 224 - new_height - padding))
    else:
        scale = 224 / height
        new_width = int(width * scale)
        img = img.resize((new_width, 224))
        padding = (224 - new_width) // 2
        img = ImageOps.expand(img, (padding, 0, 224 - new_width - padding, 0))
    return img
from torchvision import transforms

IMG_SIZE = 224
IMG_MEAN = [0.485, 0.456, 0.406]
IMG_STD = [0.229, 0.224, 0.225]

transform = transforms.Compose([
    transforms.CenterCrop(IMG_SIZE),
    transforms.ToTensor(),
    transforms.Normalize(IMG_MEAN, IMG_STD)
])
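
# Quick end-to-end preprocessing sketch on a synthetic image (illustrative
# only, not from the original Space):
_demo_tensor = transform(scale_and_pad(Image.new('RGB', (300, 500))))
print(_demo_tensor.shape)  # torch.Size([3, 224, 224])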
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import re

class CustomDataset(Dataset):
    def __init__(self, dataset, transform=None):
        self.data = dataset  # pandas DataFrame with the image filename in the first column
        self.transform = transform

    def __len__(self):
        return len(self.data.index)

    def __getitem__(self, idx):
        img_name = self.data.iloc[idx, 0]
        img_path = 'datasets/Images/' + img_name  # adjust the path to your actual image directory
        image = Image.open(img_path)
        image = scale_and_pad(image)
        # the filename encodes the height and weight fields; derive the BMI label from them
        ret = re.match(r"\d+?_([FMfm])_(\d+?)_(\d+?)_(\d+).+", img_name)
        BMI = (int(ret.group(4)) / 100000) / (int(ret.group(3)) / 100000) ** 2
        if self.transform:
            image = self.transform(image)
        return (image, img_name), BMI
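
# Hedged usage sketch (commented out; `train_df` is an assumed pandas
# DataFrame whose first column holds image filenames matching the regex
# above -- it is not defined in this file):
# train_loader = DataLoader(CustomDataset(train_df, transform), batch_size=16, shuffle=True)
# (images, names), bmis = next(iter(train_loader))
# print(images.shape, bmis.shape)  # torch.Size([16, 3, 224, 224]) torch.Size([16])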
# train the resnet model on the train_img_tensors and train_labels
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from sklearn.metrics import mean_absolute_error

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
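
# A hedged sketch of the training loop implied by the comment above
# (commented out: it assumes the `train_loader` from the CustomDataset sketch
# and real image files, none of which are defined in this file):
# model = custom_resnet().to(device)
# optimizer = custom_resnet_optimizer(model)
# criterion = nn.MSELoss()
# for epoch in range(10):
#     for (images, _names), bmi in train_loader:
#         images = images.to(device)
#         bmi = bmi.float().unsqueeze(1).to(device)
#         optimizer.zero_grad()
#         loss = criterion(model(images), bmi)
#         loss.backward()
#         optimizer.step()
#     print(f"epoch {epoch}: loss {loss.item():.4f}")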
import numpy as np
import cv2
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
# from Human_Parse import HumanParser

# model config files used below:
#   "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
#   "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"
class Image_Processor(object):
    def __init__(self, masks_file, key_file, key_thresh=0.7):
        self._KeypointCfg = self.__init_key(key_file, key_thresh)
        self._KeypointsPredictor = DefaultPredictor(self._KeypointCfg)
        self._Contourcfg = self.__init_mask(masks_file, key_thresh)
        self._ContourPredictor = DefaultPredictor(self._Contourcfg)
        # self._HumanParser = HumanParser()

    def __init_key(self, key_file, key_thresh):
        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(key_file))
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = key_thresh  # detection score threshold for this model
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(key_file)
        return cfg

    def __init_mask(self, mask_file, key_thresh):
        cfg = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(mask_file))
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = key_thresh  # detection score threshold for this model
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(mask_file)
        return cfg

    def get_keyandcontour_output(self, img):
        Keypoints = self._Keypoints_detected(img)
        ContourOutput = self._Contour_detected(img)
        # """ Detect Arms Mask by Human parser """
        # Arms_mask = self._HumanParser.Arms_detect(img)
        # ContourOutput = ContourOutput ^ Arms_mask
        return Keypoints, ContourOutput

    def _Contour_detected(self, img):
        ContourOutput = self._ContourPredictor(img)
        sorted_idxs = np.argsort(-ContourOutput["instances"].scores.cpu().numpy())
        ContourMasks = None  # stays None when no person is detected
        for sorted_idx in sorted_idxs:
            if ContourOutput["instances"].pred_classes[sorted_idx] == 0:  # COCO class 0 = person
                ContourMasks = ContourOutput["instances"].pred_masks[sorted_idx].cpu().numpy()
                break  # keep the highest-scoring person mask
        ContourOutput = ContourMasks
        return ContourOutput

    def _Keypoints_detected(self, img):
        KeypointsOutput = self._KeypointsPredictor(img)
        sorted_idxs = np.argsort(-KeypointsOutput["instances"].scores.cpu().numpy())
        # keypoints of the highest-scoring instance, shape (17, 3): x, y, score
        Keypoints = KeypointsOutput["instances"].pred_keypoints[sorted_idxs[0]].cpu().numpy()
        return Keypoints
    def get_figure(self, img):
        Keypoints, ContourOutput = self.get_keyandcontour_output(img)
        # COCO keypoint indices; note the left/right assignments are swapped relative
        # to COCO's subject-centric labels, i.e. "left" here means the viewer's left
        nose, left_ear, right_ear, left_shoulder, right_shoulder = Keypoints[0], Keypoints[4], Keypoints[3], Keypoints[6], Keypoints[5]
        left_hip, right_hip, left_knee, right_knee = Keypoints[12], Keypoints[11], Keypoints[14], Keypoints[13]
        y_hip = (left_hip[1] + right_hip[1]) / 2
        y_knee = (left_knee[1] + right_knee[1]) / 2
        center_shoulder = (left_shoulder + right_shoulder) / 2
        # place the waist two thirds of the way down from the nose/shoulder midpoint to the hips
        y_waist = y_hip * 2 / 3 + (nose[1] + center_shoulder[1]) / 6
        left_thigh = (left_knee + left_hip) / 2
        right_thigh = (right_knee + right_hip) / 2
        # estimate the waist width
        waist_width = self.waist_width_estimate(center_shoulder, y_waist, ContourOutput)
        # estimate the thigh width
        thigh_width = self.thigh_width_estimate(left_thigh, right_thigh, ContourOutput)
        # estimate the hip width
        hip_width = self.hip_width_estimate(center_shoulder, y_hip, ContourOutput)
        # estimate the head width
        head_width = self.head_width_estimate(left_ear, right_ear)
        # estimate the Area
        Area = self.Area_estimate(y_waist, y_hip, waist_width, hip_width, ContourOutput)
        # estimate the height (nose to knee)
        height = self.Height_estimate(y_knee, nose[1])
        # estimate the shoulder width
        shoulder_width = self.shoulder_width_estimate(left_shoulder, right_shoulder)
        figure = Body_Figure(waist_width, thigh_width, hip_width, head_width, Area, height, shoulder_width)
        # debug visualisation of the keypoint and mask predictions:
        # outputs = self._KeypointsPredictor(img)
        # v = Visualizer(img[:, :, ::-1], MetadataCatalog.get(self._KeypointCfg.DATASETS.TRAIN[0]), scale=1.2)
        # out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        # cv2.imwrite('random.jpg', out.get_image()[:, :, ::-1])
        # outputs = self._ContourPredictor(img)
        # v = Visualizer(img[:, :, ::-1], MetadataCatalog.get(self._Contourcfg.DATASETS.TRAIN[0]), scale=1.2)
        # out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        # cv2.imwrite('random1.jpg', out.get_image()[:, :, ::-1])
        return figure
    def Height_estimate(self, y_k, y_n):
        # vertical distance from nose to knees, used as a height proxy
        Height = np.abs(y_n - y_k)
        return Height

    def Area_estimate(self, y_w, y_h, W_w, H_w, mask):
        '''
        Area is expressed as the number of mask pixels per unit area of the
        trapezoid between the waist and hip lines.
        '''
        try:
            pixels = np.sum(mask[int(y_w):int(y_h), :])
        except Exception:
            pixels = 100  # arbitrary fallback when the mask is missing
        area = (y_h - y_w) * 0.5 * (W_w + H_w)  # trapezoid area
        Area = pixels / area
        return Area
    def shoulder_width_estimate(self, left_shoulder, right_shoulder):
        shoulder_width = np.sqrt((right_shoulder[0] - left_shoulder[0]) ** 2 + (right_shoulder[1] - left_shoulder[1]) ** 2)
        return shoulder_width

    def head_width_estimate(self, left_ear, right_ear):
        # ear-to-ear distance as a head width proxy
        head_width = np.sqrt((right_ear[0] - left_ear[0]) ** 2 + (right_ear[1] - left_ear[1]) ** 2)
        return head_width
    def hip_width_estimate(self, center_shoulder, y_hip, ContourOutput):
        # scan the mask row at hip height for the last background pixel on each side of the body centre line
        x_hip_center = int(center_shoulder[0])
        try:
            x_lhb = np.where(ContourOutput[int(y_hip)][:x_hip_center] == 0)[0]
            x_lhb = x_lhb[-1] if len(x_lhb) else 0
        except Exception:
            x_lhb = 10  # arbitrary fallback when the mask is missing
        try:
            x_rhb = np.where(ContourOutput[int(y_hip)][x_hip_center:] == 0)[0]
            x_rhb = x_rhb[0] + x_hip_center if len(x_rhb) else len(ContourOutput[0])
        except Exception:
            x_rhb = 5  # arbitrary fallback when the mask is missing
        hip_width = x_rhb - x_lhb
        return hip_width

    def thigh_width_estimate(self, left_thigh, right_thigh, mask):
        lx, ly = int(left_thigh[0]), int(left_thigh[1])
        rx, ry = int(right_thigh[0]), int(right_thigh[1])
        try:
            x_ltb = np.where(mask[ly][:lx] == 0)[0]
            x_ltb = x_ltb[-1] if len(x_ltb) else 0
        except Exception:
            x_ltb = 10
        try:
            x_rtb = np.where(mask[ry][rx:] == 0)[0]
            x_rtb = x_rtb[0] + rx if len(x_rtb) else len(mask[0])
        except Exception:
            x_rtb = 0
        # each thigh width is twice the distance from the thigh midpoint to the outer boundary
        l_width = (lx - x_ltb) * 2
        r_width = (x_rtb - rx) * 2
        thigh_width = (l_width + r_width) / 2
        return thigh_width

    def waist_width_estimate(self, center_shoulder, y_waist, ContourOutput):
        x_waist_center = int(center_shoulder[0])
        try:
            x_lwb = np.where(ContourOutput[int(y_waist)][:x_waist_center] == 0)[0]
            x_lwb = x_lwb[-1] if len(x_lwb) else 0
        except Exception:
            x_lwb = 10
            print("err waist width")
        try:
            x_rwb = np.where(ContourOutput[int(y_waist)][x_waist_center:] == 0)[0]
            x_rwb = x_rwb[0] + x_waist_center if len(x_rwb) else len(ContourOutput[0])
        except Exception:
            x_rwb = 0
            print("err waist width")
        waist_width = x_rwb - x_lwb
        return waist_width
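
# Sanity-check sketch for the horizontal-scan width estimators (illustrative,
# not from the original Space). On a synthetic mask with a 40-pixel-wide
# silhouette, the scan finds the last background pixel on each side, so the
# reported width is the band width plus one. The method body never touches
# self, so it is called unbound here to avoid constructing the detectron2
# predictors.
_mask = np.zeros((100, 100), dtype=bool)
_mask[:, 30:70] = True  # silhouette spans columns 30-69
_w = Image_Processor.waist_width_estimate(None, np.array([50.0, 0.0]), 50.0, _mask)
print(_w)  # 41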
import numpy as np
import pandas
import cv2
from PIL import Image
import torchvision.models.detection
from torchvision.models.detection import maskrcnn_resnet50_fpn, MaskRCNN_ResNet50_FPN_Weights
class Data_Processor(object):
    def __init__(self, mask_model="COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml",
                 keypoints_model="COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"):
        self._img_pro = Image_Processor(mask_model, keypoints_model)

    def get_image_info(self, df):
        # placeholder: returns the DataFrame unchanged
        return df

    def test(self, img):
        # accepts a PIL image; convert it to a numpy array for detectron2
        img = np.array(img)
        figure = self._img_pro.get_figure(img)
        return figure
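
# End-to-end usage sketch (commented out: constructing Data_Processor
# downloads the detectron2 checkpoints, and 'person.jpg' is a placeholder
# path, not a file in this Space):
# dp = Data_Processor()
# fig = dp.test(Image.open('person.jpg').convert('RGB'))
# print(fig)  # the Body_Figure summary string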
class LayerActivations:
    # captures the output of a module via a forward hook
    features = None

    def __init__(self, model, layer_num):
        # note: layer_num is currently unused; the hook is registered on `model`
        # itself, so pass the specific sub-module whose activations you want
        self.hook = model.register_forward_hook(self.hook_fn)

    def hook_fn(self, module, input, output):
        self.features = output

    def remove(self):
        self.hook.remove()
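
# Minimal demonstration of LayerActivations on a toy module (illustrative
# only; the Linear layer below is an arbitrary stand-in):
_demo_layer = nn.Linear(4, 2)
_acts = LayerActivations(_demo_layer, 0)
_ = _demo_layer(torch.randn(1, 4))
print(_acts.features.shape)  # torch.Size([1, 2])
_acts.remove()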