# %%
# Classify frames of the HCP 7T movie stimuli using precomputed per-frame video
# features and the final projection layer of a pretrained SlowFast model.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch

# Read in the precomputed feature arrays for the four movies
VID_DIR = "/home/ubuntu/hcp_data/stimuli_jpg/"
video1 = "7T_MOVIE1_CC1_v2_224x224_72.npy"
video2 = "7T_MOVIE2_HO1_v2_224x224_72.npy"
video3 = "7T_MOVIE3_CC2_v2_224x224_72.npy"
video4 = "7T_MOVIE4_HO2_v2_224x224_72.npy"

vid1_feat = np.load(VID_DIR + video1, allow_pickle=True)
vid2_feat = np.load(VID_DIR + video2, allow_pickle=True)
vid3_feat = np.load(VID_DIR + video3, allow_pickle=True)
vid4_feat = np.load(VID_DIR + video4, allow_pickle=True)

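# %%

# Quick sanity check (assumption): each .npy file should hold a 2-D array of
# per-frame feature vectors whose second dimension matches the input size of the
# SlowFast projection layer used below; the exact shapes are not documented here,
# so this simply prints what was loaded.
for name, feat in zip([video1, video2, video3, video4],
                      [vid1_feat, vid2_feat, vid3_feat, vid4_feat]):
    print(name, feat.shape, feat.dtype)
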
# %%

# Load the pretrained `slowfast_r50` model (trained on Kinetics-400) from PyTorchVideo
model = torch.hub.load('facebookresearch/pytorchvideo', 'slowfast_r50', pretrained=True)
model = model.eval()
# Final projection layer of the classification head: maps pooled features to class logits
proj_layer = model.blocks[6].proj

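# %%

# Optional sanity check: confirm that `proj_layer` is the final linear projection
# of the classification head (for `slowfast_r50` this should map the pooled
# features to the 400 Kinetics-400 logits, e.g.
# Linear(in_features=2304, out_features=400, bias=True)).
print(proj_layer)
print(proj_layer.in_features, proj_layer.out_features)
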
# %%

# Kinetics-400 class names, one per output unit of the classification head
df_labels = pd.read_csv("kinetics_400_labels.csv")

labels = df_labels.name.values
print(labels.shape)  # expected: (400,)

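# %%

# Optional consistency check (assumption: the CSV lists the Kinetics-400 classes
# in the same order as the model's output units, which this snippet cannot verify;
# it only checks that the counts match).
assert len(labels) == proj_layer.out_features, (
    f"{len(labels)} labels vs. {proj_layer.out_features} model outputs"
)
print(labels[:5])
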
# %%

# Run each movie's precomputed per-frame features through the classification
# head, turn the logits into per-class sigmoid scores, binarize them at a fixed
# threshold, and save both the raw logits and the thresholded labels next to
# the input files.
videos = [video1, video2, video3, video4]
video_raw_outputs = []
video_classifications = []
threshold = 0.80
with torch.no_grad():
    for video in videos:
        vid_feat = np.load(VID_DIR + video, allow_pickle=True)
        vid_feat_torch = torch.Tensor(vid_feat)
        vid_feat_last = proj_layer(vid_feat_torch)          # per-frame logits
        vid_classification = torch.sigmoid(vid_feat_last)   # independent per-class scores
        vid_classification[vid_classification >= threshold] = 1
        vid_classification[vid_classification < threshold] = 0
        video_raw_outputs.append(vid_feat_last)
        video_classifications.append(vid_classification)
        np.save((VID_DIR + video).replace("_72", "_72_last_layer"), vid_feat_last.numpy(), allow_pickle=True)
        np.save((VID_DIR + video).replace("_72", "_72_sigmoid"), vid_classification.numpy(), allow_pickle=True)

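# %%

# Optional summary of how sparse the thresholded predictions are: with a 0.80
# sigmoid cutoff many frames may have no active label at all, which is useful to
# know before interpreting individual frames.
for video, cls in zip(videos, video_classifications):
    active_per_frame = cls.sum(dim=1)
    print(video,
          "| frames:", cls.shape[0],
          "| mean labels/frame:", round(float(active_per_frame.mean()), 3),
          "| frames with no label:", int((active_per_frame == 0).sum()))
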
# %%

# Visualize one frame of MOVIE1 together with the labels that passed the threshold.
# The boolean mask is converted to NumPy before indexing the NumPy label array.
frame = 1050
frame_mask = (video_classifications[0][frame] == 1).numpy()
labels_for_frame = labels[frame_mask]
path_frames = "/home/ubuntu/hcp_data/stimuli_jpg/7T_MOVIE1_CC1_v2_224x224"
path_frame = path_frames + "/frame" + str(frame + 1).zfill(4) + ".jpg"  # frame files are 1-indexed
img = plt.imread(path_frame)
plt.imshow(img)
plt.title(", ".join(labels_for_frame) if len(labels_for_frame) else "no label above threshold")
# %%
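
# A hypothetical convenience helper (not in the original script): view any frame of
# any movie with its thresholded labels. It assumes every movie's frame directory
# follows the same naming convention as MOVIE1 above (the feature filename with the
# trailing "_72.npy" removed) and the same "frameNNNN.jpg" numbering; neither
# assumption is confirmed here.
def show_frame(video_idx, frame):
    frame_dir = VID_DIR + videos[video_idx].replace("_72.npy", "")
    mask = (video_classifications[video_idx][frame] == 1).numpy()
    img = plt.imread(frame_dir + "/frame" + str(frame + 1).zfill(4) + ".jpg")
    plt.imshow(img)
    plt.title(", ".join(labels[mask]) if mask.any() else "no label above threshold")
    plt.show()

# Example: the same frame as above, via the helper
# show_frame(0, 1050)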