Re-ID model is now contained within this repo

This commit is contained in:
Aditya Pulipaka
2026-03-15 22:30:34 -05:00
parent b000850d68
commit 1389959e96
3 changed files with 651 additions and 13 deletions

View File

@@ -0,0 +1,298 @@
import torch
import torch.nn as nn
import copy
from .vit_ID import TransReID, Block
from functools import partial
from torch.nn import functional as F
from .vit_ID import resize_pos_embed
def TCSS(features, shift, b, t):
    """Temporal Clip Shift and Shuffle: fold the t frame-level features of a clip
    into the channel dimension, then shift and shuffle the patch tokens."""
    # aggregate features at patch level: [b*t, N, d] -> [b, N, t*d]
    features = features.view(b, features.size(1), t * features.size(2))
    token = features[:, 0:1]
    batchsize = features.size(0)
    dim = features.size(-1)
    # shift the patch tokens by `shift` positions (the class token at index 0 is excluded)
    features = torch.cat([features[:, shift:], features[:, 1:shift]], dim=1)
    # patch shuffling into 2 parts; pad with the last patch if the count is odd
    try:
        features = features.view(batchsize, 2, -1, dim)
    except RuntimeError:
        features = torch.cat([features, features[:, -2:-1, :]], dim=1)
        features = features.view(batchsize, 2, -1, dim)
    features = torch.transpose(features, 1, 2).contiguous()
    features = features.view(batchsize, -1, dim)
    return features, token
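# Illustrative shape sketch (an assumption added for clarity, not part of the committed file):
# with b=2 clips of t=4 frames, a 16x8 patch grid (128 patches + 1 cls token) and embed_dim=768,
# `features` entering TCSS has shape [b*t, 129, 768]; the initial view folds the t frames into the
# channel dimension, giving [b, 129, 3072], and TCSS returns the shuffled patch tokens [b, 128, 3072]
# together with the clip-level class token [b, 1, 3072], e.g.:
#   feats = torch.randn(2 * 4, 129, 768)
#   patches, cls_tok = TCSS(feats, shift=5, b=2, t=4)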
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
        if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
class KeyRe_ID(nn.Module):
def __init__(self, num_classes, camera_num, pretrainpath):
super(KeyRe_ID, self).__init__()
self.in_planes = 768
self.num_classes = num_classes
self.base =TransReID(
img_size=[256, 128], patch_size=16, stride_size=[16, 16], embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,\
camera=camera_num, drop_path_rate=0.1, drop_rate=0.0, attn_drop_rate=0.0,norm_layer=partial(nn.LayerNorm, eps=1e-6), cam_lambda=3.0)
# state_dict = torch.load(pretrainpath, map_location='cpu')
# self.base.load_param(state_dict,load=True)
if pretrainpath:
state_dict = torch.load(pretrainpath, map_location='cpu', weights_only=False)
self.base.load_param(state_dict, load=True)
#-------------------Global Branch-------------
block= self.base.blocks[-1]
layer_norm = self.base.norm
self.b1 = nn.Sequential(
copy.deepcopy(block),
copy.deepcopy(layer_norm)
)
self.bottleneck = nn.BatchNorm1d(self.in_planes)
self.bottleneck.bias.requires_grad_(False)
self.bottleneck.apply(weights_init_kaiming)
self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)
self.classifier.apply(weights_init_classifier)
#-------------------Local Branch-------------
# building local video stream
        dpr = [x.item() for x in torch.linspace(0, 0, 12)]  # stochastic depth decay rule (drop path disabled here)
self.block1 = Block(
dim=3072, num_heads=12, mlp_ratio=4, qkv_bias=True, qk_scale=None,
drop=0, attn_drop=0, drop_path=dpr[11], norm_layer=partial(nn.LayerNorm, eps=1e-6))
self.b2 = nn.Sequential(
self.block1,
nn.LayerNorm(3072) # copy.deepcopy(layer_norm)
)
self.bottleneck_1 = nn.BatchNorm1d(3072)
self.bottleneck_1.bias.requires_grad_(False)
self.bottleneck_1.apply(weights_init_kaiming)
self.bottleneck_2 = nn.BatchNorm1d(3072)
self.bottleneck_2.bias.requires_grad_(False)
self.bottleneck_2.apply(weights_init_kaiming)
self.bottleneck_3 = nn.BatchNorm1d(3072)
self.bottleneck_3.bias.requires_grad_(False)
self.bottleneck_3.apply(weights_init_kaiming)
self.bottleneck_4 = nn.BatchNorm1d(3072)
self.bottleneck_4.bias.requires_grad_(False)
self.bottleneck_4.apply(weights_init_kaiming)
self.bottleneck_5 = nn.BatchNorm1d(3072)
self.bottleneck_5.bias.requires_grad_(False)
self.bottleneck_5.apply(weights_init_kaiming)
self.bottleneck_6 = nn.BatchNorm1d(3072)
self.bottleneck_6.bias.requires_grad_(False)
self.bottleneck_6.apply(weights_init_kaiming)
self.classifier_1 = nn.Linear(3072, self.num_classes, bias=False)
self.classifier_1.apply(weights_init_classifier)
self.classifier_2 = nn.Linear(3072, self.num_classes, bias=False)
self.classifier_2.apply(weights_init_classifier)
self.classifier_3 = nn.Linear(3072, self.num_classes, bias=False)
self.classifier_3.apply(weights_init_classifier)
self.classifier_4 = nn.Linear(3072, self.num_classes, bias=False)
self.classifier_4.apply(weights_init_classifier)
self.classifier_5 = nn.Linear(3072, self.num_classes, bias=False)
self.classifier_5.apply(weights_init_classifier)
self.classifier_6 = nn.Linear(3072, self.num_classes, bias=False)
self.classifier_6.apply(weights_init_classifier)
#-------------------video attention-------------
self.middle_dim = 256 # middle layer dimension
        self.attention_conv = nn.Conv2d(self.in_planes, self.middle_dim, [1, 1])  # 1x1 kernel (the global token is already 1x1 spatially); a 7x4 kernel would correspond to a 224x112 input image
self.attention_tconv = nn.Conv1d(self.middle_dim, 1, 3, padding=1)
self.attention_conv.apply(weights_init_kaiming)
self.attention_tconv.apply(weights_init_kaiming)
#------------------------------------------
self.shift_num = 5
self.part = 6
self.rearrange=True
    def forward(self, x, heatmaps, label=None, cam_label=None, view_label=None):  # label and view_label are unused here
b = x.size(0)
t = x.size(1)
x = x.view(x.size(0)*x.size(1), x.size(2), x.size(3), x.size(4))
features = self.base(x, cam_label=cam_label)
#-------------------Global Branch-------------
        b1_feat = self.b1(features)  # [B*T, 129, 768]
global_feat = b1_feat[:, 0]
global_feat = global_feat.unsqueeze(dim=2).unsqueeze(dim=3)
a = F.relu(self.attention_conv(global_feat))
a = a.view(b, t, self.middle_dim)
a = a.permute(0,2,1)
a = F.relu(self.attention_tconv(a))
a = a.view(b, t)
a_vals = a
a = F.softmax(a, dim=1)
x = global_feat.view(b, t, -1)
a = torch.unsqueeze(a, -1)
a = a.expand(b, t, self.in_planes)
att_x = torch.mul(x,a)
att_x = torch.sum(att_x, 1)
global_feat = att_x.view(b, self.in_planes)
feat = self.bottleneck(global_feat)
#-------------------Local Branch-------------
# Heatmap Processing
heatmaps = heatmaps.view(b*t, 6, 256, 128) # [B*T, 6, 256, 128]
heatmap_patches = F.unfold(heatmaps, kernel_size=16, stride=16) # [B*T, 6*16*16, 128]
heatmap_patches = heatmap_patches.view(b*t, 6, 16*16, 128).mean(dim=2) # [B*T, 6, 128]
heatmap_weights = heatmap_patches.transpose(1, 2) # [B*T, 128, 6]
heatmap_weights = heatmap_weights.view(b, t, 128, 6).mean(dim=1) # [B, 128, 6]
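        # Note on alignment (explanatory comment, not in the original file): F.unfold with kernel 16
        # and stride 16 tiles each 256x128 heatmap into the same 16x8 = 128 non-overlapping cells as
        # the ViT patch grid, so `heatmap_weights[:, p, k]` is the temporally averaged activation of
        # body part k over patch p, ready to reweight the 128 shuffled patch tokens below.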
# Temporal clip shift and shuffled
x ,token = TCSS(features, self.shift_num, b, t)
patch_feats = x
# Part 1: Head
part1_weight = heatmap_weights[:, :, 0].unsqueeze(-1)
part1 = patch_feats * part1_weight
part1 = self.b2(torch.cat((token, part1), dim=1))
part1_f = part1[:, 0]
# Part 2: Torso
part2_weight = heatmap_weights[:, :, 1].unsqueeze(-1)
part2 = patch_feats * part2_weight
part2 = self.b2(torch.cat((token, part2), dim=1))
part2_f = part2[:, 0]
# Part 3: Left Arm
part3_weight = heatmap_weights[:, :, 2].unsqueeze(-1)
part3 = patch_feats * part3_weight
part3 = self.b2(torch.cat((token, part3), dim=1))
part3_f = part3[:, 0]
# Part 4: Right Arm
part4_weight = heatmap_weights[:, :, 3].unsqueeze(-1)
part4 = patch_feats * part4_weight
part4 = self.b2(torch.cat((token, part4), dim=1))
part4_f = part4[:, 0]
# Part 5: Left Leg
part5_weight = heatmap_weights[:, :, 4].unsqueeze(-1)
part5 = patch_feats * part5_weight
part5 = self.b2(torch.cat((token, part5), dim=1))
part5_f = part5[:, 0]
# Part 6: Right Leg
part6_weight = heatmap_weights[:, :, 5].unsqueeze(-1)
part6 = patch_feats * part6_weight
part6 = self.b2(torch.cat((token, part6), dim=1))
part6_f = part6[:, 0]
# Apply batch normalization
part1_bn = self.bottleneck_1(part1_f)
part2_bn = self.bottleneck_2(part2_f)
part3_bn = self.bottleneck_3(part3_f)
part4_bn = self.bottleneck_4(part4_f)
part5_bn = self.bottleneck_5(part5_f)
part6_bn = self.bottleneck_6(part6_f)
if self.training:
Global_ID = self.classifier(feat)
Local_ID1 = self.classifier_1(part1_bn)
Local_ID2 = self.classifier_2(part2_bn)
Local_ID3 = self.classifier_3(part3_bn)
Local_ID4 = self.classifier_4(part4_bn)
Local_ID5 = self.classifier_5(part5_bn)
Local_ID6 = self.classifier_6(part6_bn)
return [Global_ID, Local_ID1, Local_ID2, Local_ID3, Local_ID4, Local_ID5, Local_ID6],\
[global_feat, part1_f, part2_f, part3_f, part4_f, part5_f, part6_f], a_vals
else:
return torch.cat([feat, part1_bn/self.part, part2_bn/self.part, part3_bn/self.part,
part4_bn/self.part, part5_bn/self.part, part6_bn/self.part], dim=1)
def load_param(self, trained_path, load=False):
print("Run load_param")
if not load:
param_dict = torch.load(trained_path, map_location='cpu', weights_only=False)
else:
param_dict = trained_path
if 'model' in param_dict:
param_dict = param_dict['model']
if 'state_dict' in param_dict:
param_dict = param_dict['state_dict']
model_dict = self.state_dict() # Get the state_dict of the current model
new_param_dict = {}
for k, v in param_dict.items():
if 'head' in k or 'dist' in k:
continue
            # Reshape flat patch-embedding weights into the Conv2d kernel layout
if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
O, I, H, W = self.base.patch_embed.proj.weight.shape
v = v.reshape(O, -1, H, W)
# Resize Positional Embedding
elif k == 'pos_embed' and v.shape != self.base.pos_embed.shape:
v = resize_pos_embed(v, self.base.pos_embed, self.base.patch_embed.num_y, self.base.patch_embed.num_x)
# Handling `base.` prefix
new_k = k
if k.startswith("base.") and k[5:] in model_dict:
new_k = k[5:] # Remove base.
elif not k.startswith("base.") and ("base." + k) in model_dict:
new_k = "base." + k # Add base.
if new_k in ['Cam', 'base.Cam'] and new_k in model_dict:
expected_shape = model_dict[new_k].shape # Cam size that the current model expects
print(f"[Before Resizing] {new_k}: {v.shape} -> Expected: {expected_shape}")
if v.shape[0] > expected_shape[0]: # Keep only the front part if the size is larger
v = v[:expected_shape[0], :, :]
elif v.shape[0] < expected_shape[0]: # Create a new tensor for smaller sizes
new_v = torch.randn(expected_shape) # Random initialization (other values are possible)
new_v[:v.shape[0], :, :] = v # Keep existing values
v = new_v
print(f"[After Resizing] {new_k}: {v.shape}") # Confirm after changing the size
new_param_dict[new_k] = v
continue
# Update only if Shape fits
if new_k in model_dict and model_dict[new_k].shape == v.shape:
new_param_dict[new_k] = v
# Finally, update the state_dict
model_dict.update(new_param_dict)
self.load_state_dict(model_dict, strict=False)
print("Checkpoint loaded successfully.")

View File

@@ -31,7 +31,6 @@ Pipeline (per frame)
 Parameters
 ──────────
 weights_path   str   path to iLIDSVIDbest_CMC.pth (required)
-keyreID_path   str   path to KeyRe-ID source directory
 num_classes    int   training split size (150 for iLIDS-VID split-0)
 camera_num     int   cameras in training set (2 for iLIDS-VID)
 device         str   'cuda:0' or 'cpu'
@@ -44,7 +43,6 @@ Parameters
 """
 import os
-import sys
 import time
 import colorsys
@@ -140,8 +138,6 @@ class ReIDNode(Node):
             os.path.join(
                 get_package_share_directory('tracking_re_id'),
                 'weights', 'iLIDSVIDbest_CMC.pth'))
-        self.declare_parameter('keyreID_path',
-                               os.path.expanduser('~/KeyRe-ID'))
         self.declare_parameter('num_classes', 150)
         self.declare_parameter('camera_num', 2)
         self.declare_parameter('device', 'cuda:0')
@@ -153,7 +149,6 @@ class ReIDNode(Node):
         self.declare_parameter('headless', False)
         weights_path = self.get_parameter('weights_path').value
-        keyreID_path = self.get_parameter('keyreID_path').value
         num_classes = self.get_parameter('num_classes').value
         camera_num = self.get_parameter('camera_num').value
         device_str = self.get_parameter('device').value
@@ -175,14 +170,7 @@ class ReIDNode(Node):
         self.get_logger().info('MMPose loaded.')
         # ── KeyRe-ID ─────────────────────────────────────────────────────────
-        if keyreID_path not in sys.path:
-            sys.path.insert(0, keyreID_path)
-        try:
-            from KeyRe_ID_model import KeyRe_ID  # noqa: PLC0415
-        except ImportError as exc:
-            self.get_logger().fatal(
-                f'Cannot import KeyRe_ID_model from {keyreID_path}: {exc}')
-            raise
+        from .KeyRe_ID_model import KeyRe_ID  # noqa: PLC0415
         self.get_logger().info(f'Loading KeyRe-ID weights from {weights_path}')
         self._model = KeyRe_ID(
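A minimal launch sketch for the updated node (the executable name `re_id_node` and the launch-file location are assumptions, not shown in this diff), illustrating the remaining parameters now that `keyreID_path` is gone and the model is imported from inside the package:

from launch import LaunchDescription
from launch_ros.actions import Node


def generate_launch_description():
    return LaunchDescription([
        Node(
            package='tracking_re_id',
            executable='re_id_node',  # hypothetical executable name
            parameters=[{
                'weights_path': '/path/to/iLIDSVIDbest_CMC.pth',
                'num_classes': 150,   # iLIDS-VID split-0 training identities
                'camera_num': 2,      # cameras in the training set
                'device': 'cuda:0',
                'headless': False,
            }],
        ),
    ])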

View File

@@ -0,0 +1,352 @@
import math
from itertools import repeat
import torch
import torch.nn as nn
import torch.nn.functional as F
import collections.abc
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
to_2tuple = _ntuple(2)
def drop_path(x, drop_prob: float = 0., training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class PatchEmbed_overlap(nn.Module):
""" Image to Patch Embedding with overlapping patches"""
def __init__(self, img_size=224, patch_size=16, stride_size=20, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
stride_size_tuple = to_2tuple(stride_size)
self.num_x = (img_size[1] - patch_size[1]) // stride_size_tuple[1] + 1
self.num_y = (img_size[0] - patch_size[0]) // stride_size_tuple[0] + 1
        print('using stride: {}, patch grid: num_y = {} x num_x = {}'.format(stride_size, self.num_y, self.num_x))
num_patches = self.num_x * self.num_y
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride_size)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.InstanceNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
x = x.flatten(2).transpose(1, 2) # [64, 8, 768]
return x
class TransReID(nn.Module):
""" Transformer-based Object Re-Identification"""
def __init__(self, img_size=224, patch_size=16, stride_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., camera=0,
drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, cam_lambda =3.0):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.cam_num = camera
self.cam_lambda = cam_lambda
self.patch_embed = PatchEmbed_overlap(img_size=img_size, patch_size=patch_size, stride_size=stride_size, in_chans=in_chans,embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.Cam = nn.Parameter(torch.zeros(camera, 1, embed_dim))
trunc_normal_(self.Cam, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.fc = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.cls_token, std=.02)
trunc_normal_(self.pos_embed, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
    def get_classifier(self):
        return self.fc
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.fc = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x, camera_id):
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
        if camera_id is not None:
            x = x + self.pos_embed + self.cam_lambda * self.Cam[camera_id]
        else:
            x = x + self.pos_embed
x = self.pos_drop(x)
for blk in self.blocks[:-1]:
x = blk(x)
return x
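    # Shape note (explanatory comment, not in the original file; numbers assume the 256x128,
    # patch-16 configuration used by KeyRe_ID): patch_embed maps [B, 3, 256, 128] to [B, 128, 768];
    # prepending the cls token and adding positional + camera embeddings gives [B, 129, 768].
    # forward_features deliberately runs all but the last transformer block, since KeyRe_ID's
    # global branch (b1) and local branch (b2) each apply their own final block on top.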
def forward(self, x, cam_label=None):
x = self.forward_features(x, cam_label)
return x
def load_param(self, model_path, load=False):
print("Run load_param")
if not load:
param_dict = torch.load(model_path, map_location='cpu', weights_only=False)
else:
param_dict = model_path
if 'model' in param_dict:
param_dict = param_dict['model']
if 'state_dict' in param_dict:
param_dict = param_dict['state_dict']
model_dict = self.state_dict()
new_param_dict = {}
for k, v in param_dict.items():
if 'head' in k or 'dist' in k:
continue
if k in ['Cam', 'base.Cam'] and k in model_dict:
expected_shape = model_dict[k].shape
if v.shape[0] > expected_shape[0]:
print(f"⚠️ Resizing '{k}' from {v.shape} to {expected_shape}")
v = v[:expected_shape[0], :, :]
elif v.shape[0] < expected_shape[0]:
print(f"⚠️ Expanding '{k}' from {v.shape} to {expected_shape}")
new_v = torch.randn(expected_shape)
new_v[:v.shape[0], :, :] = v
v = new_v
new_param_dict[k] = v
continue
if k in model_dict and model_dict[k].shape == v.shape:
new_param_dict[k] = v
model_dict.update(new_param_dict)
self.load_state_dict(model_dict, strict=False)
print("✅ Checkpoint loaded successfully.")
def resize_pos_embed(posemb, posemb_new, height, width):
    # Rescale the grid of position embeddings when loading from state_dict. Adapted from
    # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
    ntok_new = posemb_new.shape[1]
    posemb_token, posemb_grid = posemb[:, :1], posemb[0, 1:]
    ntok_new -= 1
    gs_old = int(math.sqrt(len(posemb_grid)))
    print('Resized position embedding from size: {} to size: {} with height: {} width: {}'.format(posemb.shape, posemb_new.shape, height, width))
    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
    posemb_grid = F.interpolate(posemb_grid, size=(height, width), mode='bilinear')
    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, height * width, -1)
    posemb = torch.cat([posemb_token, posemb_grid], dim=1)
    return posemb
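# Illustrative call (an assumption added for clarity, not part of the original file): loading an
# ImageNet ViT checkpoint whose pos_embed is [1, 197, 768] (14x14 grid + cls token) into the
# 256x128 / patch-16 backbone above resizes it to [1, 129, 768] for the 16x8 patch grid:
#   new_pe = resize_pos_embed(old_pe, torch.zeros(1, 129, 768), 16, 8)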
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
print("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
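# --- Hedged construction sketch (illustrative only, not part of the committed file): builds the
# --- backbone with the same arguments KeyRe_ID_model.py passes and checks the per-frame token
# --- sequence shape.
if __name__ == "__main__":
    from functools import partial

    backbone = TransReID(
        img_size=[256, 128], patch_size=16, stride_size=[16, 16], embed_dim=768, depth=12,
        num_heads=12, mlp_ratio=4, qkv_bias=True, camera=2, drop_path_rate=0.1,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), cam_lambda=3.0).eval()
    tokens = backbone(torch.randn(2, 3, 256, 128), cam_label=torch.zeros(2, dtype=torch.long))
    print(tokens.shape)  # expected: [2, 129, 768] -> 16x8 patches plus the cls token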