
How to apply LoRA to nn.ConvTranspose2d?

vanmeruso opened this issue 1 year ago · 3 comments

How can I apply LoRA to nn.ConvTranspose2d?

In _ConvNd there is a _conv_forward method, but _ConvTransposeNd has no _conv_forward.
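
For context, a minimal sketch (not from the original post; the helper name conv_transpose2d_with_weight is hypothetical): loralib's ConvLoRA works by calling _conv_forward with an adapted weight, while nn.ConvTranspose2d instead calls F.conv_transpose2d directly, so a wrapper has to do the same. For the default output_size=None case, one can reuse the module's stored output_padding:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    def conv_transpose2d_with_weight(conv: nn.ConvTranspose2d, x: torch.Tensor,
                                     weight: torch.Tensor) -> torch.Tensor:
        # Mirrors nn.ConvTranspose2d's forward path (output_size=None case),
        # but with an arbitrary, e.g. LoRA-adapted, weight tensor.
        return F.conv_transpose2d(
            x, weight, conv.bias, conv.stride, conv.padding,
            conv.output_padding, conv.groups, conv.dilation)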


vanmeruso · Mar 05 '24 07:03

In torch 1.10.0, I wrote some ConvTranspose2d LoRA code like this:

    import math

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import loralib as lora


    class ConvTransposeLoRA(nn.Module, lora.LoRALayer):
        def __init__(self, conv_module, in_channels, out_channels, kernel_size, r=0,
                     lora_alpha=1, lora_dropout=0., merge_weights=True, **kwargs):
            super(ConvTransposeLoRA, self).__init__()
            self.conv = conv_module(in_channels, out_channels, kernel_size, **kwargs)
            lora.LoRALayer.__init__(self, r=r, lora_alpha=lora_alpha,
                                    lora_dropout=lora_dropout, merge_weights=merge_weights)
            assert isinstance(kernel_size, int)
            # Actual trainable parameters
            if r > 0:
                self.lora_A = nn.Parameter(
                    self.conv.weight.new_zeros((r * kernel_size, in_channels * kernel_size))
                )
                self.lora_B = nn.Parameter(
                    self.conv.weight.new_zeros(
                        (out_channels // self.conv.groups * kernel_size, r * kernel_size))
                )
                self.scaling = self.lora_alpha / self.r
                # Freeze the pre-trained weight matrix
                self.conv.weight.requires_grad = False
            self.reset_parameters()
            self.merged = False

        def reset_parameters(self):
            self.conv.reset_parameters()
            if hasattr(self, 'lora_A'):
                # Initialize A the same way as the default for nn.Linear and B to zero
                nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
                nn.init.zeros_(self.lora_B)

        def train(self, mode=True):
            super(ConvTransposeLoRA, self).train(mode)
            if mode:
                if self.merge_weights and self.merged:
                    if self.r > 0:
                        # Make sure that the weights are not merged
                        self.conv.weight.data -= (self.lora_B @ self.lora_A).view(self.conv.weight.shape) * self.scaling
                    self.merged = False
            else:
                if self.merge_weights and not self.merged:
                    if self.r > 0:
                        # Merge the weights and mark it
                        self.conv.weight.data += (self.lora_B @ self.lora_A).view(self.conv.weight.shape) * self.scaling
                    self.merged = True

        def forward(self, x, output_size=None):
            if self.r > 0 and not self.merged:
                # Note: the private _output_padding signature differs across PyTorch
                # versions; this keyword form matches torch 1.10.x, while newer
                # releases also expect a num_spatial_dims argument (here it would be 2).
                output_padding = self.conv._output_padding(
                    input=x, output_size=output_size, stride=self.conv.stride,
                    padding=self.conv.padding, kernel_size=self.conv.kernel_size,
                    dilation=self.conv.dilation)

                return F.conv_transpose2d(
                    x, self.conv.weight + (self.lora_B @ self.lora_A).view(self.conv.weight.shape) * self.scaling,
                    self.conv.bias, self.conv.stride, self.conv.padding,
                    output_padding, self.conv.groups, self.conv.dilation)

            return self.conv(x, output_size)
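
A hedged usage sketch (the layer sizes are illustrative, and it assumes torch 1.10.x as stated above, since the private _output_padding signature changed in later releases):

    deconv_lora = ConvTransposeLoRA(
        nn.ConvTranspose2d, in_channels=64, out_channels=32, kernel_size=3,
        r=4, lora_alpha=8, stride=2, padding=1)
    y = deconv_lora(torch.randn(1, 64, 16, 16))  # LoRA branch active
    deconv_lora.eval()   # merges lora_B @ lora_A into conv.weight
    deconv_lora.train()  # un-merges again for training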

    

vanmeruso · Mar 05 '24 07:03

I keep getting the error "conv object has no attribute '_output_padding'". Do you know how I could solve this?

meeselizabeth · May 15 '24 23:05
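
A hedged note on the error above (not an official fix; compute_output_padding is a hypothetical helper): _output_padding is defined only on the transposed-convolution classes, so the attribute error typically appears if conv_module is a regular nn.Conv2d rather than nn.ConvTranspose2d. The private method's signature has also varied across PyTorch releases (newer versions expect a num_spatial_dims argument), which a wrapper can tolerate like this:

    import torch.nn as nn

    def compute_output_padding(conv, x, output_size, num_spatial_dims=2):
        # _output_padding only exists on transposed convolutions.
        assert isinstance(conv, (nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d))
        try:
            # Newer PyTorch releases take an explicit num_spatial_dims argument.
            return conv._output_padding(x, output_size, conv.stride, conv.padding,
                                        conv.kernel_size, num_spatial_dims, conv.dilation)
        except TypeError:
            # Older releases (e.g. torch 1.10) omit num_spatial_dims.
            return conv._output_padding(x, output_size, conv.stride, conv.padding,
                                        conv.kernel_size, conv.dilation)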