
Whitening the Gradient before VeLO

opooladz opened this issue 1 year ago · 3 comments

For the life of me, I cannot get VeLO working in PyTorch or JAX. I am curious what happens if we whiten the gradient before giving it to VeLO. I was wondering if you might be interested in comparing.

```python
def zeropower_via_newtonschulz5(G, steps=10, eps=1e-7):
    """
    Newton-Schulz iteration to compute the zeroth power / orthogonalization of G. We opt to use a
    quintic iteration whose coefficients are selected to maximize the slope at zero. For the purpose
    of minimizing steps, it turns out to be empirically effective to keep increasing the slope at
    zero even beyond the point where the iteration no longer converges all the way to one everywhere
    on the interval. This iteration therefore does not produce UV^T but rather something like US'V^T
    where S' is diagonal with S_{ii}' ~ Uniform(0.5, 1.5), which turns out not to hurt model
    performance at all relative to UV^T, where USV^T = G is the SVD.
    """
    assert len(G.shape) == 2
    a, b, c = (3.4445, -4.7750,  2.0315)
    X = G.bfloat16()
    X /= (X.norm() + eps) # ensure top singular value <= 1
    if G.size(0) > G.size(1):
        X = X.T
    for _ in range(steps):
        A = X @ X.T
        B = b * A + c * A @ A
        X = a * X + B @ X
    if G.size(0) > G.size(1):
        X = X.T
    return X
```
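
As a quick sanity check of what the iteration produces (a minimal sketch; the shapes and step count are arbitrary, and it assumes nothing beyond `torch` and the function above):

```python
import torch

G = torch.randn(256, 128)  # arbitrary tall test matrix
X = zeropower_via_newtonschulz5(G, steps=10)

# Per the docstring, the singular values of X should land roughly in
# [0.5, 1.5] (approximate orthogonalization), regardless of G's spectrum.
svals = torch.linalg.svdvals(X.float())
print(svals.min().item(), svals.max().item())
```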

```python
    # Excerpt: the optimizer's `step` method, with the whitening pass inserted
    # before the normal VeLO update. `th` is torch; `_th_to_jax` and
    # `_jax_to_th` are the repo's tensor-conversion helpers.
    @th.no_grad()
    def step(self, closure: LossClosure) -> Union[th.Tensor, float, None]:
        with th.enable_grad():
            closure_result = closure()
            if isinstance(closure_result, tuple):
                assert len(closure_result) == 2
                loss, model_state = closure_result
            elif isinstance(closure_result, th.Tensor):
                loss = closure_result
                assert loss.numel() == 1
                model_state = None
            else:
                raise TypeError('closure returned unexpected type: ' + str(type(closure_result)))

        # Collect all gradients into a single matrix
        all_grads = []
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is not None:
                    all_grads.append(p.grad.view(-1))
        
        # Stack all gradients into a matrix
        combined_grads = th.cat(all_grads)
        n = combined_grads.shape[0]
        # Make it 2D for zeropower. Note: for an (n, 1) matrix the
        # orthogonalization collapses to G / ||G|| (up to the ~Uniform(0.5, 1.5)
        # scale), i.e. this normalizes the whole gradient rather than
        # orthogonalizing per-layer weight matrices.
        grad_matrix = combined_grads.view(n, 1)
        
        # Apply zeropower
        processed_grads = zeropower_via_newtonschulz5(grad_matrix)
        
        # Reshape and redistribute the processed gradients
        processed_grads = processed_grads.view(-1)
        start_idx = 0
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is not None:
                    numel = p.grad.numel()
                    p.grad.copy_(processed_grads[start_idx:start_idx + numel].view_as(p.grad))
                    start_idx += numel

        # Continue with normal VeLO update
        jax_grad = {
            str(i): [_th_to_jax(p.grad.ravel()) for p in group['params']]
            for (i, group) in enumerate(self.param_groups)
        }
        jax_model_state = (
            _th_to_jax(model_state.ravel())
            if model_state is not None
            else model_state
        )

        self.state['rng_key'], opt_key = jax.random.split(self.state['rng_key'])
        self.state['opt_state'] = self.opt.update(
            self.state['opt_state'],
            jax_grad,
            model_state=jax_model_state,
            loss=_th_to_jax(loss),
            key=opt_key,
        )

        for (i, group) in enumerate(self.param_groups):
            for (param, jax_param) in zip(
                    group['params'],
                    self.opt.get_params(self.state['opt_state'])[str(i)],
            ):
                param.data[:] = _jax_to_th(jax_param).reshape(param.shape)
        return loss
```
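
Worth noting: with the flattened `(n, 1)` reshape above, the Newton-Schulz pass only normalizes the combined gradient (see the inline note). A hedged alternative sketch, closer to how Muon-style optimizers use this routine, would whiten each 2D parameter's gradient separately:

```python
# Sketch only: orthogonalize per 2D weight matrix instead of the flattened
# gradient. Vectors and scalars (biases, norm gains) are left untouched,
# since zeropower_via_newtonschulz5 only accepts 2D inputs.
for group in self.param_groups:
    for p in group['params']:
        if p.grad is not None and p.grad.ndim == 2:
            p.grad.copy_(zeropower_via_newtonschulz5(p.grad))
```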
        
        

opooladz · Nov 29 '24 06:11

Hey! Love that the Newton-Schulz iterations are getting more attention! Sadly, this code base has probably become vastly out of date relative to developments on the JAX side of VeLO. On my end, the code was simply too slow, which is why I ended up not using it in practice. I believe the only way to properly speed it up would be to convert the JAX model to PyTorch.
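
(The `_th_to_jax`/`_jax_to_th` helpers aren't shown in this thread; for anyone curious, a typical zero-copy PyTorch-to-JAX bridge goes through DLPack, roughly as sketched below. This is an illustration, not the project's actual implementation.)

```python
import jax.dlpack
import torch
import torch.utils.dlpack

def th_to_jax(t: torch.Tensor):
    # Hand a torch tensor to JAX without copying, via a DLPack capsule.
    return jax.dlpack.from_dlpack(torch.utils.dlpack.to_dlpack(t.contiguous()))

def jax_to_th(a) -> torch.Tensor:
    # And back. Even zero-copy, this boundary is crossed for every tensor on
    # every step, which is one plausible source of the slowness noted above.
    return torch.utils.dlpack.from_dlpack(jax.dlpack.to_dlpack(a))
```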

I'll try to remember to run some tests with the recent PyTorch version next week. Feel free to ping me again if I haven't replied by Wednesday. :)

janEbert · Nov 29 '24 14:11

> For the life of me, I cannot get VeLO working in PyTorch or JAX. I am curious what happens if we whiten the gradient before giving it to VeLO. [...]

*(quotes the rest of the original post, including both code blocks, in full)*

Oh, is this optimizer similar to Muon?

matrix-net · Jul 12 '25 12:07

Nope, zero relation.

opooladz · Jul 12 '25 12:07