Answering https://discuss.pytorch.org/t/a-set-of-data-sum-as-the-dividend-how-to-find-grad/182303:

We can find the gradient of the whole output with respect to the inputs directly with autograd. Now let's concentrate on finding one element of that gradient.
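A minimal autograd sketch, assuming the setup in the thread is y = x / x.sum() (each element divided by the sum of the whole set); the exact expression and tensor shapes in the question are an assumption here:

import torch

x = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
y = x / x.sum()      # y_i = x_i / (x_1 + x_2 + x_3)  -- assumed setup
y[0].backward()      # gradient of a single output element w.r.t. every input
print(x.grad)        # dy_0/dx_k = (delta_0k * x.sum() - x_0) / x.sum()**2
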
# Reload the NVIDIA unified-memory kernel module (a common fix when CUDA stops detecting the GPU)
sudo rmmod nvidia_uvm
sudo modprobe nvidia_uvm

# Install nvidia-docker2 and reload the Docker daemon
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
sudo apt-get update
sudo apt-get install -y nvidia-docker2
sudo pkill -SIGHUP dockerd
import torch
import torch.nn as nn
import torch.nn.functional as F

class CNN(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        # 1 input channel -> 8 feature maps, 6x6 kernel, stride 1, padding 2
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=6, stride=1, padding=2)
        self.RL1 = nn.ReLU()

    def forward(self, x):
        # convolution followed by the ReLU non-linearity
        return self.RL1(self.conv1(x))
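A quick shape check for the module above (the 28x28 single-channel input size is just an assumed example):

x = torch.randn(1, 1, 28, 28)   # batch of one single-channel image
print(CNN()(x).shape)           # torch.Size([1, 8, 27, 27]): (28 + 2*2 - 6) // 1 + 1 = 27
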
import torch, torch.nn as nn
import torch.optim as optim

def print_grads(modules, string):
    # print the .grad of every parameter in the given modules, with separators between modules
    print(string)
    for mod in modules:
        for p in mod.parameters():
            print(p.grad)
        print('**')
    print("-----")
Create the user:
----------------
sudo useradd -m -d /home/<user> -s /bin/bash -c "<rollnumber>" -U <user>

Set the password:
-----------------
sudo passwd <user>

Add user to sudo (assuming the standard Debian/Ubuntu "sudo" group):
--------------------------------------------------------------------
sudo usermod -aG sudo <user>
import torch, torch.nn as nn
import torch.optim as optim, torch.nn.functional as F

class CustomLinearNoWeightDecay(nn.Module):
    def __init__(self, mask):
        super().__init__()
        # fixed connectivity mask; a buffer so it moves with .to()/.cuda() but is never trained
        self.register_buffer("mask", mask)
        out_channels, in_channels = mask.shape
        self.weight = nn.Parameter(torch.randn(out_channels, in_channels))

    def forward(self, x):
        # one plausible forward pass: a linear layer whose weights are zeroed where the mask is zero
        return F.linear(x, self.weight * self.mask)
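The "NoWeightDecay" in the name suggests these parameters are meant to be excluded from weight decay; a hedged sketch of doing that with optimizer parameter groups (the mask shape, the second layer, and the hyperparameters are made up):

masked = CustomLinearNoWeightDecay(torch.ones(2, 4))
other = nn.Linear(2, 1)
optimizer = optim.SGD(
    [
        {"params": masked.parameters(), "weight_decay": 0.0},  # no decay on the masked layer
        {"params": other.parameters(), "weight_decay": 1e-4},
    ],
    lr=0.1,
)
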
import torch, torch.nn as nn

class LowlevelModule(nn.Module):
    def __init__(self, custom_val):
        super().__init__()
        # custom_val is expected to be a scalar tensor, since .item() is called on it below
        self.custom_val = custom_val

    def print_custom_val(self):
        print(self.custom_val.item())
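Usage sketch (the stored value must be a tensor, since .item() is called on it; the number is arbitrary):

m = LowlevelModule(torch.tensor(3.14))
m.print_custom_val()   # prints the Python float held by the tensor
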
import torchvision, copy
import torch, torch.nn as nn

@torch.no_grad()
def reset_all_weights(model: nn.Module) -> None:
    """
    refs:
    - https://discuss.pytorch.org/t/how-to-re-set-alll-parameters-in-a-network/20819/6
    - https://stackoverflow.com/questions/63627997/reset-parameters-of-a-neural-network-in-pytorch
    - https://pytorch.org/docs/stable/generated/torch.nn.Module.html
    """
    def weight_reset(m: nn.Module):
        # call reset_parameters() on every submodule that defines it
        reset_parameters = getattr(m, "reset_parameters", None)
        if callable(reset_parameters):
            reset_parameters()
    # Module.apply() visits every submodule recursively
    model.apply(weight_reset)
# Build a CUDA extension for the listed compute capabilities, then install it
TORCH_CUDA_ARCH_LIST="5.2 6.0 6.1 7.0 7.5 8.0 8.6+PTX" python setup.py build
python setup.py install
D:\cmder_mini\Cmder.exe "%ActivDir%"