how to use nmr sequence wavelet coefficients as the input of a resnet-18 cnn, with the corresponding clean wavelet coefficients as the output, for denoising in python

To train a ResNet-18-style model in Python that maps noisy NMR wavelet coefficients to their clean counterparts for denoising, you can follow these steps:

  1. Load the noisy NMR sequence wavelet coefficients as the input dataset.
import numpy as np

# noisy coefficients, one vector per sample: shape (n_samples, n_coeffs)
X = np.load('wavelet_coefficients.npy')
  2. Load the corresponding clean NMR sequence wavelet coefficients as the target (output) dataset.
# clean targets, same shape as X
Y = np.load('clean_wavelet_coefficients.npy')
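If you still need to produce those .npy files, the coefficients can be computed from the raw NMR signals with PyWavelets. The snippet below is a minimal sketch: the file name noisy_signals.npy, the 'db4' wavelet, and the decomposition level 4 are illustrative assumptions, not part of the original recipe, and it assumes every signal has the same fixed length.

import numpy as np
import pywt

# hypothetical input: one noisy NMR signal per row, all the same length
signals = np.load('noisy_signals.npy')

def to_coeff_vector(signal, wavelet='db4', level=4):
    # pywt.wavedec returns [cA_level, cD_level, ..., cD_1];
    # concatenating them gives one fixed-length feature vector per signal
    coeffs = pywt.wavedec(signal, wavelet, level=level)
    return np.concatenate(coeffs)

X = np.stack([to_coeff_vector(s) for s in signals])
np.save('wavelet_coefficients.npy', X)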
  3. Split the dataset into training and testing sets.
from sklearn.model_selection import train_test_split

X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
  4. Preprocess the input data by standardizing it (the clean targets are left in their original units).
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
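The fitted scaler is needed again at inference time (the denoise() function in the last step reuses it), so it is worth persisting alongside the model. A small sketch using joblib; the file name is arbitrary.

import joblib

joblib.dump(scaler, 'scaler.joblib')  # save once, after fitting on X_train
# later, e.g. in a separate inference script:
scaler = joblib.load('scaler.joblib')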
  5. Define the model. The code below is a simplified 1-D residual network in the spirit of ResNet-18: layers 2-4 widen the channels, so 1x1 shortcut convolutions project the residual to match, and the usual classification head (average pooling plus a linear layer) is replaced by a 1x1 convolution so the output sequence has the same length as the input, as a denoiser requires.
import torch
import torch.nn as nn
import torch.optim as optim

class ResNet18(nn.Module):
    """Simplified 1-D residual CNN for sequence-to-sequence denoising."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv1d(1, 64, kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm1d(64)
        self.relu = nn.ReLU(inplace=True)
        # each layer is a pair of 3-tap convolutions (stride 1 everywhere,
        # so the sequence length is preserved end to end)
        self.layer1 = self._block(64, 64)
        self.layer2 = self._block(64, 128)
        self.layer3 = self._block(128, 256)
        self.layer4 = self._block(256, 512)
        # 1x1 projections so the residual matches the widened channels
        self.short2 = nn.Conv1d(64, 128, kernel_size=1, bias=False)
        self.short3 = nn.Conv1d(128, 256, kernel_size=1, bias=False)
        self.short4 = nn.Conv1d(256, 512, kernel_size=1, bias=False)
        # map back to a single channel: the denoised coefficient sequence
        self.head = nn.Conv1d(512, 1, kernel_size=1)

    @staticmethod
    def _block(in_ch, out_ch):
        return nn.Sequential(
            nn.Conv1d(in_ch, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm1d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv1d(out_ch, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm1d(out_ch),
        )

    def forward(self, x):
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.relu(self.layer1(x) + x)               # identity shortcut
        x = self.relu(self.layer2(x) + self.short2(x))  # projected shortcuts
        x = self.relu(self.layer3(x) + self.short3(x))
        x = self.relu(self.layer4(x) + self.short4(x))
        return self.head(x)

resnet = ResNet18()
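A quick shape check (optional) confirms the network maps a sequence to one of the same length; the batch size and length below are arbitrary.

dummy = torch.randn(2, 1, 256)  # 2 single-channel sequences of length 256
print(resnet(dummy).shape)      # torch.Size([2, 1, 256]), same length as input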
  6. Define the loss function and optimizer.
criterion = nn.MSELoss()
optimizer = optim.Adam(resnet.parameters(), lr=0.001)
  7. Train the model.
n_epochs = 100

for epoch in range(1, n_epochs + 1):
    train_loss = 0.0
    resnet.train()
    # one sample at a time; each row becomes a (batch=1, channels=1, length) tensor
    for data, target in zip(X_train, Y_train):
        data = torch.from_numpy(data).float().view(1, 1, -1)
        target = torch.from_numpy(target).float().view(1, 1, -1)
        optimizer.zero_grad()
        output = resnet(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    train_loss /= len(X_train)
    print(f'Epoch: {epoch}, Training Loss: {train_loss:.4f}')
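Training one sample at a time works but is slow, and BatchNorm statistics are noisy at batch size 1. If all coefficient vectors have the same length, a batched loop with a DataLoader is a drop-in alternative; this is a sketch, not part of the original recipe.

from torch.utils.data import TensorDataset, DataLoader

train_ds = TensorDataset(
    torch.from_numpy(X_train).float().unsqueeze(1),  # (N, 1, L)
    torch.from_numpy(Y_train).float().unsqueeze(1),
)
train_loader = DataLoader(train_ds, batch_size=32, shuffle=True)

for data, target in train_loader:
    optimizer.zero_grad()
    loss = criterion(resnet(data), target)
    loss.backward()
    optimizer.step()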
  8. Test the model.
resnet.eval()
test_loss = 0.0
with torch.no_grad():
    for data, target in zip(X_test, Y_test):
        data = torch.from_numpy(data).float().view(1, 1, -1)
        target = torch.from_numpy(target).float().view(1, 1, -1)
        output = resnet(data)
        loss = criterion(output, target)
        test_loss += loss.item()
test_loss /= len(X_test)
print(f'Test Loss: {test_loss:.4f}')
  9. Use the model to denoise new data.
def denoise(data):
    # the scaler expects a 2-D array, so reshape the single sequence to (1, n_coeffs)
    data = scaler.transform(data.reshape(1, -1))
    resnet.eval()
    with torch.no_grad():
        data = torch.from_numpy(data).float().view(1, 1, -1)
        output = resnet(data)
    return output.numpy().squeeze()
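For example, applied to one new noisy coefficient vector (the file name is hypothetical):

noisy = np.load('new_noisy_coefficients.npy')[0]  # one coefficient vector
clean = denoise(noisy)
print(noisy.shape, clean.shape)  # both (n_coeffs,)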
