A sample for Tensorial Recurrent Neural Networks

A tensorial RNN is constructed by replacing the input-to-hidden layer of an RNN with tensor cores.

Here is a tensor-ring example showing how to use a TR-based model with tednet.
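
To see what the replacement amounts to, here is a minimal sketch in which a dense 784-to-256 linear layer is swapped for a tensor-ring layer. The `tr.TRLinear` name and its `(in_shape, out_shape, ranks)` signature are assumed here by analogy with the `tr.TRLSTM` call used below; check the tednet API reference if yours differs.

import torch
import tednet.tnn.tensor_ring as tr

dense = torch.nn.Linear(784, 256)                         # ~200k parameters
tr_layer = tr.TRLinear([28, 28], [16, 16], [5, 5, 5, 5])  # a ring of small cores

x = torch.randn(32, 784)
print(dense(x).shape)     # torch.Size([32, 256])
print(tr_layer(x).shape)  # torch.Size([32, 256]), assuming flat input is accepted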

[1]:
from managpu import GpuManager
my_gpu = GpuManager()
my_gpu.set_by_memory(1)

import random
from collections import namedtuple

import tednet as tdt
import tednet.tnn.tensor_ring as tr

import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from torchvision import datasets, transforms
No GPU Util Limit!
Sorted by memory:
    GPU Index: 2       GPU FreeMemory: 11176 MB       GPU Util: 0%
    GPU Index: 4       GPU FreeMemory: 11176 MB       GPU Util: 0%
    GPU Index: 1       GPU FreeMemory: 10129 MB       GPU Util: 0%
    GPU Index: 0       GPU FreeMemory: 6133 MB        GPU Util: 37%
    GPU Index: 3       GPU FreeMemory: 1109 MB        GPU Util: 94%
    GPU Index: 5       GPU FreeMemory: 1109 MB        GPU Util: 100%
    GPU Index: 6       GPU FreeMemory: 1109 MB        GPU Util: 100%
    GPU Index: 7       GPU FreeMemory: 1109 MB        GPU Util: 95%
Qualified GPU Index is: [2]
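
The managpu call above ranks the visible GPUs by free memory and pins the process to the best one. If managpu is not installed, roughly the same effect can be had by setting `CUDA_VISIBLE_DEVICES` by hand before torch initializes CUDA (the index 2 below is only an example; pick a free card on your machine):

import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"  # must happen before any CUDA call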

Set basic environment

[2]:
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
seed = 233
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if use_cuda:
    torch.cuda.manual_seed_all(seed)
    # cuDNN autotuning must be off for deterministic results
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

LSTMState = namedtuple('LSTMState', ['hx', 'cx'])
Input_Size = np.prod([28, 28])
Hidden_Size = 256
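
`LSTMState` mirrors the `(h, c)` pair an LSTM carries between steps, and `Hidden_Size` (256) equals `np.prod([16, 16])`, the hidden shape used by the model below. A zero initial state for a batch of 4 looks like this; the train/test loops construct it the same way:

state = LSTMState(torch.zeros(4, Hidden_Size), torch.zeros(4, Hidden_Size))
print(state.hx.shape, state.cx.shape)  # torch.Size([4, 256]) for both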

Set dataloader

[3]:
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=128, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=256, shuffle=True, **kwargs)
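
A quick sanity check of what the loaders yield can save confusion later: each training batch arrives as `[128, 1, 28, 28]`, which `forward()` below flattens into a length-1 sequence of 784 features.

data, target = next(iter(train_loader))
print(data.shape, target.shape)  # torch.Size([128, 1, 28, 28]) torch.Size([128])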

Set TR-LSTM Classifier

[4]:
class ClassifierTR(nn.Module):
    def __init__(self, num_class=10):
        super(ClassifierTR, self).__init__()
        in_shape = [28, 28]
        hidden_shape = [16, 16]

        self.hidden_size = Hidden_Size

        self.lstm = tr.TRLSTM(in_shape, hidden_shape, [5, 5, 5, 5])
        self.fc = nn.Linear(self.hidden_size, num_class)

    def forward(self, x, state):
        input_shape = x.shape
        batch_size = input_shape[0]
        seq_size = input_shape[1]
        x = x.view(batch_size, seq_size, -1)
        x = x.permute(1, 0, 2)
        _, x = self.lstm(x, state)
        x = self.fc(x[0])
        return x
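
The point of the tensor-ring layer is the parameter count. Here is a back-of-the-envelope comparison against an uncompressed nn.LSTM of the same sizes (this is only a rough check, not the exact ratio tednet prints at construction time):

tr_model = ClassifierTR()
dense_lstm = nn.LSTM(input_size=784, hidden_size=256)
n_tr = sum(p.numel() for p in tr_model.lstm.parameters())
n_dense = sum(p.numel() for p in dense_lstm.parameters())
print(n_tr, n_dense, n_dense / n_tr)  # dense count is a few hundred times larger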

Set training and testing process

[5]:
def train(model, device, train_loader, optimizer, epoch, log_interval=200):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()

        batch_size = data.shape[0]
        state = LSTMState(torch.zeros(batch_size, Hidden_Size, device=device),
                          torch.zeros(batch_size, Hidden_Size, device=device))
        output = model(data, state)

        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item()))


def test(model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)

            batch_size = data.shape[0]
            state = LSTMState(torch.zeros(batch_size, Hidden_Size, device=device),
                              torch.zeros(batch_size, Hidden_Size, device=device))
            output = model(data, state)

            test_loss += F.cross_entropy(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
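
Before committing to the full 20-epoch run, a one-batch smoke test on random data confirms the shapes line up:

smoke_model = ClassifierTR().to(device)
dummy = torch.randn(2, 1, 28, 28, device=device)
dummy_state = LSTMState(torch.zeros(2, Hidden_Size, device=device),
                        torch.zeros(2, Hidden_Size, device=device))
print(smoke_model(dummy, dummy_state).shape)  # torch.Size([2, 10])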

Begin training

[6]:
# Define a TR-LSTM
model = ClassifierTR()
model.to(device)
optimizer = optim.Adam(model.parameters(), lr=2e-4, weight_decay=0.00016667)

for epoch in range(20):
    train(model, device, train_loader, optimizer, epoch)
    test(model, device, test_loader)
compression_ration is:  236.12235294117647
Train Epoch: 0 [0/60000 (0%)]   Loss: 2.271237
Train Epoch: 0 [25600/60000 (43%)]      Loss: 2.037606
Train Epoch: 0 [51200/60000 (85%)]      Loss: 1.804040

Test set: Average loss: 1.5393, Accuracy: 5888/10000 (59%)

Train Epoch: 1 [0/60000 (0%)]   Loss: 1.675199
Train Epoch: 1 [25600/60000 (43%)]      Loss: 1.413317
Train Epoch: 1 [51200/60000 (85%)]      Loss: 1.376522

Test set: Average loss: 1.0163, Accuracy: 6931/10000 (69%)

Train Epoch: 2 [0/60000 (0%)]   Loss: 1.204728
Train Epoch: 2 [25600/60000 (43%)]      Loss: 1.068120
Train Epoch: 2 [51200/60000 (85%)]      Loss: 1.048317

Test set: Average loss: 0.7734, Accuracy: 7470/10000 (75%)

Train Epoch: 3 [0/60000 (0%)]   Loss: 0.902623
Train Epoch: 3 [25600/60000 (43%)]      Loss: 0.709798
Train Epoch: 3 [51200/60000 (85%)]      Loss: 0.772015

Test set: Average loss: 0.6653, Accuracy: 7714/10000 (77%)

Train Epoch: 4 [0/60000 (0%)]   Loss: 0.793773
Train Epoch: 4 [25600/60000 (43%)]      Loss: 0.747470
Train Epoch: 4 [51200/60000 (85%)]      Loss: 0.739394

Test set: Average loss: 0.5988, Accuracy: 8006/10000 (80%)

Train Epoch: 5 [0/60000 (0%)]   Loss: 0.711895
Train Epoch: 5 [25600/60000 (43%)]      Loss: 0.610803
Train Epoch: 5 [51200/60000 (85%)]      Loss: 0.705731

Test set: Average loss: 0.5535, Accuracy: 8195/10000 (82%)

Train Epoch: 6 [0/60000 (0%)]   Loss: 0.803615
Train Epoch: 6 [25600/60000 (43%)]      Loss: 0.608962
Train Epoch: 6 [51200/60000 (85%)]      Loss: 0.600730

Test set: Average loss: 0.5210, Accuracy: 8317/10000 (83%)

Train Epoch: 7 [0/60000 (0%)]   Loss: 0.507197
Train Epoch: 7 [25600/60000 (43%)]      Loss: 0.634771
Train Epoch: 7 [51200/60000 (85%)]      Loss: 0.603676

Test set: Average loss: 0.4965, Accuracy: 8445/10000 (84%)

Train Epoch: 8 [0/60000 (0%)]   Loss: 0.553993
Train Epoch: 8 [25600/60000 (43%)]      Loss: 0.539877
Train Epoch: 8 [51200/60000 (85%)]      Loss: 0.589516

Test set: Average loss: 0.4719, Accuracy: 8535/10000 (85%)

Train Epoch: 9 [0/60000 (0%)]   Loss: 0.575935
Train Epoch: 9 [25600/60000 (43%)]      Loss: 0.494978
Train Epoch: 9 [51200/60000 (85%)]      Loss: 0.600699

Test set: Average loss: 0.4522, Accuracy: 8601/10000 (86%)

Train Epoch: 10 [0/60000 (0%)]  Loss: 0.425709
Train Epoch: 10 [25600/60000 (43%)]     Loss: 0.439076
Train Epoch: 10 [51200/60000 (85%)]     Loss: 0.427697

Test set: Average loss: 0.4368, Accuracy: 8677/10000 (87%)

Train Epoch: 11 [0/60000 (0%)]  Loss: 0.512469
Train Epoch: 11 [25600/60000 (43%)]     Loss: 0.499898
Train Epoch: 11 [51200/60000 (85%)]     Loss: 0.412309

Test set: Average loss: 0.4227, Accuracy: 8710/10000 (87%)

Train Epoch: 12 [0/60000 (0%)]  Loss: 0.555337
Train Epoch: 12 [25600/60000 (43%)]     Loss: 0.330346
Train Epoch: 12 [51200/60000 (85%)]     Loss: 0.340294

Test set: Average loss: 0.4089, Accuracy: 8746/10000 (87%)

Train Epoch: 13 [0/60000 (0%)]  Loss: 0.419118
Train Epoch: 13 [25600/60000 (43%)]     Loss: 0.335568
Train Epoch: 13 [51200/60000 (85%)]     Loss: 0.328040

Test set: Average loss: 0.3973, Accuracy: 8792/10000 (88%)

Train Epoch: 14 [0/60000 (0%)]  Loss: 0.384958
Train Epoch: 14 [25600/60000 (43%)]     Loss: 0.436771
Train Epoch: 14 [51200/60000 (85%)]     Loss: 0.440793

Test set: Average loss: 0.3865, Accuracy: 8819/10000 (88%)

Train Epoch: 15 [0/60000 (0%)]  Loss: 0.483415
Train Epoch: 15 [25600/60000 (43%)]     Loss: 0.395679
Train Epoch: 15 [51200/60000 (85%)]     Loss: 0.482825

Test set: Average loss: 0.3761, Accuracy: 8861/10000 (89%)

Train Epoch: 16 [0/60000 (0%)]  Loss: 0.436840
Train Epoch: 16 [25600/60000 (43%)]     Loss: 0.339861
Train Epoch: 16 [51200/60000 (85%)]     Loss: 0.366399

Test set: Average loss: 0.3689, Accuracy: 8894/10000 (89%)

Train Epoch: 17 [0/60000 (0%)]  Loss: 0.442870
Train Epoch: 17 [25600/60000 (43%)]     Loss: 0.370757
Train Epoch: 17 [51200/60000 (85%)]     Loss: 0.403360

Test set: Average loss: 0.3585, Accuracy: 8924/10000 (89%)

Train Epoch: 18 [0/60000 (0%)]  Loss: 0.346232
Train Epoch: 18 [25600/60000 (43%)]     Loss: 0.452554
Train Epoch: 18 [51200/60000 (85%)]     Loss: 0.318595

Test set: Average loss: 0.3496, Accuracy: 8960/10000 (90%)

Train Epoch: 19 [0/60000 (0%)]  Loss: 0.272001
Train Epoch: 19 [25600/60000 (43%)]     Loss: 0.430083
Train Epoch: 19 [51200/60000 (85%)]     Loss: 0.446394

Test set: Average loss: 0.3433, Accuracy: 8976/10000 (90%)
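
Once training finishes, the model can be saved and restored like any other PyTorch module (the filename below is arbitrary):

torch.save(model.state_dict(), "tr_lstm_mnist.pt")

restored = ClassifierTR().to(device)
restored.load_state_dict(torch.load("tr_lstm_mnist.pt", map_location=device))
test(restored, device, test_loader)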