Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import torch
- import torch.nn as nn
- import torch.utils.data as data
- from torchtnt.utils.data import CudaDataPrefetcher
- from torchvision import datasets as imageDatasets, transforms as imageTransforms
- from ptflops import get_model_complexity_info
- from sklearn.model_selection import train_test_split
- import numpy as np
- from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, ConfusionMatrixDisplay
- import matplotlib
- import matplotlib.pyplot as plt
- import os
- from pathlib import Path
- import time
- import signal
- from enum import Enum
# Load the training character set and derive the vocabulary + index mappings.
charset_file_path: Path = Path(__file__).parent / "problem1CharSet.txt"
dataset: str = charset_file_path.read_text(encoding="utf-8")

# Unique characters in deterministic (sorted) order define the vocabulary.
chars = sorted(set(dataset))
ix_to_char = dict(enumerate(chars))
char_to_ix = {ch: i for i, ch in enumerate(chars)}

# Hyperparameters / model-wide constants.
DROPOUT_PROB: float = 0.0
SEQUENCE_LENGTH: int = 10
NUM_CHARS: int = len(chars)
class LSTMNet(nn.Module):
    """Character-level next-character predictor: embedding -> LSTM -> linear head.

    Takes a batch of character-index sequences of shape (batch, seq_len) and
    returns unnormalized logits over the character vocabulary of shape
    (batch, NUM_CHARS) for the character following each sequence.
    """

    class lstmIndices(Enum):
        # nn.LSTM.forward returns (output, (h_n, c_n)); index 0 selects the
        # per-timestep output tensor.
        OUTPUT = 0

    def __init__(self):
        super().__init__()
        # Map each character index to a dense 128-dim vector.
        self.charEmbeddingLayer: nn.Embedding = nn.Embedding(
            num_embeddings=NUM_CHARS, embedding_dim=128
        )
        # NOTE(review): with the default num_layers=1, nn.LSTM's `dropout`
        # argument has no effect (PyTorch applies dropout only between stacked
        # layers and warns when dropout > 0 with a single layer). Harmless
        # while DROPOUT_PROB == 0.0, but misleading — confirm intent before
        # raising DROPOUT_PROB without also stacking layers.
        self.lstm: nn.LSTM = nn.LSTM(
            input_size=128,
            hidden_size=128,
            batch_first=True,
            dropout=DROPOUT_PROB,
        )
        # Project the final hidden state onto vocabulary logits.
        self.outputDenseLayer = nn.Sequential(
            nn.Linear(in_features=128, out_features=NUM_CHARS),
        )

    def forward(self, x):
        """Return next-character logits for each sequence in the batch.

        Args:
            x: LongTensor of character indices, shape (batch, seq_len).

        Returns:
            FloatTensor of logits, shape (batch, NUM_CHARS).
        """
        embedded = self.charEmbeddingLayer(x)  # (batch, seq_len, 128)
        lstm_out = self.lstm(embedded)[self.lstmIndices.OUTPUT.value]
        # Only the output at the final timestep feeds the prediction head.
        last_step = lstm_out[:, -1, :]  # (batch, 128)
        return self.outputDenseLayer(last_step)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement