# 如何還原經過Normalize的PyTorch Tensor?

class UnNormalize(object):
    """Invert torchvision's transforms.Normalize, channel by channel.

    Normalize computes (x - mean) / std per channel, so the inverse is
    x * std + mean. `mean` and `std` must be sequences with one entry per
    channel, matching the values given to the original Normalize.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """De-normalize a (C, H, W) tensor in place and return it."""
        # Iterating a (C, H, W) tensor yields one (H, W) slice per channel,
        # paired here with that channel's mean and std.
        for t, m, s in zip(tensor, self.mean, self.std):
            # In-place inversion of (x - m) / s  ->  x * s + m.
            # NOTE: the scraped original lost this line and returned the
            # input unchanged from inside the loop.
            t.mul_(s).add_(m)
        return tensor
# Suppose transforms.Normalize was applied with this mean and std
# (the standard ImageNet statistics):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
mean=[0.485, 0.456, 0.406]
std=[0.229, 0.224, 0.225]
unorm = UnNormalize(mean=mean, std=std)
# `image` is the de-normalized tensor recovered from `normalized_image`
# (assumed to be a tensor previously passed through `normalize` — defined elsewhere).
image = unorm(normalized_image)

# 如何根據PyTorch的Model預測的output繪製出混淆矩陣(Confusion Matrix)並取得每個class的accuracy？

y_pred = []  # predicted class indices over the whole test set
y_true = []  # ground-truth labels, collected in the same order
model.eval()  # evaluation mode: disable dropout, use running BN statistics

for i, (images, target) in enumerate(test_loader):
    output = model(images)
    # torch.max over dim 1 returns (values, indices); the indices are the
    # predicted classes.
    _, preds = torch.max(output, 1)
    loss = criterion(output, target)
    # Detach and move to CPU numpy so confusion-matrix tooling can consume
    # the collected labels.
    # NOTE(review): the scraped source was truncated after
    # "preds.view(-1).d"; the two lines below reconstruct the standard
    # pattern — confirm against the original article.
    y_pred.extend(preds.view(-1).detach().cpu().numpy())
    y_true.extend(target.view(-1).detach().cpu().numpy())

# 如何取得PyTorch模型中特定Layer的輸出？

1. Use `register_forward_hook` (example adapted from a CSDN article)

import torch
import torch.nn as nn
import torch.nn.functional as F

class LeNet(nn.Module):
def __init__(self):
super(LeNet …

# tsne-cuda: 使用GPU加速的t-SNE

t-SNE-CUDA專案的詳細介紹

# https://github.com/CannyLab/tsne-cuda/wiki/Installation
conda install tsnecuda cuda100 -c cannylab

import numpy as np
from tsnecuda import TSNE

# Toy dataset: four binary points in 3-D, embedded with the GPU-accelerated
# t-SNE implementation (default settings reduce to 2 components).
samples = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
embedding = TSNE().fit_transform(samples)
embedding.shape  # inspect the shape of the reduced representation

import numpy as np
from sklearn.manifold import TSNE

# CPU reference: scikit-learn's t-SNE on the same four binary 3-D points,
# explicitly reduced to two components.
points = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
projection = TSNE(n_components=2).fit_transform(points)
projection.shape  # (4, 2) after the reduction

PyTorch實作

# Difference between DQN and Policy Gradient

DQN: we feed the state as an input to the network, and it returns the Q values of all possible actions in that state, then we select an action that has a maximum Q value.

Policy gradient: we feed the state as input to the network, and it returns a probability distribution over the action space; our stochastic policy then selects an action by sampling from that distribution.

## Yanwei Liu

Machine Learning | Deep Learning | https://linktr.ee/yanwei

Get the Medium app