#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 8 09:35:52 2017
Some basics in PyTorch (written against the pre-0.4 Variable API)
@author: zengliang
"""
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
#---------------------------
x = torch.rand(5, 3)
print x
print x.size()    # torch.Size is in fact a tuple
print x.t_()      # any operation that mutates a tensor in-place is post-fixed with _
                  # note: x is now 3x5 after the in-place transpose
#torch.add(x, 1, out=x)   # an out= argument writes the result into an existing tensor
print x[:, 1]     # numpy-like slicing: the second column of the (transposed) x
#if torch.cuda.is_available(): #Tensors can be moved onto GPU using the .cuda function.
# x = x.cuda()
# y = y.cuda()
# x + y
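#A runnable variant of the sketch above (a minimal sketch, guarded so the
#script also works without a GPU; gx and gy are example names):
if torch.cuda.is_available():
    gx = torch.rand(2, 2).cuda()
    gy = torch.rand(2, 2).cuda()
    print gx + gy   # the addition runs on the GPU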
#---------------------------------
#autograd: Variable and Function
#Each Variable has a .grad_fn attribute referencing the Function that created it
x = Variable(torch.ones(2, 2), requires_grad=True)
print x.size()
y = x + 1
print y.grad_fn   # y was created by an addition op, so it has a grad_fn
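#x, by contrast, is a leaf Variable created by the user rather than by an op,
#so its grad_fn is None:
print x.grad_fn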
x = torch.rand(3)
x = Variable(x, requires_grad=True)
y = x * 2
gradients = torch.FloatTensor([0.1, 1, 0.001])
y.backward(gradients)   # y is not a scalar, so a gradient w.r.t. y must be passed in
print x.grad            # dy/dx = 2, scaled per element: [0.2, 2.0, 0.002]
x = Variable(torch.Tensor([1]), requires_grad=True)
w = Variable(torch.Tensor([2]), requires_grad=True)
b = Variable(torch.Tensor([3]), requires_grad=True)
y = w*x + b
y.backward()
print x.grad, w.grad, b.grad   # dy/dx = w = 2, dy/dw = x = 1, dy/db = 1
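#Gradients accumulate across backward() calls: a second forward/backward on a
#fresh graph adds the new gradients onto the stored ones (a minimal sketch):
y = w*x + b
y.backward()
print w.grad          # now 2: 1 from each of the two backward passes
w.grad.data.zero_()   # zero a stored gradient when accumulation is not wanted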
#-------------------------------
x = Variable(torch.randn(5, 3))
y = Variable(torch.randn(5, 2))
linear = nn.Linear(3, 2, bias=True)   # a fully connected layer: pred = x*W^T + b
print 'w: ', linear.weight
print 'b: ', linear.bias
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)
pred = linear(x)                      # forward pass
loss = criterion(pred, y)
print 'loss: ', loss.data[0]
loss.backward()                       # backward pass
print 'dl/dw: ', linear.weight.grad
print 'dl/db: ', linear.bias.grad
optimizer.step()                      # one step of gradient descent, equivalent to:
#linear.weight.data.sub_(0.01*linear.weight.grad.data)
#linear.bias.data.sub_(0.01*linear.bias.grad.data)
pred = linear(x)
loss = criterion(pred, y)
print 'loss after one optimization step: ', loss.data[0]
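#A minimal sketch extending the single step above into a full training loop,
#reusing the layer, data, and optimizer from above (100 epochs is an
#arbitrary choice):
for epoch in range(100):
    optimizer.zero_grad()   # clear gradients accumulated by the last backward()
    pred = linear(x)
    loss = criterion(pred, y)
    loss.backward()
    optimizer.step()
print 'loss after 100 steps: ', loss.data[0]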
#-------------------------------------
a = np.array([[1, 2], [3, 4]])
b = torch.from_numpy(a)   # numpy array to torch tensor (shares the same memory)
c = b.numpy()             # torch tensor back to numpy (also shares memory)
print b
print c
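#Because from_numpy() and numpy() share the underlying memory rather than
#copying it, an in-place change to the array is visible through the tensor:
np.add(a, 1, out=a)
print b   # reflects the in-place addition performed on a
print c   # c shares memory with b, so it changed too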
#---------------------------------
dtype = torch.FloatTensor               # use torch.cuda.FloatTensor to run on the GPU
D_in, H = 3, 4                          # example sizes (hypothetical; any positive ints work)
w1 = torch.randn(D_in, H).type(dtype)   # cast a random weight matrix to the chosen dtype
#---------------
#get the value out of a Variable a that lives on the GPU:
#a.data.cpu().numpy()   # tensor -> CPU -> numpy (numpy arrays are CPU-only)
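#A minimal sketch of that round trip (runs only when CUDA is available; the
#Variable a here is a hypothetical example):
if torch.cuda.is_available():
    a = Variable(torch.randn(2, 2)).cuda()
    print a.data.cpu().numpy()   # GPU Variable -> CPU tensor -> numpy array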
#-----------------------------
#eval()
#Sets the module in evaluation mode.
#This only affects modules that behave differently in training and evaluation,
#such as Dropout or BatchNorm.
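#A minimal sketch of the effect on Dropout: it zeroes activations in training
#mode and is the identity in evaluation mode:
drop = nn.Dropout(p=0.5)
v = Variable(torch.ones(1, 10))
drop.train()    # training mode (the default for a new module)
print drop(v)   # roughly half the entries zeroed, survivors scaled by 1/(1-p)
drop.eval()     # evaluation mode
print drop(v)   # identity: all ones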