import numpy as np
import torch
t = np.array([0., 1., 2., 3., 4., 5., 6.])
print(t)
[0. 1. 2. 3. 4. 5. 6.]
print('Rank of t: ', t.ndim) # rank = number of dimensions
print('Shape of t: ', t.shape) # shape = size along each dimension
Rank of t: 1
Shape of t: (7,)
print('t[0], t[1], t[-1] = ', t[0], t[1], t[-1]) # Element indexing
print('t[2:5] t[4:-1] = ', t[2:5], t[4:-1]) # Slicing
print('t[:2] t[3:] = ', t[:2], t[3:]) # Slicing
t[0], t[1], t[-1] = 0.0 1.0 6.0
t[2:5] t[4:-1] = [2. 3. 4.] [4. 5.]
t[:2] t[3:] = [0. 1.] [3. 4. 5. 6.]
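One detail worth knowing about the slices above: basic NumPy slices are views into the original array, not copies, so writing through a slice mutates t. A minimal sketch:

s = t[2:5]
s[0] = 99.  # writes through the view into t
print(t)    # [ 0.  1. 99.  3.  4.  5.  6.]
t[2] = 2.   # restore the original value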
t = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.], [10., 11., 12.]])
print(t)
[[ 1.  2.  3.]
 [ 4.  5.  6.]
 [ 7.  8.  9.]
 [10. 11. 12.]]
print('Rank of t: ', t.ndim)
print('Shape of t: ', t.shape) # a 4 x 3 matrix
Rank of t: 2
Shape of t: (4, 3)
t = torch.FloatTensor([0., 1., 2., 3., 4., 5., 6.])
print(t)
tensor([0., 1., 2., 3., 4., 5., 6.])
print(t.dim()) # rank = dimension
print(t.shape) # shape
print(t.size()) # shape
print('t[0], t[1], t[-1] = ', t[0], t[1], t[-1]) # Element indexing
print('t[2:5] t[4:-1] = ', t[2:5], t[4:-1]) # Slicing
print('t[:2] t[3:] = ', t[:2], t[3:]) # Slicing
1
torch.Size([7])
torch.Size([7])
t[0], t[1], t[-1] = tensor(0.) tensor(1.) tensor(6.)
t[2:5] t[4:-1] = tensor([2., 3., 4.]) tensor([4., 5.])
t[:2] t[3:] = tensor([0., 1.]) tensor([3., 4., 5., 6.])
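Note that indexing a single element returns a 0-dimensional tensor, not a Python number; .item() extracts the plain value when you need one:

print(t[0])         # tensor(0.)
print(t[0].item())  # 0.0, a plain Python float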
t = torch.FloatTensor([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.], [10., 11., 12.]])
print(t)
tensor([[ 1.,  2.,  3.],
        [ 4.,  5.,  6.],
        [ 7.,  8.,  9.],
        [10., 11., 12.]])
print(t.dim()) # rank = dimension
print(t.shape) # shape
print(t.size()) # shape
print(t[:, 1])
print(t[:, 1].size())
print(t[:, :-1])
2
torch.Size([4, 3])
torch.Size([4, 3])
tensor([ 2.,  5.,  8., 11.])
torch.Size([4])
tensor([[ 1.,  2.],
        [ 4.,  5.],
        [ 7.,  8.],
        [10., 11.]])
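The difference between t[:, 1] and a length-1 slice is easy to miss: integer indexing drops the indexed dimension, while slicing keeps it. A quick check:

print(t[:, 1].shape)    # torch.Size([4])    - dimension dropped
print(t[:, 1:2].shape)  # torch.Size([4, 1]) - dimension kept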
# Same shape
m1 = torch.FloatTensor([3, 3])
m2 = torch.FloatTensor([2, 2])
print(m1 + m2)
tensor([5., 5.])
# Vector + scalar
m1 = torch.FloatTensor([[1, 2]])
m2 = torch.FloatTensor([3]) # 3 -> [[3, 3]]: automatically broadcast so the operation can go through
print(m1 + m2)
tensor([[4., 5.]])
# 1 x 2 Vector + 2 x 1 Vector
m1 = torch.FloatTensor([[1, 2]])
m2 = torch.FloatTensor([[3], [4]])
print(m1 + m2)
tensor([[4., 5.],
        [5., 6.]])
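Broadcasting happens silently, which is convenient but can mask shape bugs: an addition you expected to fail may instead return a broadcast result like the 2 x 2 above. If you want to rule that out, a small guard can insist on equal shapes (the helper name add_same_shape is illustrative, not a PyTorch API):

def add_same_shape(a, b):
    # Refuse silent broadcasting by requiring identical shapes.
    assert a.shape == b.shape, f'shape mismatch: {a.shape} vs {b.shape}'
    return a + b

print(add_same_shape(torch.FloatTensor([3, 3]), torch.FloatTensor([2, 2])))  # tensor([5., 5.])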
print()
print('-------------------')
print('Mul vs Matmul')
print('-------------------')
m1 = torch.FloatTensor([[1, 2], [3, 4]]) # 2 x 2
m2 = torch.FloatTensor([[1], [2]]) # 2 x 1
print(m1)
print('Shape of Matrix1 :', m1.shape)
print(m2)
print('Shape of Matrix2 :', m2.shape)
print('--Matmul--')
print(m1.matmul(m2)) # 2 x 1
print('--Element Mul--')
print(m1 * m2) # 2 x 2 (broadcasting)
print('--Mul--')
print(m1.mul(m2))
-------------------
Mul vs Matmul
-------------------
tensor([[1., 2.],
        [3., 4.]])
Shape of Matrix1 : torch.Size([2, 2])
tensor([[1.],
        [2.]])
Shape of Matrix2 : torch.Size([2, 1])
--Matmul--
tensor([[ 5.],
        [11.]])
--Element Mul--
tensor([[1., 2.],
        [6., 8.]])
--Mul--
tensor([[1., 2.],
        [6., 8.]])
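The elementwise results above come from broadcasting: * and mul() expand m2 from (2, 1) to (2, 2) before multiplying, whereas matmul() performs true matrix multiplication. A sketch that makes the expansion explicit:

expanded = m2.expand(2, 2)                  # tensor([[1., 1.], [2., 2.]])
print(torch.equal(m1 * m2, m1 * expanded))  # True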
t = torch.FloatTensor([1, 2])
print(t.mean())
tensor(1.5000)
# Can't use mean() on integers
t = torch.LongTensor([1, 2])
try:
    print(t.mean())
except Exception as exc:
    print(exc)
mean(): input dtype should be either floating point or complex dtypes. Got Long instead.
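The usual fix is to cast to a floating dtype first:

t = torch.LongTensor([1, 2])
print(t.float().mean())  # tensor(1.5000)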
t = torch.FloatTensor([[1, 2], [3, 4]])
print(t)
tensor([[1., 2.], [3., 4.]])
print(t.mean())
print(t.mean(dim=0))
print(t.mean(dim=1))
print(t.mean(dim=-1))
tensor(2.5000)
tensor([2., 3.])
tensor([1.5000, 3.5000])
tensor([1.5000, 3.5000])
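dim=0 averages down the rows and dim=1 (or -1) across the columns; the reduced dimension is removed from the shape. Passing keepdim=True keeps it as size 1 instead, which is often handy for later broadcasting:

print(t.mean(dim=0, keepdim=True))        # tensor([[2., 3.]])
print(t.mean(dim=0, keepdim=True).shape)  # torch.Size([1, 2])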
t = torch.FloatTensor([[1, 2], [3, 4]])
print(t)
tensor([[1., 2.], [3., 4.]])
print(t.sum())
print(t.sum(dim=0))
print(t.sum(dim=1))
print(t.sum(dim=-1))
tensor(10.)
tensor([4., 6.])
tensor([3., 7.])
tensor([3., 7.])
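sum() also accepts a tuple of dimensions, reducing over all of them at once:

print(t.sum(dim=(0, 1)))  # tensor(10.), same as t.sum()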
t = torch.FloatTensor([[1, 2], [3, 4]])
print(t)
tensor([[1., 2.], [3., 4.]])
print(t.max())
tensor(4.)
print(t.max(dim=0))
torch.return_types.max(
values=tensor([3., 4.]),
indices=tensor([1, 1]))
print(t.max(dim=1))
print(t.max(dim=-1))
torch.return_types.max(
values=tensor([2., 4.]),
indices=tensor([1, 1]))
torch.return_types.max(
values=tensor([2., 4.]),
indices=tensor([1, 1]))
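max(dim=...) returns a named tuple of (values, indices), where indices is the argmax along that dimension; it unpacks like a regular tuple, and argmax() gives the indices alone:

values, indices = t.max(dim=1)
print(values)           # tensor([2., 4.])
print(indices)          # tensor([1, 1])
print(t.argmax(dim=1))  # tensor([1, 1]), indices only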