This post demonstrates some basic attributes of tensors.
Usage and meaning are explained in the code comments. Because the script prints a lot, the first value of every print() call is the line number of that print() in the source file (obtained via sys._getframe().f_lineno), so each block of output can be matched back to the code.
import torch
import numpy as np
import sys
loc = sys._getframe()  # frame object; loc.f_lineno is the line number of the statement currently executing
'''Commonly used data types'''
a = torch.randn(2, 3)
# print the tensor's data type
print(loc.f_lineno, '\n', a.type())
print(loc.f_lineno, '\n', type(a))
# type checking with isinstance()
print(loc.f_lineno, '\n', isinstance(a, torch.FloatTensor))
# check against a CUDA (GPU) tensor type; a lives on the CPU, so this returns False
print(loc.f_lineno, '\n', isinstance(a, torch.cuda.DoubleTensor))
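# --- Supplementary sketch (not part of the original run, so it adds no block to the output below) ---
# A tensor only becomes a torch.cuda.* type after it has been moved to the GPU; guarded so the
# script still runs on a CPU-only machine.
if torch.cuda.is_available():
    a_gpu = a.cuda()                                                      # same as a.to('cuda')
    print(loc.f_lineno, '\n', a_gpu.type())                               # torch.cuda.FloatTensor
    print(loc.f_lineno, '\n', isinstance(a_gpu, torch.cuda.FloatTensor))  # True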
'''Scalars: dimension 0'''
b = torch.tensor(1)
print(loc.f_lineno, '\n', b, '\n', b.type(), '\n', b.shape, '\n', b.size(), '\n', len(b.shape))
b = torch.tensor(1.)
print(loc.f_lineno, '\n', b, '\n', b.type(), '\n', b.shape, '\n', b.size())
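# --- Supplementary note: a 0-dim tensor holds a single value; .item() extracts it as a plain Python number ---
print(loc.f_lineno, '\n', b.dim(), '\n', b.item())  # 0  and  1.0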
'''1-D vectors; a 1-D vector of length 1 can also be used to represent a scalar'''
c = torch.tensor([1.1])
print(loc.f_lineno, '\n', c, '\n', c.type(), '\n', c.shape, '\n', c.size(), '\n', len(c.shape))
d = torch.FloatTensor(3)
print(loc.f_lineno, '\n', d, '\n', d.type(), '\n', d.shape, '\n', d.size(), '\n', len(d.shape))
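# --- Supplementary note: torch.FloatTensor(3) above takes a *size* and returns uninitialized memory
# (the zeros in its output are not guaranteed), while torch.tensor([...]) / torch.FloatTensor([...]) take *data* ---
d2 = torch.FloatTensor([1.1, 2.2])  # built from data
print(loc.f_lineno, '\n', d2, '\n', d2.shape)  # tensor([1.1000, 2.2000])  torch.Size([2])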
# create a tensor from a numpy array; its data type (float64) differs from the previous tensor's (float32)
dd = np.ones(3)
d = torch.from_numpy(dd)
print(loc.f_lineno, '\n', d, '\n', d.type(), '\n', d.shape, '\n', d.size(), '\n', len(d.shape))
# here d is the tensor itself; .shape and .size() give the tensor's shape, while len(d.shape) and .dim() give the number of dimensions
print(loc.f_lineno, '\n', d, '\n', d.type(), '\n', d.shape, d.size(), '\n', len(d.shape), '\n', d.dim())
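# --- Supplementary sketch: torch.from_numpy() shares memory with the source array, so a change on
# either side is visible on the other; .numpy() converts back without copying ---
dd[0] = 5.0
print(loc.f_lineno, '\n', d, '\n', d.numpy())  # d now also starts with 5.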
'''Representing 2-D tensors'''
e = torch.rand(2, 3)
print(loc.f_lineno, '\n', e, '\n', e.size(), '\n', e.size(0), '\n', e.size(1), '\n', e.shape, '\n', e.shape[0], '\n', e.shape[1], '\n', e.dim())
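# --- Supplementary note: .shape and .size() return the same torch.Size object; torch.rand samples
# from U(0, 1) while torch.randn (used for a above) samples from the standard normal distribution ---
print(loc.f_lineno, '\n', e.shape == e.size())  # True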
'''Representing 3-D tensors'''
# the first dimension is the number of samples, the second and third form a matrix; often used for natural language / text data
f = torch.rand(2, 3, 3)
print(loc.f_lineno, '\n', f, '\n', f.size(), '\n', f.size(0), '\n', f.size(1), '\n', f.size(2), '\n', f.dim())
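# --- Supplementary sketch: one assumed NLP reading of this shape is [batch, seq_len, feature_dim];
# indexing the first dimension yields a single 2-D sample ---
print(loc.f_lineno, '\n', f[0].shape, '\n', f.shape[1:])  # torch.Size([3, 3]) both times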
'''Representing 4-D tensors'''
# often used for image data: the first dimension is the number of images, the second is the number of channels (1 for a grayscale or binary image, 3 for an RGB colour image), followed by height and width
f = torch.rand(2, 1, 6, 6)
print(loc.f_lineno, '\n', f, '\n', f.size(), '\n', f.size(0), '\n', f.size(1), '\n', f.size(2), '\n', f.size(3), '\n', f.dim())
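# --- Supplementary sketch: read this shape as [N, C, H, W] = [images, channels, height, width],
# the layout expected by torch.nn convolution layers ---
print(loc.f_lineno, '\n', list(f.shape), '\n', f[0].shape)  # [2, 1, 6, 6]  torch.Size([1, 6, 6])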
'''Other commonly used methods'''
g = torch.rand(3, 2, 8, 8)
# numel() ("number of elements") returns the total element count, i.e. the product of all dimension sizes (not the memory size in bytes)
# len(g) returns the size of the first dimension; len(g.shape) returns the number of dimensions, which equals g.dim()
print(loc.f_lineno, '\n', g.numel(), '\n', len(g), '\n', len(g.shape))
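# --- Supplementary sketch: numel() is exactly the product of the dimension sizes; the memory used by
# the data is numel() * element_size() bytes (4 bytes per float32 element) ---
print(loc.f_lineno, '\n', 3 * 2 * 8 * 8, '\n', g.element_size(), '\n', g.numel() * g.element_size())  # 384  4  1536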
9
torch.FloatTensor
10
<class 'torch.Tensor'>
13
True
16
False
21
tensor(1)
torch.LongTensor
torch.Size([])
torch.Size([])
0
24
tensor(1.)
torch.FloatTensor
torch.Size([])
torch.Size([])
29
tensor([1.1000])
torch.FloatTensor
torch.Size([1])
torch.Size([1])
1
32
tensor([0., 0., 0.])
torch.FloatTensor
torch.Size([3])
torch.Size([3])
1
37
tensor([1., 1., 1.], dtype=torch.float64)
torch.DoubleTensor
torch.Size([3])
torch.Size([3])
1
40
tensor([1., 1., 1.], dtype=torch.float64)
torch.DoubleTensor
torch.Size([3]) torch.Size([3])
1
1
45
tensor([[0.3977, 0.7449, 0.4745],
        [0.9286, 0.4098, 0.0678]])
torch.Size([2, 3])
2
3
torch.Size([2, 3])
2
3
2
51
tensor([[[0.6129, 0.3520, 0.2642],
         [0.5524, 0.7050, 0.7392],
         [0.9686, 0.2139, 0.6605]],
        [[0.2225, 0.7580, 0.3679],
         [0.5003, 0.3760, 0.9981],
         [0.0406, 0.4280, 0.7820]]])
torch.Size([2, 3, 3])
2
3
3
3
57
tensor([[[[0.9623, 0.2979, 0.2493, 0.3637, 0.2498, 0.1763],
          [0.5971, 0.5217, 0.5105, 0.9453, 0.8698, 0.6992],
          [0.2464, 0.9166, 0.2880, 0.3745, 0.7343, 0.3802],
          [0.2005, 0.7070, 0.3118, 0.3963, 0.2714, 0.7901],
          [0.6297, 0.1833, 0.3807, 0.0254, 0.4433, 0.3827],
          [0.9271, 0.6796, 0.3230, 0.1679, 0.7331, 0.4239]]],
        [[[0.2768, 0.4959, 0.7481, 0.5627, 0.3791, 0.7192],
          [0.8359, 0.0660, 0.7783, 0.3048, 0.7053, 0.7260],
          [0.9088, 0.1210, 0.4699, 0.7889, 0.4881, 0.2162],
          [0.0561, 0.5272, 0.4205, 0.1259, 0.9970, 0.5771],
          [0.7308, 0.8814, 0.4205, 0.0934, 0.9490, 0.6797],
          [0.5244, 0.9846, 0.5593, 0.9826, 0.4169, 0.9384]]]])
torch.Size([2, 1, 6, 6])
2
1
6
6
4
64
384
3
4
Process finished with exit code 0