Creating Tensors
Creating from lists, tuples, and NumPy arrays
torch.tensor can create a tensor from a tuple, a list, or a NumPy array.
import torch
import numpy as np
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

a = [1.1, 2, 3]
b = [1, 2, 3]
print(type(a))
c = torch.tensor(a)
d = torch.tensor(b)
c, c.dtype, c.shape
d, d.dtype, d.shape

shape = (5.00005, 2)
type(shape)
torch.tensor(shape)

<class 'list'>
(tensor([1.1000, 2.0000, 3.0000]), torch.float32, torch.Size([3]))
(tensor([1, 2, 3]), torch.int64, torch.Size([3]))
tuple
tensor([5.0001, 2.0000])

Creating from NumPy

np_array = np.random.normal((2,3)) # note: (2,3) is passed as the mean (loc), so this draws 2 samples, one per mean
np_array
t = torch.tensor(np_array)
t.shape
t

array([4.00105954, 3.62163162])
torch.Size([2])
tensor([4.0011, 3.6216], dtype=torch.float64)
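Note that torch.tensor always copies its input, while torch.from_numpy returns a tensor that shares memory with the source array. A minimal sketch of the difference (variable names are illustrative):

arr = np.ones(3)
t_copy = torch.tensor(arr)        # copies the data
t_shared = torch.from_numpy(arr)  # shares memory with arr
arr[0] = 99
t_copy    # unchanged: tensor([1., 1., 1.], dtype=torch.float64)
t_shared  # reflects the change: tensor([99.,  1.,  1.], dtype=torch.float64)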
Creating tensors with a specified shape
ones_like, zeros_like, and rand_like create new tensors with the same shape as an existing tensor.
# Create a tensor with a specified shape; the shape can be given as a tuple or a list
torch.rand([2,3])
torch.rand((2,3))
torch.randn((2,3)) # values drawn from a standard normal distribution
torch.ones([2,3])
torch.zeros(2,3) # passing 2,3 or (2,3) or [2,3] all behave the same
torch.empty([2,3],dtype=torch.int32).random_(0,10) # create a [2,3] tensor and fill it with random integers in [0,10)
torch.randint(0, 10, [2,2]) # create a [2,2] tensor filled with random integers in [0,10)
tensor([[0.8735, 0.9741, 0.1682], [0.5625, 0.8731, 0.8622]])
tensor([[0.8106, 0.1381, 0.1399], [0.1976, 0.5628, 0.9983]])
tensor([[-1.7315, 0.2933, 0.9307], [ 1.2782, 0.9797, -1.0693]])
tensor([[1., 1., 1.], [1., 1., 1.]])
tensor([[0., 0., 0.], [0., 0., 0.]])
tensor([[8, 6, 7], [9, 9, 1]], dtype=torch.int32)
tensor([[7, 2], [5, 0]])
torch.arange(1,5,1) # create a 1-D tensor: start=1, end=5, step=1; the end is exclusive
torch.eye(4,5) # create a 2-D tensor with ones on the diagonal and zeros elsewhere
torch.full([2,3],4) # create a [2,3] tensor filled entirely with 4

t = torch.tensor([1,2,3])
torch.full_like(t,6) # create a tensor with the shape of t, filled with 6
tensor([1, 2, 3, 4])
tensor([[1., 0., 0., 0., 0.], [0., 1., 0., 0., 0.], [0., 0., 1., 0., 0.], [0., 0., 0., 1., 0.]])
tensor([[4, 4, 4], [4, 4, 4]])
tensor([6, 6, 6])
torch.ones_like(t)
torch.ones_like(d)
torch.zeros_like(t)
torch.rand_like(d,dtype=torch.float) # rand_like only produces floating-point tensors, so a float dtype must be given for an integer input
tensor([1., 1.], dtype=torch.float64)
tensor([1, 1, 1])
tensor([0., 0.], dtype=torch.float64)
tensor([0.5891, 0.7410, 0.1948])
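The factory functions above also accept dtype, device, and requires_grad keyword arguments; a brief sketch:

torch.zeros(2, 3, dtype=torch.float64)              # control the element type
torch.ones(2, 3, device='cpu', requires_grad=True)  # control the device and autograd tracking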
Tensor attributes
t = torch.ones([2,3])
t.shape # of type torch.Size
t.shape[1]
t.dtype # of type torch.dtype
t.device # of type torch.device
t.device.type
torch.Size([2, 3])
3
torch.float32
device(type='cpu')
'cpu'
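By default tensors live on the CPU; they can be moved with .to() when a GPU is available. A minimal sketch:

t = torch.ones([2, 3])
if torch.cuda.is_available():
    t = t.to('cuda')  # move the tensor to the GPU
t.device              # shows cuda:0 if moved, otherwise cpu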
Tensor operations
Tensors support more than 1,200 operations.
t = torch.tensor(0)
torch.is_nonzero(t) # check whether the tensor is nonzero; the tensor must be a scalar

t = torch.tensor([[2,2,0],[4,5,6]])
torch.count_nonzero(t) # count the nonzero elements
torch.any(t == 0) # returns True if any element of the argument is True
torch.numel(t) # total number of elements in the tensor; short for "number of elements"
False
tensor(5)
tensor(True)
6
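As noted above, torch.is_nonzero only accepts single-element tensors; a quick sketch of the failure case:

try:
    torch.is_nonzero(torch.tensor([1, 2]))  # more than one element
except RuntimeError as e:
    print(e)  # raises because the input is not a scalar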
Indexing, slicing, joining, and mutating
a = torch.randn([2,4,5])
a
b = torch.randn([2,3,5])
b
c = torch.cat([a,b],dim=1) # the tensors being concatenated must have the same number of dimensions, and all dimensions except the concatenation dimension must match
c
c.shape
tensor([[[ 0.6906, 0.2675, 0.2471, 1.0537, -0.4351], [-0.0208, 1.1438, 0.6911, 0.9678, -0.1997], [-0.9350, -0.6659, -1.5559, 0.1381, 0.9874], [ 1.3853, 0.3179, 1.1886, 1.4987, 0.5392]],
        [[-0.1250, -0.3843, 0.9554, -0.2116, 0.3880], [-0.0915, 1.5961, -1.6635, -0.2928, 1.1061], [ 0.6826, 0.3181, 0.9228, -0.7005, 0.0926], [-0.5291, 0.8106, -0.9351, -0.6701, -1.6545]]])

tensor([[[-1.3547, -0.2265, -0.8044, 0.3939, -2.0248], [ 1.5856, 0.6330, 0.0404, -0.0779, -0.5388], [ 0.4213, -0.4322, -0.1037, 0.4425, 0.1877]],
        [[-1.0303, 0.7566, -1.0224, 0.3280, 1.1948], [ 1.2372, -1.1186, 0.2090, -0.5584, 0.4578], [ 0.5972, -0.2805, 0.7278, 1.4204, -0.3414]]])

tensor([[[ 0.6906, 0.2675, 0.2471, 1.0537, -0.4351], [-0.0208, 1.1438, 0.6911, 0.9678, -0.1997], [-0.9350, -0.6659, -1.5559, 0.1381, 0.9874], [ 1.3853, 0.3179, 1.1886, 1.4987, 0.5392], [-1.3547, -0.2265, -0.8044, 0.3939, -2.0248], [ 1.5856, 0.6330, 0.0404, -0.0779, -0.5388], [ 0.4213, -0.4322, -0.1037, 0.4425, 0.1877]],
        [[-0.1250, -0.3843, 0.9554, -0.2116, 0.3880], [-0.0915, 1.5961, -1.6635, -0.2928, 1.1061], [ 0.6826, 0.3181, 0.9228, -0.7005, 0.0926], [-0.5291, 0.8106, -0.9351, -0.6701, -1.6545], [-1.0303, 0.7566, -1.0224, 0.3280, 1.1948], [ 1.2372, -1.1186, 0.2090, -0.5584, 0.4578], [ 0.5972, -0.2805, 0.7278, 1.4204, -0.3414]]])

torch.Size([2, 7, 5])
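Since dim 1 of a and b differs (4 vs 3), concatenating along any other dimension fails; a quick sketch of the error case:

try:
    torch.cat([a, b], dim=0)  # dim 1 differs (4 vs 3), so this raises
except RuntimeError as e:
    print(e)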
a = torch.randn([3,3,4])
a
torch.chunk(a,2) # split into 2 chunks along dim 0 (the default)
torch.chunk(a,2,1) # split into 2 chunks along dim 1; the chunks are views of the original tensor
tensor([[[ 0.4793, 0.8505, -1.2706, 0.6878], [ 0.8232, -0.4249, 1.7251, -0.8768], [ 1.2160, 1.4610, -0.6511, -1.0861]],
        [[-0.0923, 0.3947, -0.2836, 0.1569], [ 0.1133, -0.8642, 0.2384, -0.5672], [-0.1942, -0.0093, -2.1721, -1.0552]],
        [[-0.2236, -0.2623, 0.6086, -1.0173], [ 0.8949, -0.5393, 0.1488, -0.5906], [ 0.0518, 0.7505, -1.0105, -0.6504]]])

(tensor([[[ 0.4793, 0.8505, -1.2706, 0.6878], [ 0.8232, -0.4249, 1.7251, -0.8768], [ 1.2160, 1.4610, -0.6511, -1.0861]],
         [[-0.0923, 0.3947, -0.2836, 0.1569], [ 0.1133, -0.8642, 0.2384, -0.5672], [-0.1942, -0.0093, -2.1721, -1.0552]]]),
 tensor([[[-0.2236, -0.2623, 0.6086, -1.0173], [ 0.8949, -0.5393, 0.1488, -0.5906], [ 0.0518, 0.7505, -1.0105, -0.6504]]]))

(tensor([[[ 0.4793, 0.8505, -1.2706, 0.6878], [ 0.8232, -0.4249, 1.7251, -0.8768]],
         [[-0.0923, 0.3947, -0.2836, 0.1569], [ 0.1133, -0.8642, 0.2384, -0.5672]],
         [[-0.2236, -0.2623, 0.6086, -1.0173], [ 0.8949, -0.5393, 0.1488, -0.5906]]]),
 tensor([[[ 1.2160, 1.4610, -0.6511, -1.0861]],
         [[-0.1942, -0.0093, -2.1721, -1.0552]],
         [[ 0.0518, 0.7505, -1.0105, -0.6504]]]))
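When the size is not evenly divisible, torch.chunk simply makes the last chunk smaller; a small sketch:

chunks = torch.chunk(torch.arange(5), 3)  # chunk sizes 2, 2, 1
[ch.shape for ch in chunks]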
a = torch.randint(0,10,[2,3,4])
a.shape
b = torch.reshape(a,(-1,4))
b
b.shape
torch.Size([2, 3, 4])
tensor([[1, 1, 4, 2], [1, 6, 7, 9], [9, 8, 9, 6], [9, 2, 8, 0], [2, 8, 7, 2], [9, 7, 8, 7]])
torch.Size([6, 4])
a = torch.randint(0,10,[2,3,4])
a
torch.split(a,2,1) # like torch.chunk; returns views of the original tensor
torch.split(a,[1,2],1) # specify the size of each split section along dim 1
tensor([[[6, 0, 3, 5], [6, 1, 6, 5], [0, 9, 4, 0]],
        [[6, 0, 5, 3], [6, 6, 1, 8], [5, 3, 8, 9]]])

(tensor([[[6, 0, 3, 5], [6, 1, 6, 5]],
         [[6, 0, 5, 3], [6, 6, 1, 8]]]),
 tensor([[[0, 9, 4, 0]],
         [[5, 3, 8, 9]]]))

(tensor([[[6, 0, 3, 5]],
         [[6, 0, 5, 3]]]),
 tensor([[[6, 1, 6, 5], [0, 9, 4, 0]],
         [[6, 6, 1, 8], [5, 3, 8, 9]]]))
a = torch.randint(0,10,[2,1,3])
a
torch.squeeze(a) # remove all dimensions of size 1
torch.squeeze(a,[1]) # remove only the specified dimension (if it has size 1)
tensor([[[5, 7, 2]], [[3, 4, 4]]])
tensor([[5, 7, 2], [3, 4, 4]])
tensor([[5, 7, 2], [3, 4, 4]])
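torch.unsqueeze is the inverse operation: it inserts a dimension of size 1 at the given position. A minimal sketch using the squeezed tensor above:

s = torch.squeeze(a)         # shape [2, 3]
torch.unsqueeze(s, 1).shape  # back to torch.Size([2, 1, 3])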
a = torch.randint(0,10,[2,3])
a
b = torch.randint(0,10,[2,3])
b
c = torch.randint(0,10,[2,3])
c
d = torch.stack([a,b,c],1) # stack the tensors along a new dimension; their shapes must match. Unlike cat, this adds a dimension
d
d.shape
tensor([[0, 0, 4], [1, 6, 1]])
tensor([[4, 4, 5], [2, 1, 8]])
tensor([[2, 2, 6], [1, 6, 1]])
tensor([[[0, 0, 4], [4, 4, 5], [2, 2, 6]],
        [[1, 6, 1], [2, 1, 8], [1, 6, 1]]])
torch.Size([2, 3, 3])
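Stacking along dim 0 instead would insert the new dimension at the front; a quick sketch:

torch.stack([a, b, c], 0).shape  # torch.Size([3, 2, 3])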
a = torch.randint(0,10,[2,3])
a
b = torch.tile(a,(2,2)) # repeat a twice along dim 0 and twice along dim 1; passing (2,) is treated as (1, 2)
b
b.shape

a.tile((2,2)) # same as torch.tile(a,(2,2))
a
tensor([[3, 9, 4], [5, 6, 6]])
tensor([[3, 9, 4, 3, 9, 4], [5, 6, 6, 5, 6, 6], [3, 9, 4, 3, 9, 4], [5, 6, 6, 5, 6, 6]])
torch.Size([4, 6])
tensor([[3, 9, 4, 3, 9, 4], [5, 6, 6, 5, 6, 6], [3, 9, 4, 3, 9, 4], [5, 6, 6, 5, 6, 6]])
tensor([[3, 9, 4], [5, 6, 6]])
a = torch.empty([2,3,4],dtype=torch.int32).random_(0,10)
b = torch.unbind(a,1) # remove the given dimension and return a tuple of slices along it; defaults to dim 0
b
b[0].shape
(tensor([[6, 2, 3, 0], [5, 7, 3, 2]], dtype=torch.int32),
 tensor([[5, 3, 5, 3], [0, 8, 6, 5]], dtype=torch.int32),
 tensor([[3, 4, 4, 5], [2, 2, 9, 4]], dtype=torch.int32))
torch.Size([2, 4])
a = torch.empty([2,3],dtype=torch.int32).random_(0,10)
a
b = torch.ones([2,3],dtype=torch.int32)
b
torch.where(a>b,a,b) # element-wise comparison: take the value from a where a>b, otherwise from b
torch.where(a>2,a,2)
tensor([[6, 2, 1], [8, 0, 5]], dtype=torch.int32)
tensor([[1, 1, 1], [1, 1, 1]], dtype=torch.int32)
tensor([[6, 2, 1], [8, 1, 5]], dtype=torch.int32)
tensor([[6, 2, 2], [8, 2, 5]], dtype=torch.int32)
torch.manual_seed(1) # set the random seed
<torch._C.Generator at 0x240d615ab70>
torch.randperm(4) # create a 1-D tensor with 4 elements: a random permutation of the integers in [0, 4)

# shuffling data
data = torch.tensor([10, 20, 30, 40, 50])
perm = torch.randperm(data.size(0))
shuffled_data = data[perm]
shuffled_data
tensor([3, 0, 1, 2])
tensor([30, 50, 10, 40, 20])
t = torch.randint(0,10,[2,3])
t.shape
t.size()
t.size(1)
torch.Size([2, 3])
torch.Size([2, 3])
3
a = torch.randn(2, 3)
a
a.max()
a.argmax() # index of the maximum value in the flattened tensor
a.argmax(1) # argmax along dim 1: for each row, the index of its maximum
tensor([[-0.3013, -0.7432, -0.6355], [ 0.4731, -0.1942, 0.3147]])
tensor(0.4731)
tensor(3)
tensor([0, 0])
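a.max(dim=1) returns both the maximum values and their indices in one call; a brief sketch:

values, indices = a.max(dim=1)  # per-row maxima and their column indices
values
indices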
a = torch.randn(1,1)
a
a.item() # extract the Python value; only valid when the tensor holds a single element
tensor([[-0.3149]])
-0.3148927390575409
Tensor index selection (the easily confused part)
t = torch.rand([2,3])
t
t[0][-1]
t[0,-1]
tensor([[0.1808, 0.2796, 0.3273], [0.3835, 0.2156, 0.6563]])
tensor(0.3273)
tensor(0.3273)
# indexing with lists
x = torch.tensor([[[10, 20, 30], [40, 50, 60]]])
x[[0, 0]] # the list [0, 0] applies to dim 0: select element 0 of dim 0 twice
x[[0, 0], [0, 1]] # the two lists apply to dim 0 and dim 1: selects x[0, 0] and x[0, 1]
x[0, [0, 1], [0, 1]] # indexes all three dims: selects x[0, 0, 0] and x[0, 1, 1]
tensor([[[10, 20, 30], [40, 50, 60]],
        [[10, 20, 30], [40, 50, 60]]])
tensor([[10, 20, 30], [40, 50, 60]])
tensor([10, 50])
x4 = torch.tensor([[10, 20, 30], [40, 50, 60]])
x4[[False, True], [False, True, True]] # per-dimension boolean masks: dim 0 keeps row 1, dim 1 keeps columns 1 and 2, paired to give elements 50 and 60
tensor([50, 60])
x = torch.tensor([[10, 20, 30], [40, 50, 60]])
x[torch.tensor([[0, 1], [1, 0]])] # index dim 0 with a 2-D index tensor; rows are gathered in the given order, and the result takes the index tensor's shape
tensor([[[10, 20, 30], [40, 50, 60]],
        [[40, 50, 60], [10, 20, 30]]])
x = torch.tensor([[10, 20, 30], [40, 50, 60]]) # shape (2, 3)
index = torch.tensor([[False, True, True], [False, True, True]]) # shape (2, 3)
x[index] # a boolean mask with the same shape as x selects individual elements and returns a 1-D tensor
tensor([20, 30, 50, 60])
x = torch.tensor([[10, 20, 30], [40, 50, 60]])
index = torch.tensor([False, True])
x[index] # a 1-D boolean mask over dim 0 selects whole rows
tensor([[40, 50, 60]])
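Boolean masks can also be used on the left-hand side for in-place assignment; a small sketch:

x = torch.tensor([[10, 20, 30], [40, 50, 60]])
x[x > 30] = 0  # zero out every element greater than 30
x              # tensor([[10, 20, 30], [ 0,  0,  0]])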