Implementing CNN, RNN, LSTM, and GRU with NumPy

Source: https://www.cnblogs.com/yyyccs/p/18626678
  • CNN
#coding:utf8
import torch
import torch.nn as nn
import numpy as np

"""
Implement a CNN with PyTorch,
implement the same CNN by hand with NumPy,
and compare the two results.
"""

# A single 2D convolution
class TorchCNN(nn.Module):
    def __init__(self, in_channel, out_channel, kernel):
        super(TorchCNN, self).__init__()
        self.layer = nn.Conv2d(in_channel, out_channel, kernel, bias=False)

    def forward(self, x):
        return self.layer(x)

# Hand-written CNN model
class DiyCNN:
    def __init__(self, input_height, input_width, weights, kernel_size):
        self.height = input_height
        self.width = input_width
        self.weights = weights
        self.kernel_size = kernel_size

    def forward(self, x):
        output = []
        for kernel_weight in self.weights:
            kernel_weight = kernel_weight.squeeze().numpy()  # shape: 2x2
            kernel_output = np.zeros((self.height - self.kernel_size + 1,
                                      self.width - self.kernel_size + 1))
            for i in range(self.height - self.kernel_size + 1):
                for j in range(self.width - self.kernel_size + 1):
                    window = x[i:i+self.kernel_size, j:j+self.kernel_size]
                    # element-wise product summed over the window; note np.dot(a, b) != a * b
                    kernel_output[i, j] = np.sum(kernel_weight * window)
            output.append(kernel_output)
        return np.array(output)

x = np.array([[0.1, 0.2, 0.3, 0.4],
              [-3, -4, -5, -6],
              [5.1, 6.2, 7.3, 8.4],
              [-0.7, -0.8, -0.9, -1]])  # network input

# torch experiment
in_channel = 1
out_channel = 3
kernel_size = 2
torch_model = TorchCNN(in_channel, out_channel, kernel_size)
print(torch_model.state_dict())
# OrderedDict([('layer.weight', tensor([[[[-0.1427,  0.1269],
#           [-0.3778,  0.0182]]],
#         [[[ 0.3406,  0.3941],
#           [-0.2500, -0.0908]]],
#         [[[-0.3362, -0.1736],
#           [-0.4303,  0.3141]]]]))])
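# nn.Conv2d stores its weight with shape (out_channels, in_channels, kernel_h, kernel_w),
# here (3, 1, 2, 2): each of the three 2x2 kernels printed above is one output channel.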
torch_w = torch_model.state_dict()["layer.weight"]
torch_x = torch.FloatTensor([[x]])
output = torch_model.forward(torch_x)
output = output.detach().numpy()
print(output, output.shape, "torch model output\n")
# [[[[ 1.0716785   1.4296913   1.787704  ]
#    [-1.8935721  -2.2733717  -2.6531715 ]
#    [ 0.30921668  0.32785434  0.3464917 ]]
# 
#   [[ 1.2261667   1.6404585   2.0547502 ]
#    [-4.4363394  -5.545947   -6.6555543 ]
#    [ 4.428199    5.2704554   6.112712  ]]
# 
#   [[-0.03400278  0.03117025  0.09634328]
#    [ 1.456101    1.8381113   2.220121  ]
#    [-2.7409387  -3.290077   -3.8392155 ]]]] (1, 3, 3, 3) torch model output
diy_model = DiyCNN(x.shape[0], x.shape[1], torch_w, kernel_size)
output = diy_model.forward(x)
print(output, "diy model output")
# [[[ 1.07167848  1.42969126  1.78770404]
#   [-1.89357215 -2.27337179 -2.65317143]
#   [ 0.30921673  0.32785429  0.34649184]]
# 
#  [[ 1.22616674  1.64045846  2.05475019]
#   [-4.43633947 -5.54594688 -6.6555543 ]
#   [ 4.42819927  5.27045575  6.11271224]]
# 
#  [[-0.03400279  0.03117025  0.09634329]
#   [ 1.45610113  1.8381112   2.22012128]
#   [-2.74093884 -3.29007711 -3.83921538]]] diy model output
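As a quick numeric check (a minimal sketch; it assumes the script above has just run, and recomputes both outputs so the reused `output` variable is not a problem), the two results can be compared directly:

torch_out = torch_model.forward(torch_x).detach().numpy().squeeze(0)  # drop the batch dim -> (3, 3, 3)
diy_out = diy_model.forward(x)                                        # (3, 3, 3)
print(np.allclose(torch_out, diy_out, atol=1e-5))  # True, up to float32 rounding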
  • RNN
#coding:utf8
import torch
import torch.nn as nn
import numpy as np

"""
Implement an RNN with PyTorch,
implement the same RNN by hand with NumPy,
and compare the two results.
"""

class TorchRNN(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(TorchRNN, self).__init__()
        self.layer = nn.RNN(input_size, hidden_size, bias=False, batch_first=True)

    def forward(self, x):
        return self.layer(x)

# Hand-written RNN model
class DiyRNN:
    def __init__(self, w_ih, w_hh, hidden_size):
        self.w_ih = w_ih
        self.w_hh = w_hh
        self.hidden_size = hidden_size

    def forward(self, x):
        ht = np.zeros((self.hidden_size))
        output = []
        for xt in x:
            ux = np.dot(self.w_ih, xt)   # input projection
            wh = np.dot(self.w_hh, ht)   # hidden-state projection
            ht_next = np.tanh(ux + wh)   # h_t = tanh(W_ih @ x_t + W_hh @ h_{t-1})
            output.append(ht_next)
            ht = ht_next
        return np.array(output), ht

x = np.array([[1, 2, 3],
              [3, 4, 5],
              [5, 6, 7]])  # network input

# torch experiment
hidden_size = 4
torch_model = TorchRNN(3, hidden_size)
# print(torch_model.state_dict())
w_ih = torch_model.state_dict()["layer.weight_ih_l0"]
w_hh = torch_model.state_dict()["layer.weight_hh_l0"]
torch_x = torch.FloatTensor([x])
output, h = torch_model.forward(torch_x)
print(output.detach().numpy(), "torch model output")
print(h.detach().numpy(), "torch hidden state")
# [[[-0.71792674 -0.9736518   0.84708744  0.9771833 ]
#   [-0.9477417  -0.99978185  0.9003902   0.99973494]
#   [-0.9902414  -0.99999815  0.98547614  0.99999344]]] torch model output
# [[[-0.9902414  -0.99999815  0.98547614  0.99999344]]] torch hidden state
diy_model = DiyRNN(w_ih, w_hh, hidden_size)
output, h = diy_model.forward(x)
print(output, "diy模型预测结果")
print(h, "diy模型预测隐含层结果")
# [[-0.71792672 -0.97365181  0.84708744  0.97718326]
#  [-0.94774171 -0.99978183  0.9003902   0.99973496]
#  [-0.99024139 -0.99999813  0.98547611  0.99999344]] diy model output
# [-0.99024139 -0.99999813  0.98547611  0.99999344] diy hidden state
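The demo sets bias=False to keep the weight extraction minimal. For reference, a sketch of the same time step with biases included (hypothetical: it assumes the layer was built with bias=True, so the state dict also contains layer.bias_ih_l0 and layer.bias_hh_l0; xt and ht are the loop variables from DiyRNN.forward):

b_ih = torch_model.state_dict()["layer.bias_ih_l0"].numpy()  # only present when bias=True
b_hh = torch_model.state_dict()["layer.bias_hh_l0"].numpy()
# h_t = tanh(W_ih @ x_t + b_ih + W_hh @ h_{t-1} + b_hh)
ht_next = np.tanh(np.dot(w_ih, xt) + b_ih + np.dot(w_hh, ht) + b_hh)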
  • LSTM

import torch
import torch.nn as nn
import numpy as np

'''
Implement an LSTM with PyTorch,
implement the same LSTM by hand with NumPy,
and compare the two results.
'''

# Construct an input
length = 6
input_dim = 12
hidden_size = 7
x = np.random.random((length, input_dim))
# print(x)

# Use pytorch's lstm layer
torch_lstm = nn.LSTM(input_dim, hidden_size, batch_first=True)

def sigmoid(x):
    return 1/(1 + np.exp(-x))

# Pull the weights out of the pytorch lstm and reproduce its computation
# with numpy matrix operations
def numpy_lstm(x, state_dict):
    weight_ih = state_dict["weight_ih_l0"].numpy()
    weight_hh = state_dict["weight_hh_l0"].numpy()
    bias_ih = state_dict["bias_ih_l0"].numpy()
    bias_hh = state_dict["bias_hh_l0"].numpy()
    # pytorch stores the four gates' weights concatenated (order: i, f, g, o); split them apart
    w_i_x, w_f_x, w_c_x, w_o_x = weight_ih[0:hidden_size, :], \
                                 weight_ih[hidden_size:hidden_size*2, :], \
                                 weight_ih[hidden_size*2:hidden_size*3, :], \
                                 weight_ih[hidden_size*3:hidden_size*4, :]
    w_i_h, w_f_h, w_c_h, w_o_h = weight_hh[0:hidden_size, :], \
                                 weight_hh[hidden_size:hidden_size * 2, :], \
                                 weight_hh[hidden_size * 2:hidden_size * 3, :], \
                                 weight_hh[hidden_size * 3:hidden_size * 4, :]
    b_i_x, b_f_x, b_c_x, b_o_x = bias_ih[0:hidden_size], \
                                 bias_ih[hidden_size:hidden_size * 2], \
                                 bias_ih[hidden_size * 2:hidden_size * 3], \
                                 bias_ih[hidden_size * 3:hidden_size * 4]
    b_i_h, b_f_h, b_c_h, b_o_h = bias_hh[0:hidden_size], \
                                 bias_hh[hidden_size:hidden_size * 2], \
                                 bias_hh[hidden_size * 2:hidden_size * 3], \
                                 bias_hh[hidden_size * 3:hidden_size * 4]
    w_i = np.concatenate([w_i_h, w_i_x], axis=1)
    w_f = np.concatenate([w_f_h, w_f_x], axis=1)
    w_c = np.concatenate([w_c_h, w_c_x], axis=1)
    w_o = np.concatenate([w_o_h, w_o_x], axis=1)
    b_f = b_f_h + b_f_x
    b_i = b_i_h + b_i_x
    b_c = b_c_h + b_c_x
    b_o = b_o_h + b_o_x
    c_t = np.zeros((1, hidden_size))
    h_t = np.zeros((1, hidden_size))
    sequence_output = []
    for x_t in x:
        x_t = x_t[np.newaxis, :]
        hx = np.concatenate([h_t, x_t], axis=1)
        f_t = sigmoid(np.dot(hx, w_f.T) + b_f)  # forget gate
        i_t = sigmoid(np.dot(hx, w_i.T) + b_i)  # input gate
        g = np.tanh(np.dot(hx, w_c.T) + b_c)    # candidate cell state
        c_t = f_t * c_t + i_t * g               # cell state update
        o_t = sigmoid(np.dot(hx, w_o.T) + b_o)  # output gate
        h_t = o_t * np.tanh(c_t)                # hidden state
        sequence_output.append(h_t)
    return np.array(sequence_output), (h_t, c_t)

torch_sequence_output, (torch_h, torch_c) = torch_lstm(torch.Tensor([x]))
numpy_sequence_output, (numpy_h, numpy_c) = numpy_lstm(x, torch_lstm.state_dict())

print(torch_sequence_output)
print(numpy_sequence_output)
# tensor([[[ 0.1510,  0.0955, -0.0583, -0.1020, -0.0862,  0.0469, -0.1222],
#          [ 0.3499,  0.3118,  0.0412, -0.1871, -0.1361,  0.0328, -0.1430],
#          [ 0.4267,  0.2855,  0.0044, -0.2064, -0.2331, -0.0331, -0.1591],
#          [ 0.3349,  0.2281,  0.1308, -0.2007, -0.2060, -0.0087, -0.1636],
#          [ 0.4035,  0.2886,  0.2859, -0.2461, -0.2607,  0.0434, -0.1699],
#          [ 0.3989,  0.3567,  0.1628, -0.1877, -0.2917,  0.0045, -0.2407]]],
#        grad_fn=<TransposeBackward0>)
# [[[ 0.15099122  0.09554478 -0.05826778 -0.10197903 -0.08624412
#     0.04691251 -0.12215472]]
#  [[ 0.34985161  0.31178944  0.04115072 -0.18708519 -0.13612159
#     0.03282586 -0.14295764]]
#  [[ 0.42666856  0.28547003  0.00443672 -0.20642571 -0.23312739
#    -0.0331053  -0.15905215]]
#  [[ 0.33494093  0.22813763  0.13078913 -0.20069858 -0.2059699
#    -0.00873538 -0.16356931]]
#  [[ 0.40352166  0.28857645  0.28591458 -0.24610013 -0.26066421
#     0.04338363 -0.1699052 ]]
#  [[ 0.39891811  0.35666413  0.16277961 -0.18774825 -0.29172435
#     0.00446361 -0.24067678]]]
print(torch_h)
print(numpy_h)
# tensor([[[ 0.3989,  0.3567,  0.1628, -0.1877, -0.2917,  0.0045, -0.2407]]],
#        grad_fn=<StackBackward0>)
# [[ 0.39891811  0.35666413  0.16277961 -0.18774825 -0.29172435  0.00446361
#   -0.24067678]]
print(torch_c)
print(numpy_c)
# tensor([[[ 0.5560,  0.6629,  0.2972, -0.9371, -0.5549,  0.0061, -0.7561]]],
#        grad_fn=<StackBackward0>)
# [[ 0.55596722  0.6628956   0.29717248 -0.93707451 -0.5548541   0.00609941
#   -0.7561093 ]]
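The long manual slicing inside numpy_lstm can be written more compactly with np.split; a minimal sketch that is equivalent under PyTorch's documented gate order (input, forget, cell, output) for nn.LSTM:

w_i_x, w_f_x, w_c_x, w_o_x = np.split(weight_ih, 4, axis=0)  # each (hidden_size, input_dim)
w_i_h, w_f_h, w_c_h, w_o_h = np.split(weight_hh, 4, axis=0)  # each (hidden_size, hidden_size)
b_i, b_f, b_c, b_o = (bias_ih + bias_hh).reshape(4, hidden_size)  # the two bias vectors can be summed first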
  • GRU

import torch
import torch.nn as nn
import numpy as np

'''
Implement a GRU with PyTorch,
implement the same GRU by hand with NumPy,
and compare the two results.
'''

# Construct an input
length = 6
input_dim = 12
hidden_size = 7
x = np.random.random((length, input_dim))

def sigmoid(x):
    return 1/(1 + np.exp(-x))

# Use pytorch's GRU layer
torch_gru = nn.GRU(input_dim, hidden_size, batch_first=True)

# Pull the weights out of the pytorch GRU and reproduce its computation
# with numpy matrix operations
def numpy_gru(x, state_dict):
    weight_ih = state_dict["weight_ih_l0"].numpy()
    weight_hh = state_dict["weight_hh_l0"].numpy()
    bias_ih = state_dict["bias_ih_l0"].numpy()
    bias_hh = state_dict["bias_hh_l0"].numpy()
    # pytorch stores the three gates' weights concatenated (order: r, z, n); split them apart
    w_r_x, w_z_x, w_x = weight_ih[0:hidden_size, :], \
                        weight_ih[hidden_size:hidden_size * 2, :], \
                        weight_ih[hidden_size * 2:hidden_size * 3, :]
    w_r_h, w_z_h, w_h = weight_hh[0:hidden_size, :], \
                        weight_hh[hidden_size:hidden_size * 2, :], \
                        weight_hh[hidden_size * 2:hidden_size * 3, :]
    b_r_x, b_z_x, b_x = bias_ih[0:hidden_size], \
                        bias_ih[hidden_size:hidden_size * 2], \
                        bias_ih[hidden_size * 2:hidden_size * 3]
    b_r_h, b_z_h, b_h = bias_hh[0:hidden_size], \
                        bias_hh[hidden_size:hidden_size * 2], \
                        bias_hh[hidden_size * 2:hidden_size * 3]
    w_z = np.concatenate([w_z_h, w_z_x], axis=1)
    w_r = np.concatenate([w_r_h, w_r_x], axis=1)
    b_z = b_z_h + b_z_x
    b_r = b_r_h + b_r_x
    h_t = np.zeros((1, hidden_size))
    sequence_output = []
    for x_t in x:
        x_t = x_t[np.newaxis, :]
        hx = np.concatenate([h_t, x_t], axis=1)
        z_t = sigmoid(np.dot(hx, w_z.T) + b_z)  # update gate
        r_t = sigmoid(np.dot(hx, w_r.T) + b_r)  # reset gate
        # candidate state; note pytorch applies the reset gate to the projected hidden state
        h = np.tanh(r_t * (np.dot(h_t, w_h.T) + b_h) + np.dot(x_t, w_x.T) + b_x)
        h_t = (1 - z_t) * h + z_t * h_t         # interpolate between candidate and previous state
        sequence_output.append(h_t)
    return np.array(sequence_output), h_t

torch_sequence_output, torch_h = torch_gru(torch.Tensor([x]))
numpy_sequence_output, numpy_h = numpy_gru(x, torch_gru.state_dict())
print(torch_sequence_output)
print(numpy_sequence_output)
# tensor([[[ 0.1594, -0.1153, -0.4586, -0.0186,  0.2189,  0.1135, -0.3679],
#          [ 0.4451, -0.2922, -0.7419, -0.1512,  0.3880,  0.1821, -0.5169],
#          [ 0.5038, -0.3299, -0.5477,  0.0747,  0.5901,  0.1773, -0.4521],
#          [ 0.4910, -0.3767, -0.7289,  0.0494,  0.7221,  0.1721, -0.5471],
#          [ 0.4542, -0.4090, -0.7239,  0.1361,  0.6370,  0.1548, -0.5263],
#          [ 0.4567, -0.2905, -0.5265,  0.2888,  0.7370,  0.0703, -0.3895]]],
#        grad_fn=<TransposeBackward1>)
# [[[ 0.15942519 -0.11533116 -0.45862967 -0.01859021  0.21886492
#     0.1134777  -0.36790504]]
#  [[ 0.44507494 -0.29216005 -0.74194526 -0.15115649  0.38796698
#     0.18213155 -0.51694817]]
#  [[ 0.50378448 -0.32986759 -0.54772077  0.07470304  0.5900535
#     0.17727659 -0.45206073]]
#  [[ 0.49101454 -0.37669122 -0.72891378  0.04940939  0.72214786
#     0.17210141 -0.54709095]]
#  [[ 0.45421076 -0.40898236 -0.72388504  0.13608405  0.63704706
#     0.15476695 -0.52628761]]
#  [[ 0.45674199 -0.2905453  -0.52648383  0.28878751  0.73695645
#     0.07033669 -0.38946996]]]
print(torch_h)
print(numpy_h)
# tensor([[[ 0.4567, -0.2905, -0.5265,  0.2888,  0.7370,  0.0703, -0.3895]]],
#        grad_fn=<StackBackward0>)
# [[ 0.45674199 -0.2905453  -0.52648383  0.28878751  0.73695645  0.07033669
#   -0.38946996]]
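As with the earlier sections, agreement can be confirmed numerically; a minimal sketch, assuming the script above has just run:

print(np.allclose(numpy_h, torch_h.detach().numpy().squeeze(0), atol=1e-5))  # True
print(np.allclose(numpy_sequence_output.squeeze(1),
                  torch_sequence_output.detach().numpy().squeeze(0), atol=1e-5))  # True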
