CUDA内存组织
CUDA设备内存的分类与特征
序号 | 内存类型 | 物理位置 | 访问权限 | 可见范围 | 生命周期
---|---|---|---|---|---
1 | 全局内存 | 芯片外 | 可读写 | 所有线程和主机端 | 由主机分配与释放
2 | 常量内存 | 芯片外 | 只读 | 所有线程和主机端 | 由主机分配与释放
3 | 纹理和表面内存 | 芯片外 | 一般只读 | 所有线程和主机端 | 由主机分配与释放
4 | 寄存器内存 | 芯片内 | 可读写 | 单个线程 | 所在线程
5 | 局部内存 | 芯片外 | 可读写 | 单个线程 | 所在线程
6 | 共享内存 | 芯片内 | 可读写 | 单个线程块 | 所在线程块
-
全局内存:核函数中所有线程都能访问其中的数据。
用cudaMalloc()为全局内存变量分配设备内存;
用cudaMemcpy()将主机数据复制到全局内存; -
常量内存:一共64KB,只读,可见范围与生命周期与全局内存一样,访问速度比全局内存快;在核函数外面用 __constant__ 定义变量;并使用cudaMemcpyToSymbol()将数据从主机端复制到设备的常量内存。
-
纹理内存与表面内存:类似于常量内存(可见范围与生命周期相同);
-
寄存器:在核函数中定义的不加任何限定符的变量一般来说放在寄存器中,核函数定义不加任何限定符的数组可能放于寄存器,也可能放于局部内存中;
-
局部内存:寄存器放不下的变量,索引值不能在编译时确定的数组;
-
共享内存:与寄存器类似,存在于芯片上,仅次于寄存器的读写速度;
CUDA中的内存组织示意图
GPU设备规格查询
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

// Query device 0 and print its key hardware limits.
// Returns 0 on success, 1 if the property query fails (e.g. no CUDA device).
int main()
{
    int device_id = 0;
    cudaDeviceProp prop;
    cudaError_t err = cudaGetDeviceProperties(&prop, device_id);
    if (err != cudaSuccess)
    {
        // Without this check the struct below would be read uninitialized.
        fprintf(stderr, "cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("Device id: %d\n", device_id);
    printf("Device name: %s\n", prop.name);
    printf("Compute capability: %d.%d\n", prop.major, prop.minor);
    // totalGlobalMem is in bytes; the original divided by 1024 only (KiB)
    // while labeling the value "GB" — divide by 1024^3.
    printf("Amount of global memory: %g GB\n",
           prop.totalGlobalMem / (1024.0 * 1024.0 * 1024.0));
    printf("Amount of constant memory: %g KB\n", prop.totalConstMem / 1024.0);
    printf("Maximum grid size: %d %d %d\n",
           prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    printf("Maximum block size: %d %d %d\n",
           prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
    printf("Number of SMs: %d\n", prop.multiProcessorCount);
    printf("----------------------------- \n");
    printf("Maximum amount of shared memory per block: %g KB\n",
           prop.sharedMemPerBlock / 1024.0);
    printf("Maximum amount of shared memory per SM: %g KB\n",
           prop.sharedMemPerMultiprocessor / 1024.0);
    // The original passed a double (x / 1024.0) to %d — undefined behavior.
    // Use integer division so the argument matches the conversion specifier.
    printf("Maximum number of registers per block: %d K\n", prop.regsPerBlock / 1024);
    printf("Maximum number of registers per SM: %d K\n", prop.regsPerMultiprocessor / 1024);
    printf("Maximum number of threads per block: %d \n", prop.maxThreadsPerBlock);
    printf("Maximum number of threads per SM: %d \n", prop.maxThreadsPerMultiProcessor);
    return 0;
}
全局内存的合并与非合并访问
合并访问:一个线程束对全局内存的一次访问(读/写)导致最少数量的数据传输;否则为非合并访问。
利用共享内存和统一内存优化矩阵乘
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <malloc.h>
#include <opencv2/opencv.hpp>
#include <stdlib.h>

// Tiled matrix multiplication using shared memory and unified (managed) memory.
// Computes c = a * b where a is M x N, b is N x K, and c is M x K.
#define M 1000
#define N 500
#define K 1000

__managed__ int a[M * N];
__managed__ int b[N * K];
__managed__ int c_gpu[M * K];
// BUG FIX: was [M * N] — the CPU result is M x K (1,000,000 ints), so the old
// size (500,000) was overrun by cpu_matrix, corrupting adjacent memory.
__managed__ int c_cpu[M * K];

#define BLOCK_SIZE 16

// Shared-memory tiled matmul kernel.
// Expects a 2D launch: block = (BLOCK_SIZE, BLOCK_SIZE),
// grid = (ceil(k/BLOCK_SIZE), ceil(m/BLOCK_SIZE)).
// a: m x n, b: n x k, c: m x k (row-major).
__global__ void gpu_matrix(int* a, int* b, int* c, int m, int n, int k)
{
    __shared__ int sub_a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ int sub_b[BLOCK_SIZE][BLOCK_SIZE];
    int x = blockIdx.x * blockDim.x + threadIdx.x;  // column index in c
    int y = blockIdx.y * blockDim.y + threadIdx.y;  // row index in c
    int tmp = 0;
    int idx;
    // BUG FIX: loop count was the macro expression N/BLOCK_SIZE, which
    // (a) ignored the runtime parameter n and (b) truncated the remainder
    // (N=500 is not a multiple of 16, so the last 4 columns of each row
    // were never accumulated). Ceil-divide over n instead.
    for (int step = 0; step < (n + BLOCK_SIZE - 1) / BLOCK_SIZE; step++)
    {
        // Load one tile of a into shared memory.
        int step_x = step * BLOCK_SIZE + threadIdx.x;
        int step_y = y;
        idx = step_y * n + step_x;
        // BUG FIX: bounds test was '>' — allowed one-past-the-end reads.
        if (step_x >= n || step_y >= m)
        {
            sub_a[threadIdx.y][threadIdx.x] = 0;
        }
        else
        {
            // BUG FIX: was sub_a[threadIdx.x][threadIdx.x] — wrote the
            // diagonal only, leaving the rest of the tile uninitialized.
            sub_a[threadIdx.y][threadIdx.x] = a[idx];
        }
        // Load one tile of b into shared memory.
        step_x = x;
        step_y = step * BLOCK_SIZE + threadIdx.y;
        // BUG FIX: was step * k + step_x — dropped the BLOCK_SIZE/threadIdx.y
        // factor, so every thread row read the same row of b.
        idx = step_y * k + step_x;
        if (step_x >= k || step_y >= n)
        {
            sub_b[threadIdx.y][threadIdx.x] = 0;
        }
        else
        {
            sub_b[threadIdx.y][threadIdx.x] = b[idx];
        }
        // All threads must finish writing the tiles before anyone reads them.
        __syncthreads();
        for (int i = 0; i < BLOCK_SIZE; i++)
        {
            tmp += sub_a[threadIdx.y][i] * sub_b[i][threadIdx.x];
        }
        // Prevent the next iteration from overwriting tiles still being read.
        __syncthreads();
    }
    if (x < k && y < m)
    {
        c[y * k + x] = tmp;
    }
}

// CPU reference: c = a * b with a: m x n, b: n x k, c: m x k (row-major).
void cpu_matrix(int* a, int* b, int* c, int m, int n, int k)
{
    for (int y = 0; y < m; y++)
    {
        for (int x = 0; x < k; x++)
        {
            int tmp = 0;
            for (int step = 0; step < n; step++)
            {
                // BUG FIX: was b[step * n + x] — b's row stride is k, not n,
                // so the reference itself computed the wrong product.
                tmp += a[y * n + step] * b[step * k + x];
            }
            c[y * k + x] = tmp;
        }
    }
}

int main()
{
    // Fill a and b with pseudo-random values (managed memory is host-visible).
    for (int y = 0; y < M; y++)
    {
        for (int x = 0; x < N; x++)
        {
            a[y * N + x] = rand() % 1024;
        }
    }
    for (int y = 0; y < N; y++)
    {
        for (int x = 0; x < K; x++)
        {
            b[y * K + x] = rand() % 1024;
        }
    }
    // Ceil-div so partial tiles at the right/bottom edges are covered.
    unsigned int grid_x = (K + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_y = (M + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_x, grid_y);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    gpu_matrix<<<dimGrid, dimBlock>>>(a, b, c_gpu, M, N, K);
    // Surface launch-configuration errors immediately.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cpu_matrix(a, b, c_cpu, M, N, K);
    // BUG FIX: the launch is asynchronous — without this sync the host read
    // of c_gpu below races with the kernel and compares garbage.
    cudaDeviceSynchronize();
    bool errors = false;
    for (int y = 0; y < M; y++)
    {
        for (int x = 0; x < K; x++)
        {
            // Integer results: exact comparison (fabs on ints was pointless).
            if (c_cpu[y * K + x] != c_gpu[y * K + x])
            {
                errors = true;
            }
        }
    }
    printf("Result: %s\n", errors ? "Error" : "Pass");
    return 0;
}