// opencl_gpu.cpp
// OpenCL matrix-multiplication demo: computes C = A * B^T on the GPU and
// validates the result against a CPU reference implementation.
// (Original listing: 216 lines / 194 loc, 6.36 KB.)
#include <vector>
#include <cstdlib>
#include <omp.h>
#include <algorithm>
#include <iostream>
#include <ctime>
#include <cmath>
#define CL_TARGET_OPENCL_VERSION 200
#include <CL/cl.h>
using namespace std;
#define TILE_SIZE 16
// CPU reference matrix multiply.
// Computes C[i][j] = sum_l A[i][l] * B[j][l], i.e. C = A * B^T with both
// A (m x k) and B (n x k) stored row-major. This matches the GPU kernel's
// indexing so the two results are directly comparable.
void gemm(vector<float> &C, const vector<float> &A, const vector<float> &B, int m, int n, int k)
{
    for (int row = 0; row < m; ++row)
    {
        const float *a_row = &A[row * k];
        for (int col = 0; col < n; ++col)
        {
            const float *b_row = &B[col * k];
            float acc = 0.0f;
            for (int idx = 0; idx < k; ++idx)
            {
                acc += a_row[idx] * b_row[idx];
            }
            C[row * n + col] = acc;
        }
    }
}
// Build a rows x cols row-major matrix filled with pseudo-random values
// drawn uniformly from (0, 1] via rand(); caller controls the seed with srand().
vector<float> generateMatrix(int rows, int cols)
{
    const int total = rows * cols;
    vector<float> matrix(total);
    for (int idx = 0; idx < total; ++idx)
    {
        // rand() % 99999 + 1 is in [1, 99999]; dividing by 100000 maps to (0, 1].
        matrix[idx] = static_cast<float>(rand() % 99999 + 1) / 100000;
    }
    return matrix;
}
// Print a rows x cols row-major matrix to stdout, one row per line,
// values separated by single spaces.
void printMatrix(const vector<float> &matrix, int rows, int cols)
{
    for (int r = 0; r < rows; ++r)
    {
        for (int c = 0; c < cols; ++c)
        {
            cout << matrix[r * cols + c] << " ";
        }
        cout << endl;
    }
}
// Return the largest element-wise absolute difference |cpu - gpu|
// between the two result vectors (assumed equal length).
float computeAbsoluteError(const vector<float> &C_cpu, const vector<float> &C_gpu)
{
    float worst = 0.0f;
    for (size_t idx = 0; idx < C_cpu.size(); ++idx)
    {
        const float diff = fabs(C_cpu[idx] - C_gpu[idx]);
        worst = max(worst, diff);
    }
    return worst;
}
// Return the largest element-wise relative error |cpu - gpu| / |cpu|
// between the two result vectors (assumed equal length).
// Entries whose reference value is exactly zero are skipped: the relative
// error is undefined there, and the original division produced inf/NaN
// that would poison the running maximum.
float computeRelativeError(const vector<float> &C_cpu, const vector<float> &C_gpu)
{
    float max_rel_error = 0.0f;
    for (size_t i = 0; i < C_cpu.size(); ++i)
    {
        if (C_cpu[i] == 0.0f)
        {
            continue; // relative error undefined for a zero reference
        }
        float rel_error = fabs((C_cpu[i] - C_gpu[i]) / C_cpu[i]);
        if (rel_error > max_rel_error)
        {
            max_rel_error = rel_error;
        }
    }
    return max_rel_error;
}
int main()
{
srand(time(0));
int m = 512, n = 512, k = 512;
// 生成随机矩阵 A 和 B
vector<float> A = generateMatrix(m, k);
vector<float> B = generateMatrix(n, k);
vector<float> C_GPU(m * n);
vector<float> C_CPU(m * n);
// --- OpenCL 初始化 ---
cl_int err;
cl_platform_id platform;
cl_device_id device;
cl_program program;
cl_kernel kernel;
const char *kernelSource = R"(
#define CL_TARGET_OPENCL_VERSION 200
#define TILE_SIZE 16
__kernel void matrixMul(__global const float *A, // 矩阵 A
__global const float *B, // 矩阵 B
__global float *C, // 矩阵 C
const int M, const int N, const int K)
{
int row = get_global_id(0);
int col = get_global_id(1);
float value = 0.0f;
for (int i = 0; i < K; i++)
{
value += A[row * K + i] * B[col * K + i];
}
C[row * N + col] = value;
}
)";
// 选择平台和设备
clGetPlatformIDs(1, &platform, NULL);
clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
// 创建上下文
cl_context context = clCreateContext(NULL, 1, &device, NULL, NULL, NULL);
// 创建命令队列
cl_queue_properties properties[] = {0}; // 可以根据需要添加属性
cl_command_queue queue = clCreateCommandQueueWithProperties(context, device, properties, &err);
// 创建并编译程序
program = clCreateProgramWithSource(context, 1, &kernelSource, NULL, &err);
if (err != CL_SUCCESS)
{
cerr << "Failed to create program" << endl;
exit(1);
}
err = clBuildProgram(program, 1, &device, NULL, NULL, NULL);
if (err != CL_SUCCESS)
{
cerr << "Failed to build program" << endl;
// 获取并输出编译日志
char buildLog[16384];
clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, sizeof(buildLog), buildLog, NULL);
cerr << "Build log:\n"
<< buildLog << endl;
exit(1);
}
// 创建内核
kernel = clCreateKernel(program, "matrixMul", &err);
if (err != CL_SUCCESS)
{
cerr << "Failed to create kernel" << endl;
exit(1);
}
// 创建缓冲区
cl_mem bufferA = clCreateBuffer(context, CL_MEM_READ_ONLY, m * k * sizeof(float), NULL, NULL);
cl_mem bufferB = clCreateBuffer(context, CL_MEM_READ_ONLY, n * k * sizeof(float), NULL, NULL);
cl_mem bufferC = clCreateBuffer(context, CL_MEM_WRITE_ONLY, m * n * sizeof(float), NULL, NULL);
clEnqueueWriteBuffer(queue, bufferA, CL_TRUE, 0, m * k * sizeof(float), A.data(), 0, NULL, NULL);
clEnqueueWriteBuffer(queue, bufferB, CL_TRUE, 0, n * k * sizeof(float), B.data(), 0, NULL, NULL);
// 设置内核参数
clSetKernelArg(kernel, 0, sizeof(cl_mem), &bufferA);
clSetKernelArg(kernel, 1, sizeof(cl_mem), &bufferB);
clSetKernelArg(kernel, 2, sizeof(cl_mem), &bufferC);
clSetKernelArg(kernel, 3, sizeof(int), &m);
clSetKernelArg(kernel, 4, sizeof(int), &n);
clSetKernelArg(kernel, 5, sizeof(int), &k);
cout << "7 seconds" << endl;
// 设置全局和局部工作组大小
size_t globalWorkSize[2] = {size_t(m), size_t(n)};
size_t localWorkSize[2] = {TILE_SIZE, TILE_SIZE};
err = clEnqueueNDRangeKernel(queue, kernel, 2, NULL, globalWorkSize, localWorkSize, 0, NULL, NULL);
if (err != CL_SUCCESS)
{
cerr << "Failed to enqueue NDRange kernel. Error code: " << err << endl;
}
// 等待内核执行完成
clFinish(queue);
// 读取结果
clEnqueueReadBuffer(queue, bufferC, CL_TRUE, 0, m * n * sizeof(float), C_GPU.data(), 0, NULL, NULL);
// 测试正确性和误差
gemm(C_CPU, A, B, m, n, k);
// 计算绝对误差和相对误差
float max_abs_error = computeAbsoluteError(C_CPU, C_GPU);
float max_rel_error = computeRelativeError(C_CPU, C_GPU);
cout << "Max absolute error: " << max_abs_error << endl;
cout << "Max relative error: " << max_rel_error << endl;
// cout << "A: " << endl;
// printMatrix(A, m, k);
// cout << "B: " << endl;
// printMatrix(B, n, k);
// cout << "C_GPU: " << endl;
// printMatrix(C_GPU, m, n);
// cout << "C_CPU: " << endl;
// printMatrix(C_CPU, m, n);
// 清理资源
clReleaseMemObject(bufferA);
clReleaseMemObject(bufferB);
clReleaseMemObject(bufferC);
clReleaseKernel(kernel);
clReleaseProgram(program);
clReleaseCommandQueue(queue);
clReleaseContext(context);
return 0;
}