#include <cmath>
#include <iostream>
#include <cuda_fp16.h> // FP16 (__half) support

#include "gpu-new-forward.h"

#define TILE_WIDTH 16
#define BLOCK_SIZE 512

// Converts a float array to half precision, one element per thread.
__global__ void convert_float_to_half_kernel(const float *input, __half *output, int size) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) {
        output[idx] = __float2half(input[idx]);
    }
}

// Converts a half-precision array back to float, one element per thread.
// (Currently unused: the matmul kernel already writes its result as float.)
__global__ void convert_half_to_float_kernel(const __half *input, float *output, int size) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) {
        output[idx] = __half2float(input[idx]);
    }
}
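
// A possible throughput optimization (a sketch, not used below): convert two floats
// per thread via the vectorized __half2 type. This assumes the element count is even
// and the buffers are suitably aligned; odd sizes would need a scalar tail pass.
__global__ void convert_float_to_half2_kernel(const float2 *input, __half2 *output, int pair_count) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < pair_count) {
        output[idx] = __float22half2_rn(input[idx]); // round-to-nearest-even on both lanes
    }
}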
// Input matrix unrolling (im2col) kernel.
__global__ void matrix_unrolling_kernel(const __half *input, __half *output,
                                        const int Batch, const int Channel,
                                        const int Height, const int Width,
                                        const int K) {
    /*
    Function parameter definitions:
    input   - input feature maps
    output  - unrolled output matrix
    Batch   - batch_size (number of images in x)
    Channel - number of input feature maps
    Height  - input height dimension
    Width   - input width dimension
    K       - kernel height and width (K x K)
    */

    #define in_4d(i3, i2, i1, i0) input[(i3) * (Channel * Height * Width) + (i2) * (Height * Width) + (i1) * (Width) + i0]
    #define out_3d(i1, i0) output[(i1) * (Batch * W_unroll) + i0]

    // Calculate output dimensions
    const size_t Height_out = Height - K + 1;
    const size_t Width_out = Width - K + 1;
    const size_t W_unroll = Height_out * Width_out; // columns contributed per image
    // Unrolled matrix shape: (Channel * K * K) rows x (Batch * W_unroll) columns

    // Calculate thread indices
    const size_t c = blockIdx.x * blockDim.x + threadIdx.x;         // Channel/map index
    const size_t hw_pos = blockIdx.y * blockDim.y + threadIdx.y;    // Combined height-width position
    const size_t batch_idx = blockIdx.z * blockDim.z + threadIdx.z; // Batch index

    // Extract height and width positions
    const size_t h_out = hw_pos / Width_out; // Height position
    const size_t w_out = hw_pos % Width_out; // Width position

    // Boundary check
    if (c >= Channel || h_out >= Height_out || w_out >= Width_out || batch_idx >= Batch) {
        return;
    }

    // Calculate this thread's column in the unrolled matrix
    const size_t w_unroll = h_out * Width_out + w_out;
    const size_t w_total_unroll = batch_idx * W_unroll + w_unroll;
    const size_t w_base = c * K * K;

    // Perform unrolling: each K x K input patch becomes one column segment
    for (int p = 0; p < K; p++) {
        for (int q = 0; q < K; q++) {
            const size_t h_unroll = w_base + p * K + q;
            out_3d(h_unroll, w_total_unroll) = in_4d(batch_idx, c, h_out + p, w_out + q);
        }
    }

    #undef in_4d
    #undef out_3d
}
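
// Host-side reference implementation (a hypothetical helper, not part of the
// assignment interface): a plain CPU im2col with the same row/column layout as the
// kernel above, useful for validating the unrolled matrix on small inputs.
static void unroll_cpu_reference(const float *in, float *out, int Batch, int Channel,
                                 int Height, int Width, int K) {
    const int Height_out = Height - K + 1;
    const int Width_out = Width - K + 1;
    const int W_unroll = Height_out * Width_out;
    for (int b = 0; b < Batch; b++)
        for (int c = 0; c < Channel; c++)
            for (int h = 0; h < Height_out; h++)
                for (int w = 0; w < Width_out; w++)
                    for (int p = 0; p < K; p++)
                        for (int q = 0; q < K; q++) {
                            const size_t row = (size_t)c * K * K + p * K + q;
                            const size_t col = (size_t)b * W_unroll + h * Width_out + w;
                            out[row * ((size_t)Batch * W_unroll) + col] =
                                in[(((size_t)b * Channel + c) * Height + h + p) * Width + w + q];
                        }
}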
// Tiled matrix multiplication kernel. Computes C = AB with half-precision inputs
// and float accumulation/output.
// You don't need to modify this kernel.
__global__ void matrixMultiplyShared(const __half *A, const __half *B, float *C,
                                     int numARows, int numAColumns,
                                     int numBRows, int numBColumns,
                                     int numCRows, int numCColumns)
{
    __shared__ __half tileA[TILE_WIDTH][TILE_WIDTH];
    __shared__ __half tileB[TILE_WIDTH][TILE_WIDTH];

    int by = blockIdx.y, bx = blockIdx.x, ty = threadIdx.y, tx = threadIdx.x;
    int row = by * TILE_WIDTH + ty, col = bx * TILE_WIDTH + tx;
    float val = 0.0f;

    for (int tileId = 0; tileId < (numAColumns - 1) / TILE_WIDTH + 1; tileId++) {
        // Load one tile of A and one tile of B into shared memory,
        // zero-padding out-of-range elements.
        if (row < numARows && tileId * TILE_WIDTH + tx < numAColumns) {
            tileA[ty][tx] = A[(size_t) row * numAColumns + tileId * TILE_WIDTH + tx];
        } else {
            tileA[ty][tx] = __float2half(0.0f);
        }
        if (col < numBColumns && tileId * TILE_WIDTH + ty < numBRows) {
            tileB[ty][tx] = B[((size_t) tileId * TILE_WIDTH + ty) * numBColumns + col];
        } else {
            tileB[ty][tx] = __float2half(0.0f);
        }
        __syncthreads();

        if (row < numCRows && col < numCColumns) {
            // Accumulate in float to limit the precision loss of the FP16 operands.
            for (int i = 0; i < TILE_WIDTH; i++) {
                val += __half2float(tileA[ty][i]) * __half2float(tileB[i][tx]);
            }
        }
        __syncthreads();
    }

    if (row < numCRows && col < numCColumns) {
        // size_t cast matches the loads above and avoids int overflow for large C.
        C[(size_t) row * numCColumns + col] = val;
    }
}
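
// Alternative (a sketch only, assuming cuBLAS >= 11 is available in the build; not
// used by this file): the same mixed-precision GEMM can be delegated to cublasGemmEx.
// cuBLAS is column-major, so a row-major C = A*B is computed as C^T = B^T * A^T by
// swapping the operand order.
//
// #include <cublas_v2.h>
// void matmul_cublas(cublasHandle_t handle, const __half *A, const __half *B, float *C,
//                    int numARows, int numAColumns, int numBColumns) {
//     const float alpha = 1.0f, beta = 0.0f;
//     cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N,
//                  numBColumns, numARows, numAColumns,
//                  &alpha,
//                  B, CUDA_R_16F, numBColumns,
//                  A, CUDA_R_16F, numAColumns,
//                  &beta,
//                  C, CUDA_R_32F, numBColumns,
//                  CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT);
// }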
// Permutes the matmul result.
// The output feature map after matmul is of shape Map_out x Batch x Height_out x Width_out,
// and we need to permute it into Batch x Map_out x Height_out x Width_out.
// You don't need to modify this kernel.
__global__ void matrix_permute_kernel(const float *input, float *output, int Map_out,
                                      int Batch, int image_size) {
    int b = blockIdx.y;
    int x = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if (x < image_size) {
        for (int m = 0; m < Map_out; m++) {
            output[b * Map_out * image_size + m * image_size + x] =
                input[m * Batch * image_size + b * image_size + x];
        }
    }
}
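
// Worked example of the permutation above (illustrative values): with Map_out = 2,
// Batch = 2, and image_size = 3, the element for (m = 1, b = 0, x = 2) moves from
// input[1*2*3 + 0*3 + 2] = input[8] to output[0*2*3 + 1*3 + 2] = output[5].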
__host__ void GPUInterface::conv_forward_gpu_prolog(const float *host_output, const float *host_input, const float *host_mask,
                                                    float **device_output_ptr, float **device_input_ptr, float **device_mask_ptr,
                                                    const int Batch, const int Map_out, const int Channel, const int Height, const int Width, const int K)
{
    // Calculate sizes
    const int Height_out = Height - K + 1;
    const int Width_out = Width - K + 1;
    const int input_size = Batch * Channel * Height * Width;
    const int mask_size = Map_out * Channel * K * K;
    const int output_size = Batch * Map_out * Height_out * Width_out;

    // Allocate device memory for the output (float)
    cudaMalloc((void**)device_output_ptr, output_size * sizeof(float));

    // Allocate device staging memory for the input and mask in float
    float *device_input_float;
    float *device_mask_float;
    cudaMalloc((void**)&device_input_float, input_size * sizeof(float));
    cudaMalloc((void**)&device_mask_float, mask_size * sizeof(float));

    // Copy the host input and mask to the device (float)
    cudaMemcpy(device_input_float, host_input, input_size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(device_mask_float, host_mask, mask_size * sizeof(float), cudaMemcpyHostToDevice);

    // Allocate device memory for the input and mask in half precision
    __half *device_input_half;
    __half *device_mask_half;
    cudaMalloc((void**)&device_input_half, input_size * sizeof(__half));
    cudaMalloc((void**)&device_mask_half, mask_size * sizeof(__half));

    // Convert the input and mask from float to half precision on the device
    int threads_per_block = 1024;
    int blocks_per_grid_input = (input_size + threads_per_block - 1) / threads_per_block;
    convert_float_to_half_kernel<<<blocks_per_grid_input, threads_per_block>>>(device_input_float, device_input_half, input_size);
    int blocks_per_grid_mask = (mask_size + threads_per_block - 1) / threads_per_block;
    convert_float_to_half_kernel<<<blocks_per_grid_mask, threads_per_block>>>(device_mask_float, device_mask_half, mask_size);

    // Free the float staging buffers (cudaFree synchronizes with the device, so the
    // conversion kernels have finished before the memory is released)
    cudaFree(device_input_float);
    cudaFree(device_mask_float);

    // Pass back the half-precision pointers as float pointers; they are
    // reinterpreted in conv_forward_gpu and freed in the epilog
    *device_input_ptr = (float*)device_input_half;
    *device_mask_ptr = (float*)device_mask_half;
}
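
// Hypothetical error-checking wrapper (an addition, not part of the original
// interface): the CUDA API calls in this file are unchecked; a macro like this is
// one common way to surface allocation/copy failures during development.
#include <cstdlib> // for std::exit / EXIT_FAILURE
#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t _err = (call);                                         \
        if (_err != cudaSuccess) {                                         \
            std::cerr << "CUDA error at " << __FILE__ << ":" << __LINE__   \
                      << ": " << cudaGetErrorString(_err) << std::endl;    \
            std::exit(EXIT_FAILURE);                                       \
        }                                                                  \
    } while (0)
// Example: CUDA_CHECK(cudaMalloc((void**)device_output_ptr, output_size * sizeof(float)));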
__host__ void GPUInterface::conv_forward_gpu(float *device_output, const float *device_input, const float *device_mask,
                                             const int Batch, const int Map_out, const int Channel, const int Height, const int Width, const int K)
{
    const int Height_out = Height - K + 1;
    const int Width_out = Width - K + 1;
    const int Height_unrolled = Channel * K * K;
    const int Width_unrolled = Batch * Height_out * Width_out;

    // Reinterpret the input and mask pointers as half precision (set up in the prolog)
    const __half *device_input_half = reinterpret_cast<const __half*>(device_input);
    const __half *device_mask_half = reinterpret_cast<const __half*>(device_mask);

    // Allocate temporary storage for the unrolled matrix and the matmul result
    __half *unrolled_matrix; // Device memory for the unrolled input matrix
    float *matmul_output;    // Device memory for the matrix multiplication result
    cudaMalloc((void**)&unrolled_matrix, (size_t) Height_unrolled * Width_unrolled * sizeof(__half));
    cudaMalloc((void**)&matmul_output, (size_t) Map_out * Width_unrolled * sizeof(float));

    // Set the kernel dimensions and call the matrix unrolling kernel
    dim3 blockDim(4, 256, 1);
    dim3 gridDim(
        (Channel + blockDim.x - 1) / blockDim.x,                // Channel dimension
        (Height_out * Width_out + blockDim.y - 1) / blockDim.y, // Combined height/width
        (Batch + blockDim.z - 1) / blockDim.z);                 // Batch dimension
    matrix_unrolling_kernel<<<gridDim, blockDim>>>(device_input_half, unrolled_matrix, Batch, Channel, Height, Width, K);

    // Set the kernel dimensions and call the matmul kernel:
    // C (Map_out x Width_unrolled) = mask (Map_out x Height_unrolled) * unrolled (Height_unrolled x Width_unrolled)
    dim3 dimGrid((Width_unrolled - 1)/TILE_WIDTH + 1, (Map_out - 1)/TILE_WIDTH + 1, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
    matrixMultiplyShared<<<dimGrid, dimBlock>>>(device_mask_half, unrolled_matrix, matmul_output,
                                                Map_out, Height_unrolled,
                                                Height_unrolled, Width_unrolled,
                                                Map_out, Width_unrolled);

    // Permute the matmul result into Batch x Map_out x Height_out x Width_out order
    const int out_image_size = Height_out * Width_out;
    dim3 permute_kernel_grid_dim((out_image_size - 1) / BLOCK_SIZE + 1, Batch, 1);
    matrix_permute_kernel<<<permute_kernel_grid_dim, BLOCK_SIZE>>>(matmul_output, device_output, Map_out, Batch, out_image_size);

    cudaFree(matmul_output);
    cudaFree(unrolled_matrix);
}
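
// Hypothetical profiling sketch (an addition; the placement markers are
// illustrative): CUDA events are one way to time the three launches above.
//
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// cudaEventRecord(start);
// // ... the unroll, matmul, and permute launches ...
// cudaEventRecord(stop);
// cudaEventSynchronize(stop);
// float ms = 0.0f;
// cudaEventElapsedTime(&ms, start, stop);
// std::cout << "conv_forward_gpu kernels took " << ms << " ms" << std::endl;
// cudaEventDestroy(start);
// cudaEventDestroy(stop);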
__host__ void GPUInterface::conv_forward_gpu_epilog(float *host_output, float *device_output, float *device_input, float *device_mask,
                                                    const int Batch, const int Map_out, const int Channel, const int Height, const int Width, const int K)
{
    // Calculate the output size
    const int Height_out = Height - K + 1;
    const int Width_out = Width - K + 1;
    const int output_size = Batch * Map_out * Height_out * Width_out;

    // Copy the output back to the host
    cudaMemcpy(host_output, device_output, output_size * sizeof(float), cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(device_output);
    cudaFree(device_input); // actually the __half* buffer allocated in the prolog
    cudaFree(device_mask);  // actually the __half* buffer allocated in the prolog
}
__host__ void GPUInterface::get_device_properties()
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);

    for (int dev = 0; dev < deviceCount; dev++)
    {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);

        std::cout << "Device " << dev << " name: " << deviceProp.name << std::endl;
        std::cout << "Computational capabilities: " << deviceProp.major << "." << deviceProp.minor << std::endl;
        std::cout << "Max Global memory size: " << deviceProp.totalGlobalMem << std::endl;
        std::cout << "Max Constant memory size: " << deviceProp.totalConstMem << std::endl;
        std::cout << "Max Shared memory size per block: " << deviceProp.sharedMemPerBlock << std::endl;
        std::cout << "Max threads per block: " << deviceProp.maxThreadsPerBlock << std::endl;
        std::cout << "Max block dimensions: " << deviceProp.maxThreadsDim[0] << " x, " << deviceProp.maxThreadsDim[1] << " y, " << deviceProp.maxThreadsDim[2] << " z" << std::endl;
        std::cout << "Max grid dimensions: " << deviceProp.maxGridSize[0] << " x, " << deviceProp.maxGridSize[1] << " y, " << deviceProp.maxGridSize[2] << " z" << std::endl;
        std::cout << "Warp Size: " << deviceProp.warpSize << std::endl;
    }
}