#define _N_SPLINES_ NSplines_GPU
#if defined(__CUDA_ARCH__)
#if __CUDA_ARCH__ >= 1200
#pragma message("Compiling with CUDA Architecture: 12.x")
#elif __CUDA_ARCH__ >= 1100
#pragma message("Compiling with CUDA Architecture: 11.x")
#elif __CUDA_ARCH__ >= 1000
#pragma message("Compiling with CUDA Architecture: 10.x")
#elif __CUDA_ARCH__ >= 900
#pragma message("Compiling with CUDA Architecture: 9.x")
#elif __CUDA_ARCH__ >= 800
#pragma message("Compiling with CUDA Architecture: 8.x")
#elif __CUDA_ARCH__ >= 750
#pragma message("Compiling with CUDA Architecture: 7.5")
#elif __CUDA_ARCH__ >= 730
#pragma message("Compiling with CUDA Architecture: 7.3")
#elif __CUDA_ARCH__ >= 720
#pragma message("Compiling with CUDA Architecture: 7.2")
#elif __CUDA_ARCH__ >= 710
#pragma message("Compiling with CUDA Architecture: 7.1")
#elif __CUDA_ARCH__ >= 700
#pragma message("Compiling with CUDA Architecture: 7.x")
#elif __CUDA_ARCH__ >= 650
#pragma message("Compiling with CUDA Architecture: 6.5")
#elif __CUDA_ARCH__ >= 600
#pragma message("Compiling with CUDA Architecture: 6.x")
#elif __CUDA_ARCH__ >= 530
#pragma message("Compiling with CUDA Architecture: 5.3")
#elif __CUDA_ARCH__ >= 520
#pragma message("Compiling with CUDA Architecture: 5.2")
#elif __CUDA_ARCH__ >= 510
#pragma message("Compiling with CUDA Architecture: 5.1")
#elif __CUDA_ARCH__ >= 500
#pragma message("Compiling with CUDA Architecture: 5.x")
#elif __CUDA_ARCH__ >= 400
#pragma message("Compiling with CUDA Architecture: 4.x")
#elif __CUDA_ARCH__ >= 300
#pragma message("Compiling with CUDA Architecture: 3.x")
#else
#pragma message("Compiling with CUDA Architecture: < 3.x")
#endif
#endif
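// The ladder above only reports the architecture at compile time; __CUDA_ARCH__
// can also gate device code paths. A minimal sketch (not from this file): the
// read-only cache load __ldg is only available from compute capability 3.5.
__device__ float LoadReadOnly(const float* ptr) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 350
  return __ldg(ptr);  // route through the read-only data cache
#else
  return *ptr;        // plain load on older architectures
#endif
}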
/// Number of TF1s living on GPU.
__device__ __constant__ unsigned int d_n_TF1;
#ifndef Weight_On_SplineBySpline_Basis
/// Number of events living on GPU.
__device__ __constant__ int d_n_events;
#endif
/// Make sure all CUDA threads finished execution.
__host__ void SynchroniseSplines() {
  cudaDeviceSynchronize();
}
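// The synchronisation above discards the CUDA return code. A minimal sketch of
// an error-check helper (hypothetical, not part of this file) that could follow it:
#include <cstdio>

inline void GpuCheckLastError(const char* file, const int line) {
  const cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("CUDA error at %s:%i: %s\n", file, line, cudaGetErrorString(err));
  }
}
// Usage sketch: cudaDeviceSynchronize(); GpuCheckLastError(__FILE__, __LINE__);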
__host__ void SMonolithGPU::InitGPU_SplineMonolith(
#ifndef Weight_On_SplineBySpline_Basis
    float **cpu_total_weights, int n_events,
#endif
    unsigned int total_nknots, unsigned int n_splines, unsigned int n_tf1, int Eve_size) {
  cudaMalloc((void **) &gpu_nKnots_arr, n_splines * sizeof(unsigned int));
  cudaMalloc((void **) &gpu_coeff_x, Eve_size * sizeof(float));
  cudaMalloc((void **) &gpu_weights, n_splines * sizeof(float));
#ifndef Weight_On_SplineBySpline_Basis
  cudaMallocHost((void **) cpu_total_weights, n_events * sizeof(float));
#endif
  printf("Allocated %i entries for paramNo and nKnots arrays, size = %f MB\n",
         n_splines,
         static_cast<double>(sizeof(short int) * n_splines + sizeof(unsigned int) * n_splines) / 1.0e6);
  printf("Allocated %i entries for x coeff arrays, size = %f MB\n",
         Eve_size,
         static_cast<double>(sizeof(float) * Eve_size) / 1.0e6);
  printf("Allocated %i entries for {ybcd} coeff arrays, size = %f MB\n",
         _nCoeff_ * total_nknots,
         static_cast<double>(sizeof(float) * _nCoeff_ * total_nknots) / 1.0e6);
  printf("Allocated %i entries for TF1 coefficient arrays, size = %f MB\n",
         _nTF1Coeff_ * n_tf1,
         static_cast<double>(sizeof(float) * _nTF1Coeff_ * n_tf1) / 1.0e6);
}
__host__ void SMonolithGPU::InitGPU_Segments(short int **segment) {
  cudaMallocHost((void **) segment, _N_SPLINES_ * sizeof(short int));
}
__host__ void SMonolithGPU::InitGPU_Vals(float **vals) {
  cudaMallocHost((void **) vals, _N_SPLINES_ * sizeof(float));
}
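// cudaMallocHost above returns page-locked (pinned) host memory, which the GPU
// can DMA directly. A minimal standalone sketch (hypothetical names) of why this
// matters: pinned buffers are required for truly asynchronous copies.
void PinnedCopySketch(const float* d_src, const unsigned int n, cudaStream_t stream) {
  float* h_pinned = nullptr;
  cudaMallocHost((void **) &h_pinned, n * sizeof(float)); // page-locked allocation
  // Asynchronous device-to-host copy; would fall back to synchronous behaviour
  // with ordinary pageable memory.
  cudaMemcpyAsync(h_pinned, d_src, n * sizeof(float), cudaMemcpyDeviceToHost, stream);
  cudaStreamSynchronize(stream); // wait before reading h_pinned
  cudaFreeHost(h_pinned);
}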
__host__ void SMonolithGPU::CopyToGPU_SplineMonolith(
    SplineMonoStruct* cpu_spline_handler,
    std::vector<float> cpu_many_array_TF1,
    std::vector<short int> cpu_paramNo_arr_TF1,
#ifndef Weight_On_SplineBySpline_Basis
    int n_events,
    std::vector<unsigned int> cpu_nParamPerEvent,
    std::vector<unsigned int> cpu_nParamPerEvent_TF1,
#endif
    int n_params,
    unsigned int n_splines,
    short int spline_size,
    unsigned int total_nknots,
    unsigned int n_tf1) {
  if (n_params != _N_SPLINES_) {
    printf("Number of splines not equal to %i, GPU code for event-by-event splines will fail\n", _N_SPLINES_);
    printf("n_params = %i\n", n_params);
    printf("%s : %i\n", __FILE__, __LINE__);
  }
#ifndef Weight_On_SplineBySpline_Basis
  cudaMemcpyToSymbol(d_n_events, &n_events, sizeof(n_events));
#endif
  cudaMemcpyToSymbol(d_n_splines, &n_splines, sizeof(n_splines));
  cudaMemcpyToSymbol(d_n_TF1, &n_tf1, sizeof(n_tf1));
  cudaMemcpyToSymbol(d_spline_size, &spline_size, sizeof(spline_size));
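// cudaMemcpyToSymbol writes straight into __constant__ device memory, so the
// kernels can read these sizes without an extra argument. A minimal sketch of
// the round trip with a hypothetical symbol:
__device__ __constant__ unsigned int d_example_count;

__host__ void SetExampleCount(const unsigned int host_count) {
  cudaMemcpyToSymbol(d_example_count, &host_count, sizeof(host_count));
}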
#ifndef Weight_On_SplineBySpline_Basis
  // ... (build-specific total-weight setup elided in this excerpt)
#endif
  cudaMemcpy(gpu_coeff_x, cpu_spline_handler->coeff_x.data(),
             sizeof(float) * spline_size * n_params, cudaMemcpyHostToDevice);
  struct cudaResourceDesc resDesc_coeff_x;
  memset(&resDesc_coeff_x, 0, sizeof(resDesc_coeff_x));
  resDesc_coeff_x.resType = cudaResourceTypeLinear;
  resDesc_coeff_x.res.linear.devPtr = gpu_coeff_x;
  resDesc_coeff_x.res.linear.desc = cudaCreateChannelDesc<float>();
  resDesc_coeff_x.res.linear.sizeInBytes = sizeof(float) * spline_size * n_params;

  struct cudaTextureDesc texDesc_coeff_x;
  memset(&texDesc_coeff_x, 0, sizeof(texDesc_coeff_x));
  texDesc_coeff_x.readMode = cudaReadModeElementType;

  cudaCreateTextureObject(&text_coeff_x, &resDesc_coeff_x, &texDesc_coeff_x, nullptr);
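// Once created, the texture object is fetched element-wise in device code.
// A minimal sketch with a hypothetical kernel (the real use is in
// EvalOnGPU_Splines below):
__global__ void ReadCoeffX(cudaTextureObject_t tex, float* out, const int n) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = tex1Dfetch<float>(tex, i); // cached, read-only fetch
}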
  cudaMemcpy(gpu_nKnots_arr, cpu_spline_handler->nKnots_arr.data(),
             n_splines * sizeof(unsigned int), cudaMemcpyHostToDevice);
  cudaMemcpy(gpu_paramNo_TF1_arr, cpu_paramNo_arr_TF1.data(),
             n_tf1 * sizeof(short int), cudaMemcpyHostToDevice);
#ifndef Weight_On_SplineBySpline_Basis
  cudaMemcpy(gpu_nParamPerEvent, cpu_nParamPerEvent.data(),
             2 * n_events * sizeof(unsigned int), cudaMemcpyHostToDevice);
  struct cudaResourceDesc resDesc_nParamPerEvent;
  memset(&resDesc_nParamPerEvent, 0, sizeof(resDesc_nParamPerEvent));
  resDesc_nParamPerEvent.resType = cudaResourceTypeLinear;
  resDesc_nParamPerEvent.res.linear.devPtr = gpu_nParamPerEvent;
  resDesc_nParamPerEvent.res.linear.desc = cudaCreateChannelDesc<unsigned int>();
  resDesc_nParamPerEvent.res.linear.sizeInBytes = 2 * n_events * sizeof(unsigned int);

  struct cudaTextureDesc texDesc_nParamPerEvent;
  memset(&texDesc_nParamPerEvent, 0, sizeof(texDesc_nParamPerEvent));
  texDesc_nParamPerEvent.readMode = cudaReadModeElementType;

  cudaCreateTextureObject(&text_nParamPerEvent, &resDesc_nParamPerEvent, &texDesc_nParamPerEvent, nullptr);
  cudaMemcpy(gpu_nParamPerEvent_TF1, cpu_nParamPerEvent_TF1.data(),
             2 * n_events * sizeof(unsigned int), cudaMemcpyHostToDevice);
  struct cudaResourceDesc resDesc_nParamPerEvent_tf1;
  memset(&resDesc_nParamPerEvent_tf1, 0, sizeof(resDesc_nParamPerEvent_tf1));
  resDesc_nParamPerEvent_tf1.resType = cudaResourceTypeLinear;
  resDesc_nParamPerEvent_tf1.res.linear.devPtr = gpu_nParamPerEvent_TF1;
  resDesc_nParamPerEvent_tf1.res.linear.desc = cudaCreateChannelDesc<unsigned int>();
  resDesc_nParamPerEvent_tf1.res.linear.sizeInBytes = 2 * n_events * sizeof(unsigned int);

  struct cudaTextureDesc texDesc_nParamPerEvent_tf1;
  memset(&texDesc_nParamPerEvent_tf1, 0, sizeof(texDesc_nParamPerEvent_tf1));
  texDesc_nParamPerEvent_tf1.readMode = cudaReadModeElementType;

  cudaCreateTextureObject(&text_nParamPerEvent_TF1, &resDesc_nParamPerEvent_tf1, &texDesc_nParamPerEvent_tf1, nullptr);
#endif
}
__global__ void EvalOnGPU_Splines(
    const short int* __restrict__ gpu_paramNo_arr,
    const unsigned int* __restrict__ gpu_nKnots_arr,
    const float* __restrict__ gpu_coeff_many,
    float* __restrict__ gpu_weights,
    const cudaTextureObject_t __restrict__ text_coeff_x) {
  const unsigned int splineNum = (blockIdx.x * blockDim.x + threadIdx.x);

  if (splineNum < d_n_splines) {
    // Parameter this spline responds to, and that parameter's current segment.
    const short int Param = gpu_paramNo_arr[splineNum];
    const short int segment = segment_gpu[Param];
    const unsigned int segment_X = Param * d_spline_size + segment;

    // First {y,b,c,d} coefficient of the current knot.
    const unsigned int CurrentKnotPos = gpu_nKnots_arr[splineNum] * _nCoeff_ + segment * _nCoeff_;

    const float fY = gpu_coeff_many[CurrentKnotPos];
    const float fB = gpu_coeff_many[CurrentKnotPos + 1];
    const float fC = gpu_coeff_many[CurrentKnotPos + 2];
    const float fD = gpu_coeff_many[CurrentKnotPos + 3];

    const float dx = val_gpu[Param] - tex1Dfetch<float>(text_coeff_x, segment_X);

    // Cubic in Horner form, fY + dx*(fB + dx*(fC + dx*fD)), via fused multiply-adds.
    gpu_weights[splineNum] = fmaf(dx, fmaf(dx, fmaf(dx, fD, fC), fB), fY);
  }
}
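// Host-side reference for the nested fmaf above (a sketch, for checking only):
// the weight is the cubic fY + fB*dx + fC*dx^2 + fD*dx^3 in Horner form.
float SplineWeightRef(const float fY, const float fB, const float fC,
                      const float fD, const float dx) {
  return fY + dx * (fB + dx * (fC + dx * fD));
}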
__global__ void EvalOnGPU_TF1(
    const float* __restrict__ gpu_coeffs_tf1,
    const short int* __restrict__ gpu_paramNo_arr_tf1,
    float* __restrict__ gpu_weights_tf1) {
  const unsigned int tf1Num = (blockIdx.x * blockDim.x + threadIdx.x);

  if (tf1Num < d_n_TF1) {
    // Value of the parameter this TF1 responds to.
    const float x = val_gpu[gpu_paramNo_arr_tf1[tf1Num]];

    const unsigned int TF1_Index = tf1Num * _nTF1Coeff_;
    const float a = gpu_coeffs_tf1[TF1_Index];
    const float b = gpu_coeffs_tf1[TF1_Index + 1];

    // Linear response a*x + b in a single fused multiply-add.
    gpu_weights_tf1[tf1Num] = fmaf(a, x, b);
  }
}
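// The TF1 response above is linear. If higher orders were ever needed, the same
// fused multiply-add pattern extends by nesting (a hypothetical cubic sketch,
// not part of this file): a + b*x + c*x^2 + d*x^3.
__device__ float TF1CubicSketch(const float a, const float b, const float c,
                                const float d, const float x) {
  return fmaf(x, fmaf(x, fmaf(x, d, c), b), a);
}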
#ifndef Weight_On_SplineBySpline_Basis
__global__ void EvalOnGPU_TotWeight(
    const float* __restrict__ gpu_weights,
    const float* __restrict__ gpu_weights_tf1,
    float* __restrict__ gpu_total_weights,
    const cudaTextureObject_t __restrict__ text_nParamPerEvent,
    const cudaTextureObject_t __restrict__ text_nParamPerEvent_TF1) {
  const unsigned int EventNum = (blockIdx.x * blockDim.x + threadIdx.x);

  if (EventNum < d_n_events) {
    float local_total_weight = 1.f;

    // Per-event map: {number of parameters, index where they start}.
    const unsigned int EventOffset = 2 * EventNum;

    for (unsigned int id = 0; id < tex1Dfetch<unsigned int>(text_nParamPerEvent, EventOffset); ++id) {
      local_total_weight *= gpu_weights[tex1Dfetch<unsigned int>(text_nParamPerEvent, EventOffset + 1) + id];
    }
    for (unsigned int id = 0; id < tex1Dfetch<unsigned int>(text_nParamPerEvent_TF1, EventOffset); ++id) {
      local_total_weight *= gpu_weights_tf1[tex1Dfetch<unsigned int>(text_nParamPerEvent_TF1, EventOffset + 1) + id];
    }
    gpu_total_weights[EventNum] = local_total_weight;
  }
}
#endif
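// Host-side sketch of the reduction above (hypothetical helper, for clarity):
// each event multiplies together its own contiguous slice of precomputed weights.
float EventProductRef(const float* weights, const unsigned int start,
                      const unsigned int count) {
  float total = 1.f;
  for (unsigned int id = 0; id < count; ++id) total *= weights[start + id];
  return total;
}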
__host__ void SMonolithGPU::RunGPU_SplineMonolith(
#ifdef Weight_On_SplineBySpline_Basis
    float* cpu_weights, float* cpu_weights_tf1,
#else
    float* cpu_total_weights,
#endif
    float* vals, short int* segment,
    const unsigned int h_n_splines, const unsigned int h_n_tf1) {
  dim3 block_size(_BlockSize_), grid_size;
  grid_size.x = (h_n_splines / block_size.x) + 1;
  EvalOnGPU_Splines<<<grid_size, block_size>>>(
      gpu_paramNo_arr, gpu_nKnots_arr, gpu_coeff_many, gpu_weights, text_coeff_x);
  grid_size.x = (h_n_tf1 / block_size.x) + 1;
  EvalOnGPU_TF1<<<grid_size, block_size>>>(
      gpu_coeff_TF1_many, gpu_paramNo_TF1_arr, gpu_weights_tf1);
#ifdef Weight_On_SplineBySpline_Basis
  cudaMemcpy(cpu_weights, gpu_weights, h_n_splines * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(cpu_weights_tf1, gpu_weights_tf1, h_n_tf1 * sizeof(float), cudaMemcpyDeviceToHost);
#else
  grid_size.x = (h_n_events / block_size.x) + 1;
  EvalOnGPU_TotWeight<<<grid_size, block_size>>>(
      gpu_weights, gpu_weights_tf1, gpu_total_weights, text_nParamPerEvent, text_nParamPerEvent_TF1);
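// Note on the launch configuration: (n / block) + 1 always rounds up, but it
// launches one spare block whenever n is an exact multiple of the block size;
// the bounds guards inside the kernels make that harmless. The exact
// ceiling-division idiom would be (a sketch, not the author's code):
// grid_size.x = (h_n_events + block_size.x - 1) / block_size.x;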
  printf("Copied GPU total weights to CPU with SUCCESS (drink more tea)\n");
  printf("Released calculated response from GPU with SUCCESS (drink most tea)\n");
#endif
}
__host__ void SMonolithGPU::CleanupGPU_SplineMonolith(
#ifndef Weight_On_SplineBySpline_Basis
    float *cpu_total_weights
#endif
) {
#ifndef Weight_On_SplineBySpline_Basis
  cudaFreeHost(cpu_total_weights);
  cpu_total_weights = nullptr;
#endif
}

__host__ void SMonolithGPU::CleanupGPU_Segments(short int *segment, float *vals) {
  cudaFreeHost(segment);
  cudaFreeHost(vals);
}
constexpr int _nCoeff_
KS: We store coefficients {y,b,c,d} in one array one by one; this is only to define the number per knot once rather than hard-coding it in multiple places.
constexpr int _nTF1Coeff_
KS: For TF1 we store at most 5 coefficients; we could make it more flexible but for now define it here.
cudaTextureObject_t text_nParamPerEvent_TF1
KS: Map keeping track of how many TF1 parameters apply to each event; we keep two numbers here: {number of parameters, index where they start}.
unsigned int * gpu_nParamPerEvent
KS: GPU map keeping track of how many parameters apply to each event; we keep two numbers here: {number of parameters, index where they start}.
__host__ void CleanupGPU_SplineMonolith(float *cpu_total_weights)
This function deallocates the resources allocated for the separate {x} and {ybcd} arrays and the TF1 arrays.
cudaTextureObject_t text_coeff_x
KS: Textures are L1-cached variables which are well optimised for fetching. Make a texture only for variables that are read-only and frequently fetched.
short int * gpu_paramNo_TF1_arr
CW: GPU array with the parameter number of each TF1 object.
__host__ void InitGPU_Vals(float **vals)
Allocate memory for parameter values.
int h_n_events
Number of events living on CPU.
float * gpu_weights
GPU arrays to hold weight for each spline.
float * gpu_coeff_many
GPU arrays to hold other coefficients.
unsigned int * gpu_nParamPerEvent_TF1
KS: GPU map keeping track of how many TF1 parameters apply to each event; we keep two numbers here: {number of parameters, index where they start}.
cudaTextureObject_t text_nParamPerEvent
KS: Map keeping track of how many parameters apply to each event; we keep two numbers here: {number of parameters, index where they start}.
__host__ void InitGPU_SplineMonolith(float **cpu_total_weights, int n_events, unsigned int total_nknots, unsigned int n_splines, unsigned int n_tf1, int Eve_size)
Allocate memory on gpu for spline monolith.
virtual ~SMonolithGPU()
destructor
__host__ void RunGPU_SplineMonolith(float *cpu_total_weights, float *vals, short int *segment, const unsigned int h_n_splines, const unsigned int h_n_tf1)
Run the GPU code for the separate many arrays, as in separate {x} and {y,b,c,d} arrays. Pass the segment and the parameter values.
float * gpu_coeff_x
KS: GPU arrays to hold X coefficient.
SMonolithGPU()
constructor
short int * gpu_nPoints_arr
GPU arrays to hold number of points.
float * gpu_weights_tf1
GPU arrays to hold weight for each TF1.
__host__ void InitGPU_Segments(short int **segment)
Allocate memory for spline segments.
unsigned int * gpu_nKnots_arr
KS: GPU Number of knots per spline.
int h_n_params
Number of params living on CPU.
float * gpu_coeff_TF1_many
GPU arrays to hold TF1 coefficients.
short int * gpu_paramNo_arr
CW: GPU array with the parameter number of each spline (one entry per spline, not per spline point!).
__host__ void CopyToGPU_SplineMonolith(SplineMonoStruct *cpu_spline_handler, std::vector< float > cpu_many_array_TF1, std::vector< short int > cpu_paramNo_arr_TF1, int n_events, std::vector< unsigned int > cpu_nParamPerEvent, std::vector< unsigned int > cpu_nParamPerEvent_TF1, int n_params, unsigned int n_splines, short int spline_size, unsigned int total_nknots, unsigned int n_tf1)
Copies data from CPU to GPU for the spline monolith.
float * gpu_total_weights
GPU arrays to hold weight for event.
__host__ void CleanupGPU_Segments(short int *segment, float *vals)
Clean up pinned variables at CPU.
__global__ void EvalOnGPU_TotWeight(const float *__restrict__ gpu_weights, const float *__restrict__ gpu_weights_tf1, float *__restrict__ gpu_total_weights, const cudaTextureObject_t __restrict__ text_nParamPerEvent, const cudaTextureObject_t __restrict__ text_nParamPerEvent_TF1)
KS: Evaluate the total spline event weight on the GPU; in most cases the GPU is faster at this than copying every individual weight back and multiplying on the CPU.
__global__ void EvalOnGPU_Splines(const short int *__restrict__ gpu_paramNo_arr, const unsigned int *__restrict__ gpu_nKnots_arr, const float *__restrict__ gpu_coeff_many, float *__restrict__ gpu_weights, const cudaTextureObject_t __restrict__ text_coeff_x)
Evaluate the spline on the GPU using one {y,b,c,d} array and one {x} array. Should be most efficient at cache hitting and memory coalescing.
__device__ __constant__ unsigned int d_n_TF1
Number of TF1s living on GPU.
__device__ __constant__ unsigned int d_n_splines
Number of splines living on GPU.
__host__ void SynchroniseSplines()
Make sure all Cuda threads finished execution.
__global__ void EvalOnGPU_TF1(const float *__restrict__ gpu_coeffs_tf1, const short int *__restrict__ gpu_paramNo_arr_tf1, float *__restrict__ gpu_weights_tf1)
Evaluate the TF1 on the GPU using a linear function a*x + b.
__device__ __constant__ short int segment_gpu[NSplines_GPU]
CW: Current segment of each spline parameter; constant memory must be sized at compile time.
__device__ __constant__ float val_gpu[NSplines_GPU]
CW: Constant memory needs to be hard-coded at compile time. Could make this texture memory instead.
__device__ __constant__ int d_n_events
Number of events living on GPU.
__device__ __constant__ short int d_spline_size
Size of splines living on GPU.
MaCh3 event-by-event cross-section spline code.
void checkGpuMem()
KS: Get some fancy info about VRAM usage.
void PrintNdevices()
KS: Get some fancy info about GPU.
#define _BlockSize_
KS: Needed for shared memory; there is a way to use dynamic shared memory but I am lazy right now.
KS: Struct storing information for spline monolith.
std::vector< unsigned int > nKnots_arr
KS: CPU Number of knots per spline.
std::vector< float > coeff_x
KS: CPU arrays to hold X coefficient.
std::vector< float > coeff_many
CPU arrays to hold other coefficients.
std::vector< short int > paramNo_arr
CW: CPU array with the parameter number of each spline (one entry per spline, not per spline point!).
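// Putting the members above together: a host-side call-order sketch assembled
// from the documented signatures. This is a sketch assuming the
// non-spline-by-spline build (Weight_On_SplineBySpline_Basis undefined), not
// verbatim MaCh3 code.
#include <vector>

void MonolithFlowSketch(SMonolithGPU& gpu, SplineMonoStruct* cpu_spline_handler,
                        std::vector<float> cpu_many_array_TF1,
                        std::vector<short int> cpu_paramNo_arr_TF1,
                        std::vector<unsigned int> cpu_nParamPerEvent,
                        std::vector<unsigned int> cpu_nParamPerEvent_TF1,
                        int n_events, int n_params, unsigned int n_splines,
                        short int spline_size, unsigned int total_nknots,
                        unsigned int n_tf1, int Eve_size) {
  float* cpu_total_weights = nullptr;
  short int* segment = nullptr;
  float* vals = nullptr;

  // 1) Allocate device buffers plus pinned host buffers.
  gpu.InitGPU_SplineMonolith(&cpu_total_weights, n_events, total_nknots, n_splines, n_tf1, Eve_size);
  gpu.InitGPU_Segments(&segment);
  gpu.InitGPU_Vals(&vals);

  // 2) One-off upload of coefficients, parameter maps and per-event maps.
  gpu.CopyToGPU_SplineMonolith(cpu_spline_handler, cpu_many_array_TF1, cpu_paramNo_arr_TF1,
                               n_events, cpu_nParamPerEvent, cpu_nParamPerEvent_TF1,
                               n_params, n_splines, spline_size, total_nknots, n_tf1);

  // 3) Each iteration: refresh vals/segment for the proposed parameters, then
  //    evaluate every spline, TF1 and total event weight on the GPU.
  gpu.RunGPU_SplineMonolith(cpu_total_weights, vals, segment, n_splines, n_tf1);

  // 4) Tear down device and pinned host memory.
  gpu.CleanupGPU_SplineMonolith(cpu_total_weights);
  gpu.CleanupGPU_Segments(segment, vals);
}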