
#include <iostream>

// GPU kernel: adds the two integers pointed to by a and b
// and stores the sum in the location pointed to by c
__global__ void add(int *a, int *b, int *c){
 *c = *a + *b;
}

int main(void){

 int a, b, c;
 int *dev_a, *dev_b, *dev_c;
 int size = sizeof(int);
 
 //  allocate device copies of a, b and c
 cudaMalloc ( (void**) &dev_a, size);
 cudaMalloc ( (void**) &dev_b, size);
 cudaMalloc ( (void**) &dev_c, size);
 
 a=2; b=7;
 //  copy inputs to device
 cudaMemcpy (dev_a, &a, size, cudaMemcpyHostToDevice);
 cudaMemcpy (dev_b, &b, size, cudaMemcpyHostToDevice);
 
 // launch add() kernel on GPU, passing parameters
 add <<< 1, 1 >>> (dev_a, dev_b, dev_c);
 
 // copy device result back to host
 cudaMemcpy (&c, dev_c, size, cudaMemcpyDeviceToHost);
 std::cout << a << "+" << b << "=" << c << std::endl;
 
 cudaFree ( dev_a ); cudaFree ( dev_b ); cudaFree ( dev_c );

}

Note that the kernel launch is asynchronous, but the cudaMemcpy call that follows it blocks until the kernel has finished, so the result is ready on the host before it is printed.

To build the program, use the command below, which will create an executable named add.

$ nvcc add.cu -o add
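For brevity, the example above does not check the return values of the CUDA runtime calls. Each of cudaMalloc, cudaMemcpy and cudaFree returns a cudaError_t, and it is good practice to check it. A minimal sketch of such a check is shown below; the CUDA_CHECK macro is a helper of our own, not part of the CUDA API.

#include <iostream>
#include <cstdlib>   // for std::exit and EXIT_FAILURE

// Helper macro (our own convention): abort with a readable
// message if a CUDA runtime call does not return cudaSuccess.
#define CUDA_CHECK(call)                                           \
 do {                                                              \
   cudaError_t err = (call);                                       \
   if (err != cudaSuccess) {                                       \
     std::cerr << "CUDA error: " << cudaGetErrorString(err)        \
               << std::endl;                                       \
     std::exit(EXIT_FAILURE);                                      \
   }                                                               \
 } while (0)

// usage, e.g.:
//   CUDA_CHECK( cudaMalloc((void**)&dev_a, size) );
//   CUDA_CHECK( cudaMemcpy(dev_a, &a, size, cudaMemcpyHostToDevice) );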

To run the program, first create a Slurm job script called gpu_job.sh. Be sure to replace def-someuser with your specific account (see accounts and projects). For various ways to schedule jobs with GPUs, see using GPUs with Slurm.

File: gpu_job.sh

#!/bin/bash
#SBATCH --account=def-someuser
#SBATCH --gres=gpu:1              # Number of GPUs (per node)
#SBATCH --mem=400M                # memory (per node)
#SBATCH --time=0-00:10            # time (DD-HH:MM)
./add                             # name of your program
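Submit the job with sbatch. When the job finishes, the program's output appears in the job's output file, which Slurm names slurm-<jobid>.out by default (the job ID below is only an example):

$ sbatch gpu_job.sh
Submitted batch job 123456
$ cat slurm-123456.out
2+7=9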