• Determine the number of GPUs available to the application
  • Activate any of the available GPUs
  • Allocate memory on multiple GPUs
  • Copy data to and from memory on multiple GPUs
  • Launch kernels on multiple GPUs

Getting Information About Multiple GPUs

To programmatically obtain the number of available GPUs, use cudaGetDeviceCount:

int num_gpus; // cudaGetDeviceCount expects a pointer to int.
cudaGetDeviceCount(&num_gpus);

To programmatically obtain the currently active GPU, use cudaGetDevice:

int device; // cudaGetDevice expects a pointer to int.
cudaGetDevice(&device); // `device` is now a 0-based index of the current GPU.

Setting the Current GPU

Each host thread has exactly one GPU device active at a time. To make a specific GPU active, use cudaSetDevice with the desired GPU's 0-based index:

cudaSetDevice(0);

Looping Over Available GPUs

A common pattern is to loop over the available GPUs and perform work for each one:

int num_gpus;
cudaGetDeviceCount(&num_gpus);

for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {

    cudaSetDevice(gpu);
    // Perform operations for this GPU.
}

Chunking Data for Multiple GPUs

Just as with multiple non-default streams, each of several GPUs can work on a chunk of the data. Here we create and use an array of data pointers in order to allocate memory on each available GPU:

int num_gpus;
cudaGetDeviceCount(&num_gpus);

const uint64_t num_entries = 1UL << 26;
const uint64_t chunk_size = sdiv(num_entries, num_gpus);

uint64_t *data_gpu[num_gpus]; // One pointer for each GPU.

for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {

    cudaSetDevice(gpu);

    const uint64_t lower = chunk_size*gpu;
    const uint64_t upper = min(lower+chunk_size, num_entries);
    const uint64_t width = upper-lower;

    cudaMalloc(&data_gpu[gpu], sizeof(uint64_t)*width); // Allocate chunk of data for current GPU.
}
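
Here sdiv is a round-up (ceiling) division helper, presumably provided by the included helpers.cuh; it ensures the chunks cover every entry even when num_entries does not divide evenly by num_gpus. A minimal sketch of what such a helper might look like (the actual definition lives in helpers.cuh):

// Sketch of a ceiling-division helper like `sdiv` (see helpers.cuh for the real one).
uint64_t sdiv(uint64_t a, uint64_t b) {
    return (a + b - 1) / b; // Round up so the last chunk absorbs any remainder.
}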

Copying Data to and from Multiple GPUs
Using the same looping and chunking technique, we can copy data to and from multiple GPUs:

// ...Assume data has been allocated on host and for each GPU

for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {

    cudaSetDevice(gpu);

    const uint64_t lower = chunk_size*gpu;
    const uint64_t upper = min(lower+chunk_size, num_entries);
    const uint64_t width = upper-lower;

    // Note use of `cudaMemcpy` and not `cudaMemcpyAsync` since we are not
    // presently using non-default streams.
    cudaMemcpy(data_gpu[gpu], data_cpu+lower, 
           sizeof(uint64_t)*width, cudaMemcpyHostToDevice); // ...or cudaMemcpyDeviceToHost
}

Launching Kernels on Multiple GPUs

Using the same looping and chunking technique, we can launch kernels to work on chunks of data on multiple GPUs:

// ...Assume data has been allocated on host and for each GPU

for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {

    cudaSetDevice(gpu);

    const uint64_t lower = chunk_size*gpu;
    const uint64_t upper = min(lower+chunk_size, num_entries);
    const uint64_t width = upper-lower;

    kernel<<<grid, block>>>(data_gpu[gpu], width); // Pass chunk of data for current GPU to work on.
}

Above, a depth-first approach is used, handing each GPU its portion of the work in turn. In some scenarios, especially when the amount of data is very large, a breadth-first approach can make more sense. This change of approach does not require any additional CUDA knowledge; this Stack Overflow answer provides example CUDA code for both the depth-first and breadth-first approaches.
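
As a rough sketch of a breadth-first issue order (not the exact code from the Stack Overflow answer), each phase is issued for every GPU before moving on to the next phase. cudaMemcpyAsync on pinned host memory is assumed here so that issuing a copy does not block the loop; kernel, grid, and block follow the earlier snippets:

// Breadth-first: issue all host-to-device copies, then all kernels, then all
// device-to-host copies, so every GPU has work in flight as early as possible.
for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
    cudaSetDevice(gpu);
    const uint64_t lower = chunk_size*gpu;
    const uint64_t width = min(lower+chunk_size, num_entries)-lower;
    cudaMemcpyAsync(data_gpu[gpu], data_cpu+lower,
                    sizeof(uint64_t)*width, cudaMemcpyHostToDevice);
}

for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
    cudaSetDevice(gpu);
    const uint64_t lower = chunk_size*gpu;
    const uint64_t width = min(lower+chunk_size, num_entries)-lower;
    kernel<<<grid, block>>>(data_gpu[gpu], width);
}

for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
    cudaSetDevice(gpu);
    const uint64_t lower = chunk_size*gpu;
    const uint64_t width = min(lower+chunk_size, num_entries)-lower;
    cudaMemcpyAsync(data_cpu+lower, data_gpu[gpu],
                    sizeof(uint64_t)*width, cudaMemcpyDeviceToHost);
}

// Wait for all GPUs to finish before using the results on the host.
for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
    cudaSetDevice(gpu);
    cudaDeviceSynchronize();
}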

For peer-to-peer memory transfers between multiple GPUs, and for using multiple GPUs across multiple nodes, see this supercomputing conference presentation.
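
That presentation is the reference for those topics; as a minimal sketch of the peer-to-peer runtime API (the device indices 0 and 1 and the data_gpu/width variables are only placeholders based on the snippets above):

// Check whether GPU 0 can directly access GPU 1's memory, and if so enable it.
int can_access = 0;
cudaDeviceCanAccessPeer(&can_access, 0, 1);

if (can_access) {
    cudaSetDevice(0);
    cudaDeviceEnablePeerAccess(1, 0); // The flags argument must currently be 0.

    // Copy `width` entries directly from GPU 1's buffer into GPU 0's buffer.
    cudaMemcpyPeer(data_gpu[0], 0, data_gpu[1], 1, sizeof(uint64_t)*width);
}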

Example: Multiple GPUs with the Default Stream

#include <cstdint>
#include <iostream>
#include "helpers.cuh"
#include "encryption.cuh"

void encrypt_cpu(uint64_t * data, uint64_t num_entries, 
                 uint64_t num_iters, bool parallel=true) {

    #pragma omp parallel for if (parallel)
    for (uint64_t entry = 0; entry < num_entries; entry++)
        data[entry] = permute64(entry, num_iters);
}

__global__ 
void decrypt_gpu(uint64_t * data, uint64_t num_entries, 
                 uint64_t num_iters) {

    const uint64_t thrdID = blockIdx.x*blockDim.x+threadIdx.x;
    const uint64_t stride = blockDim.x*gridDim.x;

    for (uint64_t entry = thrdID; entry < num_entries; entry += stride)
        data[entry] = unpermute64(data[entry], num_iters);
}

bool check_result_cpu(uint64_t * data, uint64_t num_entries,
                      bool parallel=true) {

    uint64_t counter = 0;

    #pragma omp parallel for reduction(+: counter) if (parallel)
    for (uint64_t entry = 0; entry < num_entries; entry++)
        counter += data[entry] == entry;

    return counter == num_entries;
}

int main (int argc, char * argv[]) {

    Timer timer;
    Timer overall;

    const uint64_t num_entries = 1UL << 26;
    const uint64_t num_iters = 1UL << 10;
    const bool openmp = true;

    timer.start();
    uint64_t * data_cpu;
    cudaMallocHost(&data_cpu, sizeof(uint64_t)*num_entries);
    
    timer.stop("allocate memory");
    check_last_error();

    timer.start();
    encrypt_cpu(data_cpu, num_entries, num_iters, openmp);
    timer.stop("encrypt data on CPU");

    overall.start();
    timer.start();
    
    int num_gpus;
    cudaGetDeviceCount(&num_gpus);
    const uint64_t chunk_size = sdiv(num_entries, num_gpus);
    uint64_t *data_gpu[num_gpus]; // One pointer for each GPU.
    for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {

        cudaSetDevice(gpu);

        const uint64_t lower = chunk_size*gpu;
        const uint64_t upper = min(lower+chunk_size, num_entries);
        const uint64_t width = upper-lower;

        cudaMalloc(&data_gpu[gpu], sizeof(uint64_t)*width); // Allocate chunk of data for current GPU.
        
        cudaMemcpy(data_gpu[gpu], data_cpu+lower, 
                   sizeof(uint64_t)*width, cudaMemcpyHostToDevice); // Copy this GPU's chunk to the device.
       
    }
    
    for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
        cudaSetDevice(gpu);
        const uint64_t lower = chunk_size*gpu;
        const uint64_t upper = min(lower+chunk_size, num_entries);
        const uint64_t width = upper-lower;
        decrypt_gpu<<<32*80, 64>>>(data_gpu[gpu], width, num_iters); // Pass chunk of data for current GPU to work on.
    }
    
    for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
        cudaSetDevice(gpu);
        const uint64_t lower = chunk_size*gpu;
        const uint64_t upper = min(lower+chunk_size, num_entries);
        const uint64_t width = upper-lower;
        cudaMemcpy(data_cpu+lower, data_gpu[gpu], 
               sizeof(uint64_t)*width, cudaMemcpyDeviceToHost);
    }
  
   
    timer.stop("multi GPU times");
    check_last_error();
  
    overall.stop("total time on GPU");
    check_last_error();

    timer.start();
    const bool success = check_result_cpu(data_cpu, num_entries, openmp);
    std::cout << "STATUS: test " 
              << ( success ? "passed" : "failed")
              << std::endl;
    timer.stop("checking result on CPU");

    timer.start();
    cudaFreeHost(data_cpu);
    for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
        cudaSetDevice(gpu);
        cudaFree(data_gpu[gpu]);
    }
    timer.stop("free memory");
    check_last_error();
}

In the code above, notice that the memory transfers do not overlap. Why is that?
The code uses neither non-default streams nor cudaMemcpyAsync for its memory copies, so the copies and kernel launches block one another.

Overlapping Data Copies and Computation on Multiple GPUs

  • How streams are associated with each GPU device
  • How to create non-default streams for multiple GPUs
  • How to overlap copy and compute on multiple GPUs

Each GPU has its own default stream. We can create, use, and destroy non-default streams for whichever GPU device is currently active. Just be sure never to launch a kernel in a stream that is not associated with the currently active GPU.
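
As a minimal illustration of that rule (streams here is the 2D array created in the next snippet, and kernel, grid, and block follow the earlier examples):

// Make GPU `gpu` the active device first...
cudaSetDevice(gpu);

// ...then launch only into streams that were created while `gpu` was active.
kernel<<<grid, block, 0, streams[gpu][stream]>>>(data_gpu[gpu], width);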

Creating Multiple Non-Default Streams for Multiple GPUs
When using multiple non-default streams with multiple GPUs, rather than simply storing the streams in a single array as before, we store them in a 2D array, where each row contains the streams for one GPU:

cudaStream_t streams[num_gpus][num_streams]; // 2D array containing number of streams for each GPU.

// For each available GPU...
for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
    // ...set as active device...
    cudaSetDevice(gpu);
    for (uint64_t stream = 0; stream < num_streams; stream++)
        // ...create and store this GPU's streams.
        cudaStreamCreate(&streams[gpu][stream]);
}

Chunk Sizes for Multiple Streams on Multiple GPUs
Global data indexing can get especially tricky when using multiple non-default streams on multiple GPUs. To help with the indexing, we can define separate chunk sizes for an individual stream and for an entire GPU:

// Each stream needs num_entries/num_gpus/num_streams data. We use round up division for
// reasons previously discussed.
const uint64_t stream_chunk_size = sdiv(sdiv(num_entries, num_gpus), num_streams);

// It will also be helpful to have handy the chunk size for an entire GPU.
const uint64_t gpu_chunk_size = stream_chunk_size*num_streams;
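
With these two chunk sizes, a stream's global index bounds can be derived as in the sketch below; the complete example later computes per-stream sizes in a slightly different but equivalent way:

// For GPU `gpu` and stream `stream` (loop indices as in the earlier snippets):
const uint64_t stream_offset = stream_chunk_size*stream;                     // Offset within this GPU's chunk.
const uint64_t lower = min(gpu_chunk_size*gpu + stream_offset, num_entries); // Clamp so trailing streams on the
const uint64_t upper = min(lower+stream_chunk_size, num_entries);            // last GPU simply get width 0.
const uint64_t width = upper-lower;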

Allocating Memory for Multiple Streams on Multiple GPUs
GPU memory is not allocated per stream, so the allocation here looks much like the earlier multi-GPU work; we just need to make sure the chunk size we use is the one for an entire GPU rather than for one of its streams:

// For each GPU...
for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {

    // ...set device as active...
    cudaSetDevice(gpu);

    // ...use a GPU chunk's worth of data to calculate indices and width...
    const uint64_t lower = gpu_chunk_size*gpu;
    const uint64_t upper = min(lower+gpu_chunk_size, num_entries);
    const uint64_t width = upper-lower;

    // ...allocate data.
    cudaMalloc(&data_gpu[gpu], sizeof(uint64_t)*width);
}
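
With memory allocated per GPU, overlap comes from issuing each stream's host-to-device copy, kernel launch, and device-to-host copy into that stream. A minimal sketch using the chunk sizes and indexing shown above (kernel, grid, and block again follow the earlier snippets):

for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
    cudaSetDevice(gpu);

    for (uint64_t stream = 0; stream < num_streams; stream++) {

        // Index bounds for this stream's chunk (see the indexing sketch above).
        const uint64_t stream_offset = stream_chunk_size*stream;
        const uint64_t lower = min(gpu_chunk_size*gpu + stream_offset, num_entries);
        const uint64_t upper = min(lower+stream_chunk_size, num_entries);
        const uint64_t width = upper-lower;

        // Copy in, compute, and copy out, all in this GPU's non-default stream,
        // so chunks in different streams (and on different GPUs) can overlap.
        cudaMemcpyAsync(data_gpu[gpu]+stream_offset, data_cpu+lower,
                        sizeof(uint64_t)*width, cudaMemcpyHostToDevice,
                        streams[gpu][stream]);

        kernel<<<grid, block, 0, streams[gpu][stream]>>>(data_gpu[gpu]+stream_offset, width);

        cudaMemcpyAsync(data_cpu+lower, data_gpu[gpu]+stream_offset,
                        sizeof(uint64_t)*width, cudaMemcpyDeviceToHost,
                        streams[gpu][stream]);
    }
}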

Example: Overlapping Copy and Compute with Multiple Streams on Multiple GPUs

#include <cstdint>
#include <iostream>
#include "helpers.cuh"
#include "encryption.cuh"

void encrypt_cpu(uint64_t * data, uint64_t num_entries, 
                 uint64_t num_iters, bool parallel=true) {

    #pragma omp parallel for if (parallel)
    for (uint64_t entry = 0; entry < num_entries; entry++)
        data[entry] = permute64(entry, num_iters);
}

__global__ 
void decrypt_gpu(uint64_t * data, uint64_t num_entries, 
                 uint64_t num_iters) {

    const uint64_t thrdID = blockIdx.x*blockDim.x+threadIdx.x;
    const uint64_t stride = blockDim.x*gridDim.x;

    for (uint64_t entry = thrdID; entry < num_entries; entry += stride)
        data[entry] = unpermute64(data[entry], num_iters);
}

bool check_result_cpu(uint64_t * data, uint64_t num_entries,
                      bool parallel=true) {

    uint64_t counter = 0;

    #pragma omp parallel for reduction(+: counter) if (parallel)
    for (uint64_t entry = 0; entry < num_entries; entry++)
        counter += data[entry] == entry;

    return counter == num_entries;
}

int main (int argc, char * argv[]) {

    Timer timer;
    Timer overall;

    const uint64_t num_entries = 1UL << 26;
    const uint64_t num_iters = 1UL << 10;
    const bool openmp = true;

    timer.start();
    uint64_t * data_cpu;
    cudaMallocHost(&data_cpu, sizeof(uint64_t)*num_entries);
    timer.stop("allocate memory");
    check_last_error();

    timer.start();
    encrypt_cpu(data_cpu, num_entries, num_iters, openmp);
    timer.stop("encrypt data on CPU");

    
    int num_gpus;
    cudaGetDeviceCount(&num_gpus);
    uint64_t num_streams = 32;
    uint64_t *data_gpu[num_gpus];
    cudaStream_t streams[num_gpus][num_streams];
    
    uint64_t gpu_chunk_size = sdiv(num_entries, num_gpus);
    
    overall.start();
        
    for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {

        cudaSetDevice(gpu);
        
        for (uint64_t s = 0; s < num_streams; s++) {
            cudaStreamCreate(&streams[gpu][s]);
        }
        
        uint64_t gpu_lower = gpu_chunk_size* gpu;
        
        uint64_t gpu_upper = min(gpu_lower+gpu_chunk_size, num_entries);
        
        uint64_t gpu_width = gpu_upper-gpu_lower;
        
        cudaMalloc(&data_gpu[gpu], sizeof(uint64_t)*gpu_width);      
    }
    
    
    for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
        
        cudaSetDevice(gpu);
        
        uint64_t gpu_lower = gpu_chunk_size * gpu;
        
        uint64_t gpu_upper = min(gpu_lower+gpu_chunk_size, num_entries);
        
        uint64_t gpu_width = gpu_upper - gpu_lower;
        
        
        uint64_t s_chunk_size = sdiv(gpu_width, num_streams);
        
        for (uint64_t s = 0; s < num_streams; s++) {
            uint64_t s_offset = s * s_chunk_size;
            uint64_t s_lower = gpu_lower + s_offset;
            uint64_t s_upper = min(s_lower + s_chunk_size, gpu_upper);
            uint64_t s_width = s_upper - s_lower;
       
            cudaMemcpyAsync(data_gpu[gpu] + s_offset, data_cpu+s_lower, 
                   sizeof(uint64_t)*s_width, cudaMemcpyHostToDevice, streams[gpu][s]);
            
            
            decrypt_gpu<<<80*32, 64, 0, streams[gpu][s]>>>(data_gpu[gpu]+s_offset, s_width, num_iters);
            
            
            cudaMemcpyAsync(data_cpu+s_lower, data_gpu[gpu] + s_offset, 
               sizeof(uint64_t)*s_width, cudaMemcpyDeviceToHost, streams[gpu][s]);
        }
    }
    
    check_last_error();
    
    for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
        cudaSetDevice(gpu);
        for (uint64_t s = 0; s < num_streams; s++) {
            cudaStreamSynchronize(streams[gpu][s]);
        }
    }
    
    for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
        cudaSetDevice(gpu);
        for (uint64_t s = 0; s < num_streams; s++) {
           cudaStreamDestroy(streams[gpu][s]);
        }
    }

    overall.stop("total time on GPU");
    check_last_error();

    timer.start();
    const bool success = check_result_cpu(data_cpu, num_entries, openmp);
    std::cout << "STATUS: test " 
              << ( success ? "passed" : "failed")
              << std::endl;
    timer.stop("checking result on CPU");

    timer.start();
    cudaFreeHost(data_cpu);
    for (uint64_t gpu = 0; gpu < num_gpus; gpu++) {
        cudaSetDevice(gpu);
        cudaFree(data_gpu[gpu]);
    }
    timer.stop("free memory");
    check_last_error();
}