
Contents

Reference Code

Results


Reference Code

import torch
import time
import matplotlib.pyplot as plt

# Initialize the device and the data sizes to benchmark
device = torch.device('cuda')
data_sizes = [100, 1000, 5000, 10000, 50000, 100000, 300000, 500000]  # number of rows per test tensor
results = {'Shared to Pinned': [], 'Pinned to Shared': [],
           'GPU to Pinned': [], 'Pinned to GPU': [],
           'GPU to Shared': [], 'Shared to GPU': []}

# Benchmark each data size.
# NOTE: copies issued with non_blocking=True that involve pinned host memory and
# the GPU can return before the transfer completes, so the elapsed times recorded
# below capture only how long the copy call takes to return; the
# torch.cuda.synchronize() after each measurement waits for any outstanding
# asynchronous copy before the next test starts.
for size in data_sizes:
    shared_tensor = torch.randn((size, 1000), dtype=torch.float32, device='cpu').share_memory_()
    pinned_tensor = torch.randn((size, 1000), dtype=torch.float32, device='cpu').pin_memory()
    gpu_tensor = torch.randn((size, 1000), dtype=torch.float32, device=device)

    # Shared Memory => Pinned Memory
    start_time = time.time()
    pinned_tensor.copy_(shared_tensor, non_blocking=True)
    end_time = time.time()
    results['Shared to Pinned'].append(end_time - start_time)
    torch.cuda.synchronize()

    # Pinned Memory => Shared Memory
    start_time = time.time()
    shared_tensor.copy_(pinned_tensor, non_blocking=True)
    end_time = time.time()
    results['Pinned to Shared'].append(end_time - start_time)
    torch.cuda.synchronize()

    # GPU Memory => Pinned Memory
    start_time = time.time()
    pinned_tensor.copy_(gpu_tensor, non_blocking=True)
    end_time = time.time()
    results['GPU to Pinned'].append(end_time - start_time)
    torch.cuda.synchronize()

    # Pinned Memory => GPU Memory
    start_time = time.time()
    gpu_tensor.copy_(pinned_tensor, non_blocking=True)
    end_time = time.time()
    results['Pinned to GPU'].append(end_time - start_time)
    torch.cuda.synchronize()

    # GPU Memory => Shared Memory
    start_time = time.time()
    shared_tensor.copy_(gpu_tensor, non_blocking=True)
    end_time = time.time()
    results['GPU to Shared'].append(end_time - start_time)
    torch.cuda.synchronize()

    # Shared Memory => GPU Memory
    start_time = time.time()
    gpu_tensor.copy_(shared_tensor, non_blocking=True)
    end_time = time.time()
    results['Shared to GPU'].append(end_time - start_time)
    torch.cuda.synchronize()

# Plot the results
plt.figure(figsize=(10, 6))
for key, values in results.items():
    plt.plot(data_sizes, values, marker='o', label=key)

plt.xlabel('Data Size (rows)')
plt.ylabel('Time (seconds)')
plt.title('Memory Copy Time for Different Data Sizes')
plt.legend()
plt.grid(True)
plt.show()
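
Because the copies above use non_blocking=True, the wall-clock timings for the pinned <=> GPU directions mostly capture how quickly copy_() returns rather than how long the transfer actually takes. One way to time the transfer itself, for copies that run through the GPU, is CUDA events. The sketch below is only a minimal illustration; the helper name time_copy_with_events, the repeat count, and the tensor shapes are assumptions, not part of the original benchmark.

import torch

def time_copy_with_events(dst, src, repeats=10):
    """Average time (ms) for dst.copy_(src) measured with CUDA events."""
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    torch.cuda.synchronize()            # make sure nothing else is in flight
    start.record()
    for _ in range(repeats):
        dst.copy_(src, non_blocking=True)
    end.record()
    torch.cuda.synchronize()            # wait for the async copies to finish
    return start.elapsed_time(end) / repeats

# Example: pinned host memory => GPU memory
pinned = torch.randn((10000, 1000), pin_memory=True)
gpu = torch.empty((10000, 1000), device='cuda')
print(f'Pinned to GPU: {time_copy_with_events(gpu, pinned):.3f} ms')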

Results

[Figure: Memory Copy Time for Different Data Sizes — copy time vs. number of rows for each of the six transfer directions]

So in these measurements, copying from shared (pageable) memory to the GPU is the slowest path, while transfers between pinned memory and the GPU appear fastest, largely because those non_blocking copies return almost immediately (the transfer itself runs asynchronously). This gives a rough sense of which path to prefer in practice.
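
In everyday training code, the usual way to benefit from this is to let the DataLoader return batches in pinned host memory (pin_memory=True) and move them to the GPU with non_blocking=True, so the host-to-device copy can overlap with GPU work. A minimal sketch, assuming a toy TensorDataset stands in for a real dataset:

import torch
from torch.utils.data import DataLoader, TensorDataset

device = torch.device('cuda')

# Toy dataset used only for illustration.
dataset = TensorDataset(torch.randn(10000, 1000), torch.randint(0, 10, (10000,)))

# pin_memory=True makes the DataLoader collate batches into pinned host buffers,
# which is what allows the non_blocking copies below to be asynchronous.
loader = DataLoader(dataset, batch_size=256, num_workers=2, pin_memory=True)

for x, y in loader:
    x = x.to(device, non_blocking=True)   # async host-to-device copy from pinned memory
    y = y.to(device, non_blocking=True)
    # ... run the forward/backward pass on the GPU here ...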