Apache Arrow (C++)
A columnar in-memory analytics layer designed to accelerate big data.
cuda_context.h
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#ifndef ARROW_GPU_CUDA_CONTEXT_H
#define ARROW_GPU_CUDA_CONTEXT_H

#include <cstdint>
#include <memory>

#include "arrow/status.h"
#include "arrow/util/visibility.h"

#include "arrow/gpu/cuda_memory.h"

namespace arrow {
namespace gpu {

// Forward declaration
class CudaContext;

/// \brief Manager for CUDA devices and the driver contexts associated with them
class ARROW_EXPORT CudaDeviceManager {
 public:
  static Status GetInstance(CudaDeviceManager** manager);

  /// \brief Get the shared CUDA driver context for the given device number
  Status GetContext(int gpu_number, std::shared_ptr<CudaContext>* ctx);

  /// \brief Create a new CUDA driver context for the given device number
  Status CreateNewContext(int gpu_number, std::shared_ptr<CudaContext>* ctx);

  /// \brief Allocate page-locked (pinned) host memory
  Status AllocateHost(int64_t nbytes, std::shared_ptr<CudaHostBuffer>* buffer);

  /// \brief Free host memory previously obtained from AllocateHost
  Status FreeHost(uint8_t* data, int64_t nbytes);

  int num_devices() const;

 private:
  static std::unique_ptr<CudaDeviceManager> instance_;

  class CudaDeviceManagerImpl;
  std::unique_ptr<CudaDeviceManagerImpl> impl_;

  friend CudaContext;
};

struct ARROW_EXPORT CudaDeviceInfo {};

/// \brief Friendlier interface to the CUDA driver API
class ARROW_EXPORT CudaContext : public std::enable_shared_from_this<CudaContext> {
 public:
  ~CudaContext();

  Status Close();

  /// \brief Allocate nbytes of CUDA device memory in this context
  Status Allocate(int64_t nbytes, std::shared_ptr<CudaBuffer>* out);

  /// \brief Open an existing CUDA IPC memory handle exported by another process
  Status OpenIpcBuffer(const CudaIpcMemHandle& ipc_handle,
                       std::shared_ptr<CudaBuffer>* buffer);

  /// \brief Total number of bytes currently allocated in this context
  int64_t bytes_allocated() const;

 private:
  CudaContext();

  Status ExportIpcBuffer(uint8_t* data, std::unique_ptr<CudaIpcMemHandle>* handle);
  Status CopyHostToDevice(uint8_t* dst, const uint8_t* src, int64_t nbytes);
  Status CopyDeviceToHost(uint8_t* dst, const uint8_t* src, int64_t nbytes);
  Status Free(uint8_t* device_ptr, int64_t nbytes);

  class CudaContextImpl;
  std::unique_ptr<CudaContextImpl> impl_;

  friend CudaBuffer;
  friend CudaBufferReader;
  friend CudaBufferWriter;
  friend CudaDeviceManager::CudaDeviceManagerImpl;
};

}  // namespace gpu
}  // namespace arrow

#endif  // ARROW_GPU_CUDA_CONTEXT_H
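The following is a minimal usage sketch, not part of the header above: it assumes the declarations shown here, the arrow::Status methods ok() and ToString(), and a program linked against the Arrow GPU library with a CUDA-capable device present. It obtains the CudaDeviceManager singleton, acquires the shared context for device 0, and allocates a device buffer plus pinned host memory. The buffer sizes and program structure are illustrative only.

// Usage sketch for the CudaDeviceManager / CudaContext API declared above.
// Assumes a CUDA-capable device and the Arrow GPU library at link time.
#include <cstdint>
#include <iostream>
#include <memory>

#include "arrow/gpu/cuda_context.h"
#include "arrow/status.h"

int main() {
  // Obtain the process-wide device manager singleton.
  arrow::gpu::CudaDeviceManager* manager = nullptr;
  arrow::Status st = arrow::gpu::CudaDeviceManager::GetInstance(&manager);
  if (!st.ok()) {
    std::cerr << "Could not get device manager: " << st.ToString() << std::endl;
    return 1;
  }
  std::cout << "CUDA devices available: " << manager->num_devices() << std::endl;

  // Shared CUDA driver context for device 0.
  std::shared_ptr<arrow::gpu::CudaContext> context;
  st = manager->GetContext(0, &context);
  if (!st.ok()) {
    std::cerr << "Could not get context: " << st.ToString() << std::endl;
    return 1;
  }

  // Allocate 1 MiB of device memory, owned by a CudaBuffer.
  std::shared_ptr<arrow::gpu::CudaBuffer> device_buffer;
  st = context->Allocate(1 << 20, &device_buffer);
  if (!st.ok()) {
    std::cerr << "Device allocation failed: " << st.ToString() << std::endl;
    return 1;
  }
  std::cout << "Bytes allocated in context: " << context->bytes_allocated() << std::endl;

  // Allocate 1 MiB of page-locked host memory for host<->device transfers.
  std::shared_ptr<arrow::gpu::CudaHostBuffer> host_buffer;
  st = manager->AllocateHost(1 << 20, &host_buffer);
  if (!st.ok()) {
    std::cerr << "Pinned host allocation failed: " << st.ToString() << std::endl;
    return 1;
  }

  return 0;
}

The pointer-out-parameter plus returned arrow::Status is the error-handling convention used throughout this header, so every call in the sketch is checked before its result is used; the shared_ptr-owned buffers release their memory when they go out of scope.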