Halide 12.0.1
Halide compiler and libraries
device_buffer_utils.h
#ifndef HALIDE_RUNTIME_DEVICE_BUFFER_UTILS_H
#define HALIDE_RUNTIME_DEVICE_BUFFER_UTILS_H

#include "HalideRuntime.h"
#include "device_interface.h"
#include "printer.h"

namespace Halide {
namespace Runtime {
namespace Internal {

// A host <-> dev copy should be done with the fewest possible
// contiguous copies to minimize driver overhead. If our
// halide_buffer_t has strides larger than its extents (e.g. because
// it represents a sub-region of a larger halide_buffer_t) we can't
// safely copy it back and forth using a single contiguous copy,
// because we'd clobber in-between values that another thread might be
// using. In the best case we can do a single contiguous copy, but in
// the worst case we need to individually copy over every pixel.
//
// This problem is made extra difficult by the fact that the ordering
// of the dimensions in a halide_buffer_t doesn't relate to memory layout at
// all, so the strides could be in any order.
//
// We solve it by representing a copy job we need to perform as a
// device_copy struct. It describes a multi-dimensional array of
// copies to perform. Initially it describes copying over a single
// pixel at a time. We then try to discover contiguous groups of
// copies that can be coalesced into a single larger copy.

// The struct that describes a host <-> dev copy to perform.
#define MAX_COPY_DIMS 16
struct device_copy {
    // Opaque handles for the source and destination memory.
    uint64_t src, dst;
    // The offset in the source memory at which to start copying.
    uint64_t src_begin;
    // The multidimensional array of contiguous copy tasks that need to be done.
    uint64_t extent[MAX_COPY_DIMS];
    // The strides (in bytes) that separate adjacent copy tasks in each dimension.
    uint64_t src_stride_bytes[MAX_COPY_DIMS];
    uint64_t dst_stride_bytes[MAX_COPY_DIMS];
    // How many contiguous bytes to copy per task.
    uint64_t chunk_size;
};
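
// Illustrative example (editor's addition, not part of the original file):
// after coalescing, a copy of a dense 640x480 uint8 buffer is described by a
// single task:
//   src, dst         = raw base addresses of the two allocations
//   src_begin        = 0
//   chunk_size       = 640 * 480
//   extent           = {1, 1, ...}
//   src_stride_bytes = dst_stride_bytes = {0, 0, ...}
// i.e. one contiguous memcpy covering the whole allocation.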

WEAK void copy_memory_helper(const device_copy &copy, int d, int64_t src_off, int64_t dst_off) {
    // Skip size-1 dimensions
    while (d >= 0 && copy.extent[d] == 1) {
        d--;
    }

    if (d == -1) {
        const void *from = (void *)(copy.src + src_off);
        void *to = (void *)(copy.dst + dst_off);
        memcpy(to, from, copy.chunk_size);
    } else {
        for (uint64_t i = 0; i < copy.extent[d]; i++) {
            copy_memory_helper(copy, d - 1, src_off, dst_off);
            src_off += copy.src_stride_bytes[d];
            dst_off += copy.dst_stride_bytes[d];
        }
    }
}
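
// Illustrative trace (editor's addition, not part of the original file): for a
// copy with chunk_size = 4, extent = {4, 2, 1, ...} and src/dst strides of
// {16, 256, 0, ...} bytes, copy_memory_helper above recurses from the
// outermost non-unit dimension inward and issues 2 * 4 = 8 memcpy calls of
// 4 bytes each, advancing the offsets by 256 bytes per outer iteration and
// 16 bytes per inner iteration.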

WEAK void copy_memory(const device_copy &copy, void *user_context) {
    // If this is a zero copy buffer, these pointers will be the same.
    if (copy.src != copy.dst) {
        copy_memory_helper(copy, MAX_COPY_DIMS - 1, copy.src_begin, 0);
    } else {
        debug(user_context) << "copy_memory: no copy needed as pointers are the same.\n";
    }
}

// Fills the entire dst buffer, which must be contained within src.
WEAK device_copy make_buffer_copy(const halide_buffer_t *src, bool src_host,
                                  const halide_buffer_t *dst, bool dst_host) {
    // Make a copy job representing copying the first pixel only.
    device_copy c;
    c.src = src_host ? (uint64_t)src->host : src->device;
    c.dst = dst_host ? (uint64_t)dst->host : dst->device;
    c.chunk_size = src->type.bytes();
    for (int i = 0; i < MAX_COPY_DIMS; i++) {
        c.extent[i] = 1;
        c.src_stride_bytes[i] = 0;
        c.dst_stride_bytes[i] = 0;
    }

    // Offset the src base pointer to the right point in its buffer.
    c.src_begin = 0;
    for (int i = 0; i < src->dimensions; i++) {
        c.src_begin += (uint64_t)src->dim[i].stride * (dst->dim[i].min - src->dim[i].min);
    }
    c.src_begin *= c.chunk_size;

    if (src->dimensions != dst->dimensions ||
        src->type.bytes() != dst->type.bytes() ||
        dst->dimensions > MAX_COPY_DIMS) {
        // These conditions should also be checked outside this function.
        device_copy zero = {0};
        return zero;
    }

    if (c.chunk_size == 0) {
        // This buffer apparently represents no memory. Return a zeroed copy
        // task.
        device_copy zero = {0};
        return zero;
    }

    // Now expand it to copy all the pixels (one at a time) by taking
    // the extents and strides from the halide_buffer_ts. Dimensions
    // are added to the copy by inserting them such that the strides
    // are in ascending order in the dst.
    for (int i = 0; i < dst->dimensions; i++) {
        // TODO: deal with negative strides.
        uint64_t dst_stride_bytes = (uint64_t)dst->dim[i].stride * dst->type.bytes();
        uint64_t src_stride_bytes = (uint64_t)src->dim[i].stride * src->type.bytes();
        // Insert the dimension sorted into the buffer copy.
        int insert;
        for (insert = 0; insert < i; insert++) {
            // If the stride is 0, we put it at the end because it can't be
            // folded.
            if (dst_stride_bytes < c.dst_stride_bytes[insert] && dst_stride_bytes != 0) {
                break;
            }
        }
        for (int j = i; j > insert; j--) {
            c.extent[j] = c.extent[j - 1];
            c.dst_stride_bytes[j] = c.dst_stride_bytes[j - 1];
            c.src_stride_bytes[j] = c.src_stride_bytes[j - 1];
        }
        c.extent[insert] = dst->dim[i].extent;
        // debug(nullptr) << "c.extent[" << insert << "] = " << (int)(c.extent[insert]) << "\n";
        c.dst_stride_bytes[insert] = dst_stride_bytes;
        c.src_stride_bytes[insert] = src_stride_bytes;
    }

    // Attempt to fold contiguous dimensions into the chunk
    // size. Since the dimensions are sorted by stride, and the
    // strides must be greater than or equal to the chunk size, this
    // means we can just delete the innermost dimension as long as its
    // stride in both src and dst is equal to the chunk size.
    while (c.chunk_size == c.src_stride_bytes[0] &&
           c.chunk_size == c.dst_stride_bytes[0]) {
        // Fold the innermost dimension's extent into the chunk_size.
        c.chunk_size *= c.extent[0];

        // Erase the innermost dimension from the list of dimensions to
        // iterate over.
        for (int j = 1; j < MAX_COPY_DIMS; j++) {
            c.extent[j - 1] = c.extent[j];
            c.src_stride_bytes[j - 1] = c.src_stride_bytes[j];
            c.dst_stride_bytes[j - 1] = c.dst_stride_bytes[j];
        }
        c.extent[MAX_COPY_DIMS - 1] = 1;
        c.src_stride_bytes[MAX_COPY_DIMS - 1] = 0;
        c.dst_stride_bytes[MAX_COPY_DIMS - 1] = 0;
    }
    return c;
}
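
// Worked example (editor's addition, not part of the original file): copying a
// 64-column-wide crop of a dense 640x480 uint8 image, where both src and dst
// have dim[0] = {extent 64, stride 1} and dim[1] = {extent 480, stride 640}:
//   1. Start with chunk_size = 1 byte and a one-pixel copy.
//   2. Insert the dimensions sorted by dst stride: extent = {64, 480, ...},
//      strides = {1, 640, ...} bytes.
//   3. Fold: the innermost stride (1) equals chunk_size, so chunk_size becomes
//      64 and that dimension is erased, leaving extent = {480, ...} and
//      strides = {640, ...}. The next stride (640) != 64, so folding stops.
// The result is 480 contiguous 64-byte copies. Copying the full 640-wide image
// instead would fold again and collapse to a single 640 * 480 byte memcpy.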

WEAK device_copy make_host_to_device_copy(const halide_buffer_t *buf) {
    return make_buffer_copy(buf, true, buf, false);
}

WEAK device_copy make_device_to_host_copy(const halide_buffer_t *buf) {
    return make_buffer_copy(buf, false, buf, true);
}

// Caller is expected to verify that src->dimensions == dst->dimensions.
ALWAYS_INLINE int64_t calc_device_crop_byte_offset(const struct halide_buffer_t *src, struct halide_buffer_t *dst) {
    int64_t offset = 0;
    for (int i = 0; i < src->dimensions; i++) {
        offset += (dst->dim[i].min - src->dim[i].min) * (int64_t)src->dim[i].stride;
    }
    offset *= src->type.bytes();
    return offset;
}
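
// Illustrative example (editor's addition, not part of the original file): for
// a uint16 parent buffer with dim[0].stride = 1 and dim[1].stride = 100, a
// crop whose mins are offset by (10, 20) from the parent's mins starts at
//   offset = (10 * 1 + 20 * 100) * 2 bytes = 4020 bytes
// into the parent's device allocation.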

// Caller is expected to verify that src->dimensions == dst->dimensions + 1,
// and that slice_dim and slice_pos are valid within src.
ALWAYS_INLINE int64_t calc_device_slice_byte_offset(const struct halide_buffer_t *src, int slice_dim, int slice_pos) {
    int64_t offset = (slice_pos - src->dim[slice_dim].min) * (int64_t)src->dim[slice_dim].stride;
    offset *= src->type.bytes();
    return offset;
}
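
// Illustrative example (editor's addition, not part of the original file):
// slicing dimension 2 of a buffer whose dim[2] has min = 0 and stride = 10000
// at slice_pos = 2 gives offset = 2 * 10000 * type.bytes(), i.e. where that
// plane begins within the parent's device allocation.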

}  // namespace Internal
}  // namespace Runtime
}  // namespace Halide

#endif  // HALIDE_RUNTIME_DEVICE_BUFFER_UTILS_H