1 | /******************************************************************************* |
2 | * Copyright 2016-2022 Intel Corporation |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | *******************************************************************************/ |
16 | |
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <memory>
#include <new>

#include "oneapi/dnnl/dnnl.h"
#include "oneapi/dnnl/dnnl.hpp"

#ifdef DNNL_WITH_SYCL
#include "oneapi/dnnl/dnnl_sycl.h"
#endif

#include "c_types_map.hpp"
#include "engine.hpp"
#include "memory.hpp"
#include "memory_desc_wrapper.hpp"
#include "stream.hpp"
#include "type_helpers.hpp"
#include "utils.hpp"
35 | |
36 | using namespace dnnl::impl; |
37 | using namespace dnnl::impl::utils; |
38 | using namespace dnnl::impl::status; |
39 | using namespace dnnl::impl::data_type; |
40 | |
namespace dnnl {
namespace impl {
// Globally shared value-initialized ("zero") memory descriptor.
// Exposed with external linkage so other translation units can refer to a
// single canonical empty descriptor instead of creating their own.
memory_desc_t glob_zero_md = memory_desc_t();
}
} // namespace dnnl
46 | |
47 | namespace { |
48 | // Returns the size required for memory descriptor mapping. |
49 | // Caveats: |
50 | // 1. If memory descriptor with run-time parameters, the mapping cannot be done; |
51 | // hence return DNNL_RUNTIME_SIZE_VAL |
52 | // 2. Otherwise, the size returned includes `offset0` and holes (for the case |
53 | // of non-trivial strides). Strictly speaking, the mapping should happen only |
54 | // for elements accessible with `md.off_l(0 .. md.nelems())`. However, for |
55 | // the sake of simple implementation let's have such limitation hoping that |
56 | // no one will do concurrent mapping for overlapping memory objects. |
57 | // |
58 | // XXX: remove limitation mentioned in 2nd bullet. |
59 | size_t memory_desc_map_size(const memory_desc_t *md) { |
60 | auto mdw = memory_desc_wrapper(md); |
61 | |
62 | if (mdw.has_runtime_dims_or_strides()) return DNNL_RUNTIME_SIZE_VAL; |
63 | if (mdw.offset0() == 0) return mdw.size(); |
64 | |
65 | memory_desc_t md_no_offset0 = *md; |
66 | md_no_offset0.offset0 = 0; |
67 | return memory_desc_wrapper(md_no_offset0).size() |
68 | + md->offset0 * mdw.data_type_size(); |
69 | } |
70 | } // namespace |
71 | |
72 | dnnl_memory::dnnl_memory(dnnl::impl::engine_t *engine, |
73 | const dnnl::impl::memory_desc_t *md, unsigned flags, void *handle) |
74 | : engine_(engine), md_(*md) { |
75 | const size_t size = memory_desc_wrapper(md_).size(); |
76 | |
77 | memory_storage_t *memory_storage_ptr; |
78 | status_t status = engine->create_memory_storage( |
79 | &memory_storage_ptr, flags, size, handle); |
80 | if (status != success) return; |
81 | |
82 | memory_storage_.reset(memory_storage_ptr); |
83 | } |
84 | |
85 | dnnl_memory::dnnl_memory(dnnl::impl::engine_t *engine, |
86 | const dnnl::impl::memory_desc_t *md, |
87 | std::unique_ptr<dnnl::impl::memory_storage_t> &&memory_storage) |
88 | : engine_(engine), md_(*md) { |
89 | this->reset_memory_storage(std::move(memory_storage)); |
90 | } |
91 | |
92 | status_t dnnl_memory::set_data_handle(void *handle) { |
93 | using namespace dnnl::impl; |
94 | |
95 | void *old_handle; |
96 | CHECK(memory_storage()->get_data_handle(&old_handle)); |
97 | |
98 | if (handle != old_handle) { |
99 | CHECK(memory_storage_->set_data_handle(handle)); |
100 | } |
101 | return status::success; |
102 | } |
103 | |
104 | status_t dnnl_memory::reset_memory_storage( |
105 | std::unique_ptr<dnnl::impl::memory_storage_t> &&memory_storage) { |
106 | if (memory_storage) { |
107 | memory_storage_ = std::move(memory_storage); |
108 | } else { |
109 | memory_storage_t *memory_storage_ptr; |
110 | status_t status = engine_->create_memory_storage( |
111 | &memory_storage_ptr, use_runtime_ptr, 0, nullptr); |
112 | if (status != status::success) return status; |
113 | |
114 | memory_storage_.reset(memory_storage_ptr); |
115 | } |
116 | |
117 | return status::success; |
118 | } |
119 | |
120 | status_t dnnl_memory_create(memory_t **memory, const memory_desc_t *md, |
121 | engine_t *engine, void *handle) { |
122 | #ifdef DNNL_WITH_SYCL |
123 | #if DNNL_CPU_RUNTIME != DNNL_RUNTIME_SYCL |
124 | if (engine->kind() == engine_kind::gpu) |
125 | #endif |
126 | return dnnl_sycl_interop_memory_create( |
127 | memory, md, engine, dnnl_sycl_interop_usm, handle); |
128 | #endif |
129 | if (any_null(memory, engine)) return invalid_arguments; |
130 | |
131 | memory_desc_t z_md = types::zero_md(); |
132 | if (md == nullptr) md = &z_md; |
133 | |
134 | const auto mdw = memory_desc_wrapper(md); |
135 | if (mdw.format_any() || mdw.has_runtime_dims_or_strides()) |
136 | return invalid_arguments; |
137 | |
138 | unsigned flags = (handle == DNNL_MEMORY_ALLOCATE) |
139 | ? memory_flags_t::alloc |
140 | : memory_flags_t::use_runtime_ptr; |
141 | void *handle_ptr = (handle == DNNL_MEMORY_ALLOCATE) ? nullptr : handle; |
142 | auto _memory = new memory_t(engine, md, flags, handle_ptr); |
143 | if (_memory == nullptr) return out_of_memory; |
144 | if (_memory->memory_storage() == nullptr) { |
145 | delete _memory; |
146 | return out_of_memory; |
147 | } |
148 | *memory = _memory; |
149 | return success; |
150 | } |
151 | |
152 | status_t dnnl_memory_get_memory_desc( |
153 | const memory_t *memory, const memory_desc_t **md) { |
154 | if (any_null(memory, md)) return invalid_arguments; |
155 | *md = memory->md(); |
156 | return success; |
157 | } |
158 | |
159 | status_t dnnl_memory_get_engine(const memory_t *memory, engine_t **engine) { |
160 | if (any_null(memory, engine)) return invalid_arguments; |
161 | *engine = memory->engine(); |
162 | return success; |
163 | } |
164 | |
165 | status_t dnnl_memory_get_data_handle(const memory_t *memory, void **handle) { |
166 | if (any_null(handle)) return invalid_arguments; |
167 | if (memory == nullptr) { |
168 | *handle = nullptr; |
169 | return success; |
170 | } |
171 | return memory->get_data_handle(handle); |
172 | } |
173 | |
174 | status_t dnnl_memory_set_data_handle(memory_t *memory, void *handle) { |
175 | if (any_null(memory)) return invalid_arguments; |
176 | CHECK(memory->set_data_handle(handle)); |
177 | return status::success; |
178 | } |
179 | |
180 | status_t dnnl_memory_map_data(const memory_t *memory, void **mapped_ptr) { |
181 | bool args_ok = !any_null(memory, mapped_ptr); |
182 | if (!args_ok) return invalid_arguments; |
183 | |
184 | const memory_desc_t *md = memory->md(); |
185 | // See caveats in the comment to `memory_desc_map_size()` function. |
186 | const size_t map_size = memory_desc_map_size(md); |
187 | |
188 | if (map_size == 0) { |
189 | *mapped_ptr = nullptr; |
190 | return success; |
191 | } else if (map_size == DNNL_RUNTIME_SIZE_VAL) { |
192 | return invalid_arguments; |
193 | } |
194 | |
195 | return memory->memory_storage()->map_data(mapped_ptr, nullptr, map_size); |
196 | } |
197 | |
198 | status_t dnnl_memory_unmap_data(const memory_t *memory, void *mapped_ptr) { |
199 | bool args_ok = !any_null(memory); |
200 | if (!args_ok) return invalid_arguments; |
201 | |
202 | return memory->memory_storage()->unmap_data(mapped_ptr, nullptr); |
203 | } |
204 | |
205 | status_t dnnl_memory_destroy(memory_t *memory) { |
206 | delete memory; |
207 | return success; |
208 | } |
209 | |
210 | // vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s |
211 | |