1 | /******************************************************************************* |
2 | * Copyright 2021 Intel Corporation |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | *******************************************************************************/ |
16 | |
#include "oneapi/dnnl/dnnl.h"

#include <cstring>

#include "common/dnnl_thread.hpp"
#include "common/engine.hpp"
#include "common/primitive_desc.hpp"
#include "common/serialization.hpp"
#include "common/serialization_stream.hpp"
24 | |
25 | namespace dnnl { |
26 | namespace impl { |
27 | |
28 | const std::vector<uint8_t> &cache_blob_id_t::get( |
29 | const engine_t *engine, const primitive_desc_t *pd) { |
30 | if (is_initialized_) return sstream_.get_data(); |
31 | |
32 | auto engine_kind = engine->kind(); |
33 | auto runtime_kind = engine->runtime_kind(); |
34 | |
35 | if (engine_kind != engine_kind::gpu |
36 | || (engine_kind == engine_kind::gpu |
37 | && runtime_kind != runtime_kind::ocl)) { |
38 | return sstream_.get_data(); |
39 | } |
40 | |
41 | if (pd->op_desc()->kind == primitive_kind::zero_pad) { |
42 | return sstream_.get_data(); |
43 | } |
44 | |
45 | assert(engine->kind() == engine_kind::gpu |
46 | && engine->runtime_kind() == runtime_kind::ocl); |
47 | |
48 | const auto init_id = [&]() { |
49 | serialization::serialize_desc(sstream_, pd->op_desc()); |
50 | serialization::serialize_attr(sstream_, *pd->attr()); |
51 | |
52 | const int nthr = engine->kind() == engine_kind::gpu |
53 | ? 0 |
54 | : dnnl_get_max_threads(); |
55 | sstream_.write(&nthr); |
56 | |
57 | for (const auto &md : pd->hint_mds(false /* is_hint */)) { |
58 | serialization::serialize_md(sstream_, md); |
59 | } |
60 | |
61 | sstream_.write(&engine_kind); |
62 | // TODO: blob object can probably be re-used for different runtimes |
63 | // if the engine kind is the same. Check this assumption when extending |
64 | // this API to DPCPP runtime. |
65 | sstream_.write(&runtime_kind); |
66 | |
67 | engine->serialize_device(sstream_); |
68 | |
69 | auto pd_iterator_offset = pd->pd_iterator_offset(); |
70 | sstream_.write(&pd_iterator_offset); |
71 | |
72 | auto version = dnnl_version(); |
73 | sstream_.write(&version->major); |
74 | sstream_.write(&version->minor); |
75 | sstream_.write(&version->patch); |
76 | |
77 | sstream_.write(version->hash, std::strlen(version->hash)); |
78 | |
79 | is_initialized_ = true; |
80 | }; |
81 | |
82 | std::call_once(flag_, init_id); |
83 | return sstream_.get_data(); |
84 | } |
85 | |
86 | } // namespace impl |
87 | } // namespace dnnl |
88 | |