1 | /* |
2 | * Copyright 2018-2021 Arm Limited |
3 | * SPDX-License-Identifier: Apache-2.0 OR MIT |
4 | * |
5 | * Licensed under the Apache License, Version 2.0 (the "License"); |
6 | * you may not use this file except in compliance with the License. |
7 | * You may obtain a copy of the License at |
8 | * |
9 | * http://www.apache.org/licenses/LICENSE-2.0 |
10 | * |
11 | * Unless required by applicable law or agreed to in writing, software |
12 | * distributed under the License is distributed on an "AS IS" BASIS, |
13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
14 | * See the License for the specific language governing permissions and |
15 | * limitations under the License. |
16 | */ |
17 | |
18 | /* |
19 | * At your option, you may choose to accept this material under either: |
20 | * 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or |
21 | * 2. The MIT License, found at <http://opensource.org/licenses/MIT>. |
22 | */ |
23 | |
24 | #include "spirv_parser.hpp" |
25 | #include <assert.h> |
26 | |
27 | using namespace std; |
28 | using namespace spv; |
29 | |
30 | namespace SPIRV_CROSS_NAMESPACE |
31 | { |
32 | Parser::Parser(vector<uint32_t> spirv) |
33 | { |
34 | ir.spirv = move(spirv); |
35 | } |
36 | |
37 | Parser::Parser(const uint32_t *spirv_data, size_t word_count) |
38 | { |
39 | ir.spirv = vector<uint32_t>(spirv_data, spirv_data + word_count); |
40 | } |
41 | |
42 | static bool decoration_is_string(Decoration decoration) |
43 | { |
44 | switch (decoration) |
45 | { |
46 | case DecorationHlslSemanticGOOGLE: |
47 | return true; |
48 | |
49 | default: |
50 | return false; |
51 | } |
52 | } |
53 | |
// Reverse the byte order of a 32-bit word (big-endian <-> little-endian).
static inline uint32_t swap_endian(uint32_t v)
{
	uint32_t b0 = (v >> 24) & 0xffu;
	uint32_t b1 = (v >> 16) & 0xffu;
	uint32_t b2 = (v >> 8) & 0xffu;
	uint32_t b3 = v & 0xffu;
	return (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
}
58 | |
// Check whether the header version word corresponds to a SPIR-V version we accept.
static bool is_valid_spirv_version(uint32_t version)
{
	// All released versions, plus the legacy pre-release v99 since it tends to just work.
	static const uint32_t accepted[] = {
		99,       // Pre-release
		0x10000u, // SPIR-V 1.0
		0x10100u, // SPIR-V 1.1
		0x10200u, // SPIR-V 1.2
		0x10300u, // SPIR-V 1.3
		0x10400u, // SPIR-V 1.4
		0x10500u, // SPIR-V 1.5
		0x10600u, // SPIR-V 1.6
	};

	for (uint32_t candidate : accepted)
	{
		if (candidate == version)
			return true;
	}
	return false;
}
78 | |
// Parse the SPIR-V binary held in ir.spirv into the intermediate representation.
// Validates the module header, slices the word stream into instructions,
// dispatches each instruction to the per-instruction parser, then resolves
// forward-pointer type fixups and sanity-checks the final parser state.
void Parser::parse()
{
	auto &spirv = ir.spirv;

	auto len = spirv.size();
	// A SPIR-V module header is 5 words: magic, version, generator, ID bound, schema.
	if (len < 5)
		SPIRV_CROSS_THROW("SPIRV file too small." );

	auto s = spirv.data();

	// Endian-swap if we need to.
	if (s[0] == swap_endian(MagicNumber))
		transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); });

	if (s[0] != MagicNumber || !is_valid_spirv_version(s[1]))
		SPIRV_CROSS_THROW("Invalid SPIRV format." );

	// Header word 3 is the ID bound; every ID used in the module is < bound.
	uint32_t bound = s[3];

	// Cap the bound so corrupt or hostile input cannot trigger huge allocations.
	const uint32_t MaximumNumberOfIDs = 0x3fffff;
	if (bound > MaximumNumberOfIDs)
		SPIRV_CROSS_THROW("ID bound exceeds limit of 0x3fffff.\n" );

	ir.set_id_bounds(bound);

	// Instructions start immediately after the 5-word header.
	uint32_t offset = 5;

	SmallVector<Instruction> instructions;
	while (offset < len)
	{
		Instruction instr = {};
		// First word of each instruction packs the opcode (low 16 bits)
		// and the total word count including that word (high 16 bits).
		instr.op = spirv[offset] & 0xffff;
		instr.count = (spirv[offset] >> 16) & 0xffff;

		if (instr.count == 0)
			SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file." );

		// Operands begin one word past the opcode word.
		instr.offset = offset + 1;
		instr.length = instr.count - 1;

		offset += instr.count;

		// Reject instructions whose declared word count runs past the end of the binary.
		if (offset > spirv.size())
			SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds." );

		instructions.push_back(instr);
	}

	for (auto &i : instructions)
		parse(i);

	// OpTypeForwardPointer declarations were recorded before their pointee types
	// existed; now that all types are parsed, copy the real type info over.
	for (auto &fixup : forward_pointer_fixups)
	{
		auto &target = get<SPIRType>(fixup.first);
		auto &source = get<SPIRType>(fixup.second);
		target.member_types = source.member_types;
		target.basetype = source.basetype;
		target.self = source.self;
	}
	forward_pointer_fixups.clear();

	// The module must leave parsing in a consistent state:
	// no dangling function/block scope, and at least one entry point.
	if (current_function)
		SPIRV_CROSS_THROW("Function was not terminated." );
	if (current_block)
		SPIRV_CROSS_THROW("Block was not terminated." );
	if (ir.default_entry_point == 0)
		SPIRV_CROSS_THROW("There is no entry point in the SPIR-V module." );
}
147 | |
148 | const uint32_t *Parser::stream(const Instruction &instr) const |
149 | { |
150 | // If we're not going to use any arguments, just return nullptr. |
151 | // We want to avoid case where we return an out of range pointer |
152 | // that trips debug assertions on some platforms. |
153 | if (!instr.length) |
154 | return nullptr; |
155 | |
156 | if (instr.offset + instr.length > ir.spirv.size()) |
157 | SPIRV_CROSS_THROW("Compiler::stream() out of range." ); |
158 | return &ir.spirv[instr.offset]; |
159 | } |
160 | |
161 | static string (const vector<uint32_t> &spirv, uint32_t offset) |
162 | { |
163 | string ret; |
164 | for (uint32_t i = offset; i < spirv.size(); i++) |
165 | { |
166 | uint32_t w = spirv[i]; |
167 | |
168 | for (uint32_t j = 0; j < 4; j++, w >>= 8) |
169 | { |
170 | char c = w & 0xff; |
171 | if (c == '\0') |
172 | return ret; |
173 | ret += c; |
174 | } |
175 | } |
176 | |
177 | SPIRV_CROSS_THROW("String was not terminated before EOF" ); |
178 | } |
179 | |
180 | void Parser::parse(const Instruction &instruction) |
181 | { |
182 | auto *ops = stream(instruction); |
183 | auto op = static_cast<Op>(instruction.op); |
184 | uint32_t length = instruction.length; |
185 | |
186 | switch (op) |
187 | { |
188 | case OpSourceContinued: |
189 | case OpSourceExtension: |
190 | case OpNop: |
191 | case OpModuleProcessed: |
192 | break; |
193 | |
194 | case OpString: |
195 | { |
196 | set<SPIRString>(ops[0], extract_string(ir.spirv, instruction.offset + 1)); |
197 | break; |
198 | } |
199 | |
200 | case OpMemoryModel: |
201 | ir.addressing_model = static_cast<AddressingModel>(ops[0]); |
202 | ir.memory_model = static_cast<MemoryModel>(ops[1]); |
203 | break; |
204 | |
205 | case OpSource: |
206 | { |
207 | auto lang = static_cast<SourceLanguage>(ops[0]); |
208 | switch (lang) |
209 | { |
210 | case SourceLanguageESSL: |
211 | ir.source.es = true; |
212 | ir.source.version = ops[1]; |
213 | ir.source.known = true; |
214 | ir.source.hlsl = false; |
215 | break; |
216 | |
217 | case SourceLanguageGLSL: |
218 | ir.source.es = false; |
219 | ir.source.version = ops[1]; |
220 | ir.source.known = true; |
221 | ir.source.hlsl = false; |
222 | break; |
223 | |
224 | case SourceLanguageHLSL: |
225 | // For purposes of cross-compiling, this is GLSL 450. |
226 | ir.source.es = false; |
227 | ir.source.version = 450; |
228 | ir.source.known = true; |
229 | ir.source.hlsl = true; |
230 | break; |
231 | |
232 | default: |
233 | ir.source.known = false; |
234 | break; |
235 | } |
236 | break; |
237 | } |
238 | |
239 | case OpUndef: |
240 | { |
241 | uint32_t result_type = ops[0]; |
242 | uint32_t id = ops[1]; |
243 | set<SPIRUndef>(id, result_type); |
244 | if (current_block) |
245 | current_block->ops.push_back(instruction); |
246 | break; |
247 | } |
248 | |
249 | case OpCapability: |
250 | { |
251 | uint32_t cap = ops[0]; |
252 | if (cap == CapabilityKernel) |
253 | SPIRV_CROSS_THROW("Kernel capability not supported." ); |
254 | |
255 | ir.declared_capabilities.push_back(static_cast<Capability>(ops[0])); |
256 | break; |
257 | } |
258 | |
259 | case OpExtension: |
260 | { |
261 | auto ext = extract_string(ir.spirv, instruction.offset); |
262 | ir.declared_extensions.push_back(move(ext)); |
263 | break; |
264 | } |
265 | |
266 | case OpExtInstImport: |
267 | { |
268 | uint32_t id = ops[0]; |
269 | auto ext = extract_string(ir.spirv, instruction.offset + 1); |
270 | if (ext == "GLSL.std.450" ) |
271 | set<SPIRExtension>(id, SPIRExtension::GLSL); |
272 | else if (ext == "DebugInfo" ) |
273 | set<SPIRExtension>(id, SPIRExtension::SPV_debug_info); |
274 | else if (ext == "SPV_AMD_shader_ballot" ) |
275 | set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_ballot); |
276 | else if (ext == "SPV_AMD_shader_explicit_vertex_parameter" ) |
277 | set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter); |
278 | else if (ext == "SPV_AMD_shader_trinary_minmax" ) |
279 | set<SPIRExtension>(id, SPIRExtension::SPV_AMD_shader_trinary_minmax); |
280 | else if (ext == "SPV_AMD_gcn_shader" ) |
281 | set<SPIRExtension>(id, SPIRExtension::SPV_AMD_gcn_shader); |
282 | else |
283 | set<SPIRExtension>(id, SPIRExtension::Unsupported); |
284 | |
285 | // Other SPIR-V extensions which have ExtInstrs are currently not supported. |
286 | |
287 | break; |
288 | } |
289 | |
290 | case OpExtInst: |
291 | { |
292 | // The SPIR-V debug information extended instructions might come at global scope. |
293 | if (current_block) |
294 | current_block->ops.push_back(instruction); |
295 | break; |
296 | } |
297 | |
298 | case OpEntryPoint: |
299 | { |
300 | auto itr = |
301 | ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast<ExecutionModel>(ops[0]), |
302 | extract_string(ir.spirv, instruction.offset + 2)))); |
303 | auto &e = itr.first->second; |
304 | |
305 | // Strings need nul-terminator and consume the whole word. |
306 | uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2); |
307 | |
308 | for (uint32_t i = strlen_words + 2; i < instruction.length; i++) |
309 | e.interface_variables.push_back(ops[i]); |
310 | |
311 | // Set the name of the entry point in case OpName is not provided later. |
312 | ir.set_name(ops[1], e.name); |
313 | |
314 | // If we don't have an entry, make the first one our "default". |
315 | if (!ir.default_entry_point) |
316 | ir.default_entry_point = ops[1]; |
317 | break; |
318 | } |
319 | |
320 | case OpExecutionMode: |
321 | { |
322 | auto &execution = ir.entry_points[ops[0]]; |
323 | auto mode = static_cast<ExecutionMode>(ops[1]); |
324 | execution.flags.set(mode); |
325 | |
326 | switch (mode) |
327 | { |
328 | case ExecutionModeInvocations: |
329 | execution.invocations = ops[2]; |
330 | break; |
331 | |
332 | case ExecutionModeLocalSize: |
333 | execution.workgroup_size.x = ops[2]; |
334 | execution.workgroup_size.y = ops[3]; |
335 | execution.workgroup_size.z = ops[4]; |
336 | break; |
337 | |
338 | case ExecutionModeOutputVertices: |
339 | execution.output_vertices = ops[2]; |
340 | break; |
341 | |
342 | default: |
343 | break; |
344 | } |
345 | break; |
346 | } |
347 | |
348 | case OpExecutionModeId: |
349 | { |
350 | auto &execution = ir.entry_points[ops[0]]; |
351 | auto mode = static_cast<ExecutionMode>(ops[1]); |
352 | execution.flags.set(mode); |
353 | |
354 | if (mode == ExecutionModeLocalSizeId) |
355 | { |
356 | execution.workgroup_size.id_x = ops[2]; |
357 | execution.workgroup_size.id_y = ops[3]; |
358 | execution.workgroup_size.id_z = ops[4]; |
359 | } |
360 | |
361 | break; |
362 | } |
363 | |
364 | case OpName: |
365 | { |
366 | uint32_t id = ops[0]; |
367 | ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1)); |
368 | break; |
369 | } |
370 | |
371 | case OpMemberName: |
372 | { |
373 | uint32_t id = ops[0]; |
374 | uint32_t member = ops[1]; |
375 | ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2)); |
376 | break; |
377 | } |
378 | |
379 | case OpDecorationGroup: |
380 | { |
381 | // Noop, this simply means an ID should be a collector of decorations. |
382 | // The meta array is already a flat array of decorations which will contain the relevant decorations. |
383 | break; |
384 | } |
385 | |
386 | case OpGroupDecorate: |
387 | { |
388 | uint32_t group_id = ops[0]; |
389 | auto &decorations = ir.meta[group_id].decoration; |
390 | auto &flags = decorations.decoration_flags; |
391 | |
392 | // Copies decorations from one ID to another. Only copy decorations which are set in the group, |
393 | // i.e., we cannot just copy the meta structure directly. |
394 | for (uint32_t i = 1; i < length; i++) |
395 | { |
396 | uint32_t target = ops[i]; |
397 | flags.for_each_bit([&](uint32_t bit) { |
398 | auto decoration = static_cast<Decoration>(bit); |
399 | |
400 | if (decoration_is_string(decoration)) |
401 | { |
402 | ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration)); |
403 | } |
404 | else |
405 | { |
406 | ir.meta[target].decoration_word_offset[decoration] = |
407 | ir.meta[group_id].decoration_word_offset[decoration]; |
408 | ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration)); |
409 | } |
410 | }); |
411 | } |
412 | break; |
413 | } |
414 | |
415 | case OpGroupMemberDecorate: |
416 | { |
417 | uint32_t group_id = ops[0]; |
418 | auto &flags = ir.meta[group_id].decoration.decoration_flags; |
419 | |
420 | // Copies decorations from one ID to another. Only copy decorations which are set in the group, |
421 | // i.e., we cannot just copy the meta structure directly. |
422 | for (uint32_t i = 1; i + 1 < length; i += 2) |
423 | { |
424 | uint32_t target = ops[i + 0]; |
425 | uint32_t index = ops[i + 1]; |
426 | flags.for_each_bit([&](uint32_t bit) { |
427 | auto decoration = static_cast<Decoration>(bit); |
428 | |
429 | if (decoration_is_string(decoration)) |
430 | ir.set_member_decoration_string(target, index, decoration, |
431 | ir.get_decoration_string(group_id, decoration)); |
432 | else |
433 | ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration)); |
434 | }); |
435 | } |
436 | break; |
437 | } |
438 | |
439 | case OpDecorate: |
440 | case OpDecorateId: |
441 | { |
442 | // OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint, |
443 | // so merge decorate and decorate-id here. |
444 | uint32_t id = ops[0]; |
445 | |
446 | auto decoration = static_cast<Decoration>(ops[1]); |
447 | if (length >= 3) |
448 | { |
449 | ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data()); |
450 | ir.set_decoration(id, decoration, ops[2]); |
451 | } |
452 | else |
453 | ir.set_decoration(id, decoration); |
454 | |
455 | break; |
456 | } |
457 | |
458 | case OpDecorateStringGOOGLE: |
459 | { |
460 | uint32_t id = ops[0]; |
461 | auto decoration = static_cast<Decoration>(ops[1]); |
462 | ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2)); |
463 | break; |
464 | } |
465 | |
466 | case OpMemberDecorate: |
467 | { |
468 | uint32_t id = ops[0]; |
469 | uint32_t member = ops[1]; |
470 | auto decoration = static_cast<Decoration>(ops[2]); |
471 | if (length >= 4) |
472 | ir.set_member_decoration(id, member, decoration, ops[3]); |
473 | else |
474 | ir.set_member_decoration(id, member, decoration); |
475 | break; |
476 | } |
477 | |
478 | case OpMemberDecorateStringGOOGLE: |
479 | { |
480 | uint32_t id = ops[0]; |
481 | uint32_t member = ops[1]; |
482 | auto decoration = static_cast<Decoration>(ops[2]); |
483 | ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3)); |
484 | break; |
485 | } |
486 | |
487 | // Build up basic types. |
488 | case OpTypeVoid: |
489 | { |
490 | uint32_t id = ops[0]; |
491 | auto &type = set<SPIRType>(id); |
492 | type.basetype = SPIRType::Void; |
493 | break; |
494 | } |
495 | |
496 | case OpTypeBool: |
497 | { |
498 | uint32_t id = ops[0]; |
499 | auto &type = set<SPIRType>(id); |
500 | type.basetype = SPIRType::Boolean; |
501 | type.width = 1; |
502 | break; |
503 | } |
504 | |
505 | case OpTypeFloat: |
506 | { |
507 | uint32_t id = ops[0]; |
508 | uint32_t width = ops[1]; |
509 | auto &type = set<SPIRType>(id); |
510 | if (width == 64) |
511 | type.basetype = SPIRType::Double; |
512 | else if (width == 32) |
513 | type.basetype = SPIRType::Float; |
514 | else if (width == 16) |
515 | type.basetype = SPIRType::Half; |
516 | else |
517 | SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type." ); |
518 | type.width = width; |
519 | break; |
520 | } |
521 | |
522 | case OpTypeInt: |
523 | { |
524 | uint32_t id = ops[0]; |
525 | uint32_t width = ops[1]; |
526 | bool signedness = ops[2] != 0; |
527 | auto &type = set<SPIRType>(id); |
528 | type.basetype = signedness ? to_signed_basetype(width) : to_unsigned_basetype(width); |
529 | type.width = width; |
530 | break; |
531 | } |
532 | |
533 | // Build composite types by "inheriting". |
534 | // NOTE: The self member is also copied! For pointers and array modifiers this is a good thing |
535 | // since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc. |
536 | case OpTypeVector: |
537 | { |
538 | uint32_t id = ops[0]; |
539 | uint32_t vecsize = ops[2]; |
540 | |
541 | auto &base = get<SPIRType>(ops[1]); |
542 | auto &vecbase = set<SPIRType>(id); |
543 | |
544 | vecbase = base; |
545 | vecbase.vecsize = vecsize; |
546 | vecbase.self = id; |
547 | vecbase.parent_type = ops[1]; |
548 | break; |
549 | } |
550 | |
551 | case OpTypeMatrix: |
552 | { |
553 | uint32_t id = ops[0]; |
554 | uint32_t colcount = ops[2]; |
555 | |
556 | auto &base = get<SPIRType>(ops[1]); |
557 | auto &matrixbase = set<SPIRType>(id); |
558 | |
559 | matrixbase = base; |
560 | matrixbase.columns = colcount; |
561 | matrixbase.self = id; |
562 | matrixbase.parent_type = ops[1]; |
563 | break; |
564 | } |
565 | |
566 | case OpTypeArray: |
567 | { |
568 | uint32_t id = ops[0]; |
569 | auto &arraybase = set<SPIRType>(id); |
570 | |
571 | uint32_t tid = ops[1]; |
572 | auto &base = get<SPIRType>(tid); |
573 | |
574 | arraybase = base; |
575 | arraybase.parent_type = tid; |
576 | |
577 | uint32_t cid = ops[2]; |
578 | ir.mark_used_as_array_length(cid); |
579 | auto *c = maybe_get<SPIRConstant>(cid); |
580 | bool literal = c && !c->specialization; |
581 | |
582 | // We're copying type information into Array types, so we'll need a fixup for any physical pointer |
583 | // references. |
584 | if (base.forward_pointer) |
585 | forward_pointer_fixups.push_back({ id, tid }); |
586 | |
587 | arraybase.array_size_literal.push_back(literal); |
588 | arraybase.array.push_back(literal ? c->scalar() : cid); |
589 | // Do NOT set arraybase.self! |
590 | break; |
591 | } |
592 | |
593 | case OpTypeRuntimeArray: |
594 | { |
595 | uint32_t id = ops[0]; |
596 | |
597 | auto &base = get<SPIRType>(ops[1]); |
598 | auto &arraybase = set<SPIRType>(id); |
599 | |
600 | // We're copying type information into Array types, so we'll need a fixup for any physical pointer |
601 | // references. |
602 | if (base.forward_pointer) |
603 | forward_pointer_fixups.push_back({ id, ops[1] }); |
604 | |
605 | arraybase = base; |
606 | arraybase.array.push_back(0); |
607 | arraybase.array_size_literal.push_back(true); |
608 | arraybase.parent_type = ops[1]; |
609 | // Do NOT set arraybase.self! |
610 | break; |
611 | } |
612 | |
613 | case OpTypeImage: |
614 | { |
615 | uint32_t id = ops[0]; |
616 | auto &type = set<SPIRType>(id); |
617 | type.basetype = SPIRType::Image; |
618 | type.image.type = ops[1]; |
619 | type.image.dim = static_cast<Dim>(ops[2]); |
620 | type.image.depth = ops[3] == 1; |
621 | type.image.arrayed = ops[4] != 0; |
622 | type.image.ms = ops[5] != 0; |
623 | type.image.sampled = ops[6]; |
624 | type.image.format = static_cast<ImageFormat>(ops[7]); |
625 | type.image.access = (length >= 9) ? static_cast<AccessQualifier>(ops[8]) : AccessQualifierMax; |
626 | break; |
627 | } |
628 | |
629 | case OpTypeSampledImage: |
630 | { |
631 | uint32_t id = ops[0]; |
632 | uint32_t imagetype = ops[1]; |
633 | auto &type = set<SPIRType>(id); |
634 | type = get<SPIRType>(imagetype); |
635 | type.basetype = SPIRType::SampledImage; |
636 | type.self = id; |
637 | break; |
638 | } |
639 | |
640 | case OpTypeSampler: |
641 | { |
642 | uint32_t id = ops[0]; |
643 | auto &type = set<SPIRType>(id); |
644 | type.basetype = SPIRType::Sampler; |
645 | break; |
646 | } |
647 | |
648 | case OpTypePointer: |
649 | { |
650 | uint32_t id = ops[0]; |
651 | |
652 | // Very rarely, we might receive a FunctionPrototype here. |
653 | // We won't be able to compile it, but we shouldn't crash when parsing. |
654 | // We should be able to reflect. |
655 | auto *base = maybe_get<SPIRType>(ops[2]); |
656 | auto &ptrbase = set<SPIRType>(id); |
657 | |
658 | if (base) |
659 | ptrbase = *base; |
660 | |
661 | ptrbase.pointer = true; |
662 | ptrbase.pointer_depth++; |
663 | ptrbase.storage = static_cast<StorageClass>(ops[1]); |
664 | |
665 | if (ptrbase.storage == StorageClassAtomicCounter) |
666 | ptrbase.basetype = SPIRType::AtomicCounter; |
667 | |
668 | if (base && base->forward_pointer) |
669 | forward_pointer_fixups.push_back({ id, ops[2] }); |
670 | |
671 | ptrbase.parent_type = ops[2]; |
672 | |
673 | // Do NOT set ptrbase.self! |
674 | break; |
675 | } |
676 | |
677 | case OpTypeForwardPointer: |
678 | { |
679 | uint32_t id = ops[0]; |
680 | auto &ptrbase = set<SPIRType>(id); |
681 | ptrbase.pointer = true; |
682 | ptrbase.pointer_depth++; |
683 | ptrbase.storage = static_cast<StorageClass>(ops[1]); |
684 | ptrbase.forward_pointer = true; |
685 | |
686 | if (ptrbase.storage == StorageClassAtomicCounter) |
687 | ptrbase.basetype = SPIRType::AtomicCounter; |
688 | |
689 | break; |
690 | } |
691 | |
692 | case OpTypeStruct: |
693 | { |
694 | uint32_t id = ops[0]; |
695 | auto &type = set<SPIRType>(id); |
696 | type.basetype = SPIRType::Struct; |
697 | for (uint32_t i = 1; i < length; i++) |
698 | type.member_types.push_back(ops[i]); |
699 | |
700 | // Check if we have seen this struct type before, with just different |
701 | // decorations. |
702 | // |
703 | // Add workaround for issue #17 as well by looking at OpName for the struct |
704 | // types, which we shouldn't normally do. |
705 | // We should not normally have to consider type aliases like this to begin with |
706 | // however ... glslang issues #304, #307 cover this. |
707 | |
708 | // For stripped names, never consider struct type aliasing. |
709 | // We risk declaring the same struct multiple times, but type-punning is not allowed |
710 | // so this is safe. |
711 | bool consider_aliasing = !ir.get_name(type.self).empty(); |
712 | if (consider_aliasing) |
713 | { |
714 | for (auto &other : global_struct_cache) |
715 | { |
716 | if (ir.get_name(type.self) == ir.get_name(other) && |
717 | types_are_logically_equivalent(type, get<SPIRType>(other))) |
718 | { |
719 | type.type_alias = other; |
720 | break; |
721 | } |
722 | } |
723 | |
724 | if (type.type_alias == TypeID(0)) |
725 | global_struct_cache.push_back(id); |
726 | } |
727 | break; |
728 | } |
729 | |
730 | case OpTypeFunction: |
731 | { |
732 | uint32_t id = ops[0]; |
733 | uint32_t ret = ops[1]; |
734 | |
735 | auto &func = set<SPIRFunctionPrototype>(id, ret); |
736 | for (uint32_t i = 2; i < length; i++) |
737 | func.parameter_types.push_back(ops[i]); |
738 | break; |
739 | } |
740 | |
741 | case OpTypeAccelerationStructureKHR: |
742 | { |
743 | uint32_t id = ops[0]; |
744 | auto &type = set<SPIRType>(id); |
745 | type.basetype = SPIRType::AccelerationStructure; |
746 | break; |
747 | } |
748 | |
749 | case OpTypeRayQueryKHR: |
750 | { |
751 | uint32_t id = ops[0]; |
752 | auto &type = set<SPIRType>(id); |
753 | type.basetype = SPIRType::RayQuery; |
754 | break; |
755 | } |
756 | |
757 | // Variable declaration |
758 | // All variables are essentially pointers with a storage qualifier. |
759 | case OpVariable: |
760 | { |
761 | uint32_t type = ops[0]; |
762 | uint32_t id = ops[1]; |
763 | auto storage = static_cast<StorageClass>(ops[2]); |
764 | uint32_t initializer = length == 4 ? ops[3] : 0; |
765 | |
766 | if (storage == StorageClassFunction) |
767 | { |
768 | if (!current_function) |
769 | SPIRV_CROSS_THROW("No function currently in scope" ); |
770 | current_function->add_local_variable(id); |
771 | } |
772 | |
773 | set<SPIRVariable>(id, type, storage, initializer); |
774 | break; |
775 | } |
776 | |
777 | // OpPhi |
778 | // OpPhi is a fairly magical opcode. |
779 | // It selects temporary variables based on which parent block we *came from*. |
780 | // In high-level languages we can "de-SSA" by creating a function local, and flush out temporaries to this function-local |
781 | // variable to emulate SSA Phi. |
782 | case OpPhi: |
783 | { |
784 | if (!current_function) |
785 | SPIRV_CROSS_THROW("No function currently in scope" ); |
786 | if (!current_block) |
787 | SPIRV_CROSS_THROW("No block currently in scope" ); |
788 | |
789 | uint32_t result_type = ops[0]; |
790 | uint32_t id = ops[1]; |
791 | |
792 | // Instead of a temporary, create a new function-wide temporary with this ID instead. |
793 | auto &var = set<SPIRVariable>(id, result_type, spv::StorageClassFunction); |
794 | var.phi_variable = true; |
795 | |
796 | current_function->add_local_variable(id); |
797 | |
798 | for (uint32_t i = 2; i + 2 <= length; i += 2) |
799 | current_block->phi_variables.push_back({ ops[i], ops[i + 1], id }); |
800 | break; |
801 | } |
802 | |
803 | // Constants |
804 | case OpSpecConstant: |
805 | case OpConstant: |
806 | { |
807 | uint32_t id = ops[1]; |
808 | auto &type = get<SPIRType>(ops[0]); |
809 | |
810 | if (type.width > 32) |
811 | set<SPIRConstant>(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant); |
812 | else |
813 | set<SPIRConstant>(id, ops[0], ops[2], op == OpSpecConstant); |
814 | break; |
815 | } |
816 | |
817 | case OpSpecConstantFalse: |
818 | case OpConstantFalse: |
819 | { |
820 | uint32_t id = ops[1]; |
821 | set<SPIRConstant>(id, ops[0], uint32_t(0), op == OpSpecConstantFalse); |
822 | break; |
823 | } |
824 | |
825 | case OpSpecConstantTrue: |
826 | case OpConstantTrue: |
827 | { |
828 | uint32_t id = ops[1]; |
829 | set<SPIRConstant>(id, ops[0], uint32_t(1), op == OpSpecConstantTrue); |
830 | break; |
831 | } |
832 | |
833 | case OpConstantNull: |
834 | { |
835 | uint32_t id = ops[1]; |
836 | uint32_t type = ops[0]; |
837 | ir.make_constant_null(id, type, true); |
838 | break; |
839 | } |
840 | |
841 | case OpSpecConstantComposite: |
842 | case OpConstantComposite: |
843 | { |
844 | uint32_t id = ops[1]; |
845 | uint32_t type = ops[0]; |
846 | |
847 | auto &ctype = get<SPIRType>(type); |
848 | |
849 | // We can have constants which are structs and arrays. |
850 | // In this case, our SPIRConstant will be a list of other SPIRConstant ids which we |
851 | // can refer to. |
852 | if (ctype.basetype == SPIRType::Struct || !ctype.array.empty()) |
853 | { |
854 | set<SPIRConstant>(id, type, ops + 2, length - 2, op == OpSpecConstantComposite); |
855 | } |
856 | else |
857 | { |
858 | uint32_t elements = length - 2; |
859 | if (elements > 4) |
860 | SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements." ); |
861 | |
862 | SPIRConstant remapped_constant_ops[4]; |
863 | const SPIRConstant *c[4]; |
864 | for (uint32_t i = 0; i < elements; i++) |
865 | { |
866 | // Specialization constants operations can also be part of this. |
867 | // We do not know their value, so any attempt to query SPIRConstant later |
868 | // will fail. We can only propagate the ID of the expression and use to_expression on it. |
869 | auto *constant_op = maybe_get<SPIRConstantOp>(ops[2 + i]); |
870 | auto *undef_op = maybe_get<SPIRUndef>(ops[2 + i]); |
871 | if (constant_op) |
872 | { |
873 | if (op == OpConstantComposite) |
874 | SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite." ); |
875 | |
876 | remapped_constant_ops[i].make_null(get<SPIRType>(constant_op->basetype)); |
877 | remapped_constant_ops[i].self = constant_op->self; |
878 | remapped_constant_ops[i].constant_type = constant_op->basetype; |
879 | remapped_constant_ops[i].specialization = true; |
880 | c[i] = &remapped_constant_ops[i]; |
881 | } |
882 | else if (undef_op) |
883 | { |
884 | // Undefined, just pick 0. |
885 | remapped_constant_ops[i].make_null(get<SPIRType>(undef_op->basetype)); |
886 | remapped_constant_ops[i].constant_type = undef_op->basetype; |
887 | c[i] = &remapped_constant_ops[i]; |
888 | } |
889 | else |
890 | c[i] = &get<SPIRConstant>(ops[2 + i]); |
891 | } |
892 | set<SPIRConstant>(id, type, c, elements, op == OpSpecConstantComposite); |
893 | } |
894 | break; |
895 | } |
896 | |
897 | // Functions |
898 | case OpFunction: |
899 | { |
900 | uint32_t res = ops[0]; |
901 | uint32_t id = ops[1]; |
902 | // Control |
903 | uint32_t type = ops[3]; |
904 | |
905 | if (current_function) |
906 | SPIRV_CROSS_THROW("Must end a function before starting a new one!" ); |
907 | |
908 | current_function = &set<SPIRFunction>(id, res, type); |
909 | break; |
910 | } |
911 | |
912 | case OpFunctionParameter: |
913 | { |
914 | uint32_t type = ops[0]; |
915 | uint32_t id = ops[1]; |
916 | |
917 | if (!current_function) |
918 | SPIRV_CROSS_THROW("Must be in a function!" ); |
919 | |
920 | current_function->add_parameter(type, id); |
921 | set<SPIRVariable>(id, type, StorageClassFunction); |
922 | break; |
923 | } |
924 | |
925 | case OpFunctionEnd: |
926 | { |
927 | if (current_block) |
928 | { |
929 | // Very specific error message, but seems to come up quite often. |
930 | SPIRV_CROSS_THROW( |
931 | "Cannot end a function before ending the current block.\n" |
932 | "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid." ); |
933 | } |
934 | current_function = nullptr; |
935 | break; |
936 | } |
937 | |
938 | // Blocks |
939 | case OpLabel: |
940 | { |
941 | // OpLabel always starts a block. |
942 | if (!current_function) |
943 | SPIRV_CROSS_THROW("Blocks cannot exist outside functions!" ); |
944 | |
945 | uint32_t id = ops[0]; |
946 | |
947 | current_function->blocks.push_back(id); |
948 | if (!current_function->entry_block) |
949 | current_function->entry_block = id; |
950 | |
951 | if (current_block) |
952 | SPIRV_CROSS_THROW("Cannot start a block before ending the current block." ); |
953 | |
954 | current_block = &set<SPIRBlock>(id); |
955 | break; |
956 | } |
957 | |
958 | // Branch instructions end blocks. |
959 | case OpBranch: |
960 | { |
961 | if (!current_block) |
962 | SPIRV_CROSS_THROW("Trying to end a non-existing block." ); |
963 | |
964 | uint32_t target = ops[0]; |
965 | current_block->terminator = SPIRBlock::Direct; |
966 | current_block->next_block = target; |
967 | current_block = nullptr; |
968 | break; |
969 | } |
970 | |
971 | case OpBranchConditional: |
972 | { |
973 | if (!current_block) |
974 | SPIRV_CROSS_THROW("Trying to end a non-existing block." ); |
975 | |
976 | current_block->condition = ops[0]; |
977 | current_block->true_block = ops[1]; |
978 | current_block->false_block = ops[2]; |
979 | |
980 | current_block->terminator = SPIRBlock::Select; |
981 | |
982 | if (current_block->true_block == current_block->false_block) |
983 | { |
984 | // Bogus conditional, translate to a direct branch. |
985 | // Avoids some ugly edge cases later when analyzing CFGs. |
986 | |
987 | // There are some super jank cases where the merge block is different from the true/false, |
988 | // and later branches can "break" out of the selection construct this way. |
989 | // This is complete nonsense, but CTS hits this case. |
990 | // In this scenario, we should see the selection construct as more of a Switch with one default case. |
991 | // The problem here is that this breaks any attempt to break out of outer switch statements, |
992 | // but it's theoretically solvable if this ever comes up using the ladder breaking system ... |
993 | |
994 | if (current_block->true_block != current_block->next_block && |
995 | current_block->merge == SPIRBlock::MergeSelection) |
996 | { |
997 | uint32_t ids = ir.increase_bound_by(2); |
998 | |
999 | SPIRType type; |
1000 | type.basetype = SPIRType::Int; |
1001 | type.width = 32; |
1002 | set<SPIRType>(ids, type); |
1003 | auto &c = set<SPIRConstant>(ids + 1, ids); |
1004 | |
1005 | current_block->condition = c.self; |
1006 | current_block->default_block = current_block->true_block; |
1007 | current_block->terminator = SPIRBlock::MultiSelect; |
1008 | ir.block_meta[current_block->next_block] &= ~ParsedIR::BLOCK_META_SELECTION_MERGE_BIT; |
1009 | ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT; |
1010 | } |
1011 | else |
1012 | { |
1013 | ir.block_meta[current_block->next_block] &= ~ParsedIR::BLOCK_META_SELECTION_MERGE_BIT; |
1014 | current_block->next_block = current_block->true_block; |
1015 | current_block->condition = 0; |
1016 | current_block->true_block = 0; |
1017 | current_block->false_block = 0; |
1018 | current_block->merge_block = 0; |
1019 | current_block->merge = SPIRBlock::MergeNone; |
1020 | current_block->terminator = SPIRBlock::Direct; |
1021 | } |
1022 | } |
1023 | |
1024 | current_block = nullptr; |
1025 | break; |
1026 | } |
1027 | |
1028 | case OpSwitch: |
1029 | { |
1030 | if (!current_block) |
1031 | SPIRV_CROSS_THROW("Trying to end a non-existing block." ); |
1032 | |
1033 | current_block->terminator = SPIRBlock::MultiSelect; |
1034 | |
1035 | current_block->condition = ops[0]; |
1036 | current_block->default_block = ops[1]; |
1037 | |
1038 | uint32_t remaining_ops = length - 2; |
1039 | if ((remaining_ops % 2) == 0) |
1040 | { |
1041 | for (uint32_t i = 2; i + 2 <= length; i += 2) |
1042 | current_block->cases_32bit.push_back({ ops[i], ops[i + 1] }); |
1043 | } |
1044 | |
1045 | if ((remaining_ops % 3) == 0) |
1046 | { |
1047 | for (uint32_t i = 2; i + 3 <= length; i += 3) |
1048 | { |
1049 | uint64_t value = (static_cast<uint64_t>(ops[i + 1]) << 32) | ops[i]; |
1050 | current_block->cases_64bit.push_back({ value, ops[i + 2] }); |
1051 | } |
1052 | } |
1053 | |
1054 | // If we jump to next block, make it break instead since we're inside a switch case block at that point. |
1055 | ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT; |
1056 | |
1057 | current_block = nullptr; |
1058 | break; |
1059 | } |
1060 | |
1061 | case OpKill: |
1062 | { |
1063 | if (!current_block) |
1064 | SPIRV_CROSS_THROW("Trying to end a non-existing block." ); |
1065 | current_block->terminator = SPIRBlock::Kill; |
1066 | current_block = nullptr; |
1067 | break; |
1068 | } |
1069 | |
1070 | case OpTerminateRayKHR: |
1071 | // NV variant is not a terminator. |
1072 | if (!current_block) |
1073 | SPIRV_CROSS_THROW("Trying to end a non-existing block." ); |
1074 | current_block->terminator = SPIRBlock::TerminateRay; |
1075 | current_block = nullptr; |
1076 | break; |
1077 | |
1078 | case OpIgnoreIntersectionKHR: |
1079 | // NV variant is not a terminator. |
1080 | if (!current_block) |
1081 | SPIRV_CROSS_THROW("Trying to end a non-existing block." ); |
1082 | current_block->terminator = SPIRBlock::IgnoreIntersection; |
1083 | current_block = nullptr; |
1084 | break; |
1085 | |
1086 | case OpReturn: |
1087 | { |
1088 | if (!current_block) |
1089 | SPIRV_CROSS_THROW("Trying to end a non-existing block." ); |
1090 | current_block->terminator = SPIRBlock::Return; |
1091 | current_block = nullptr; |
1092 | break; |
1093 | } |
1094 | |
1095 | case OpReturnValue: |
1096 | { |
1097 | if (!current_block) |
1098 | SPIRV_CROSS_THROW("Trying to end a non-existing block." ); |
1099 | current_block->terminator = SPIRBlock::Return; |
1100 | current_block->return_value = ops[0]; |
1101 | current_block = nullptr; |
1102 | break; |
1103 | } |
1104 | |
1105 | case OpUnreachable: |
1106 | { |
1107 | if (!current_block) |
1108 | SPIRV_CROSS_THROW("Trying to end a non-existing block." ); |
1109 | current_block->terminator = SPIRBlock::Unreachable; |
1110 | current_block = nullptr; |
1111 | break; |
1112 | } |
1113 | |
1114 | case OpSelectionMerge: |
1115 | { |
1116 | if (!current_block) |
1117 | SPIRV_CROSS_THROW("Trying to modify a non-existing block." ); |
1118 | |
1119 | current_block->next_block = ops[0]; |
1120 | current_block->merge = SPIRBlock::MergeSelection; |
1121 | ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT; |
1122 | |
1123 | if (length >= 2) |
1124 | { |
1125 | if (ops[1] & SelectionControlFlattenMask) |
1126 | current_block->hint = SPIRBlock::HintFlatten; |
1127 | else if (ops[1] & SelectionControlDontFlattenMask) |
1128 | current_block->hint = SPIRBlock::HintDontFlatten; |
1129 | } |
1130 | break; |
1131 | } |
1132 | |
1133 | case OpLoopMerge: |
1134 | { |
1135 | if (!current_block) |
1136 | SPIRV_CROSS_THROW("Trying to modify a non-existing block." ); |
1137 | |
1138 | current_block->merge_block = ops[0]; |
1139 | current_block->continue_block = ops[1]; |
1140 | current_block->merge = SPIRBlock::MergeLoop; |
1141 | |
1142 | ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT; |
1143 | ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT; |
1144 | |
1145 | ir.continue_block_to_loop_header[current_block->continue_block] = BlockID(current_block->self); |
1146 | |
1147 | // Don't add loop headers to continue blocks, |
1148 | // which would make it impossible branch into the loop header since |
1149 | // they are treated as continues. |
1150 | if (current_block->continue_block != BlockID(current_block->self)) |
1151 | ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT; |
1152 | |
1153 | if (length >= 3) |
1154 | { |
1155 | if (ops[2] & LoopControlUnrollMask) |
1156 | current_block->hint = SPIRBlock::HintUnroll; |
1157 | else if (ops[2] & LoopControlDontUnrollMask) |
1158 | current_block->hint = SPIRBlock::HintDontUnroll; |
1159 | } |
1160 | break; |
1161 | } |
1162 | |
1163 | case OpSpecConstantOp: |
1164 | { |
1165 | if (length < 3) |
1166 | SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments." ); |
1167 | |
1168 | uint32_t result_type = ops[0]; |
1169 | uint32_t id = ops[1]; |
1170 | auto spec_op = static_cast<Op>(ops[2]); |
1171 | |
1172 | set<SPIRConstantOp>(id, result_type, spec_op, ops + 3, length - 3); |
1173 | break; |
1174 | } |
1175 | |
1176 | case OpLine: |
1177 | { |
1178 | // OpLine might come at global scope, but we don't care about those since they will not be declared in any |
1179 | // meaningful correct order. |
1180 | // Ignore all OpLine directives which live outside a function. |
1181 | if (current_block) |
1182 | current_block->ops.push_back(instruction); |
1183 | |
1184 | // Line directives may arrive before first OpLabel. |
1185 | // Treat this as the line of the function declaration, |
1186 | // so warnings for arguments can propagate properly. |
1187 | if (current_function) |
1188 | { |
1189 | // Store the first one we find and emit it before creating the function prototype. |
1190 | if (current_function->entry_line.file_id == 0) |
1191 | { |
1192 | current_function->entry_line.file_id = ops[0]; |
1193 | current_function->entry_line.line_literal = ops[1]; |
1194 | } |
1195 | } |
1196 | break; |
1197 | } |
1198 | |
1199 | case OpNoLine: |
1200 | { |
1201 | // OpNoLine might come at global scope. |
1202 | if (current_block) |
1203 | current_block->ops.push_back(instruction); |
1204 | break; |
1205 | } |
1206 | |
1207 | // Actual opcodes. |
1208 | default: |
1209 | { |
1210 | if (length >= 2) |
1211 | { |
1212 | const auto *type = maybe_get<SPIRType>(ops[0]); |
1213 | if (type) |
1214 | { |
1215 | ir.load_type_width.insert({ ops[1], type->width }); |
1216 | } |
1217 | } |
1218 | if (!current_block) |
1219 | SPIRV_CROSS_THROW("Currently no block to insert opcode." ); |
1220 | |
1221 | current_block->ops.push_back(instruction); |
1222 | break; |
1223 | } |
1224 | } |
1225 | } |
1226 | |
1227 | bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const |
1228 | { |
1229 | if (a.basetype != b.basetype) |
1230 | return false; |
1231 | if (a.width != b.width) |
1232 | return false; |
1233 | if (a.vecsize != b.vecsize) |
1234 | return false; |
1235 | if (a.columns != b.columns) |
1236 | return false; |
1237 | if (a.array.size() != b.array.size()) |
1238 | return false; |
1239 | |
1240 | size_t array_count = a.array.size(); |
1241 | if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0) |
1242 | return false; |
1243 | |
1244 | if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage) |
1245 | { |
1246 | if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0) |
1247 | return false; |
1248 | } |
1249 | |
1250 | if (a.member_types.size() != b.member_types.size()) |
1251 | return false; |
1252 | |
1253 | size_t member_types = a.member_types.size(); |
1254 | for (size_t i = 0; i < member_types; i++) |
1255 | { |
1256 | if (!types_are_logically_equivalent(get<SPIRType>(a.member_types[i]), get<SPIRType>(b.member_types[i]))) |
1257 | return false; |
1258 | } |
1259 | |
1260 | return true; |
1261 | } |
1262 | |
1263 | bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const |
1264 | { |
1265 | auto &type = get<SPIRType>(v.basetype); |
1266 | |
1267 | auto *type_meta = ir.find_meta(type.self); |
1268 | |
1269 | bool ssbo = v.storage == StorageClassStorageBuffer || |
1270 | (type_meta && type_meta->decoration.decoration_flags.get(DecorationBufferBlock)); |
1271 | bool image = type.basetype == SPIRType::Image; |
1272 | bool counter = type.basetype == SPIRType::AtomicCounter; |
1273 | |
1274 | bool is_restrict; |
1275 | if (ssbo) |
1276 | is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict); |
1277 | else |
1278 | is_restrict = ir.has_decoration(v.self, DecorationRestrict); |
1279 | |
1280 | return !is_restrict && (ssbo || image || counter); |
1281 | } |
1282 | } // namespace SPIRV_CROSS_NAMESPACE |
1283 | |