// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef GOOGLE_PROTOBUF_PARSE_CONTEXT_H__
#define GOOGLE_PROTOBUF_PARSE_CONTEXT_H__

#include <cstdint>
#include <cstring>
#include <string>

#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream.h>
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/implicit_weak_message.h>
#include <google/protobuf/metadata_lite.h>
#include <google/protobuf/port.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/wire_format_lite.h>
#include <google/protobuf/stubs/strutil.h>

#include <google/protobuf/port_def.inc>


namespace google {
namespace protobuf {

class UnknownFieldSet;
class DescriptorPool;
class MessageFactory;

namespace internal {

// Template code below needs to know about the existence of these functions.
PROTOBUF_EXPORT void WriteVarint(uint32 num, uint64 val, std::string* s);
PROTOBUF_EXPORT void WriteLengthDelimited(uint32 num, StringPiece val,
                                          std::string* s);
// Inline because it is just forwarding to s->WriteVarint.
inline void WriteVarint(uint32 num, uint64 val, UnknownFieldSet* s);
inline void WriteLengthDelimited(uint32 num, StringPiece val,
                                 UnknownFieldSet* s);


// The basic abstraction the parser is designed for is a slight modification
// of the ZeroCopyInputStream (ZCIS) abstraction. A ZCIS presents a serialized
// stream as a series of buffers that concatenate to the full stream.
// Pictorially a ZCIS presents a stream in chunks like so
// [---------------------------------------------------------------]
// [---------------------] chunk 1
//                      [----------------------------] chunk 2
//                                          chunk 3 [--------------]
//
// Here the '-' represent the bytes which are vertically lined up with the
// bytes of the stream. The proto parser requires its input to be presented
// similarly, with the extra property that each chunk has kSlopBytes past its
// end that overlap with the first kSlopBytes of the next chunk, or if there
// is no next chunk, that it is at least still valid to read those bytes.
// Again, pictorially, we now have
//
// [---------------------------------------------------------------]
// [-------------------....] chunk 1
//                  [------------------------....] chunk 2
//                                    chunk 3 [------------------..**]
//                                                      chunk 4 [--****]
// Here '-' means the bytes of the stream or chunk and '.' means bytes past the
// chunk that match up with the start of the next chunk. Above, each chunk has
// 4 '.' after the chunk. In case these 'overflow' bytes represent bytes past
// the stream, indicated by '*' above, their values are unspecified. It is
// still legal to read them (i.e. it should not segfault). Reading past the
// end should be detected by the user and indicated as an error.
//
// The reason for this admittedly unconventional invariant is to ruthlessly
// optimize the protobuf parser. Having an overlap helps in two important ways.
// Firstly it alleviates having to perform bounds checks if a piece of code is
// guaranteed to not read more than kSlopBytes. Secondly, and more importantly,
// the protobuf wireformat is such that reading a key/value pair always takes
// less than 16 bytes. This removes the need to switch to the next buffer in
// the middle of reading primitive values. Hence there is no need to store and
// load the current position.
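//
// As a concrete illustration (the numbers are only an example): parsing a
// flat 100-byte array with kSlopBytes = 16 presents the data roughly as an
// 84-byte first chunk followed by the final 16 bytes copied into an internal
// patch buffer whose remaining 16 zero-initialized bytes serve as the slop
// region, so a read of at most kSlopBytes past the current position never
// leaves valid memory (see InitFrom(StringPiece) below).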

class PROTOBUF_EXPORT EpsCopyInputStream {
 public:
  enum { kSlopBytes = 16, kMaxCordBytesToCopy = 512 };

  explicit EpsCopyInputStream(bool enable_aliasing)
      : aliasing_(enable_aliasing ? kOnPatch : kNoAliasing) {}

  void BackUp(const char* ptr) {
    GOOGLE_DCHECK(ptr <= buffer_end_ + kSlopBytes);
    int count;
    if (next_chunk_ == buffer_) {
      count = static_cast<int>(buffer_end_ + kSlopBytes - ptr);
    } else {
      count = size_ + static_cast<int>(buffer_end_ - ptr);
    }
    if (count > 0) StreamBackUp(count);
  }

  // If the return value is negative it's an error.
  PROTOBUF_MUST_USE_RESULT int PushLimit(const char* ptr, int limit) {
    GOOGLE_DCHECK(limit >= 0 && limit <= INT_MAX - kSlopBytes);
    // This add is safe due to the invariant above, because
    // ptr - buffer_end_ <= kSlopBytes.
    limit += static_cast<int>(ptr - buffer_end_);
    limit_end_ = buffer_end_ + (std::min)(0, limit);
    auto old_limit = limit_;
    limit_ = limit;
    return old_limit - limit;
  }

  PROTOBUF_MUST_USE_RESULT bool PopLimit(int delta) {
    if (PROTOBUF_PREDICT_FALSE(!EndedAtLimit())) return false;
    limit_ = limit_ + delta;
    // TODO(gerbens) We could remove this line and hoist the code to
    // DoneFallback. Study the perf/bin-size effects.
    limit_end_ = buffer_end_ + (std::min)(0, limit_);
    return true;
  }
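
  // A hedged usage sketch of the limit API (it mirrors ParseMessage and
  // ReadPackedVarint further below): a length-delimited region of `size`
  // bytes starting at `ptr` is parsed by pushing a limit, parsing until the
  // Done...() checks report the limit was hit, and then restoring the
  // enclosing limit:
  //
  //   int old = PushLimit(ptr, size);
  //   if (old < 0) return nullptr;             // corrupt/oversized length
  //   while (!DoneWithCheck(&ptr, -1)) { /* parse */ }
  //   if (!PopLimit(old)) return nullptr;      // didn't end exactly on limit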

  PROTOBUF_MUST_USE_RESULT const char* Skip(const char* ptr, int size) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      return ptr + size;
    }
    return SkipFallback(ptr, size);
  }
  PROTOBUF_MUST_USE_RESULT const char* ReadString(const char* ptr, int size,
                                                  std::string* s) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      s->assign(ptr, size);
      return ptr + size;
    }
    return ReadStringFallback(ptr, size, s);
  }
  PROTOBUF_MUST_USE_RESULT const char* AppendString(const char* ptr, int size,
                                                    std::string* s) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      s->append(ptr, size);
      return ptr + size;
    }
    return AppendStringFallback(ptr, size, s);
  }

  template <typename Tag, typename T>
  PROTOBUF_MUST_USE_RESULT const char* ReadRepeatedFixed(
      const char* ptr, Tag expected_tag, RepeatedField<T>* out);

  template <typename T>
  PROTOBUF_MUST_USE_RESULT const char* ReadPackedFixed(const char* ptr,
                                                       int size,
                                                       RepeatedField<T>* out);
  template <typename Add>
  PROTOBUF_MUST_USE_RESULT const char* ReadPackedVarint(const char* ptr,
                                                        Add add);

  uint32 LastTag() const { return last_tag_minus_1_ + 1; }
  bool ConsumeEndGroup(uint32 start_tag) {
    bool res = last_tag_minus_1_ == start_tag;
    last_tag_minus_1_ = 0;
    return res;
  }
  bool EndedAtLimit() const { return last_tag_minus_1_ == 0; }
  bool EndedAtEndOfStream() const { return last_tag_minus_1_ == 1; }
  void SetLastTag(uint32 tag) { last_tag_minus_1_ = tag - 1; }
  void SetEndOfStream() { last_tag_minus_1_ = 1; }
  bool IsExceedingLimit(const char* ptr) {
    return ptr > limit_end_ &&
           (next_chunk_ == nullptr || ptr - buffer_end_ > limit_);
  }
  int BytesUntilLimit(const char* ptr) const {
    return limit_ + static_cast<int>(buffer_end_ - ptr);
  }
  // Returns true if more data is available; if it returns false one has to
  // call Done for further checks.
  bool DataAvailable(const char* ptr) { return ptr < limit_end_; }

 protected:
  // Returns true if a limit (either an explicit limit or the end of stream) is
  // reached. It aligns *ptr across buffer seams.
  // If the limit is exceeded it returns true and ptr is set to null.
  bool DoneWithCheck(const char** ptr, int d) {
    GOOGLE_DCHECK(*ptr);
    if (PROTOBUF_PREDICT_TRUE(*ptr < limit_end_)) return false;
    // No need to fetch buffer if we ended on a limit in the slop region.
    if ((*ptr - buffer_end_) == limit_) return true;
    auto res = DoneFallback(*ptr, d);
    *ptr = res.first;
    return res.second;
  }

  const char* InitFrom(StringPiece flat) {
    overall_limit_ = 0;
    if (flat.size() > kSlopBytes) {
      limit_ = kSlopBytes;
      limit_end_ = buffer_end_ = flat.data() + flat.size() - kSlopBytes;
      next_chunk_ = buffer_;
      if (aliasing_ == kOnPatch) aliasing_ = kNoDelta;
      return flat.data();
    } else {
      std::memcpy(buffer_, flat.data(), flat.size());
      limit_ = 0;
      limit_end_ = buffer_end_ = buffer_ + flat.size();
      next_chunk_ = nullptr;
      if (aliasing_ == kOnPatch) {
        aliasing_ = reinterpret_cast<std::uintptr_t>(flat.data()) -
                    reinterpret_cast<std::uintptr_t>(buffer_);
      }
      return buffer_;
    }
  }

  const char* InitFrom(io::ZeroCopyInputStream* zcis);

  const char* InitFrom(io::ZeroCopyInputStream* zcis, int limit) {
    overall_limit_ = limit;
    auto res = InitFrom(zcis);
    limit_ = limit - static_cast<int>(buffer_end_ - res);
    limit_end_ = buffer_end_ + (std::min)(0, limit_);
    return res;
  }

 private:
  const char* limit_end_;  // buffer_end_ + min(limit_, 0)
  const char* buffer_end_;
  const char* next_chunk_;
  int size_;
  int limit_;  // relative to buffer_end_;
  io::ZeroCopyInputStream* zcis_ = nullptr;
  char buffer_[2 * kSlopBytes] = {};
  enum { kNoAliasing = 0, kOnPatch = 1, kNoDelta = 2 };
  std::uintptr_t aliasing_ = kNoAliasing;
  // This variable is used to communicate how the parse ended, in order to
  // completely verify the parsed data. A wire-format parse can end because of
  // one of the following conditions:
  // 1) A parse can end on a pushed limit.
  // 2) A parse can end on End Of Stream (EOS).
  // 3) A parse can end on a 0 tag (only valid for a toplevel message).
  // 4) A parse can end on an end-group tag.
  // The default value of 0 indicates case 1. If the parse terminated due to
  // EOS (case 2), it's set to 1. In case the parse ended due to a terminating
  // tag (cases 3 and 4) it's set to (tag - 1).
  // This var doesn't really belong in EpsCopyInputStream and should be part of
  // the ParseContext, but case 2 is most easily and optimally implemented in
  // DoneFallback.
  uint32 last_tag_minus_1_ = 0;
  int overall_limit_ = INT_MAX;  // Overall limit independent of pushed limits.
  // Pretty random large number that seems like a safe allocation on most
  // systems. TODO(gerbens) do we need to set this as build flag?
  enum { kSafeStringSize = 50000000 };

  std::pair<const char*, bool> DoneFallback(const char* ptr, int d);
  const char* Next(int overrun, int d);
  const char* SkipFallback(const char* ptr, int size);
  const char* AppendStringFallback(const char* ptr, int size,
                                   std::string* str);
  const char* ReadStringFallback(const char* ptr, int size, std::string* str);
  bool StreamNext(const void** data) {
    bool res = zcis_->Next(data, &size_);
    if (res) overall_limit_ -= size_;
    return res;
  }
  void StreamBackUp(int count) {
    zcis_->BackUp(count);
    overall_limit_ += count;
  }

  template <typename A>
  const char* AppendSize(const char* ptr, int size, const A& append) {
    int chunk_size = buffer_end_ + kSlopBytes - ptr;
    do {
      GOOGLE_DCHECK(size > chunk_size);
      append(ptr, chunk_size);
      ptr += chunk_size;
      size -= chunk_size;
      // DoneFallback asserts it isn't called when exactly on the limit. If
      // this happens we fail the parse, as we are at the limit and still have
      // more bytes to read.
      if (limit_ == kSlopBytes) return nullptr;
      auto res = DoneFallback(ptr, -1);
      if (res.second) return nullptr;  // If done we passed the limit.
      ptr = res.first;
      chunk_size = buffer_end_ + kSlopBytes - ptr;
    } while (size > chunk_size);
    append(ptr, size);
    return ptr + size;
  }

  // AppendUntilEnd appends data until a limit (either a PushLimit or the end
  // of the stream). Normal payloads come from length-delimited fields which
  // have an explicit size. Reading until a limit only comes up when the string
  // takes the place of a protobuf, i.e. RawMessage/StringRawMessage, lazy
  // fields and implicit weak messages. We keep these methods private and
  // friend them.
  template <typename A>
  const char* AppendUntilEnd(const char* ptr, const A& append) {
    while (!DoneWithCheck(&ptr, -1)) {
      append(ptr, limit_end_ - ptr);
      ptr = limit_end_;
    }
    return ptr;
  }

  PROTOBUF_MUST_USE_RESULT const char* AppendString(const char* ptr,
                                                    std::string* str) {
    return AppendUntilEnd(
        ptr, [str](const char* p, ptrdiff_t s) { str->append(p, s); });
  }
  friend class ImplicitWeakMessage;
};

// ParseContext holds all data that is global to the entire parse. Most
// importantly it contains the input stream, but it also tracks the recursion
// depth and stores the end-group tag, in case a parser ended on an end-group
// tag, to verify matching start/end-group tags.
class PROTOBUF_EXPORT ParseContext : public EpsCopyInputStream {
 public:
  struct Data {
    const DescriptorPool* pool = nullptr;
    MessageFactory* factory = nullptr;
  };

  template <typename... T>
  ParseContext(int depth, bool aliasing, const char** start, T&&... args)
      : EpsCopyInputStream(aliasing), depth_(depth) {
    *start = InitFrom(std::forward<T>(args)...);
  }

  void TrackCorrectEnding() { group_depth_ = 0; }

  bool Done(const char** ptr) { return DoneWithCheck(ptr, group_depth_); }
  bool DoneNoSlopCheck(const char** ptr) { return DoneWithCheck(ptr, -1); }

  int depth() const { return depth_; }

  Data& data() { return data_; }
  const Data& data() const { return data_; }

  template <typename T>
  PROTOBUF_MUST_USE_RESULT const char* ParseMessage(T* msg, const char* ptr);
  // We outline when the type is generic and we go through a virtual call.
  const char* ParseMessage(MessageLite* msg, const char* ptr);
  const char* ParseMessage(Message* msg, const char* ptr);

  template <typename T>
  PROTOBUF_MUST_USE_RESULT PROTOBUF_ALWAYS_INLINE const char* ParseGroup(
      T* msg, const char* ptr, uint32 tag) {
    if (--depth_ < 0) return nullptr;
    group_depth_++;
    ptr = msg->_InternalParse(ptr, this);
    group_depth_--;
    depth_++;
    if (PROTOBUF_PREDICT_FALSE(!ConsumeEndGroup(tag))) return nullptr;
    return ptr;
  }

 private:
  // The context keeps an internal stack to keep track of the recursive
  // part of the parse state.
  // Current depth of the active parser, depth counts down.
  // This is used to limit recursion depth (to prevent overflow on malicious
  // data), but is also used to index into stack_ to store the current state.
  int depth_;
  // Unfortunately necessary for the fringe case of ending on a 0 or end-group
  // tag in the last kSlopBytes of a ZeroCopyInputStream chunk.
  int group_depth_ = INT_MIN;
  Data data_;
};
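
// A hedged usage sketch (illustrative only; the real entry points are the
// MergeFrom/ParseFrom helpers of the runtime, and the exact end-of-parse
// checks vary by caller). Parsing a message from a flat buffer with an
// assumed recursion budget of 100 could look roughly like:
//
//   const char* ptr = nullptr;
//   internal::ParseContext ctx(/*depth=*/100, /*aliasing=*/false, &ptr,
//                              StringPiece(data, size));
//   ptr = msg->_InternalParse(ptr, &ctx);
//   bool ok = ptr != nullptr && ctx.EndedAtLimit();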

template <uint32 tag>
bool ExpectTag(const char* ptr) {
  if (tag < 128) {
    return *ptr == tag;
  } else {
    static_assert(tag < 128 * 128, "We only expect tags for 1 or 2 bytes");
    char buf[2] = {static_cast<char>(tag | 0x80), static_cast<char>(tag >> 7)};
    return std::memcmp(ptr, buf, 2) == 0;
  }
}
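
// For example: field number 1 with wire type 0 (varint) has tag
// (1 << 3) | 0 = 0x08, a single byte, so ExpectTag<8>(ptr) is a one-byte
// compare. Field number 20 with wire type 2 (length-delimited) has tag
// (20 << 3) | 2 = 162, which encodes as the two varint bytes {0xa2, 0x01} and
// is matched by the memcmp branch above.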

template <int>
struct EndianHelper;

template <>
struct EndianHelper<1> {
  static uint8 Load(const void* p) { return *static_cast<const uint8*>(p); }
};

template <>
struct EndianHelper<2> {
  static uint16 Load(const void* p) {
    uint16 tmp;
    std::memcpy(&tmp, p, 2);
#ifndef PROTOBUF_LITTLE_ENDIAN
    tmp = bswap_16(tmp);
#endif
    return tmp;
  }
};

template <>
struct EndianHelper<4> {
  static uint32 Load(const void* p) {
    uint32 tmp;
    std::memcpy(&tmp, p, 4);
#ifndef PROTOBUF_LITTLE_ENDIAN
    tmp = bswap_32(tmp);
#endif
    return tmp;
  }
};

template <>
struct EndianHelper<8> {
  static uint64 Load(const void* p) {
    uint64 tmp;
    std::memcpy(&tmp, p, 8);
#ifndef PROTOBUF_LITTLE_ENDIAN
    tmp = bswap_64(tmp);
#endif
    return tmp;
  }
};

template <typename T>
T UnalignedLoad(const char* p) {
  auto tmp = EndianHelper<sizeof(T)>::Load(p);
  T res;
  memcpy(&res, &tmp, sizeof(T));
  return res;
}

PROTOBUF_EXPORT
std::pair<const char*, uint32> VarintParseSlow32(const char* p, uint32 res);
PROTOBUF_EXPORT
std::pair<const char*, uint64> VarintParseSlow64(const char* p, uint32 res);

inline const char* VarintParseSlow(const char* p, uint32 res, uint32* out) {
  auto tmp = VarintParseSlow32(p, res);
  *out = tmp.second;
  return tmp.first;
}

inline const char* VarintParseSlow(const char* p, uint32 res, uint64* out) {
  auto tmp = VarintParseSlow64(p, res);
  *out = tmp.second;
  return tmp.first;
}

template <typename T>
PROTOBUF_MUST_USE_RESULT const char* VarintParse(const char* p, T* out) {
  auto ptr = reinterpret_cast<const uint8*>(p);
  uint32 res = ptr[0];
  if (!(res & 0x80)) {
    *out = res;
    return p + 1;
  }
  uint32 byte = ptr[1];
  res += (byte - 1) << 7;
  if (!(byte & 0x80)) {
    *out = res;
    return p + 2;
  }
  return VarintParseSlow(p, res, out);
}
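
// A worked example: the varint encoding of 150 is {0x96, 0x01}. VarintParse
// reads res = 0x96 (continuation bit set), then byte = 0x01 and computes
// res += (byte - 1) << 7, i.e. 150 + 0 = 150. Subtracting 1 before the shift
// cancels the continuation bit 0x80 still present in res, so no masking is
// needed on this fast path.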

// Used for tags; could read up to 5 bytes which must be available.
// Caller must ensure it's safe to call.

PROTOBUF_EXPORT
std::pair<const char*, uint32> ReadTagFallback(const char* p, uint32 res);

// Same as VarintParse but only accepts 5 bytes at most.
inline const char* ReadTag(const char* p, uint32* out, uint32 /*max_tag*/ = 0) {
  uint32 res = static_cast<uint8>(p[0]);
  if (res < 128) {
    *out = res;
    return p + 1;
  }
  uint32 second = static_cast<uint8>(p[1]);
  res += (second - 1) << 7;
  if (second < 128) {
    *out = res;
    return p + 2;
  }
  auto tmp = ReadTagFallback(p, res);
  *out = tmp.second;
  return tmp.first;
}

// Decodes 2 consecutive bytes of a varint and returns the value, shifted left
// by 1. It simultaneously updates *ptr to *ptr + 1 or *ptr + 2, depending on
// whether the first byte's continuation bit is set.
// If bit 15 of the return value is set (equivalent to the continuation bits of
// both bytes being set) the varint continues, otherwise the parse is done. On
// x86 this compiles to:
//   movsx eax, dil
//   add edi, eax
//   adc [rsi], 1
//   add eax, eax
//   and eax, edi
inline uint32 DecodeTwoBytes(const char** ptr) {
  uint32 value = UnalignedLoad<uint16>(*ptr);
  // Sign extend the low byte continuation bit.
  uint32_t x = static_cast<int8_t>(value);
  // This add is an amazing operation, it cancels the low byte continuation bit
  // of value, transferring it to the carry. Simultaneously it also shifts the
  // 7 LSB left by one, tightly against the high byte varint bits. Hence value
  // now contains the unpacked value shifted left by 1.
  value += x;
  // Use the carry to update the ptr appropriately.
  *ptr += value < x ? 2 : 1;
  return value & (x + x);  // Mask out the high byte iff no continuation
}
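
// A worked example: with *ptr pointing at {0x96, 0x01} (the varint for 150),
// value = 0x0196 and x = 0xFFFFFF96 (the sign-extended low byte). value += x
// wraps to 0x012C and produces a carry, so *ptr advances by 2. The mask
// x + x = 0xFFFFFF2C keeps the second byte (the first byte had its
// continuation bit set), so the return value is 0x012C = 150 << 1, and since
// bit 15 is clear the varint is complete.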

// More efficient varint parsing for big varints.
inline const char* ParseBigVarint(const char* p, uint64* out) {
  auto pnew = p;
  auto tmp = DecodeTwoBytes(&pnew);
  uint64 res = tmp >> 1;
  if (PROTOBUF_PREDICT_TRUE(std::int16_t(tmp) >= 0)) {
    *out = res;
    return pnew;
  }
  for (std::uint32_t i = 1; i < 5; i++) {
    pnew = p + 2 * i;
    tmp = DecodeTwoBytes(&pnew);
    res += (static_cast<std::uint64_t>(tmp) - 2) << (14 * i - 1);
    if (PROTOBUF_PREDICT_TRUE(std::int16_t(tmp) >= 0)) {
      *out = res;
      return pnew;
    }
  }
  return nullptr;
}

PROTOBUF_EXPORT
std::pair<const char*, int32> ReadSizeFallback(const char* p, uint32 first);
// Used for sizes; could read up to 5 bytes which must be available.
// Additionally it makes sure the unsigned value fits in an int32, otherwise
// *pp is set to nullptr. Caller must ensure it's safe to call.
inline uint32 ReadSize(const char** pp) {
  auto p = *pp;
  uint32 res = static_cast<uint8>(p[0]);
  if (res < 128) {
    *pp = p + 1;
    return res;
  }
  auto x = ReadSizeFallback(p, res);
  *pp = x.first;
  return x.second;
}

// Some convenience functions to simplify the generated parse loop code.
// Returning the value and updating the buffer pointer allows for nicer
// function composition. We rely on the compiler to inline this.
// Also, in debug compiles having locally scoped variables tends to generate
// stack frames that scale as O(num fields).
inline uint64 ReadVarint(const char** p) {
  uint64 tmp;
  *p = VarintParse(*p, &tmp);
  return tmp;
}

inline int64 ReadVarintZigZag64(const char** p) {
  uint64 tmp;
  *p = VarintParse(*p, &tmp);
  return WireFormatLite::ZigZagDecode64(tmp);
}

inline int32 ReadVarintZigZag32(const char** p) {
  uint64 tmp;
  *p = VarintParse(*p, &tmp);
  return WireFormatLite::ZigZagDecode32(static_cast<uint32>(tmp));
}
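
// For reference, the ZigZag mapping decoded above (used by sint32/sint64
// fields) is 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2, ..., i.e.
// ZigZagDecode(n) == (n >> 1) ^ -static_cast<int64>(n & 1).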

template <typename T>
PROTOBUF_MUST_USE_RESULT const char* ParseContext::ParseMessage(
    T* msg, const char* ptr) {
  int size = ReadSize(&ptr);
  if (!ptr) return nullptr;
  auto old = PushLimit(ptr, size);
  if (--depth_ < 0) return nullptr;
  ptr = msg->_InternalParse(ptr, this);
  if (PROTOBUF_PREDICT_FALSE(ptr == nullptr)) return nullptr;
  depth_++;
  if (!PopLimit(old)) return nullptr;
  return ptr;
}

template <typename Add>
const char* EpsCopyInputStream::ReadPackedVarint(const char* ptr, Add add) {
  int size = ReadSize(&ptr);
  if (ptr == nullptr) return nullptr;
  auto old = PushLimit(ptr, size);
  if (old < 0) return nullptr;
  while (!DoneWithCheck(&ptr, -1)) {
    uint64 varint;
    ptr = VarintParse(ptr, &varint);
    if (!ptr) return nullptr;
    add(varint);
  }
  if (!PopLimit(old)) return nullptr;
  return ptr;
}

// Helpers for verification of UTF-8.
PROTOBUF_EXPORT
bool VerifyUTF8(StringPiece s, const char* field_name);

inline bool VerifyUTF8(const std::string* s, const char* field_name) {
  return VerifyUTF8(*s, field_name);
}

// All the string parsers with or without UTF-8 checking and for all CTypes.
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* InlineGreedyStringParser(
    std::string* s, const char* ptr, ParseContext* ctx);


// Enable (uncomment) either of the lines inside the macro below to debug
// which parse function is failing.

#define GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, ret) \
  if (!(predicate)) {                                 \
    /*  ::raise(SIGINT);  */                          \
    /*  GOOGLE_LOG(ERROR) << "Parse failure";  */     \
    return ret;                                       \
  }

#define GOOGLE_PROTOBUF_PARSER_ASSERT(predicate) \
  GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, nullptr)

template <typename T>
PROTOBUF_MUST_USE_RESULT const char* FieldParser(uint64 tag, T& field_parser,
                                                 const char* ptr,
                                                 ParseContext* ctx) {
  uint32 number = tag >> 3;
  GOOGLE_PROTOBUF_PARSER_ASSERT(number != 0);
  using WireType = internal::WireFormatLite::WireType;
  switch (tag & 7) {
    case WireType::WIRETYPE_VARINT: {
      uint64 value;
      ptr = VarintParse(ptr, &value);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      field_parser.AddVarint(number, value);
      break;
    }
    case WireType::WIRETYPE_FIXED64: {
      uint64 value = UnalignedLoad<uint64>(ptr);
      ptr += 8;
      field_parser.AddFixed64(number, value);
      break;
    }
    case WireType::WIRETYPE_LENGTH_DELIMITED: {
      ptr = field_parser.ParseLengthDelimited(number, ptr, ctx);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      break;
    }
    case WireType::WIRETYPE_START_GROUP: {
      ptr = field_parser.ParseGroup(number, ptr, ctx);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      break;
    }
    case WireType::WIRETYPE_END_GROUP: {
      GOOGLE_LOG(FATAL) << "Can't happen";
      break;
    }
    case WireType::WIRETYPE_FIXED32: {
      uint32 value = UnalignedLoad<uint32>(ptr);
      ptr += 4;
      field_parser.AddFixed32(number, value);
      break;
    }
    default:
      return nullptr;
  }
  return ptr;
}

template <typename T>
PROTOBUF_MUST_USE_RESULT const char* WireFormatParser(T& field_parser,
                                                      const char* ptr,
                                                      ParseContext* ctx) {
  while (!ctx->Done(&ptr)) {
    uint32 tag;
    ptr = ReadTag(ptr, &tag);
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
    if (tag == 0 || (tag & 7) == 4) {
      ctx->SetLastTag(tag);
      return ptr;
    }
    ptr = FieldParser(tag, field_parser, ptr, ctx);
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
  }
  return ptr;
}
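
// A hedged sketch (hypothetical type, not declared in this header) of the
// interface a `field_parser` passed to FieldParser/WireFormatParser needs to
// provide, derived from the calls made above:
//
//   struct ExampleFieldSink {
//     void AddVarint(uint32 number, uint64 value);
//     void AddFixed64(uint32 number, uint64 value);
//     void AddFixed32(uint32 number, uint32 value);
//     const char* ParseLengthDelimited(uint32 number, const char* ptr,
//                                      ParseContext* ctx);
//     const char* ParseGroup(uint32 number, const char* ptr,
//                            ParseContext* ctx);
//   };
//
//   ExampleFieldSink sink;
//   ptr = WireFormatParser(sink, ptr, &ctx);  // nullptr on malformed input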

// The packed parsers parse repeated numeric primitives directly into the
// corresponding field.

// These are packed varints.
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedUInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedUInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedSInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedSInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedEnumParser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedEnumParser(
    void* object, const char* ptr, ParseContext* ctx, bool (*is_valid)(int),
    InternalMetadataWithArenaLite* metadata, int field_num);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedEnumParserArg(
    void* object, const char* ptr, ParseContext* ctx,
    bool (*is_valid)(const void*, int), const void* data,
    InternalMetadataWithArenaLite* metadata, int field_num);

PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedBoolParser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedFixed32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedSFixed32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedFixed64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedSFixed64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedFloatParser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedDoubleParser(
    void* object, const char* ptr, ParseContext* ctx);

// This is the only recursive parser.
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* UnknownGroupLiteParse(
    std::string* unknown, const char* ptr, ParseContext* ctx);
// This is a helper for UnknownGroupLiteParse, but it is actually also useful
// in the generated code. It uses overloading on std::string* vs
// UnknownFieldSet* to make the generated code isomorphic between full and
// lite.
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* UnknownFieldParse(
    uint32 tag, std::string* unknown, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* UnknownFieldParse(
    uint32 tag, InternalMetadataWithArenaLite* metadata, const char* ptr,
    ParseContext* ctx);

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#include <google/protobuf/port_undef.inc>

#endif  // GOOGLE_PROTOBUF_PARSE_CONTEXT_H__