1// Protocol Buffers - Google's data interchange format
2// Copyright 2008 Google Inc. All rights reserved.
3// https://developers.google.com/protocol-buffers/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31// Author: [email protected] (Kenton Varda)
32// Based on original Protocol Buffers design by
33// Sanjay Ghemawat, Jeff Dean, and others.
34//
35// This file contains the CodedInputStream and CodedOutputStream classes,
36// which wrap a ZeroCopyInputStream or ZeroCopyOutputStream, respectively,
37// and allow you to read or write individual pieces of data in various
38// formats. In particular, these implement the varint encoding for
39// integers, a simple variable-length encoding in which smaller numbers
40// take fewer bytes.
41//
42// Typically these classes will only be used internally by the protocol
43// buffer library in order to encode and decode protocol buffers. Clients
44// of the library only need to know about this class if they wish to write
45// custom message parsing or serialization procedures.
46//
47// CodedOutputStream example:
48// // Write some data to "myfile". First we write a 4-byte "magic number"
49// // to identify the file type, then write a length-delimited string. The
50// // string is composed of a varint giving the length followed by the raw
51// // bytes.
//   int fd = open("myfile", O_CREAT | O_WRONLY, 0644);
53// ZeroCopyOutputStream* raw_output = new FileOutputStream(fd);
54// CodedOutputStream* coded_output = new CodedOutputStream(raw_output);
55//
56// int magic_number = 1234;
57// char text[] = "Hello world!";
58// coded_output->WriteLittleEndian32(magic_number);
59// coded_output->WriteVarint32(strlen(text));
60// coded_output->WriteRaw(text, strlen(text));
61//
62// delete coded_output;
63// delete raw_output;
64// close(fd);
65//
66// CodedInputStream example:
67// // Read a file created by the above code.
68// int fd = open("myfile", O_RDONLY);
69// ZeroCopyInputStream* raw_input = new FileInputStream(fd);
70// CodedInputStream* coded_input = new CodedInputStream(raw_input);
71//
72// coded_input->ReadLittleEndian32(&magic_number);
73// if (magic_number != 1234) {
74// cerr << "File not in expected format." << endl;
75// return;
76// }
77//
78// uint32 size;
79// coded_input->ReadVarint32(&size);
80//
81// char* text = new char[size + 1];
//   coded_input->ReadRaw(text, size);
83// text[size] = '\0';
84//
85// delete coded_input;
86// delete raw_input;
87// close(fd);
88//
89// cout << "Text is: " << text << endl;
90// delete [] text;
91//
92// For those who are interested, varint encoding is defined as follows:
93//
94// The encoding operates on unsigned integers of up to 64 bits in length.
95// Each byte of the encoded value has the format:
96// * bits 0-6: Seven bits of the number being encoded.
97// * bit 7: Zero if this is the last byte in the encoding (in which
98// case all remaining bits of the number are zero) or 1 if
99// more bytes follow.
100// The first byte contains the least-significant 7 bits of the number, the
101// second byte (if present) contains the next-least-significant 7 bits,
102// and so on. So, the binary number 1011000101011 would be encoded in two
103// bytes as "10101011 00101100".
104//
105// In theory, varint could be used to encode integers of any length.
106// However, for practicality we set a limit at 64 bits. The maximum encoded
107// length of a number is thus 10 bytes.
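//
// As a worked instance of the rule above (added here for illustration): the
// value 300 is 100101100 in binary. Its low seven bits are 0101100 and the
// remaining bits are 10, so it encodes as the two bytes "10101100 00000010"
// (0xAC 0x02).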
108
109#ifndef GOOGLE_PROTOBUF_IO_CODED_STREAM_H__
110#define GOOGLE_PROTOBUF_IO_CODED_STREAM_H__
111
112
113#include <assert.h>
114
115#include <atomic>
116#include <climits>
117#include <cstddef>
118#include <cstring>
119#include <string>
120#include <type_traits>
121#include <utility>
122
123#ifdef _MSC_VER
// Assuming Windows is always little-endian.
125#if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
126#define PROTOBUF_LITTLE_ENDIAN 1
127#endif
128#if _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
129// If MSVC has "/RTCc" set, it will complain about truncating casts at
130// runtime. This file contains some intentional truncating casts.
131#pragma runtime_checks("c", off)
132#endif
133#else
134#include <sys/param.h> // __BYTE_ORDER
135#if ((defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) || \
136 (defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN)) && \
137 !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
138#define PROTOBUF_LITTLE_ENDIAN 1
139#endif
140#endif
141#include <google/protobuf/stubs/common.h>
142#include <google/protobuf/stubs/logging.h>
143#include <google/protobuf/stubs/strutil.h>
144#include <google/protobuf/port.h>
145#include <google/protobuf/stubs/port.h>
146
147
148#include <google/protobuf/port_def.inc>
149
150namespace google {
151namespace protobuf {
152
153class DescriptorPool;
154class MessageFactory;
155class ZeroCopyCodedInputStream;
156
157namespace internal {
158void MapTestForceDeterministic();
159class EpsCopyByteStream;
160} // namespace internal
161
162namespace io {
163
164// Defined in this file.
165class CodedInputStream;
166class CodedOutputStream;
167
168// Defined in other files.
169class ZeroCopyInputStream; // zero_copy_stream.h
170class ZeroCopyOutputStream; // zero_copy_stream.h
171
172// Class which reads and decodes binary data which is composed of varint-
173// encoded integers and fixed-width pieces. Wraps a ZeroCopyInputStream.
174// Most users will not need to deal with CodedInputStream.
175//
176// Most methods of CodedInputStream that return a bool return false if an
177// underlying I/O error occurs or if the data is malformed. Once such a
178// failure occurs, the CodedInputStream is broken and is no longer useful.
// After a failure, callers should also assume writes to "out" args may have
180// occurred, though nothing useful can be determined from those writes.
181class PROTOBUF_EXPORT CodedInputStream {
182 public:
183 // Create a CodedInputStream that reads from the given ZeroCopyInputStream.
184 explicit CodedInputStream(ZeroCopyInputStream* input);
185
186 // Create a CodedInputStream that reads from the given flat array. This is
187 // faster than using an ArrayInputStream. PushLimit(size) is implied by
188 // this constructor.
189 explicit CodedInputStream(const uint8* buffer, int size);
190
191 // Destroy the CodedInputStream and position the underlying
192 // ZeroCopyInputStream at the first unread byte. If an error occurred while
193 // reading (causing a method to return false), then the exact position of
194 // the input stream may be anywhere between the last value that was read
195 // successfully and the stream's byte limit.
196 ~CodedInputStream();
197
198 // Return true if this CodedInputStream reads from a flat array instead of
199 // a ZeroCopyInputStream.
200 inline bool IsFlat() const;
201
202 // Skips a number of bytes. Returns false if an underlying read error
203 // occurs.
204 inline bool Skip(int count);
205
206 // Sets *data to point directly at the unread part of the CodedInputStream's
207 // underlying buffer, and *size to the size of that buffer, but does not
208 // advance the stream's current position. This will always either produce
209 // a non-empty buffer or return false. If the caller consumes any of
210 // this data, it should then call Skip() to skip over the consumed bytes.
211 // This may be useful for implementing external fast parsing routines for
212 // types of data not covered by the CodedInputStream interface.
213 bool GetDirectBufferPointer(const void** data, int* size);
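  //
  // A minimal usage sketch (illustrative only, not from the original header;
  // "ConsumeBytes" is a hypothetical external routine):
  //   const void* data;
  //   int size;
  //   while (coded_input->GetDirectBufferPointer(&data, &size)) {
  //     ConsumeBytes(data, size);
  //     coded_input->Skip(size);  // advance past the bytes we consumed
  //   }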
214
215 // Like GetDirectBufferPointer, but this method is inlined, and does not
216 // attempt to Refresh() if the buffer is currently empty.
217 PROTOBUF_ALWAYS_INLINE
218 void GetDirectBufferPointerInline(const void** data, int* size);
219
220 // Read raw bytes, copying them into the given buffer.
221 bool ReadRaw(void* buffer, int size);
222
223 // Like ReadRaw, but reads into a string.
224 bool ReadString(std::string* buffer, int size);
225
226
227 // Read a 32-bit little-endian integer.
228 bool ReadLittleEndian32(uint32* value);
229 // Read a 64-bit little-endian integer.
230 bool ReadLittleEndian64(uint64* value);
231
232 // These methods read from an externally provided buffer. The caller is
233 // responsible for ensuring that the buffer has sufficient space.
234 // Read a 32-bit little-endian integer.
235 static const uint8* ReadLittleEndian32FromArray(const uint8* buffer,
236 uint32* value);
237 // Read a 64-bit little-endian integer.
238 static const uint8* ReadLittleEndian64FromArray(const uint8* buffer,
239 uint64* value);
240
241 // Read an unsigned integer with Varint encoding, truncating to 32 bits.
242 // Reading a 32-bit value is equivalent to reading a 64-bit one and casting
243 // it to uint32, but may be more efficient.
244 bool ReadVarint32(uint32* value);
245 // Read an unsigned integer with Varint encoding.
246 bool ReadVarint64(uint64* value);
247
248 // Reads a varint off the wire into an "int". This should be used for reading
249 // sizes off the wire (sizes of strings, submessages, bytes fields, etc).
250 //
251 // The value from the wire is interpreted as unsigned. If its value exceeds
252 // the representable value of an integer on this platform, instead of
253 // truncating we return false. Truncating (as performed by ReadVarint32()
254 // above) is an acceptable approach for fields representing an integer, but
255 // when we are parsing a size from the wire, truncating the value would result
256 // in us misparsing the payload.
257 bool ReadVarintSizeAsInt(int* value);
258
259 // Read a tag. This calls ReadVarint32() and returns the result, or returns
260 // zero (which is not a valid tag) if ReadVarint32() fails. Also, ReadTag
261 // (but not ReadTagNoLastTag) updates the last tag value, which can be checked
262 // with LastTagWas().
263 //
264 // Always inline because this is only called in one place per parse loop
265 // but it is called for every iteration of said loop, so it should be fast.
266 // GCC doesn't want to inline this by default.
267 PROTOBUF_ALWAYS_INLINE uint32 ReadTag() {
268 return last_tag_ = ReadTagNoLastTag();
269 }
270
271 PROTOBUF_ALWAYS_INLINE uint32 ReadTagNoLastTag();
272
  // This is usually a faster alternative to ReadTag() when cutoff is a manifest
274 // constant. It does particularly well for cutoff >= 127. The first part
275 // of the return value is the tag that was read, though it can also be 0 in
276 // the cases where ReadTag() would return 0. If the second part is true
277 // then the tag is known to be in [0, cutoff]. If not, the tag either is
278 // above cutoff or is 0. (There's intentional wiggle room when tag is 0,
279 // because that can arise in several ways, and for best performance we want
280 // to avoid an extra "is tag == 0?" check here.)
281 PROTOBUF_ALWAYS_INLINE
282 std::pair<uint32, bool> ReadTagWithCutoff(uint32 cutoff) {
283 std::pair<uint32, bool> result = ReadTagWithCutoffNoLastTag(cutoff);
284 last_tag_ = result.first;
285 return result;
286 }
287
288 PROTOBUF_ALWAYS_INLINE
289 std::pair<uint32, bool> ReadTagWithCutoffNoLastTag(uint32 cutoff);
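  //
  // A hedged usage sketch (illustrative; the cutoff value and the Handle*
  // helpers are assumptions, not part of this header):
  //   std::pair<uint32, bool> p = input->ReadTagWithCutoff(127);
  //   if (p.second) {
  //     // Tag is known to be in [0, 127]; 0 still means end-of-stream/error.
  //     if (p.first == 0) return HandleEndOrError();
  //     HandleSmallTag(p.first);
  //   } else {
  //     // Tag is either 0 or above the cutoff.
  //     HandleLargeOrZeroTag(p.first);
  //   }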
290
291 // Usually returns true if calling ReadVarint32() now would produce the given
292 // value. Will always return false if ReadVarint32() would not return the
293 // given value. If ExpectTag() returns true, it also advances past
294 // the varint. For best performance, use a compile-time constant as the
295 // parameter.
296 // Always inline because this collapses to a small number of instructions
297 // when given a constant parameter, but GCC doesn't want to inline by default.
298 PROTOBUF_ALWAYS_INLINE bool ExpectTag(uint32 expected);
299
300 // Like above, except this reads from the specified buffer. The caller is
301 // responsible for ensuring that the buffer is large enough to read a varint
302 // of the expected size. For best performance, use a compile-time constant as
303 // the expected tag parameter.
304 //
305 // Returns a pointer beyond the expected tag if it was found, or NULL if it
306 // was not.
307 PROTOBUF_ALWAYS_INLINE
308 static const uint8* ExpectTagFromArray(const uint8* buffer, uint32 expected);
309
310 // Usually returns true if no more bytes can be read. Always returns false
311 // if more bytes can be read. If ExpectAtEnd() returns true, a subsequent
312 // call to LastTagWas() will act as if ReadTag() had been called and returned
313 // zero, and ConsumedEntireMessage() will return true.
314 bool ExpectAtEnd();
315
316 // If the last call to ReadTag() or ReadTagWithCutoff() returned the given
317 // value, returns true. Otherwise, returns false.
318 // ReadTagNoLastTag/ReadTagWithCutoffNoLastTag do not preserve the last
319 // returned value.
320 //
321 // This is needed because parsers for some types of embedded messages
322 // (with field type TYPE_GROUP) don't actually know that they've reached the
323 // end of a message until they see an ENDGROUP tag, which was actually part
324 // of the enclosing message. The enclosing message would like to check that
325 // tag to make sure it had the right number, so it calls LastTagWas() on
326 // return from the embedded parser to check.
327 bool LastTagWas(uint32 expected);
328 void SetLastTag(uint32 tag) { last_tag_ = tag; }
329
  // When parsing a message (but NOT a group), this method must be called
331 // immediately after MergeFromCodedStream() returns (if it returns true)
332 // to further verify that the message ended in a legitimate way. For
333 // example, this verifies that parsing did not end on an end-group tag.
334 // It also checks for some cases where, due to optimizations,
335 // MergeFromCodedStream() can incorrectly return true.
336 bool ConsumedEntireMessage();
337 void SetConsumed() { legitimate_message_end_ = true; }
338
339 // Limits ----------------------------------------------------------
340 // Limits are used when parsing length-delimited embedded messages.
341 // After the message's length is read, PushLimit() is used to prevent
342 // the CodedInputStream from reading beyond that length. Once the
343 // embedded message has been parsed, PopLimit() is called to undo the
344 // limit.
345
346 // Opaque type used with PushLimit() and PopLimit(). Do not modify
347 // values of this type yourself. The only reason that this isn't a
348 // struct with private internals is for efficiency.
349 typedef int Limit;
350
351 // Places a limit on the number of bytes that the stream may read,
352 // starting from the current position. Once the stream hits this limit,
353 // it will act like the end of the input has been reached until PopLimit()
354 // is called.
355 //
356 // As the names imply, the stream conceptually has a stack of limits. The
357 // shortest limit on the stack is always enforced, even if it is not the
358 // top limit.
359 //
360 // The value returned by PushLimit() is opaque to the caller, and must
361 // be passed unchanged to the corresponding call to PopLimit().
362 Limit PushLimit(int byte_limit);
363
364 // Pops the last limit pushed by PushLimit(). The input must be the value
365 // returned by that call to PushLimit().
366 void PopLimit(Limit limit);
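  //
  // A minimal sketch of the usual pattern for length-delimited submessages
  // (illustrative; "ParseBody" is a hypothetical routine that reads fields
  // until the stream reports end-of-input):
  //   uint32 length;
  //   if (!input->ReadVarint32(&length)) return false;
  //   CodedInputStream::Limit limit = input->PushLimit(length);
  //   bool ok = ParseBody(input) && input->ConsumedEntireMessage();
  //   input->PopLimit(limit);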
367
368 // Returns the number of bytes left until the nearest limit on the
369 // stack is hit, or -1 if no limits are in place.
370 int BytesUntilLimit() const;
371
372 // Returns current position relative to the beginning of the input stream.
373 int CurrentPosition() const;
374
375 // Total Bytes Limit -----------------------------------------------
376 // To prevent malicious users from sending excessively large messages
377 // and causing memory exhaustion, CodedInputStream imposes a hard limit on
378 // the total number of bytes it will read.
379
380 // Sets the maximum number of bytes that this CodedInputStream will read
381 // before refusing to continue. To prevent servers from allocating enormous
382 // amounts of memory to hold parsed messages, the maximum message length
383 // should be limited to the shortest length that will not harm usability.
384 // The default limit is INT_MAX (~2GB) and apps should set shorter limits
385 // if possible. An error will always be printed to stderr if the limit is
386 // reached.
387 //
388 // Note: setting a limit less than the current read position is interpreted
389 // as a limit on the current position.
390 //
391 // This is unrelated to PushLimit()/PopLimit().
392 void SetTotalBytesLimit(int total_bytes_limit);
393
394 PROTOBUF_DEPRECATED_MSG(
395 "Please use the single parameter version of SetTotalBytesLimit(). The "
396 "second parameter is ignored.")
397 void SetTotalBytesLimit(int total_bytes_limit, int) {
398 SetTotalBytesLimit(total_bytes_limit);
399 }
400
401 // The Total Bytes Limit minus the Current Position, or -1 if the total bytes
402 // limit is INT_MAX.
403 int BytesUntilTotalBytesLimit() const;
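  //
  // A minimal configuration sketch (illustrative; the 64 MB figure is an
  // arbitrary example, not a recommendation from this header):
  //   CodedInputStream input(&raw_input);
  //   input.SetTotalBytesLimit(64 << 20);  // refuse to read more than ~64 MB
  //   if (!message.MergeFromCodedStream(&input)) {
  //     // Parsing failed, possibly because the limit was reached.
  //   }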
404
405 // Recursion Limit -------------------------------------------------
406 // To prevent corrupt or malicious messages from causing stack overflows,
407 // we must keep track of the depth of recursion when parsing embedded
408 // messages and groups. CodedInputStream keeps track of this because it
409 // is the only object that is passed down the stack during parsing.
410
411 // Sets the maximum recursion depth. The default is 100.
412 void SetRecursionLimit(int limit);
413 int RecursionBudget() { return recursion_budget_; }
414
415 static int GetDefaultRecursionLimit() { return default_recursion_limit_; }
416
417 // Increments the current recursion depth. Returns true if the depth is
418 // under the limit, false if it has gone over.
419 bool IncrementRecursionDepth();
420
421 // Decrements the recursion depth if possible.
422 void DecrementRecursionDepth();
423
424 // Decrements the recursion depth blindly. This is faster than
425 // DecrementRecursionDepth(). It should be used only if all previous
426 // increments to recursion depth were successful.
427 void UnsafeDecrementRecursionDepth();
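  //
  // Illustrative sketch (the chosen depth is an arbitrary example; only the
  // default of 100 comes from this header):
  //   CodedInputStream input(&raw_input);
  //   input.SetRecursionLimit(16);  // allow at most 16 nested messages/groups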
428
429 // Shorthand for make_pair(PushLimit(byte_limit), --recursion_budget_).
430 // Using this can reduce code size and complexity in some cases. The caller
431 // is expected to check that the second part of the result is non-negative (to
432 // bail out if the depth of recursion is too high) and, if all is well, to
433 // later pass the first part of the result to PopLimit() or similar.
434 std::pair<CodedInputStream::Limit, int> IncrementRecursionDepthAndPushLimit(
435 int byte_limit);
436
437 // Shorthand for PushLimit(ReadVarint32(&length) ? length : 0).
438 Limit ReadLengthAndPushLimit();
439
440 // Helper that is equivalent to: {
441 // bool result = ConsumedEntireMessage();
442 // PopLimit(limit);
443 // UnsafeDecrementRecursionDepth();
444 // return result; }
445 // Using this can reduce code size and complexity in some cases.
446 // Do not use unless the current recursion depth is greater than zero.
447 bool DecrementRecursionDepthAndPopLimit(Limit limit);
448
449 // Helper that is equivalent to: {
450 // bool result = ConsumedEntireMessage();
451 // PopLimit(limit);
452 // return result; }
453 // Using this can reduce code size and complexity in some cases.
454 bool CheckEntireMessageConsumedAndPopLimit(Limit limit);
455
456 // Extension Registry ----------------------------------------------
457 // ADVANCED USAGE: 99.9% of people can ignore this section.
458 //
459 // By default, when parsing extensions, the parser looks for extension
460 // definitions in the pool which owns the outer message's Descriptor.
461 // However, you may call SetExtensionRegistry() to provide an alternative
462 // pool instead. This makes it possible, for example, to parse a message
463 // using a generated class, but represent some extensions using
464 // DynamicMessage.
465
466 // Set the pool used to look up extensions. Most users do not need to call
467 // this as the correct pool will be chosen automatically.
468 //
469 // WARNING: It is very easy to misuse this. Carefully read the requirements
470 // below. Do not use this unless you are sure you need it. Almost no one
471 // does.
472 //
473 // Let's say you are parsing a message into message object m, and you want
474 // to take advantage of SetExtensionRegistry(). You must follow these
475 // requirements:
476 //
477 // The given DescriptorPool must contain m->GetDescriptor(). It is not
478 // sufficient for it to simply contain a descriptor that has the same name
479 // and content -- it must be the *exact object*. In other words:
480 // assert(pool->FindMessageTypeByName(m->GetDescriptor()->full_name()) ==
481 // m->GetDescriptor());
482 // There are two ways to satisfy this requirement:
483 // 1) Use m->GetDescriptor()->pool() as the pool. This is generally useless
484 // because this is the pool that would be used anyway if you didn't call
485 // SetExtensionRegistry() at all.
486 // 2) Use a DescriptorPool which has m->GetDescriptor()->pool() as an
487 // "underlay". Read the documentation for DescriptorPool for more
488 // information about underlays.
489 //
490 // You must also provide a MessageFactory. This factory will be used to
491 // construct Message objects representing extensions. The factory's
492 // GetPrototype() MUST return non-NULL for any Descriptor which can be found
493 // through the provided pool.
494 //
495 // If the provided factory might return instances of protocol-compiler-
496 // generated (i.e. compiled-in) types, or if the outer message object m is
497 // a generated type, then the given factory MUST have this property: If
498 // GetPrototype() is given a Descriptor which resides in
499 // DescriptorPool::generated_pool(), the factory MUST return the same
500 // prototype which MessageFactory::generated_factory() would return. That
501 // is, given a descriptor for a generated type, the factory must return an
502 // instance of the generated class (NOT DynamicMessage). However, when
503 // given a descriptor for a type that is NOT in generated_pool, the factory
504 // is free to return any implementation.
505 //
506 // The reason for this requirement is that generated sub-objects may be
507 // accessed via the standard (non-reflection) extension accessor methods,
508 // and these methods will down-cast the object to the generated class type.
509 // If the object is not actually of that type, the results would be undefined.
510 // On the other hand, if an extension is not compiled in, then there is no
511 // way the code could end up accessing it via the standard accessors -- the
512 // only way to access the extension is via reflection. When using reflection,
513 // DynamicMessage and generated messages are indistinguishable, so it's fine
514 // if these objects are represented using DynamicMessage.
515 //
  // Using a DynamicMessageFactory on which you have called
517 // SetDelegateToGeneratedFactory(true) should be sufficient to satisfy the
518 // above requirement.
519 //
520 // If either pool or factory is NULL, both must be NULL.
521 //
522 // Note that this feature is ignored when parsing "lite" messages as they do
523 // not have descriptors.
524 void SetExtensionRegistry(const DescriptorPool* pool,
525 MessageFactory* factory);
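  //
  // A hedged sketch of the intended call pattern (illustrative; "overlay_pool"
  // is a hypothetical DescriptorPool built with m->GetDescriptor()->pool() as
  // an underlay, per requirement (2) above):
  //   DynamicMessageFactory dynamic_factory(&overlay_pool);
  //   dynamic_factory.SetDelegateToGeneratedFactory(true);
  //   input->SetExtensionRegistry(&overlay_pool, &dynamic_factory);
  //   m->MergeFromCodedStream(input);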
526
527 // Get the DescriptorPool set via SetExtensionRegistry(), or NULL if no pool
528 // has been provided.
529 const DescriptorPool* GetExtensionPool();
530
531 // Get the MessageFactory set via SetExtensionRegistry(), or NULL if no
532 // factory has been provided.
533 MessageFactory* GetExtensionFactory();
534
535 private:
536 GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodedInputStream);
537
538 const uint8* buffer_;
539 const uint8* buffer_end_; // pointer to the end of the buffer.
540 ZeroCopyInputStream* input_;
541 int total_bytes_read_; // total bytes read from input_, including
542 // the current buffer
543
544 // If total_bytes_read_ surpasses INT_MAX, we record the extra bytes here
545 // so that we can BackUp() on destruction.
546 int overflow_bytes_;
547
548 // LastTagWas() stuff.
549 uint32 last_tag_; // result of last ReadTag() or ReadTagWithCutoff().
550
551 // This is set true by ReadTag{Fallback/Slow}() if it is called when exactly
552 // at EOF, or by ExpectAtEnd() when it returns true. This happens when we
553 // reach the end of a message and attempt to read another tag.
554 bool legitimate_message_end_;
555
556 // See EnableAliasing().
557 bool aliasing_enabled_;
558
559 // Limits
560 Limit current_limit_; // if position = -1, no limit is applied
561
562 // For simplicity, if the current buffer crosses a limit (either a normal
563 // limit created by PushLimit() or the total bytes limit), buffer_size_
564 // only tracks the number of bytes before that limit. This field
565 // contains the number of bytes after it. Note that this implies that if
566 // buffer_size_ == 0 and buffer_size_after_limit_ > 0, we know we've
567 // hit a limit. However, if both are zero, it doesn't necessarily mean
568 // we aren't at a limit -- the buffer may have ended exactly at the limit.
569 int buffer_size_after_limit_;
570
571 // Maximum number of bytes to read, period. This is unrelated to
572 // current_limit_. Set using SetTotalBytesLimit().
573 int total_bytes_limit_;
574
575 // Current recursion budget, controlled by IncrementRecursionDepth() and
576 // similar. Starts at recursion_limit_ and goes down: if this reaches
577 // -1 we are over budget.
578 int recursion_budget_;
579 // Recursion depth limit, set by SetRecursionLimit().
580 int recursion_limit_;
581
582 // See SetExtensionRegistry().
583 const DescriptorPool* extension_pool_;
584 MessageFactory* extension_factory_;
585
586 // Private member functions.
587
588 // Fallback when Skip() goes past the end of the current buffer.
589 bool SkipFallback(int count, int original_buffer_size);
590
591 // Advance the buffer by a given number of bytes.
592 void Advance(int amount);
593
594 // Back up input_ to the current buffer position.
595 void BackUpInputToCurrentPosition();
596
597 // Recomputes the value of buffer_size_after_limit_. Must be called after
598 // current_limit_ or total_bytes_limit_ changes.
599 void RecomputeBufferLimits();
600
601 // Writes an error message saying that we hit total_bytes_limit_.
602 void PrintTotalBytesLimitError();
603
604 // Called when the buffer runs out to request more data. Implies an
605 // Advance(BufferSize()).
606 bool Refresh();
607
608 // When parsing varints, we optimize for the common case of small values, and
609 // then optimize for the case when the varint fits within the current buffer
610 // piece. The Fallback method is used when we can't use the one-byte
611 // optimization. The Slow method is yet another fallback when the buffer is
612 // not large enough. Making the slow path out-of-line speeds up the common
613 // case by 10-15%. The slow path is fairly uncommon: it only triggers when a
614 // message crosses multiple buffers. Note: ReadVarint32Fallback() and
615 // ReadVarint64Fallback() are called frequently and generally not inlined, so
616 // they have been optimized to avoid "out" parameters. The former returns -1
617 // if it fails and the uint32 it read otherwise. The latter has a bool
618 // indicating success or failure as part of its return type.
619 int64 ReadVarint32Fallback(uint32 first_byte_or_zero);
620 int ReadVarintSizeAsIntFallback();
621 std::pair<uint64, bool> ReadVarint64Fallback();
622 bool ReadVarint32Slow(uint32* value);
623 bool ReadVarint64Slow(uint64* value);
624 int ReadVarintSizeAsIntSlow();
625 bool ReadLittleEndian32Fallback(uint32* value);
626 bool ReadLittleEndian64Fallback(uint64* value);
627
628 // Fallback/slow methods for reading tags. These do not update last_tag_,
629 // but will set legitimate_message_end_ if we are at the end of the input
630 // stream.
631 uint32 ReadTagFallback(uint32 first_byte_or_zero);
632 uint32 ReadTagSlow();
633 bool ReadStringFallback(std::string* buffer, int size);
634
635 // Return the size of the buffer.
636 int BufferSize() const;
637
638 static const int kDefaultTotalBytesLimit = INT_MAX;
639
640 static int default_recursion_limit_; // 100 by default.
641
642 friend class google::protobuf::ZeroCopyCodedInputStream;
643 friend class google::protobuf::internal::EpsCopyByteStream;
644};
645
// EpsCopyOutputStream wraps a ZeroCopyOutputStream and exposes a new stream
// with the property that you can write kSlopBytes (16 bytes) from the current
// position without bounds checks. The cursor into the stream is managed by
// the user of the class and is an explicit parameter in the methods. Careful
// use of this class (i.e. keeping ptr in a local variable) eliminates the
// need for the compiler to sync the ptr value between register and memory.
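//
// A hedged sketch of the calling convention (illustrative; "out" is an
// EpsCopyOutputStream, "ptr" is the caller-managed cursor obtained from the
// constructor, and "payload"/"payload_size" are assumptions):
//   ptr = out.EnsureSpace(ptr);                      // safe for kSlopBytes
//   ptr = out.WriteRaw(payload, payload_size, ptr);  // bulk copy
//   ptr = out.Trim(ptr);                             // flush and sync stream
//   if (out.HadError()) { /* handle the underlying I/O failure */ }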
652class PROTOBUF_EXPORT EpsCopyOutputStream {
653 public:
654 enum { kSlopBytes = 16 };
655
656 // Initialize from a stream.
657 EpsCopyOutputStream(ZeroCopyOutputStream* stream, bool deterministic,
658 uint8** pp)
659 : end_(buffer_),
660 stream_(stream),
661 is_serialization_deterministic_(deterministic) {
662 *pp = buffer_;
663 }
664
  // Only for array serialization. No overflow protection; end_ will point to
  // the end of the array. When using this, the total size is already known,
  // so there is no need to maintain the slop region.
668 EpsCopyOutputStream(void* data, int size, bool deterministic)
669 : end_(static_cast<uint8*>(data) + size),
670 buffer_end_(nullptr),
671 stream_(nullptr),
672 is_serialization_deterministic_(deterministic) {}
673
674 // Initialize from stream but with the first buffer already given (eager).
675 EpsCopyOutputStream(void* data, int size, ZeroCopyOutputStream* stream,
676 bool deterministic, uint8** pp)
677 : stream_(stream), is_serialization_deterministic_(deterministic) {
678 *pp = SetInitialBuffer(data, size);
679 }
680
  // Flushes everything that's written into the underlying ZeroCopyOutputStream
  // and trims the underlying stream to the location of ptr.
683 uint8* Trim(uint8* ptr);
684
  // After this call it's guaranteed that you can safely write kSlopBytes to
  // ptr. This will never fail! The underlying stream may still record an
  // error, however; use HadError() to check for errors.
688 PROTOBUF_MUST_USE_RESULT uint8* EnsureSpace(uint8* ptr) {
689 if (PROTOBUF_PREDICT_FALSE(ptr >= end_)) {
690 return EnsureSpaceFallback(ptr);
691 }
692 return ptr;
693 }
694
695 uint8* WriteRaw(const void* data, int size, uint8* ptr) {
696 if (PROTOBUF_PREDICT_FALSE(end_ - ptr < size)) {
697 return WriteRawFallback(data, size, ptr);
698 }
699 std::memcpy(ptr, data, size);
700 return ptr + size;
701 }
  // Writes the buffer specified by data, size to the stream, possibly by
  // aliasing the buffer (i.e. not copying the data). The caller is responsible
  // for making sure the buffer stays alive for the duration of the
  // ZeroCopyOutputStream.
706 uint8* WriteRawMaybeAliased(const void* data, int size, uint8* ptr) {
707 if (aliasing_enabled_) {
708 return WriteAliasedRaw(data, size, ptr);
709 } else {
710 return WriteRaw(data, size, ptr);
711 }
712 }
713
714
715 uint8* WriteStringMaybeAliased(uint32 num, const std::string& s, uint8* ptr) {
716 std::ptrdiff_t size = s.size();
717 if (PROTOBUF_PREDICT_FALSE(
718 size >= 128 || end_ - ptr + 16 - TagSize(num << 3) - 1 < size)) {
719 return WriteStringMaybeAliasedOutline(num, s, ptr);
720 }
721 ptr = UnsafeVarint((num << 3) | 2, ptr);
722 *ptr++ = static_cast<uint8>(size);
723 std::memcpy(ptr, s.data(), size);
724 return ptr + size;
725 }
726 uint8* WriteBytesMaybeAliased(uint32 num, const std::string& s, uint8* ptr) {
727 return WriteStringMaybeAliased(num, s, ptr);
728 }
729
730 template <typename T>
731 PROTOBUF_ALWAYS_INLINE uint8* WriteString(uint32 num, const T& s,
732 uint8* ptr) {
733 std::ptrdiff_t size = s.size();
734 if (PROTOBUF_PREDICT_FALSE(
735 size >= 128 || end_ - ptr + 16 - TagSize(num << 3) - 1 < size)) {
736 return WriteStringOutline(num, s, ptr);
737 }
738 ptr = UnsafeVarint((num << 3) | 2, ptr);
739 *ptr++ = static_cast<uint8>(size);
740 std::memcpy(ptr, s.data(), size);
741 return ptr + size;
742 }
743 template <typename T>
744 uint8* WriteBytes(uint32 num, const T& s, uint8* ptr) {
745 return WriteString(num, s, ptr);
746 }
747
748 template <typename T>
749 PROTOBUF_ALWAYS_INLINE uint8* WriteInt32Packed(int num, const T& r, int size,
750 uint8* ptr) {
751 return WriteVarintPacked(num, r, size, ptr, Encode64);
752 }
753 template <typename T>
754 PROTOBUF_ALWAYS_INLINE uint8* WriteUInt32Packed(int num, const T& r, int size,
755 uint8* ptr) {
756 return WriteVarintPacked(num, r, size, ptr, Encode32);
757 }
758 template <typename T>
759 PROTOBUF_ALWAYS_INLINE uint8* WriteSInt32Packed(int num, const T& r, int size,
760 uint8* ptr) {
761 return WriteVarintPacked(num, r, size, ptr, ZigZagEncode32);
762 }
763 template <typename T>
764 PROTOBUF_ALWAYS_INLINE uint8* WriteInt64Packed(int num, const T& r, int size,
765 uint8* ptr) {
766 return WriteVarintPacked(num, r, size, ptr, Encode64);
767 }
768 template <typename T>
769 PROTOBUF_ALWAYS_INLINE uint8* WriteUInt64Packed(int num, const T& r, int size,
770 uint8* ptr) {
771 return WriteVarintPacked(num, r, size, ptr, Encode64);
772 }
773 template <typename T>
774 PROTOBUF_ALWAYS_INLINE uint8* WriteSInt64Packed(int num, const T& r, int size,
775 uint8* ptr) {
776 return WriteVarintPacked(num, r, size, ptr, ZigZagEncode64);
777 }
778 template <typename T>
779 PROTOBUF_ALWAYS_INLINE uint8* WriteEnumPacked(int num, const T& r, int size,
780 uint8* ptr) {
781 return WriteVarintPacked(num, r, size, ptr, Encode64);
782 }
783
784 template <typename T>
785 PROTOBUF_ALWAYS_INLINE uint8* WriteFixedPacked(int num, const T& r,
786 uint8* ptr) {
787 ptr = EnsureSpace(ptr);
788 constexpr auto element_size = sizeof(typename T::value_type);
789 auto size = r.size() * element_size;
790 ptr = WriteLengthDelim(num, size, ptr);
791 return WriteRawLittleEndian<element_size>(r.data(), static_cast<int>(size),
792 ptr);
793 }
794
795 // Returns true if there was an underlying I/O error since this object was
796 // created.
797 bool HadError() const { return had_error_; }
798
799 // Instructs the EpsCopyOutputStream to allow the underlying
800 // ZeroCopyOutputStream to hold pointers to the original structure instead of
801 // copying, if it supports it (i.e. output->AllowsAliasing() is true). If the
802 // underlying stream does not support aliasing, then enabling it has no
  // effect. For now, this only affects the behavior of
804 // WriteRawMaybeAliased().
805 //
806 // NOTE: It is caller's responsibility to ensure that the chunk of memory
807 // remains live until all of the data has been consumed from the stream.
808 void EnableAliasing(bool enabled);
809
810 // See documentation on CodedOutputStream::SetSerializationDeterministic.
811 void SetSerializationDeterministic(bool value) {
812 is_serialization_deterministic_ = value;
813 }
814
815 // See documentation on CodedOutputStream::IsSerializationDeterministic.
816 bool IsSerializationDeterministic() const {
817 return is_serialization_deterministic_;
818 }
819
  // The number of bytes written to the stream at position ptr, relative to the
821 // stream's overall position.
822 int64 ByteCount(uint8* ptr) const;
823
824
825 private:
826 uint8* end_;
827 uint8* buffer_end_ = buffer_;
828 uint8 buffer_[2 * kSlopBytes];
829 ZeroCopyOutputStream* stream_;
830 bool had_error_ = false;
831 bool aliasing_enabled_ = false; // See EnableAliasing().
832 bool is_serialization_deterministic_;
833
834 uint8* EnsureSpaceFallback(uint8* ptr);
835 inline uint8* Next();
836 int Flush(uint8* ptr);
837 std::ptrdiff_t GetSize(uint8* ptr) const {
838 GOOGLE_DCHECK(ptr <= end_ + kSlopBytes); // NOLINT
839 return end_ + kSlopBytes - ptr;
840 }
841
842 uint8* Error() {
843 had_error_ = true;
844 // We use the patch buffer to always guarantee space to write to.
845 end_ = buffer_ + kSlopBytes;
846 return buffer_;
847 }
848
849 static constexpr int TagSize(uint32 tag) {
850 return (tag < (1 << 7))
851 ? 1
852 : (tag < (1 << 14))
853 ? 2
854 : (tag < (1 << 21)) ? 3 : (tag < (1 << 28)) ? 4 : 5;
855 }
856
857 PROTOBUF_ALWAYS_INLINE uint8* WriteTag(uint32 num, uint32 wt, uint8* ptr) {
858 GOOGLE_DCHECK(ptr < end_); // NOLINT
859 return UnsafeVarint((num << 3) | wt, ptr);
860 }
861
862 PROTOBUF_ALWAYS_INLINE uint8* WriteLengthDelim(int num, uint32 size,
863 uint8* ptr) {
864 ptr = WriteTag(num, 2, ptr);
865 return UnsafeWriteSize(size, ptr);
866 }
867
868 uint8* WriteRawFallback(const void* data, int size, uint8* ptr);
869
870 uint8* WriteAliasedRaw(const void* data, int size, uint8* ptr);
871
872 uint8* WriteStringMaybeAliasedOutline(uint32 num, const std::string& s,
873 uint8* ptr);
874 uint8* WriteStringOutline(uint32 num, const std::string& s, uint8* ptr);
875
876 template <typename T, typename E>
877 PROTOBUF_ALWAYS_INLINE uint8* WriteVarintPacked(int num, const T& r, int size,
878 uint8* ptr, const E& encode) {
879 ptr = EnsureSpace(ptr);
880 ptr = WriteLengthDelim(num, size, ptr);
881 auto it = r.data();
882 auto end = it + r.size();
883 do {
884 ptr = EnsureSpace(ptr);
885 ptr = UnsafeVarint(encode(*it++), ptr);
886 } while (it < end);
887 return ptr;
888 }
889
890 static uint32 Encode32(uint32 v) { return v; }
891 static uint64 Encode64(uint64 v) { return v; }
892 static uint32 ZigZagEncode32(int32 v) {
893 return (static_cast<uint32>(v) << 1) ^ static_cast<uint32>(v >> 31);
894 }
895 static uint64 ZigZagEncode64(int64 v) {
896 return (static_cast<uint64>(v) << 1) ^ static_cast<uint64>(v >> 63);
897 }
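  // Worked examples of the ZigZag mapping above (arithmetic only, added for
  // illustration): 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2147483647 -> 4294967294.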
898
899 template <typename T>
900 PROTOBUF_ALWAYS_INLINE static uint8* UnsafeVarint(T value, uint8* ptr) {
901 static_assert(std::is_unsigned<T>::value,
902 "Varint serialization must be unsigned");
903 if (value < 0x80) {
904 ptr[0] = static_cast<uint8>(value);
905 return ptr + 1;
906 }
907 ptr[0] = static_cast<uint8>(value | 0x80);
908 value >>= 7;
909 if (value < 0x80) {
910 ptr[1] = static_cast<uint8>(value);
911 return ptr + 2;
912 }
913 ptr++;
914 do {
915 *ptr = static_cast<uint8>(value | 0x80);
916 value >>= 7;
917 ++ptr;
918 } while (PROTOBUF_PREDICT_FALSE(value >= 0x80));
919 *ptr++ = static_cast<uint8>(value);
920 return ptr;
921 }
922
923 PROTOBUF_ALWAYS_INLINE static uint8* UnsafeWriteSize(uint32 value,
924 uint8* ptr) {
925 while (PROTOBUF_PREDICT_FALSE(value >= 0x80)) {
926 *ptr = static_cast<uint8>(value | 0x80);
927 value >>= 7;
928 ++ptr;
929 }
930 *ptr++ = static_cast<uint8>(value);
931 return ptr;
932 }
933
934 template <int S>
935 uint8* WriteRawLittleEndian(const void* data, int size, uint8* ptr);
936#ifndef PROTOBUF_LITTLE_ENDIAN
937 uint8* WriteRawLittleEndian32(const void* data, int size, uint8* ptr);
938 uint8* WriteRawLittleEndian64(const void* data, int size, uint8* ptr);
939#endif
940
  // These methods are for CodedOutputStream. Ideally they should be private,
  // but to match the current behavior of CodedOutputStream as closely as
  // possible we allow it access to this functionality.
944 public:
945 uint8* SetInitialBuffer(void* data, int size) {
946 auto ptr = static_cast<uint8*>(data);
947 if (size > kSlopBytes) {
948 end_ = ptr + size - kSlopBytes;
949 buffer_end_ = nullptr;
950 return ptr;
951 } else {
952 end_ = buffer_ + size;
953 buffer_end_ = ptr;
954 return buffer_;
955 }
956 }
957
958 private:
  // Needed by CodedOutputStream::HadError(), which needs to flush the patch
  // buffer to determine whether an error has occurred so far.
961 uint8* FlushAndResetBuffer(uint8*);
962
  // The following functions mimic the old CodedOutputStream behavior as
  // closely as possible. They flush the current state to the stream, behave
  // like the old CodedOutputStream, and then return to normal operation.
966 bool Skip(int count, uint8** pp);
967 bool GetDirectBufferPointer(void** data, int* size, uint8** pp);
968 uint8* GetDirectBufferForNBytesAndAdvance(int size, uint8** pp);
969
970 friend class CodedOutputStream;
971};
972
973template <>
974inline uint8* EpsCopyOutputStream::WriteRawLittleEndian<1>(const void* data,
975 int size,
976 uint8* ptr) {
977 return WriteRaw(data, size, ptr);
978}
979template <>
980inline uint8* EpsCopyOutputStream::WriteRawLittleEndian<4>(const void* data,
981 int size,
982 uint8* ptr) {
983#ifdef PROTOBUF_LITTLE_ENDIAN
984 return WriteRaw(data, size, ptr);
985#else
986 return WriteRawLittleEndian32(data, size, ptr);
987#endif
988}
989template <>
990inline uint8* EpsCopyOutputStream::WriteRawLittleEndian<8>(const void* data,
991 int size,
992 uint8* ptr) {
993#ifdef PROTOBUF_LITTLE_ENDIAN
994 return WriteRaw(data, size, ptr);
995#else
996 return WriteRawLittleEndian64(data, size, ptr);
997#endif
998}
999
1000// Class which encodes and writes binary data which is composed of varint-
1001// encoded integers and fixed-width pieces. Wraps a ZeroCopyOutputStream.
1002// Most users will not need to deal with CodedOutputStream.
1003//
1004// Most methods of CodedOutputStream which return a bool return false if an
1005// underlying I/O error occurs. Once such a failure occurs, the
1006// CodedOutputStream is broken and is no longer useful. The Write* methods do
1007// not return the stream status, but will invalidate the stream if an error
1008// occurs. The client can probe HadError() to determine the status.
1009//
1010// Note that every method of CodedOutputStream which writes some data has
1011// a corresponding static "ToArray" version. These versions write directly
1012// to the provided buffer, returning a pointer past the last written byte.
1013// They require that the buffer has sufficient capacity for the encoded data.
1014// This allows an optimization where we check if an output stream has enough
1015// space for an entire message before we start writing and, if there is, we
1016// call only the ToArray methods to avoid doing bound checks for each
1017// individual value.
1018// i.e., in the example above:
1019//
1020// CodedOutputStream* coded_output = new CodedOutputStream(raw_output);
1021// int magic_number = 1234;
1022// char text[] = "Hello world!";
1023//
1024// int coded_size = sizeof(magic_number) +
1025// CodedOutputStream::VarintSize32(strlen(text)) +
1026// strlen(text);
1027//
1028// uint8* buffer =
1029// coded_output->GetDirectBufferForNBytesAndAdvance(coded_size);
1030// if (buffer != nullptr) {
1031// // The output stream has enough space in the buffer: write directly to
1032// // the array.
1033// buffer = CodedOutputStream::WriteLittleEndian32ToArray(magic_number,
1034// buffer);
1035// buffer = CodedOutputStream::WriteVarint32ToArray(strlen(text), buffer);
1036// buffer = CodedOutputStream::WriteRawToArray(text, strlen(text), buffer);
1037// } else {
1038// // Make bound-checked writes, which will ask the underlying stream for
1039// // more space as needed.
1040// coded_output->WriteLittleEndian32(magic_number);
1041// coded_output->WriteVarint32(strlen(text));
1042// coded_output->WriteRaw(text, strlen(text));
1043// }
1044//
1045// delete coded_output;
1046class PROTOBUF_EXPORT CodedOutputStream {
1047 public:
  // Create a CodedOutputStream that writes to the given ZeroCopyOutputStream.
1049 explicit CodedOutputStream(ZeroCopyOutputStream* stream)
1050 : CodedOutputStream(stream, true) {}
1051 CodedOutputStream(ZeroCopyOutputStream* stream, bool do_eager_refresh);
1052
1053 // Destroy the CodedOutputStream and position the underlying
1054 // ZeroCopyOutputStream immediately after the last byte written.
1055 ~CodedOutputStream();
1056
1057 // Returns true if there was an underlying I/O error since this object was
  // created. One should call Trim() before this function in order to catch
  // all errors.
1060 bool HadError() {
1061 cur_ = impl_.FlushAndResetBuffer(cur_);
1062 GOOGLE_DCHECK(cur_);
1063 return impl_.HadError();
1064 }
1065
1066 // Trims any unused space in the underlying buffer so that its size matches
1067 // the number of bytes written by this stream. The underlying buffer will
1068 // automatically be trimmed when this stream is destroyed; this call is only
1069 // necessary if the underlying buffer is accessed *before* the stream is
1070 // destroyed.
1071 void Trim() { cur_ = impl_.Trim(cur_); }
1072
1073 // Skips a number of bytes, leaving the bytes unmodified in the underlying
1074 // buffer. Returns false if an underlying write error occurs. This is
1075 // mainly useful with GetDirectBufferPointer().
  // Note of caution: the skipped bytes may contain uninitialized data. The
  // caller must make sure that the skipped bytes are properly initialized;
  // otherwise data from the heap might be leaked.
1079 bool Skip(int count) { return impl_.Skip(count, &cur_); }
1080
1081 // Sets *data to point directly at the unwritten part of the
1082 // CodedOutputStream's underlying buffer, and *size to the size of that
1083 // buffer, but does not advance the stream's current position. This will
1084 // always either produce a non-empty buffer or return false. If the caller
1085 // writes any data to this buffer, it should then call Skip() to skip over
1086 // the consumed bytes. This may be useful for implementing external fast
1087 // serialization routines for types of data not covered by the
1088 // CodedOutputStream interface.
1089 bool GetDirectBufferPointer(void** data, int* size) {
1090 return impl_.GetDirectBufferPointer(data, size, &cur_);
1091 }
1092
1093 // If there are at least "size" bytes available in the current buffer,
1094 // returns a pointer directly into the buffer and advances over these bytes.
1095 // The caller may then write directly into this buffer (e.g. using the
1096 // *ToArray static methods) rather than go through CodedOutputStream. If
1097 // there are not enough bytes available, returns NULL. The return pointer is
1098 // invalidated as soon as any other non-const method of CodedOutputStream
1099 // is called.
1100 inline uint8* GetDirectBufferForNBytesAndAdvance(int size) {
1101 return impl_.GetDirectBufferForNBytesAndAdvance(size, &cur_);
1102 }
1103
1104 // Write raw bytes, copying them from the given buffer.
1105 void WriteRaw(const void* buffer, int size) {
1106 cur_ = impl_.WriteRaw(buffer, size, cur_);
1107 }
1108 // Like WriteRaw() but will try to write aliased data if aliasing is
1109 // turned on.
1110 void WriteRawMaybeAliased(const void* data, int size);
1111 // Like WriteRaw() but writing directly to the target array.
1112 // This is _not_ inlined, as the compiler often optimizes memcpy into inline
1113 // copy loops. Since this gets called by every field with string or bytes
1114 // type, inlining may lead to a significant amount of code bloat, with only a
1115 // minor performance gain.
1116 static uint8* WriteRawToArray(const void* buffer, int size, uint8* target);
1117
1118 // Equivalent to WriteRaw(str.data(), str.size()).
1119 void WriteString(const std::string& str);
1120 // Like WriteString() but writing directly to the target array.
1121 static uint8* WriteStringToArray(const std::string& str, uint8* target);
1122 // Write the varint-encoded size of str followed by str.
1123 static uint8* WriteStringWithSizeToArray(const std::string& str,
1124 uint8* target);
1125
1126
1127 // Write a 32-bit little-endian integer.
1128 void WriteLittleEndian32(uint32 value) {
1129 cur_ = impl_.EnsureSpace(cur_);
1130 SetCur(WriteLittleEndian32ToArray(value, Cur()));
1131 }
1132 // Like WriteLittleEndian32() but writing directly to the target array.
1133 static uint8* WriteLittleEndian32ToArray(uint32 value, uint8* target);
1134 // Write a 64-bit little-endian integer.
1135 void WriteLittleEndian64(uint64 value) {
1136 cur_ = impl_.EnsureSpace(cur_);
1137 SetCur(WriteLittleEndian64ToArray(value, Cur()));
1138 }
1139 // Like WriteLittleEndian64() but writing directly to the target array.
1140 static uint8* WriteLittleEndian64ToArray(uint64 value, uint8* target);
1141
1142 // Write an unsigned integer with Varint encoding. Writing a 32-bit value
1143 // is equivalent to casting it to uint64 and writing it as a 64-bit value,
1144 // but may be more efficient.
1145 void WriteVarint32(uint32 value);
1146 // Like WriteVarint32() but writing directly to the target array.
1147 static uint8* WriteVarint32ToArray(uint32 value, uint8* target);
1148 // Write an unsigned integer with Varint encoding.
1149 void WriteVarint64(uint64 value);
1150 // Like WriteVarint64() but writing directly to the target array.
1151 static uint8* WriteVarint64ToArray(uint64 value, uint8* target);
1152
1153 // Equivalent to WriteVarint32() except when the value is negative,
1154 // in which case it must be sign-extended to a full 10 bytes.
1155 void WriteVarint32SignExtended(int32 value);
1156 // Like WriteVarint32SignExtended() but writing directly to the target array.
1157 static uint8* WriteVarint32SignExtendedToArray(int32 value, uint8* target);
1158
1159 // This is identical to WriteVarint32(), but optimized for writing tags.
1160 // In particular, if the input is a compile-time constant, this method
1161 // compiles down to a couple instructions.
  // Always inline because otherwise the aforementioned optimization can't work,
1163 // but GCC by default doesn't want to inline this.
1164 void WriteTag(uint32 value);
1165 // Like WriteTag() but writing directly to the target array.
1166 PROTOBUF_ALWAYS_INLINE
1167 static uint8* WriteTagToArray(uint32 value, uint8* target);
1168
1169 // Returns the number of bytes needed to encode the given value as a varint.
1170 static size_t VarintSize32(uint32 value);
1171 // Returns the number of bytes needed to encode the given value as a varint.
1172 static size_t VarintSize64(uint64 value);
1173
  // If negative, 10 bytes. Otherwise, same as VarintSize32().
1175 static size_t VarintSize32SignExtended(int32 value);
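  //
  // Worked examples, following directly from the 7-bits-per-byte rule:
  //   VarintSize32(1) == 1, VarintSize32(300) == 2, VarintSize32(1u << 28) == 5,
  //   VarintSize32SignExtended(-1) == 10.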
1176
1177 // Compile-time equivalent of VarintSize32().
1178 template <uint32 Value>
1179 struct StaticVarintSize32 {
1180 static const size_t value =
1181 (Value < (1 << 7))
1182 ? 1
1183 : (Value < (1 << 14))
1184 ? 2
1185 : (Value < (1 << 21)) ? 3 : (Value < (1 << 28)) ? 4 : 5;
1186 };
1187
1188 // Returns the total number of bytes written since this object was created.
1189 int ByteCount() const {
1190 return static_cast<int>(impl_.ByteCount(cur_) - start_count_);
1191 }
1192
1193 // Instructs the CodedOutputStream to allow the underlying
1194 // ZeroCopyOutputStream to hold pointers to the original structure instead of
1195 // copying, if it supports it (i.e. output->AllowsAliasing() is true). If the
1196 // underlying stream does not support aliasing, then enabling it has no
  // effect. For now, this only affects the behavior of
1198 // WriteRawMaybeAliased().
1199 //
1200 // NOTE: It is caller's responsibility to ensure that the chunk of memory
1201 // remains live until all of the data has been consumed from the stream.
1202 void EnableAliasing(bool enabled) { impl_.EnableAliasing(enabled); }
1203
  // Indicate to the serializer whether the user wants deterministic
1205 // serialization. The default when this is not called comes from the global
1206 // default, controlled by SetDefaultSerializationDeterministic.
1207 //
1208 // What deterministic serialization means is entirely up to the driver of the
1209 // serialization process (i.e. the caller of methods like WriteVarint32). In
1210 // the case of serializing a proto buffer message using one of the methods of
  // MessageLite, this means that, for a given binary, equal messages will always
1212 // be serialized to the same bytes. This implies:
1213 //
1214 // * Repeated serialization of a message will return the same bytes.
1215 //
1216 // * Different processes running the same binary (including on different
1217 // machines) will serialize equal messages to the same bytes.
1218 //
1219 // Note that this is *not* canonical across languages. It is also unstable
1220 // across different builds with intervening message definition changes, due to
1221 // unknown fields. Users who need canonical serialization (e.g. persistent
1222 // storage in a canonical form, fingerprinting) should define their own
1223 // canonicalization specification and implement the serializer using
1224 // reflection APIs rather than relying on this API.
1225 void SetSerializationDeterministic(bool value) {
1226 impl_.SetSerializationDeterministic(value);
1227 }
1228
1229 // Return whether the user wants deterministic serialization. See above.
1230 bool IsSerializationDeterministic() const {
1231 return impl_.IsSerializationDeterministic();
1232 }
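  //
  // A minimal usage sketch (illustrative; "message" stands for any protocol
  // buffer message object and the stream setup mirrors the example at the top
  // of this file):
  //   CodedOutputStream coded_output(&raw_output);
  //   coded_output.SetSerializationDeterministic(true);
  //   message.SerializeToCodedStream(&coded_output);
  //   coded_output.Trim();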
1233
1234 static bool IsDefaultSerializationDeterministic() {
1235 return default_serialization_deterministic_.load(
1236 std::memory_order_relaxed) != 0;
1237 }
1238
1239 template <typename Func>
1240 void Serialize(const Func& func);
1241
1242 uint8* Cur() const { return cur_; }
1243 void SetCur(uint8* ptr) { cur_ = ptr; }
1244 EpsCopyOutputStream* EpsCopy() { return &impl_; }
1245
1246 private:
1247 EpsCopyOutputStream impl_;
1248 uint8* cur_;
1249 int64 start_count_;
1250 static std::atomic<bool> default_serialization_deterministic_;
1251
1252 // See above. Other projects may use "friend" to allow them to call this.
1253 // After SetDefaultSerializationDeterministic() completes, all protocol
1254 // buffer serializations will be deterministic by default. Thread safe.
1255 // However, the meaning of "after" is subtle here: to be safe, each thread
1256 // that wants deterministic serialization by default needs to call
1257 // SetDefaultSerializationDeterministic() or ensure on its own that another
1258 // thread has done so.
1259 friend void internal::MapTestForceDeterministic();
1260 static void SetDefaultSerializationDeterministic() {
1261 default_serialization_deterministic_.store(true, std::memory_order_relaxed);
1262 }
1263 GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodedOutputStream);
1264};
1265
1266// inline methods ====================================================
1267// The vast majority of varints are only one byte. These inline
1268// methods optimize for that case.
1269
1270inline bool CodedInputStream::ReadVarint32(uint32* value) {
1271 uint32 v = 0;
1272 if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_)) {
1273 v = *buffer_;
1274 if (v < 0x80) {
1275 *value = v;
1276 Advance(1);
1277 return true;
1278 }
1279 }
1280 int64 result = ReadVarint32Fallback(v);
1281 *value = static_cast<uint32>(result);
1282 return result >= 0;
1283}

inline bool CodedInputStream::ReadVarint64(uint64* value) {
  if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_) && *buffer_ < 0x80) {
    *value = *buffer_;
    Advance(1);
    return true;
  }
  std::pair<uint64, bool> p = ReadVarint64Fallback();
  *value = p.first;
  return p.second;
}

inline bool CodedInputStream::ReadVarintSizeAsInt(int* value) {
  if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_)) {
    int v = *buffer_;
    if (v < 0x80) {
      *value = v;
      Advance(1);
      return true;
    }
  }
  *value = ReadVarintSizeAsIntFallback();
  return *value >= 0;
}

// static
inline const uint8* CodedInputStream::ReadLittleEndian32FromArray(
    const uint8* buffer, uint32* value) {
#if defined(PROTOBUF_LITTLE_ENDIAN)
  memcpy(value, buffer, sizeof(*value));
  return buffer + sizeof(*value);
#else
  *value = (static_cast<uint32>(buffer[0])) |
           (static_cast<uint32>(buffer[1]) << 8) |
           (static_cast<uint32>(buffer[2]) << 16) |
           (static_cast<uint32>(buffer[3]) << 24);
  return buffer + sizeof(*value);
#endif
}
// static
inline const uint8* CodedInputStream::ReadLittleEndian64FromArray(
    const uint8* buffer, uint64* value) {
#if defined(PROTOBUF_LITTLE_ENDIAN)
  memcpy(value, buffer, sizeof(*value));
  return buffer + sizeof(*value);
#else
  uint32 part0 = (static_cast<uint32>(buffer[0])) |
                 (static_cast<uint32>(buffer[1]) << 8) |
                 (static_cast<uint32>(buffer[2]) << 16) |
                 (static_cast<uint32>(buffer[3]) << 24);
  uint32 part1 = (static_cast<uint32>(buffer[4])) |
                 (static_cast<uint32>(buffer[5]) << 8) |
                 (static_cast<uint32>(buffer[6]) << 16) |
                 (static_cast<uint32>(buffer[7]) << 24);
  *value = static_cast<uint64>(part0) | (static_cast<uint64>(part1) << 32);
  return buffer + sizeof(*value);
#endif
}

inline bool CodedInputStream::ReadLittleEndian32(uint32* value) {
#if defined(PROTOBUF_LITTLE_ENDIAN)
  if (PROTOBUF_PREDICT_TRUE(BufferSize() >= static_cast<int>(sizeof(*value)))) {
    buffer_ = ReadLittleEndian32FromArray(buffer_, value);
    return true;
  } else {
    return ReadLittleEndian32Fallback(value);
  }
#else
  return ReadLittleEndian32Fallback(value);
#endif
}

inline bool CodedInputStream::ReadLittleEndian64(uint64* value) {
#if defined(PROTOBUF_LITTLE_ENDIAN)
  if (PROTOBUF_PREDICT_TRUE(BufferSize() >= static_cast<int>(sizeof(*value)))) {
    buffer_ = ReadLittleEndian64FromArray(buffer_, value);
    return true;
  } else {
    return ReadLittleEndian64Fallback(value);
  }
#else
  return ReadLittleEndian64Fallback(value);
#endif
}
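
// A minimal sketch of decoding fixed-width values from a raw byte array with
// the static *FromArray helpers (the buffer layout is an assumption for
// illustration; the caller must know at least 12 bytes are available):
//
//   uint32 fixed32;
//   uint64 fixed64;
//   const uint8* ptr = data;  // assumed to point at a fixed32 then a fixed64
//   ptr = CodedInputStream::ReadLittleEndian32FromArray(ptr, &fixed32);
//   ptr = CodedInputStream::ReadLittleEndian64FromArray(ptr, &fixed64);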

inline uint32 CodedInputStream::ReadTagNoLastTag() {
  uint32 v = 0;
  if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_)) {
    v = *buffer_;
    if (v < 0x80) {
      Advance(1);
      return v;
    }
  }
  v = ReadTagFallback(v);
  return v;
}

inline std::pair<uint32, bool> CodedInputStream::ReadTagWithCutoffNoLastTag(
    uint32 cutoff) {
  // In performance-sensitive code we can expect cutoff to be a compile-time
  // constant, and things like "cutoff >= kMax1ByteVarint" to be evaluated at
  // compile time.
  uint32 first_byte_or_zero = 0;
  if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_)) {
    // Hot case: buffer_ is non-empty and buffer_[0] is in [1, 128).
    // TODO(gpike): Is it worth rearranging this? E.g., if the number of fields
    // is large enough then is it better to check for the two-byte case first?
    first_byte_or_zero = buffer_[0];
    if (static_cast<int8>(buffer_[0]) > 0) {
      const uint32 kMax1ByteVarint = 0x7f;
      uint32 tag = buffer_[0];
      Advance(1);
      return std::make_pair(tag, cutoff >= kMax1ByteVarint || tag <= cutoff);
    }
    // Other hot case: cutoff >= 0x80, buffer_ has at least two bytes available,
    // and the tag is two bytes. The latter is tested by bitwise-and-not of the
    // first byte and the second byte.
    if (cutoff >= 0x80 && PROTOBUF_PREDICT_TRUE(buffer_ + 1 < buffer_end_) &&
        PROTOBUF_PREDICT_TRUE((buffer_[0] & ~buffer_[1]) >= 0x80)) {
      const uint32 kMax2ByteVarint = (0x7f << 7) + 0x7f;
      uint32 tag = (1u << 7) * buffer_[1] + (buffer_[0] - 0x80);
      Advance(2);
      // It might make sense to test for tag == 0 now, but it is so rare that
      // we don't bother. A varint-encoded 0 should be one byte unless the
      // encoder lost its mind. The second part of the return value of this
      // function is allowed to be either true or false if the tag is 0, so we
      // don't have to check for tag == 0. We may need to check whether it
      // exceeds cutoff.
      bool at_or_below_cutoff = cutoff >= kMax2ByteVarint || tag <= cutoff;
      return std::make_pair(tag, at_or_below_cutoff);
    }
  }
  // Slow path
  const uint32 tag = ReadTagFallback(first_byte_or_zero);
  return std::make_pair(tag, static_cast<uint32>(tag - 1) < cutoff);
}
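
// A minimal sketch of a tag-dispatch loop built on the cutoff variant (the
// cutoff value and the dispatch bodies are assumptions for illustration;
// generated parsers additionally track the last tag for end-of-message and
// group handling):
//
//   for (;;) {
//     std::pair<uint32, bool> p = input.ReadTagWithCutoffNoLastTag(127);
//     if (p.first == 0) break;  // end of stream or a read error
//     if (p.second) {
//       // Tag is at or below the cutoff: handle the common small fields.
//     } else {
//       // Larger or unexpected tag: fall back to generic handling.
//     }
//   }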

inline bool CodedInputStream::LastTagWas(uint32 expected) {
  return last_tag_ == expected;
}

inline bool CodedInputStream::ConsumedEntireMessage() {
  return legitimate_message_end_;
}

inline bool CodedInputStream::ExpectTag(uint32 expected) {
  if (expected < (1 << 7)) {
    if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_) &&
        buffer_[0] == expected) {
      Advance(1);
      return true;
    } else {
      return false;
    }
  } else if (expected < (1 << 14)) {
    if (PROTOBUF_PREDICT_TRUE(BufferSize() >= 2) &&
        buffer_[0] == static_cast<uint8>(expected | 0x80) &&
        buffer_[1] == static_cast<uint8>(expected >> 7)) {
      Advance(2);
      return true;
    } else {
      return false;
    }
  } else {
    // Don't bother optimizing for larger values.
    return false;
  }
}
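
// A minimal sketch of ExpectTag() in a hand-written parser (the tag 0x0a,
// i.e. field number 1 with the length-delimited wire type, is an assumption
// for illustration):
//
//   if (input.ExpectTag(0x0a)) {
//     uint32 length;
//     if (!input.ReadVarint32(&length)) return false;
//     // ... read "length" bytes of payload ...
//   }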

inline const uint8* CodedInputStream::ExpectTagFromArray(const uint8* buffer,
                                                         uint32 expected) {
  if (expected < (1 << 7)) {
    if (buffer[0] == expected) {
      return buffer + 1;
    }
  } else if (expected < (1 << 14)) {
    if (buffer[0] == static_cast<uint8>(expected | 0x80) &&
        buffer[1] == static_cast<uint8>(expected >> 7)) {
      return buffer + 2;
    }
  }
  return nullptr;
}

inline void CodedInputStream::GetDirectBufferPointerInline(const void** data,
                                                           int* size) {
  *data = buffer_;
  *size = static_cast<int>(buffer_end_ - buffer_);
}

inline bool CodedInputStream::ExpectAtEnd() {
  // If we are at a limit we know no more bytes can be read. Otherwise, it's
  // hard to say without calling Refresh(), and we'd rather not do that.

  if (buffer_ == buffer_end_ && ((buffer_size_after_limit_ != 0) ||
                                 (total_bytes_read_ == current_limit_))) {
    last_tag_ = 0;                   // Pretend we called ReadTag()...
    legitimate_message_end_ = true;  // ... and it hit EOF.
    return true;
  } else {
    return false;
  }
}

inline int CodedInputStream::CurrentPosition() const {
  return total_bytes_read_ - (BufferSize() + buffer_size_after_limit_);
}

inline void CodedInputStream::Advance(int amount) { buffer_ += amount; }

inline void CodedInputStream::SetRecursionLimit(int limit) {
  recursion_budget_ += limit - recursion_limit_;
  recursion_limit_ = limit;
}

inline bool CodedInputStream::IncrementRecursionDepth() {
  --recursion_budget_;
  return recursion_budget_ >= 0;
}

inline void CodedInputStream::DecrementRecursionDepth() {
  if (recursion_budget_ < recursion_limit_) ++recursion_budget_;
}

inline void CodedInputStream::UnsafeDecrementRecursionDepth() {
  assert(recursion_budget_ < recursion_limit_);
  ++recursion_budget_;
}
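
// A minimal sketch of the expected increment/decrement pairing when
// descending into a nested message (ParseNested() is an assumed helper, not
// part of this API):
//
//   if (!input.IncrementRecursionDepth()) {
//     return false;  // Nesting exceeds the configured recursion limit.
//   }
//   bool ok = ParseNested(&input);
//   input.DecrementRecursionDepth();
//   return ok;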

inline void CodedInputStream::SetExtensionRegistry(const DescriptorPool* pool,
                                                   MessageFactory* factory) {
  extension_pool_ = pool;
  extension_factory_ = factory;
}

inline const DescriptorPool* CodedInputStream::GetExtensionPool() {
  return extension_pool_;
}

inline MessageFactory* CodedInputStream::GetExtensionFactory() {
  return extension_factory_;
}

inline int CodedInputStream::BufferSize() const {
  return static_cast<int>(buffer_end_ - buffer_);
}

inline CodedInputStream::CodedInputStream(ZeroCopyInputStream* input)
    : buffer_(nullptr),
      buffer_end_(nullptr),
      input_(input),
      total_bytes_read_(0),
      overflow_bytes_(0),
      last_tag_(0),
      legitimate_message_end_(false),
      aliasing_enabled_(false),
      current_limit_(kint32max),
      buffer_size_after_limit_(0),
      total_bytes_limit_(kDefaultTotalBytesLimit),
      recursion_budget_(default_recursion_limit_),
      recursion_limit_(default_recursion_limit_),
      extension_pool_(nullptr),
      extension_factory_(nullptr) {
  // Eagerly Refresh() so buffer space is immediately available.
  Refresh();
}

inline CodedInputStream::CodedInputStream(const uint8* buffer, int size)
    : buffer_(buffer),
      buffer_end_(buffer + size),
      input_(nullptr),
      total_bytes_read_(size),
      overflow_bytes_(0),
      last_tag_(0),
      legitimate_message_end_(false),
      aliasing_enabled_(false),
      current_limit_(size),
      buffer_size_after_limit_(0),
      total_bytes_limit_(kDefaultTotalBytesLimit),
      recursion_budget_(default_recursion_limit_),
      recursion_limit_(default_recursion_limit_),
      extension_pool_(nullptr),
      extension_factory_(nullptr) {
  // Note that setting current_limit_ == size is important to prevent some
  // code paths from trying to access input_ and segfaulting.
}
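
// A minimal sketch of parsing from a flat in-memory buffer with the
// array-based constructor above ("serialized" and MyMessage are assumptions
// for illustration):
//
//   CodedInputStream input(
//       reinterpret_cast<const uint8*>(serialized.data()),
//       static_cast<int>(serialized.size()));
//   MyMessage message;
//   bool ok = message.MergeFromCodedStream(&input) &&
//             input.ConsumedEntireMessage();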

inline bool CodedInputStream::IsFlat() const { return input_ == nullptr; }

inline bool CodedInputStream::Skip(int count) {
  if (count < 0) return false;  // security: count is often user-supplied

  const int original_buffer_size = BufferSize();

  if (count <= original_buffer_size) {
    // Just skipping within the current buffer. Easy.
    Advance(count);
    return true;
  }

  return SkipFallback(count, original_buffer_size);
}

inline uint8* CodedOutputStream::WriteVarint32ToArray(uint32 value,
                                                      uint8* target) {
  return EpsCopyOutputStream::UnsafeVarint(value, target);
}

inline uint8* CodedOutputStream::WriteVarint64ToArray(uint64 value,
                                                      uint8* target) {
  return EpsCopyOutputStream::UnsafeVarint(value, target);
}

inline void CodedOutputStream::WriteVarint32SignExtended(int32 value) {
  WriteVarint64(static_cast<uint64>(value));
}

inline uint8* CodedOutputStream::WriteVarint32SignExtendedToArray(
    int32 value, uint8* target) {
  return WriteVarint64ToArray(static_cast<uint64>(value), target);
}

inline uint8* CodedOutputStream::WriteLittleEndian32ToArray(uint32 value,
                                                            uint8* target) {
#if defined(PROTOBUF_LITTLE_ENDIAN)
  memcpy(target, &value, sizeof(value));
#else
  target[0] = static_cast<uint8>(value);
  target[1] = static_cast<uint8>(value >> 8);
  target[2] = static_cast<uint8>(value >> 16);
  target[3] = static_cast<uint8>(value >> 24);
#endif
  return target + sizeof(value);
}

inline uint8* CodedOutputStream::WriteLittleEndian64ToArray(uint64 value,
                                                            uint8* target) {
#if defined(PROTOBUF_LITTLE_ENDIAN)
  memcpy(target, &value, sizeof(value));
#else
  uint32 part0 = static_cast<uint32>(value);
  uint32 part1 = static_cast<uint32>(value >> 32);

  target[0] = static_cast<uint8>(part0);
  target[1] = static_cast<uint8>(part0 >> 8);
  target[2] = static_cast<uint8>(part0 >> 16);
  target[3] = static_cast<uint8>(part0 >> 24);
  target[4] = static_cast<uint8>(part1);
  target[5] = static_cast<uint8>(part1 >> 8);
  target[6] = static_cast<uint8>(part1 >> 16);
  target[7] = static_cast<uint8>(part1 >> 24);
#endif
  return target + sizeof(value);
}

inline void CodedOutputStream::WriteVarint32(uint32 value) {
  cur_ = impl_.EnsureSpace(cur_);
  SetCur(WriteVarint32ToArray(value, Cur()));
}

inline void CodedOutputStream::WriteVarint64(uint64 value) {
  cur_ = impl_.EnsureSpace(cur_);
  SetCur(WriteVarint64ToArray(value, Cur()));
}

inline void CodedOutputStream::WriteTag(uint32 value) { WriteVarint32(value); }

inline uint8* CodedOutputStream::WriteTagToArray(uint32 value, uint8* target) {
  return WriteVarint32ToArray(value, target);
}

inline size_t CodedOutputStream::VarintSize32(uint32 value) {
  // This computes value == 0 ? 1 : floor(log2(value)) / 7 + 1
  // Use an explicit multiplication to implement the divide of
  // a number in the 1..31 range.
  // Explicit OR 0x1 to avoid calling Bits::Log2FloorNonZero(0), which is
  // undefined.
  uint32 log2value = Bits::Log2FloorNonZero(value | 0x1);
  return static_cast<size_t>((log2value * 9 + 73) / 64);
}
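
// A brief worked check of the formula above (illustrative, not part of the
// API): for value = 300, floor(log2(300)) = 8, so (8 * 9 + 73) / 64 =
// 145 / 64 = 2 under integer division, matching the two varint bytes a
// 9-bit value needs. For value = 0, the OR with 0x1 gives log2value = 0 and
// (0 + 73) / 64 = 1, the expected single byte.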

inline size_t CodedOutputStream::VarintSize64(uint64 value) {
  // This computes value == 0 ? 1 : floor(log2(value)) / 7 + 1
  // Use an explicit multiplication to implement the divide of
  // a number in the 1..63 range.
  // Explicit OR 0x1 to avoid calling Bits::Log2FloorNonZero(0), which is
  // undefined.
  uint32 log2value = Bits::Log2FloorNonZero64(value | 0x1);
  return static_cast<size_t>((log2value * 9 + 73) / 64);
}

inline size_t CodedOutputStream::VarintSize32SignExtended(int32 value) {
  if (value < 0) {
    return 10;  // TODO(kenton): Make this a symbolic constant.
  } else {
    return VarintSize32(static_cast<uint32>(value));
  }
}

inline void CodedOutputStream::WriteString(const std::string& str) {
  WriteRaw(str.data(), static_cast<int>(str.size()));
}

inline void CodedOutputStream::WriteRawMaybeAliased(const void* data,
                                                    int size) {
  cur_ = impl_.WriteRawMaybeAliased(data, size, cur_);
}

inline uint8* CodedOutputStream::WriteRawToArray(const void* data, int size,
                                                 uint8* target) {
  memcpy(target, data, size);
  return target + size;
}

inline uint8* CodedOutputStream::WriteStringToArray(const std::string& str,
                                                    uint8* target) {
  return WriteRawToArray(str.data(), static_cast<int>(str.size()), target);
}
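
// A minimal sketch of emitting a length-delimited string directly into a
// caller-owned array with the *ToArray helpers (the buffer size is an
// assumption for illustration; the caller must guarantee enough space):
//
//   std::string payload = "abc";
//   uint8 buffer[64];
//   uint8* ptr = buffer;
//   ptr = CodedOutputStream::WriteVarint32ToArray(
//       static_cast<uint32>(payload.size()), ptr);
//   ptr = CodedOutputStream::WriteStringToArray(payload, ptr);
//   // (ptr - buffer) is the number of bytes written.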

}  // namespace io
}  // namespace protobuf
}  // namespace google

#if defined(_MSC_VER) && _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
#pragma runtime_checks("c", restore)
#endif  // _MSC_VER && !defined(__INTEL_COMPILER)

#include <google/protobuf/port_undef.inc>

#endif  // GOOGLE_PROTOBUF_IO_CODED_STREAM_H__