/*
 * jmemmgr.c
 *
 * This file was part of the Independent JPEG Group's software:
 * Copyright (C) 1991-1997, Thomas G. Lane.
 * libjpeg-turbo Modifications:
 * Copyright (C) 2016, D. R. Commander.
 * For conditions of distribution and use, see the accompanying README.ijg
 * file.
 *
 * This file contains the JPEG system-independent memory management
 * routines.  This code is usable across a wide variety of machines; most
 * of the system dependencies have been isolated in a separate file.
 * The major functions provided here are:
 *   * pool-based allocation and freeing of memory;
 *   * policy decisions about how to divide available memory among the
 *     virtual arrays;
 *   * control logic for swapping virtual arrays between main memory and
 *     backing storage.
 * The separate system-dependent file provides the actual backing-storage
 * access code, and it contains the policy decision about how much total
 * main memory to use.
 * This file is system-dependent in the sense that some of its functions
 * are unnecessary in some systems.  For example, if there is enough virtual
 * memory so that backing storage will never be used, much of the virtual
 * array control logic could be removed.  (Of course, if you have that much
 * memory then you shouldn't care about a little bit of unused code...)
 */

#define JPEG_INTERNALS
#define AM_MEMORY_MANAGER       /* we define jvirt_Xarray_control structs */
#include "jinclude.h"
#include "jpeglib.h"
#include "jmemsys.h"            /* import the system-dependent declarations */
#if !defined(_MSC_VER) || _MSC_VER > 1600
#include <stdint.h>
#endif
#include <limits.h>

#ifndef NO_GETENV
#ifndef HAVE_STDLIB_H           /* <stdlib.h> should declare getenv() */
extern char *getenv(const char *name);
#endif
#endif


LOCAL(size_t)
round_up_pow2(size_t a, size_t b)
/* a rounded up to the next multiple of b, i.e. ceil(a/b)*b */
/* Assumes a >= 0, b > 0, and b is a power of 2 */
{
  return ((a + b - 1) & (~(b - 1)));
}
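
/*
 * Worked example of the rounding arithmetic: with a = 100 and b = 32,
 *   (100 + 32 - 1) & ~(32 - 1)  =  131 & ~31  =  128,
 * i.e. round_up_pow2(100, 32) == 128, the next multiple of 32 at or above
 * 100.  The bit mask only works because b is a power of 2, which is why the
 * comment above requires it.
 */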


/*
 * Some important notes:
 * The allocation routines provided here must never return NULL.
 * They should exit to error_exit if unsuccessful.
 *
 * It's not a good idea to try to merge the sarray and barray routines,
 * even though they are textually almost the same, because samples are
 * usually stored as bytes while coefficients are shorts or ints.  Thus,
 * in machines where byte pointers have a different representation from
 * word pointers, the resulting machine code could not be the same.
 */


/*
 * Many machines require storage alignment: longs must start on 4-byte
 * boundaries, doubles on 8-byte boundaries, etc.  On such machines, malloc()
 * always returns pointers that are multiples of the worst-case alignment
 * requirement, and we had better do so too.
 * There isn't any really portable way to determine the worst-case alignment
 * requirement.  This module assumes that the alignment requirement is
 * multiples of ALIGN_SIZE.
 * By default, we define ALIGN_SIZE as sizeof(double).  This is necessary on
 * some workstations (where doubles really do need 8-byte alignment) and will
 * work fine on nearly everything.  If your machine has lesser alignment
 * needs, you can save a few bytes by making ALIGN_SIZE smaller.
 * The only place I know of where this will NOT work is certain Macintosh
 * 680x0 compilers that define double as a 10-byte IEEE extended float.
 * Doing 10-byte alignment is counterproductive because longwords won't be
 * aligned well.  Put "#define ALIGN_SIZE 4" in jconfig.h if you have
 * such a compiler.
 */

#ifndef ALIGN_SIZE              /* so can override from jconfig.h */
#ifndef WITH_SIMD
#define ALIGN_SIZE  sizeof(double)
#else
#define ALIGN_SIZE  32  /* Most of the SIMD instructions we support require
                           16-byte (128-bit) alignment, but AVX2 requires
                           32-byte alignment. */
#endif
#endif

/*
 * We allocate objects from "pools", where each pool is gotten with a single
 * request to jpeg_get_small() or jpeg_get_large().  There is no per-object
 * overhead within a pool, except for alignment padding.  Each pool has a
 * header with a link to the next pool of the same class.
 * Small and large pool headers are identical.
 */
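
/*
 * Usage sketch (illustrative; "dinfo" and struct my_state stand for any
 * client decompressor and any client-defined struct): callers do not invoke
 * alloc_small()/alloc_large() directly, but go through the method pointers
 * installed in cinfo->mem by jinit_memory_mgr() below, choosing a lifetime
 * class per allocation:
 *
 *   struct my_state *s = (struct my_state *)
 *     (*dinfo->mem->alloc_small)((j_common_ptr)dinfo, JPOOL_IMAGE,
 *                                sizeof(struct my_state));
 *
 * JPOOL_IMAGE storage is released at the end of the current image cycle
 * (or by jpeg_abort); JPOOL_PERMANENT storage lasts until jpeg_destroy().
 */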

typedef struct small_pool_struct *small_pool_ptr;

typedef struct small_pool_struct {
  small_pool_ptr next;          /* next in list of pools */
  size_t bytes_used;            /* how many bytes already used within pool */
  size_t bytes_left;            /* bytes still available in this pool */
} small_pool_hdr;

typedef struct large_pool_struct *large_pool_ptr;

typedef struct large_pool_struct {
  large_pool_ptr next;          /* next in list of pools */
  size_t bytes_used;            /* how many bytes already used within pool */
  size_t bytes_left;            /* bytes still available in this pool */
} large_pool_hdr;

/*
 * Here is the full definition of a memory manager object.
 */

typedef struct {
  struct jpeg_memory_mgr pub;   /* public fields */

  /* Each pool identifier (lifetime class) names a linked list of pools. */
  small_pool_ptr small_list[JPOOL_NUMPOOLS];
  large_pool_ptr large_list[JPOOL_NUMPOOLS];

  /* Since we only have one lifetime class of virtual arrays, only one
   * linked list is necessary (for each datatype).  Note that the virtual
   * array control blocks being linked together are actually stored somewhere
   * in the small-pool list.
   */
  jvirt_sarray_ptr virt_sarray_list;
  jvirt_barray_ptr virt_barray_list;

  /* This counts total space obtained from jpeg_get_small/large */
  size_t total_space_allocated;

  /* alloc_sarray and alloc_barray set this value for use by virtual
   * array routines.
   */
  JDIMENSION last_rowsperchunk; /* from most recent alloc_sarray/barray */
} my_memory_mgr;

typedef my_memory_mgr *my_mem_ptr;


/*
 * The control blocks for virtual arrays.
 * Note that these blocks are allocated in the "small" pool area.
 * System-dependent info for the associated backing store (if any) is hidden
 * inside the backing_store_info struct.
 */

struct jvirt_sarray_control {
  JSAMPARRAY mem_buffer;        /* => the in-memory buffer */
  JDIMENSION rows_in_array;     /* total virtual array height */
  JDIMENSION samplesperrow;     /* width of array (and of memory buffer) */
  JDIMENSION maxaccess;         /* max rows accessed by access_virt_sarray */
  JDIMENSION rows_in_mem;       /* height of memory buffer */
  JDIMENSION rowsperchunk;      /* allocation chunk size in mem_buffer */
  JDIMENSION cur_start_row;     /* first logical row # in the buffer */
  JDIMENSION first_undef_row;   /* row # of first uninitialized row */
  boolean pre_zero;             /* pre-zero mode requested? */
  boolean dirty;                /* do current buffer contents need to be written? */
  boolean b_s_open;             /* is backing-store data valid? */
  jvirt_sarray_ptr next;        /* link to next virtual sarray control block */
  backing_store_info b_s_info;  /* System-dependent control info */
};

struct jvirt_barray_control {
  JBLOCKARRAY mem_buffer;       /* => the in-memory buffer */
  JDIMENSION rows_in_array;     /* total virtual array height */
  JDIMENSION blocksperrow;      /* width of array (and of memory buffer) */
  JDIMENSION maxaccess;         /* max rows accessed by access_virt_barray */
  JDIMENSION rows_in_mem;       /* height of memory buffer */
  JDIMENSION rowsperchunk;      /* allocation chunk size in mem_buffer */
  JDIMENSION cur_start_row;     /* first logical row # in the buffer */
  JDIMENSION first_undef_row;   /* row # of first uninitialized row */
  boolean pre_zero;             /* pre-zero mode requested? */
  boolean dirty;                /* do current buffer contents need to be written? */
  boolean b_s_open;             /* is backing-store data valid? */
  jvirt_barray_ptr next;        /* link to next virtual barray control block */
  backing_store_info b_s_info;  /* System-dependent control info */
};


#ifdef MEM_STATS                /* optional extra stuff for statistics */

LOCAL(void)
print_mem_stats(j_common_ptr cinfo, int pool_id)
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  small_pool_ptr shdr_ptr;
  large_pool_ptr lhdr_ptr;

  /* Since this is only a debugging stub, we can cheat a little by using
   * fprintf directly rather than going through the trace message code.
   * This is helpful because the message parm array can't handle longs.
   */
  fprintf(stderr, "Freeing pool %d, total space = %ld\n",
          pool_id, (long)mem->total_space_allocated);

  for (lhdr_ptr = mem->large_list[pool_id]; lhdr_ptr != NULL;
       lhdr_ptr = lhdr_ptr->next) {
    fprintf(stderr, "  Large chunk used %ld\n", (long)lhdr_ptr->bytes_used);
  }

  for (shdr_ptr = mem->small_list[pool_id]; shdr_ptr != NULL;
       shdr_ptr = shdr_ptr->next) {
    fprintf(stderr, "  Small chunk used %ld free %ld\n",
            (long)shdr_ptr->bytes_used, (long)shdr_ptr->bytes_left);
  }
}

#endif /* MEM_STATS */


LOCAL(void)
out_of_memory(j_common_ptr cinfo, int which)
/* Report an out-of-memory error and stop execution */
/* If we compiled MEM_STATS support, report alloc requests before dying */
{
#ifdef MEM_STATS
  cinfo->err->trace_level = 2;  /* force self_destruct to report stats */
#endif
  ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, which);
}


/*
 * Allocation of "small" objects.
 *
 * For these, we use pooled storage.  When a new pool must be created,
 * we try to get enough space for the current request plus a "slop" factor,
 * where the slop will be the amount of leftover space in the new pool.
 * The speed vs. space tradeoff is largely determined by the slop values.
 * A different slop value is provided for each pool class (lifetime),
 * and we also distinguish the first pool of a class from later ones.
 * NOTE: the values given work fairly well on both 16- and 32-bit-int
 * machines, but may be too small if longs are 64 bits or more.
 *
 * Since we do not know what alignment malloc() gives us, we have to
 * allocate ALIGN_SIZE-1 extra space per pool to have room for alignment
 * adjustment.
 */
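
/*
 * Worked example (sizes approximate): suppose the first JPOOL_IMAGE request
 * of a decompression asks for a 24-byte object and ALIGN_SIZE is 32 (the
 * WITH_SIMD default above).  alloc_small() rounds the request up to 32
 * bytes, computes min_request = sizeof(small_pool_hdr) + 32 + 31 (on the
 * order of 90 bytes on a 64-bit build), adds the 16000-byte first-IMAGE-pool
 * slop from the table below, and hands the total to jpeg_get_small().
 * Subsequent small IMAGE allocations are then carved out of that one ~16 KB
 * pool until its bytes_left runs out, at which point a new pool with the
 * smaller extra_pool_slop is created.
 */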

static const size_t first_pool_slop[JPOOL_NUMPOOLS] = {
  1600,                         /* first PERMANENT pool */
  16000                         /* first IMAGE pool */
};

static const size_t extra_pool_slop[JPOOL_NUMPOOLS] = {
  0,                            /* additional PERMANENT pools */
  5000                          /* additional IMAGE pools */
};

#define MIN_SLOP  50            /* greater than 0 to avoid futile looping */


METHODDEF(void *)
alloc_small(j_common_ptr cinfo, int pool_id, size_t sizeofobject)
/* Allocate a "small" object */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  small_pool_ptr hdr_ptr, prev_hdr_ptr;
  char *data_ptr;
  size_t min_request, slop;

  /*
   * Round up the requested size to a multiple of ALIGN_SIZE in order
   * to assure alignment for the next object allocated in the same pool
   * and so that algorithms can straddle outside the proper area up
   * to the next alignment.
   */
  if (sizeofobject > MAX_ALLOC_CHUNK) {
    /* This prevents overflow/wrap-around in round_up_pow2() if sizeofobject
       is close to SIZE_MAX. */
    out_of_memory(cinfo, 7);
  }
  sizeofobject = round_up_pow2(sizeofobject, ALIGN_SIZE);

  /* Check for unsatisfiable request (do now to ensure no overflow below) */
  if ((sizeof(small_pool_hdr) + sizeofobject + ALIGN_SIZE - 1) >
      MAX_ALLOC_CHUNK)
    out_of_memory(cinfo, 1);    /* request exceeds malloc's ability */

  /* See if space is available in any existing pool */
  if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS)
    ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */
  prev_hdr_ptr = NULL;
  hdr_ptr = mem->small_list[pool_id];
  while (hdr_ptr != NULL) {
    if (hdr_ptr->bytes_left >= sizeofobject)
      break;                    /* found pool with enough space */
    prev_hdr_ptr = hdr_ptr;
    hdr_ptr = hdr_ptr->next;
  }

  /* Time to make a new pool? */
  if (hdr_ptr == NULL) {
    /* min_request is what we need now, slop is what will be leftover */
    min_request = sizeof(small_pool_hdr) + sizeofobject + ALIGN_SIZE - 1;
    if (prev_hdr_ptr == NULL)   /* first pool in class? */
      slop = first_pool_slop[pool_id];
    else
      slop = extra_pool_slop[pool_id];
    /* Don't ask for more than MAX_ALLOC_CHUNK */
    if (slop > (size_t)(MAX_ALLOC_CHUNK - min_request))
      slop = (size_t)(MAX_ALLOC_CHUNK - min_request);
    /* Try to get space, if fail reduce slop and try again */
    for (;;) {
      hdr_ptr = (small_pool_ptr)jpeg_get_small(cinfo, min_request + slop);
      if (hdr_ptr != NULL)
        break;
      slop /= 2;
      if (slop < MIN_SLOP)      /* give up when it gets real small */
        out_of_memory(cinfo, 2); /* jpeg_get_small failed */
    }
    mem->total_space_allocated += min_request + slop;
    /* Success, initialize the new pool header and add to end of list */
    hdr_ptr->next = NULL;
    hdr_ptr->bytes_used = 0;
    hdr_ptr->bytes_left = sizeofobject + slop;
    if (prev_hdr_ptr == NULL)   /* first pool in class? */
      mem->small_list[pool_id] = hdr_ptr;
    else
      prev_hdr_ptr->next = hdr_ptr;
  }

  /* OK, allocate the object from the current pool */
  data_ptr = (char *)hdr_ptr;   /* point to first data byte in pool... */
  data_ptr += sizeof(small_pool_hdr); /* ...by skipping the header... */
  if ((size_t)data_ptr % ALIGN_SIZE)  /* ...and adjust for alignment */
    data_ptr += ALIGN_SIZE - (size_t)data_ptr % ALIGN_SIZE;
  data_ptr += hdr_ptr->bytes_used;    /* point to place for object */
  hdr_ptr->bytes_used += sizeofobject;
  hdr_ptr->bytes_left -= sizeofobject;

  return (void *)data_ptr;
}


/*
 * Allocation of "large" objects.
 *
 * The external semantics of these are the same as "small" objects. However,
 * the pool management heuristics are quite different.  We assume that each
 * request is large enough that it may as well be passed directly to
 * jpeg_get_large; the pool management just links everything together
 * so that we can free it all on demand.
 * Note: the major use of "large" objects is in JSAMPARRAY and JBLOCKARRAY
 * structures.  The routines that create these structures (see below)
 * deliberately bunch rows together to ensure a large request size.
 */
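
/*
 * Usage sketch (illustrative; "cinfo" is any j_common_ptr and "row_width"
 * any row length the caller has in hand): a one-shot image-lifetime buffer
 * is obtained through the public method pointer:
 *
 *   JSAMPLE *buf = (JSAMPLE *)
 *     (*cinfo->mem->alloc_large)(cinfo, JPOOL_IMAGE,
 *                                (size_t)row_width * sizeof(JSAMPLE));
 *
 * Unlike small objects, each such request gets its own jpeg_get_large()
 * block; free_pool() below releases all of them when the pool is freed.
 */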

METHODDEF(void *)
alloc_large(j_common_ptr cinfo, int pool_id, size_t sizeofobject)
/* Allocate a "large" object */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  large_pool_ptr hdr_ptr;
  char *data_ptr;

  /*
   * Round up the requested size to a multiple of ALIGN_SIZE so that
   * algorithms can straddle outside the proper area up to the next
   * alignment.
   */
  if (sizeofobject > MAX_ALLOC_CHUNK) {
    /* This prevents overflow/wrap-around in round_up_pow2() if sizeofobject
       is close to SIZE_MAX. */
    out_of_memory(cinfo, 8);
  }
  sizeofobject = round_up_pow2(sizeofobject, ALIGN_SIZE);

  /* Check for unsatisfiable request (do now to ensure no overflow below) */
  if ((sizeof(large_pool_hdr) + sizeofobject + ALIGN_SIZE - 1) >
      MAX_ALLOC_CHUNK)
    out_of_memory(cinfo, 3);    /* request exceeds malloc's ability */

  /* Always make a new pool */
  if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS)
    ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */

  hdr_ptr = (large_pool_ptr)jpeg_get_large(cinfo, sizeofobject +
                                           sizeof(large_pool_hdr) +
                                           ALIGN_SIZE - 1);
  if (hdr_ptr == NULL)
    out_of_memory(cinfo, 4);    /* jpeg_get_large failed */
  mem->total_space_allocated += sizeofobject + sizeof(large_pool_hdr) +
                                ALIGN_SIZE - 1;

  /* Success, initialize the new pool header and add to list */
  hdr_ptr->next = mem->large_list[pool_id];
  /* We maintain space counts in each pool header for statistical purposes,
   * even though they are not needed for allocation.
   */
  hdr_ptr->bytes_used = sizeofobject;
  hdr_ptr->bytes_left = 0;
  mem->large_list[pool_id] = hdr_ptr;

  data_ptr = (char *)hdr_ptr;   /* point to first data byte in pool... */
  data_ptr += sizeof(small_pool_hdr); /* ...by skipping the header... */
  if ((size_t)data_ptr % ALIGN_SIZE)  /* ...and adjust for alignment */
    data_ptr += ALIGN_SIZE - (size_t)data_ptr % ALIGN_SIZE;

  return (void *)data_ptr;
}


/*
 * Creation of 2-D sample arrays.
 *
 * To minimize allocation overhead and to allow I/O of large contiguous
 * blocks, we allocate the sample rows in groups of as many rows as possible
 * without exceeding MAX_ALLOC_CHUNK total bytes per allocation request.
 * NB: the virtual array control routines, later in this file, know about
 * this chunking of rows.  The rowsperchunk value is left in the mem manager
 * object so that it can be saved away if this sarray is the workspace for
 * a virtual array.
 *
 * Since we are often upsampling with a factor 2, we align the size (not
 * the start) to 2 * ALIGN_SIZE so that the upsampling routines don't have
 * to be as careful about size.
 */
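
/*
 * Usage sketch (illustrative names): the JSAMPARRAY returned by
 * alloc_sarray() is an array of row pointers, so callers treat it as a 2-D
 * array even though the rows actually live in a few large chunks.  With a
 * j_common_ptr "cinfo" and a row width "samples_per_row":
 *
 *   JSAMPARRAY strip = (*cinfo->mem->alloc_sarray)(cinfo, JPOOL_IMAGE,
 *                                                  samples_per_row,
 *                                                  (JDIMENSION)8);
 *   ... strip[row][col] then selects one JSAMPLE in the 8-row strip ...
 *
 * As noted below, the library's own strip buffers (and the workspaces behind
 * virtual arrays) are allocated this way.
 */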

METHODDEF(JSAMPARRAY)
alloc_sarray(j_common_ptr cinfo, int pool_id, JDIMENSION samplesperrow,
             JDIMENSION numrows)
/* Allocate a 2-D sample array */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  JSAMPARRAY result;
  JSAMPROW workspace;
  JDIMENSION rowsperchunk, currow, i;
  long ltemp;

  /* Make sure each row is properly aligned */
  if ((ALIGN_SIZE % sizeof(JSAMPLE)) != 0)
    out_of_memory(cinfo, 5);    /* safety check */

  if (samplesperrow > MAX_ALLOC_CHUNK) {
    /* This prevents overflow/wrap-around in round_up_pow2() if samplesperrow
       is close to SIZE_MAX. */
    out_of_memory(cinfo, 9);
  }
  samplesperrow = (JDIMENSION)round_up_pow2(samplesperrow, (2 * ALIGN_SIZE) /
                                            sizeof(JSAMPLE));

  /* Calculate max # of rows allowed in one allocation chunk */
  ltemp = (MAX_ALLOC_CHUNK - sizeof(large_pool_hdr)) /
          ((long)samplesperrow * sizeof(JSAMPLE));
  if (ltemp <= 0)
    ERREXIT(cinfo, JERR_WIDTH_OVERFLOW);
  if (ltemp < (long)numrows)
    rowsperchunk = (JDIMENSION)ltemp;
  else
    rowsperchunk = numrows;
  mem->last_rowsperchunk = rowsperchunk;

  /* Get space for row pointers (small object) */
  result = (JSAMPARRAY)alloc_small(cinfo, pool_id,
                                   (size_t)(numrows * sizeof(JSAMPROW)));

  /* Get the rows themselves (large objects) */
  currow = 0;
  while (currow < numrows) {
    rowsperchunk = MIN(rowsperchunk, numrows - currow);
    workspace = (JSAMPROW)alloc_large(cinfo, pool_id,
      (size_t)((size_t)rowsperchunk * (size_t)samplesperrow *
               sizeof(JSAMPLE)));
    for (i = rowsperchunk; i > 0; i--) {
      result[currow++] = workspace;
      workspace += samplesperrow;
    }
  }

  return result;
}


/*
 * Creation of 2-D coefficient-block arrays.
 * This is essentially the same as the code for sample arrays, above.
 */

METHODDEF(JBLOCKARRAY)
alloc_barray(j_common_ptr cinfo, int pool_id, JDIMENSION blocksperrow,
             JDIMENSION numrows)
/* Allocate a 2-D coefficient-block array */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  JBLOCKARRAY result;
  JBLOCKROW workspace;
  JDIMENSION rowsperchunk, currow, i;
  long ltemp;

  /* Make sure each row is properly aligned */
  if ((sizeof(JBLOCK) % ALIGN_SIZE) != 0)
    out_of_memory(cinfo, 6);    /* safety check */

  /* Calculate max # of rows allowed in one allocation chunk */
  ltemp = (MAX_ALLOC_CHUNK - sizeof(large_pool_hdr)) /
          ((long)blocksperrow * sizeof(JBLOCK));
  if (ltemp <= 0)
    ERREXIT(cinfo, JERR_WIDTH_OVERFLOW);
  if (ltemp < (long)numrows)
    rowsperchunk = (JDIMENSION)ltemp;
  else
    rowsperchunk = numrows;
  mem->last_rowsperchunk = rowsperchunk;

  /* Get space for row pointers (small object) */
  result = (JBLOCKARRAY)alloc_small(cinfo, pool_id,
                                    (size_t)(numrows * sizeof(JBLOCKROW)));

  /* Get the rows themselves (large objects) */
  currow = 0;
  while (currow < numrows) {
    rowsperchunk = MIN(rowsperchunk, numrows - currow);
    workspace = (JBLOCKROW)alloc_large(cinfo, pool_id,
      (size_t)((size_t)rowsperchunk * (size_t)blocksperrow *
               sizeof(JBLOCK)));
    for (i = rowsperchunk; i > 0; i--) {
      result[currow++] = workspace;
      workspace += blocksperrow;
    }
  }

  return result;
}


/*
 * About virtual array management:
 *
 * The above "normal" array routines are only used to allocate strip buffers
 * (as wide as the image, but just a few rows high).  Full-image-sized
 * buffers are handled as "virtual" arrays.  The array is still accessed a
 * strip at a time, but the memory manager must save the whole array for
 * repeated accesses.  The intended implementation is that there is a strip
 * buffer in memory (as high as is possible given the desired memory limit),
 * plus a backing file that holds the rest of the array.
 *
 * The request_virt_array routines are told the total size of the image and
 * the maximum number of rows that will be accessed at once.  The in-memory
 * buffer must be at least as large as the maxaccess value.
 *
 * The request routines create control blocks but not the in-memory buffers.
 * That is postponed until realize_virt_arrays is called.  At that time the
 * total amount of space needed is known (approximately, anyway), so free
 * memory can be divided up fairly.
 *
 * The access_virt_array routines are responsible for making a specific strip
 * area accessible (after reading or writing the backing file, if necessary).
 * Note that the access routines are told whether the caller intends to modify
 * the accessed strip; during a read-only pass this saves having to rewrite
 * data to disk.  The access routines are also responsible for pre-zeroing
 * any newly accessed rows, if pre-zeroing was requested.
 *
 * In current usage, the access requests are usually for nonoverlapping
 * strips; that is, successive access start_row numbers differ by exactly
 * num_rows = maxaccess.  This means we can get good performance with simple
 * buffer dump/reload logic, by making the in-memory buffer be a multiple
 * of the access height; then there will never be accesses across bufferload
 * boundaries.  The code will still work with overlapping access requests,
 * but it doesn't handle bufferload overlaps very efficiently.
 */
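
/*
 * Usage sketch (illustrative; "cinfo", the block dimensions, and the strip
 * height "v_samp" stand in for a real component's values): a typical client
 * follows a request / realize / access protocol through the public method
 * pointers, e.g. for a whole-image coefficient array:
 *
 *   jvirt_barray_ptr coefs =
 *     (*cinfo->mem->request_virt_barray)(cinfo, JPOOL_IMAGE, TRUE,
 *                                        width_in_blocks, height_in_blocks,
 *                                        v_samp);
 *   ...                          (no in-memory buffer exists yet)
 *   (*cinfo->mem->realize_virt_arrays)(cinfo);
 *   for (row = 0; row < height_in_blocks; row += v_samp) {
 *     JDIMENSION nrows = MIN(v_samp, height_in_blocks - row);
 *     JBLOCKARRAY strip =
 *       (*cinfo->mem->access_virt_barray)(cinfo, coefs, row, nrows, TRUE);
 *     ... read or write strip[0 .. nrows-1][0 .. width_in_blocks-1] ...
 *   }
 *
 * This is the nonoverlapping forward-scan pattern described above, which the
 * dump/reload logic below handles most efficiently.
 */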


METHODDEF(jvirt_sarray_ptr)
request_virt_sarray(j_common_ptr cinfo, int pool_id, boolean pre_zero,
                    JDIMENSION samplesperrow, JDIMENSION numrows,
                    JDIMENSION maxaccess)
/* Request a virtual 2-D sample array */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  jvirt_sarray_ptr result;

  /* Only IMAGE-lifetime virtual arrays are currently supported */
  if (pool_id != JPOOL_IMAGE)
    ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */

  /* get control block */
  result = (jvirt_sarray_ptr)alloc_small(cinfo, pool_id,
                                         sizeof(struct jvirt_sarray_control));

  result->mem_buffer = NULL;    /* marks array not yet realized */
  result->rows_in_array = numrows;
  result->samplesperrow = samplesperrow;
  result->maxaccess = maxaccess;
  result->pre_zero = pre_zero;
  result->b_s_open = FALSE;     /* no associated backing-store object */
  result->next = mem->virt_sarray_list; /* add to list of virtual arrays */
  mem->virt_sarray_list = result;

  return result;
}


METHODDEF(jvirt_barray_ptr)
request_virt_barray(j_common_ptr cinfo, int pool_id, boolean pre_zero,
                    JDIMENSION blocksperrow, JDIMENSION numrows,
                    JDIMENSION maxaccess)
/* Request a virtual 2-D coefficient-block array */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  jvirt_barray_ptr result;

  /* Only IMAGE-lifetime virtual arrays are currently supported */
  if (pool_id != JPOOL_IMAGE)
    ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */

  /* get control block */
  result = (jvirt_barray_ptr)alloc_small(cinfo, pool_id,
                                         sizeof(struct jvirt_barray_control));

  result->mem_buffer = NULL;    /* marks array not yet realized */
  result->rows_in_array = numrows;
  result->blocksperrow = blocksperrow;
  result->maxaccess = maxaccess;
  result->pre_zero = pre_zero;
  result->b_s_open = FALSE;     /* no associated backing-store object */
  result->next = mem->virt_barray_list; /* add to list of virtual arrays */
  mem->virt_barray_list = result;

  return result;
}


METHODDEF(void)
realize_virt_arrays(j_common_ptr cinfo)
/* Allocate the in-memory buffers for any unrealized virtual arrays */
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  size_t space_per_minheight, maximum_space, avail_mem;
  size_t minheights, max_minheights;
  jvirt_sarray_ptr sptr;
  jvirt_barray_ptr bptr;

  /* Compute the minimum space needed (maxaccess rows in each buffer)
   * and the maximum space needed (full image height in each buffer).
   * These may be of use to the system-dependent jpeg_mem_available routine.
   */
  space_per_minheight = 0;
  maximum_space = 0;
  for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) {
    if (sptr->mem_buffer == NULL) { /* if not realized yet */
      size_t new_space = (long)sptr->rows_in_array *
                         (long)sptr->samplesperrow * sizeof(JSAMPLE);

      space_per_minheight += (long)sptr->maxaccess *
                             (long)sptr->samplesperrow * sizeof(JSAMPLE);
      if (SIZE_MAX - maximum_space < new_space)
        out_of_memory(cinfo, 10);
      maximum_space += new_space;
    }
  }
  for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) {
    if (bptr->mem_buffer == NULL) { /* if not realized yet */
      size_t new_space = (long)bptr->rows_in_array *
                         (long)bptr->blocksperrow * sizeof(JBLOCK);

      space_per_minheight += (long)bptr->maxaccess *
                             (long)bptr->blocksperrow * sizeof(JBLOCK);
      if (SIZE_MAX - maximum_space < new_space)
        out_of_memory(cinfo, 11);
      maximum_space += new_space;
    }
  }

  if (space_per_minheight <= 0)
    return;                     /* no unrealized arrays, no work */

  /* Determine amount of memory to actually use; this is system-dependent. */
  avail_mem = jpeg_mem_available(cinfo, space_per_minheight, maximum_space,
                                 mem->total_space_allocated);

  /* If the maximum space needed is available, make all the buffers full
   * height; otherwise parcel it out with the same number of minheights
   * in each buffer.
   */
  if (avail_mem >= maximum_space)
    max_minheights = 1000000000L;
  else {
    max_minheights = avail_mem / space_per_minheight;
    /* If there doesn't seem to be enough space, try to get the minimum
     * anyway.  This allows a "stub" implementation of jpeg_mem_available().
     */
    if (max_minheights <= 0)
      max_minheights = 1;
  }

  /* Allocate the in-memory buffers and initialize backing store as needed. */

  for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) {
    if (sptr->mem_buffer == NULL) { /* if not realized yet */
      minheights = ((long)sptr->rows_in_array - 1L) / sptr->maxaccess + 1L;
      if (minheights <= max_minheights) {
        /* This buffer fits in memory */
        sptr->rows_in_mem = sptr->rows_in_array;
      } else {
        /* It doesn't fit in memory, create backing store. */
        sptr->rows_in_mem = (JDIMENSION)(max_minheights * sptr->maxaccess);
        jpeg_open_backing_store(cinfo, &sptr->b_s_info,
                                (long)sptr->rows_in_array *
                                (long)sptr->samplesperrow *
                                (long)sizeof(JSAMPLE));
        sptr->b_s_open = TRUE;
      }
      sptr->mem_buffer = alloc_sarray(cinfo, JPOOL_IMAGE,
                                      sptr->samplesperrow, sptr->rows_in_mem);
      sptr->rowsperchunk = mem->last_rowsperchunk;
      sptr->cur_start_row = 0;
      sptr->first_undef_row = 0;
      sptr->dirty = FALSE;
    }
  }

  for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) {
    if (bptr->mem_buffer == NULL) { /* if not realized yet */
      minheights = ((long)bptr->rows_in_array - 1L) / bptr->maxaccess + 1L;
      if (minheights <= max_minheights) {
        /* This buffer fits in memory */
        bptr->rows_in_mem = bptr->rows_in_array;
      } else {
        /* It doesn't fit in memory, create backing store. */
        bptr->rows_in_mem = (JDIMENSION)(max_minheights * bptr->maxaccess);
        jpeg_open_backing_store(cinfo, &bptr->b_s_info,
                                (long)bptr->rows_in_array *
                                (long)bptr->blocksperrow *
                                (long)sizeof(JBLOCK));
        bptr->b_s_open = TRUE;
      }
      bptr->mem_buffer = alloc_barray(cinfo, JPOOL_IMAGE,
                                      bptr->blocksperrow, bptr->rows_in_mem);
      bptr->rowsperchunk = mem->last_rowsperchunk;
      bptr->cur_start_row = 0;
      bptr->first_undef_row = 0;
      bptr->dirty = FALSE;
    }
  }
}


LOCAL(void)
do_sarray_io(j_common_ptr cinfo, jvirt_sarray_ptr ptr, boolean writing)
/* Do backing store read or write of a virtual sample array */
{
  long bytesperrow, file_offset, byte_count, rows, thisrow, i;

  bytesperrow = (long)ptr->samplesperrow * sizeof(JSAMPLE);
  file_offset = ptr->cur_start_row * bytesperrow;
  /* Loop to read or write each allocation chunk in mem_buffer */
  for (i = 0; i < (long)ptr->rows_in_mem; i += ptr->rowsperchunk) {
    /* One chunk, but check for short chunk at end of buffer */
    rows = MIN((long)ptr->rowsperchunk, (long)ptr->rows_in_mem - i);
    /* Transfer no more than is currently defined */
    thisrow = (long)ptr->cur_start_row + i;
    rows = MIN(rows, (long)ptr->first_undef_row - thisrow);
    /* Transfer no more than fits in file */
    rows = MIN(rows, (long)ptr->rows_in_array - thisrow);
    if (rows <= 0)              /* this chunk might be past end of file! */
      break;
    byte_count = rows * bytesperrow;
    if (writing)
      (*ptr->b_s_info.write_backing_store) (cinfo, &ptr->b_s_info,
                                            (void *)ptr->mem_buffer[i],
                                            file_offset, byte_count);
    else
      (*ptr->b_s_info.read_backing_store) (cinfo, &ptr->b_s_info,
                                           (void *)ptr->mem_buffer[i],
                                           file_offset, byte_count);
    file_offset += byte_count;
  }
}


LOCAL(void)
do_barray_io(j_common_ptr cinfo, jvirt_barray_ptr ptr, boolean writing)
/* Do backing store read or write of a virtual coefficient-block array */
{
  long bytesperrow, file_offset, byte_count, rows, thisrow, i;

  bytesperrow = (long)ptr->blocksperrow * sizeof(JBLOCK);
  file_offset = ptr->cur_start_row * bytesperrow;
  /* Loop to read or write each allocation chunk in mem_buffer */
  for (i = 0; i < (long)ptr->rows_in_mem; i += ptr->rowsperchunk) {
    /* One chunk, but check for short chunk at end of buffer */
    rows = MIN((long)ptr->rowsperchunk, (long)ptr->rows_in_mem - i);
    /* Transfer no more than is currently defined */
    thisrow = (long)ptr->cur_start_row + i;
    rows = MIN(rows, (long)ptr->first_undef_row - thisrow);
    /* Transfer no more than fits in file */
    rows = MIN(rows, (long)ptr->rows_in_array - thisrow);
    if (rows <= 0)              /* this chunk might be past end of file! */
      break;
    byte_count = rows * bytesperrow;
    if (writing)
      (*ptr->b_s_info.write_backing_store) (cinfo, &ptr->b_s_info,
                                            (void *)ptr->mem_buffer[i],
                                            file_offset, byte_count);
    else
      (*ptr->b_s_info.read_backing_store) (cinfo, &ptr->b_s_info,
                                           (void *)ptr->mem_buffer[i],
                                           file_offset, byte_count);
    file_offset += byte_count;
  }
}


METHODDEF(JSAMPARRAY)
access_virt_sarray(j_common_ptr cinfo, jvirt_sarray_ptr ptr,
                   JDIMENSION start_row, JDIMENSION num_rows,
                   boolean writable)
/* Access the part of a virtual sample array starting at start_row */
/* and extending for num_rows rows.  writable is true if */
/* caller intends to modify the accessed area. */
{
  JDIMENSION end_row = start_row + num_rows;
  JDIMENSION undef_row;

  /* debugging check */
  if (end_row > ptr->rows_in_array || num_rows > ptr->maxaccess ||
      ptr->mem_buffer == NULL)
    ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);

  /* Make the desired part of the virtual array accessible */
  if (start_row < ptr->cur_start_row ||
      end_row > ptr->cur_start_row + ptr->rows_in_mem) {
    if (!ptr->b_s_open)
      ERREXIT(cinfo, JERR_VIRTUAL_BUG);
    /* Flush old buffer contents if necessary */
    if (ptr->dirty) {
      do_sarray_io(cinfo, ptr, TRUE);
      ptr->dirty = FALSE;
    }
    /* Decide what part of virtual array to access.
     * Algorithm: if target address > current window, assume forward scan,
     * load starting at target address.  If target address < current window,
     * assume backward scan, load so that target area is top of window.
     * Note that when switching from forward write to forward read, will have
     * start_row = 0, so the limiting case applies and we load from 0 anyway.
     */
    if (start_row > ptr->cur_start_row) {
      ptr->cur_start_row = start_row;
    } else {
      /* use long arithmetic here to avoid overflow & unsigned problems */
      long ltemp;

      ltemp = (long)end_row - (long)ptr->rows_in_mem;
      if (ltemp < 0)
        ltemp = 0;              /* don't fall off front end of file */
      ptr->cur_start_row = (JDIMENSION)ltemp;
    }
    /* Read in the selected part of the array.
     * During the initial write pass, we will do no actual read
     * because the selected part is all undefined.
     */
    do_sarray_io(cinfo, ptr, FALSE);
  }
  /* Ensure the accessed part of the array is defined; prezero if needed.
   * To improve locality of access, we only prezero the part of the array
   * that the caller is about to access, not the entire in-memory array.
   */
  if (ptr->first_undef_row < end_row) {
    if (ptr->first_undef_row < start_row) {
      if (writable)             /* writer skipped over a section of array */
        ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
      undef_row = start_row;    /* but reader is allowed to read ahead */
    } else {
      undef_row = ptr->first_undef_row;
    }
    if (writable)
      ptr->first_undef_row = end_row;
    if (ptr->pre_zero) {
      size_t bytesperrow = (size_t)ptr->samplesperrow * sizeof(JSAMPLE);
      undef_row -= ptr->cur_start_row; /* make indexes relative to buffer */
      end_row -= ptr->cur_start_row;
      while (undef_row < end_row) {
        jzero_far((void *)ptr->mem_buffer[undef_row], bytesperrow);
        undef_row++;
      }
    } else {
      if (!writable)            /* reader looking at undefined data */
        ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
    }
  }
  /* Flag the buffer dirty if caller will write in it */
  if (writable)
    ptr->dirty = TRUE;
  /* Return address of proper part of the buffer */
  return ptr->mem_buffer + (start_row - ptr->cur_start_row);
}


METHODDEF(JBLOCKARRAY)
access_virt_barray(j_common_ptr cinfo, jvirt_barray_ptr ptr,
                   JDIMENSION start_row, JDIMENSION num_rows,
                   boolean writable)
/* Access the part of a virtual block array starting at start_row */
/* and extending for num_rows rows.  writable is true if */
/* caller intends to modify the accessed area. */
{
  JDIMENSION end_row = start_row + num_rows;
  JDIMENSION undef_row;

  /* debugging check */
  if (end_row > ptr->rows_in_array || num_rows > ptr->maxaccess ||
      ptr->mem_buffer == NULL)
    ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);

  /* Make the desired part of the virtual array accessible */
  if (start_row < ptr->cur_start_row ||
      end_row > ptr->cur_start_row + ptr->rows_in_mem) {
    if (!ptr->b_s_open)
      ERREXIT(cinfo, JERR_VIRTUAL_BUG);
    /* Flush old buffer contents if necessary */
    if (ptr->dirty) {
      do_barray_io(cinfo, ptr, TRUE);
      ptr->dirty = FALSE;
    }
    /* Decide what part of virtual array to access.
     * Algorithm: if target address > current window, assume forward scan,
     * load starting at target address.  If target address < current window,
     * assume backward scan, load so that target area is top of window.
     * Note that when switching from forward write to forward read, will have
     * start_row = 0, so the limiting case applies and we load from 0 anyway.
     */
    if (start_row > ptr->cur_start_row) {
      ptr->cur_start_row = start_row;
    } else {
      /* use long arithmetic here to avoid overflow & unsigned problems */
      long ltemp;

      ltemp = (long)end_row - (long)ptr->rows_in_mem;
      if (ltemp < 0)
        ltemp = 0;              /* don't fall off front end of file */
      ptr->cur_start_row = (JDIMENSION)ltemp;
    }
    /* Read in the selected part of the array.
     * During the initial write pass, we will do no actual read
     * because the selected part is all undefined.
     */
    do_barray_io(cinfo, ptr, FALSE);
  }
  /* Ensure the accessed part of the array is defined; prezero if needed.
   * To improve locality of access, we only prezero the part of the array
   * that the caller is about to access, not the entire in-memory array.
   */
  if (ptr->first_undef_row < end_row) {
    if (ptr->first_undef_row < start_row) {
      if (writable)             /* writer skipped over a section of array */
        ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
      undef_row = start_row;    /* but reader is allowed to read ahead */
    } else {
      undef_row = ptr->first_undef_row;
    }
    if (writable)
      ptr->first_undef_row = end_row;
    if (ptr->pre_zero) {
      size_t bytesperrow = (size_t)ptr->blocksperrow * sizeof(JBLOCK);
      undef_row -= ptr->cur_start_row; /* make indexes relative to buffer */
      end_row -= ptr->cur_start_row;
      while (undef_row < end_row) {
        jzero_far((void *)ptr->mem_buffer[undef_row], bytesperrow);
        undef_row++;
      }
    } else {
      if (!writable)            /* reader looking at undefined data */
        ERREXIT(cinfo, JERR_BAD_VIRTUAL_ACCESS);
    }
  }
  /* Flag the buffer dirty if caller will write in it */
  if (writable)
    ptr->dirty = TRUE;
  /* Return address of proper part of the buffer */
  return ptr->mem_buffer + (start_row - ptr->cur_start_row);
}


/*
 * Release all objects belonging to a specified pool.
 */

METHODDEF(void)
free_pool(j_common_ptr cinfo, int pool_id)
{
  my_mem_ptr mem = (my_mem_ptr)cinfo->mem;
  small_pool_ptr shdr_ptr;
  large_pool_ptr lhdr_ptr;
  size_t space_freed;

  if (pool_id < 0 || pool_id >= JPOOL_NUMPOOLS)
    ERREXIT1(cinfo, JERR_BAD_POOL_ID, pool_id); /* safety check */

#ifdef MEM_STATS
  if (cinfo->err->trace_level > 1)
    print_mem_stats(cinfo, pool_id); /* print pool's memory usage statistics */
#endif

  /* If freeing IMAGE pool, close any virtual arrays first */
  if (pool_id == JPOOL_IMAGE) {
    jvirt_sarray_ptr sptr;
    jvirt_barray_ptr bptr;

    for (sptr = mem->virt_sarray_list; sptr != NULL; sptr = sptr->next) {
      if (sptr->b_s_open) {     /* there may be no backing store */
        sptr->b_s_open = FALSE; /* prevent recursive close if error */
        (*sptr->b_s_info.close_backing_store) (cinfo, &sptr->b_s_info);
      }
    }
    mem->virt_sarray_list = NULL;
    for (bptr = mem->virt_barray_list; bptr != NULL; bptr = bptr->next) {
      if (bptr->b_s_open) {     /* there may be no backing store */
        bptr->b_s_open = FALSE; /* prevent recursive close if error */
        (*bptr->b_s_info.close_backing_store) (cinfo, &bptr->b_s_info);
      }
    }
    mem->virt_barray_list = NULL;
  }

  /* Release large objects */
  lhdr_ptr = mem->large_list[pool_id];
  mem->large_list[pool_id] = NULL;

  while (lhdr_ptr != NULL) {
    large_pool_ptr next_lhdr_ptr = lhdr_ptr->next;
    space_freed = lhdr_ptr->bytes_used +
                  lhdr_ptr->bytes_left +
                  sizeof(large_pool_hdr);
    jpeg_free_large(cinfo, (void *)lhdr_ptr, space_freed);
    mem->total_space_allocated -= space_freed;
    lhdr_ptr = next_lhdr_ptr;
  }

  /* Release small objects */
  shdr_ptr = mem->small_list[pool_id];
  mem->small_list[pool_id] = NULL;

  while (shdr_ptr != NULL) {
    small_pool_ptr next_shdr_ptr = shdr_ptr->next;
    space_freed = shdr_ptr->bytes_used + shdr_ptr->bytes_left +
                  sizeof(small_pool_hdr);
    jpeg_free_small(cinfo, (void *)shdr_ptr, space_freed);
    mem->total_space_allocated -= space_freed;
    shdr_ptr = next_shdr_ptr;
  }
}


/*
 * Close up shop entirely.
 * Note that this cannot be called unless cinfo->mem is non-NULL.
 */

METHODDEF(void)
self_destruct(j_common_ptr cinfo)
{
  int pool;

  /* Close all backing store, release all memory.
   * Releasing pools in reverse order might help avoid fragmentation
   * with some (brain-damaged) malloc libraries.
   */
  for (pool = JPOOL_NUMPOOLS - 1; pool >= JPOOL_PERMANENT; pool--) {
    free_pool(cinfo, pool);
  }

  /* Release the memory manager control block too. */
  jpeg_free_small(cinfo, (void *)cinfo->mem, sizeof(my_memory_mgr));
  cinfo->mem = NULL;            /* ensures I will be called only once */

  jpeg_mem_term(cinfo);         /* system-dependent cleanup */
}


/*
 * Memory manager initialization.
 * When this is called, only the error manager pointer is valid in cinfo!
 */

GLOBAL(void)
jinit_memory_mgr(j_common_ptr cinfo)
{
  my_mem_ptr mem;
  long max_to_use;
  int pool;
  size_t test_mac;

  cinfo->mem = NULL;            /* for safety if init fails */

  /* Check for configuration errors.
   * ALIGN_SIZE should be a power of 2; otherwise, it probably doesn't
   * reflect any real hardware alignment requirement.
   * The test is a little tricky: for X>0, X and X-1 have no one-bits
   * in common if and only if X is a power of 2, ie has only one one-bit.
   * Some compilers may give an "unreachable code" warning here; ignore it.
   */
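  /* Worked example: for X = 8, X & (X - 1) = 1000b & 0111b = 0, so 8 passes
   * the power-of-2 test below; for X = 12, X & (X - 1) = 1100b & 1011b =
   * 1000b != 0, so an ALIGN_SIZE of 12 would trip the JERR_BAD_ALIGN_TYPE
   * error.
   */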
  if ((ALIGN_SIZE & (ALIGN_SIZE - 1)) != 0)
    ERREXIT(cinfo, JERR_BAD_ALIGN_TYPE);
  /* MAX_ALLOC_CHUNK must be representable as type size_t, and must be
   * a multiple of ALIGN_SIZE.
   * Again, an "unreachable code" warning may be ignored here.
   * But a "constant too large" warning means you need to fix MAX_ALLOC_CHUNK.
   */
  test_mac = (size_t)MAX_ALLOC_CHUNK;
  if ((long)test_mac != MAX_ALLOC_CHUNK ||
      (MAX_ALLOC_CHUNK % ALIGN_SIZE) != 0)
    ERREXIT(cinfo, JERR_BAD_ALLOC_CHUNK);

  max_to_use = jpeg_mem_init(cinfo); /* system-dependent initialization */

  /* Attempt to allocate memory manager's control block */
  mem = (my_mem_ptr)jpeg_get_small(cinfo, sizeof(my_memory_mgr));

  if (mem == NULL) {
    jpeg_mem_term(cinfo);       /* system-dependent cleanup */
    ERREXIT1(cinfo, JERR_OUT_OF_MEMORY, 0);
  }

  /* OK, fill in the method pointers */
  mem->pub.alloc_small = alloc_small;
  mem->pub.alloc_large = alloc_large;
  mem->pub.alloc_sarray = alloc_sarray;
  mem->pub.alloc_barray = alloc_barray;
  mem->pub.request_virt_sarray = request_virt_sarray;
  mem->pub.request_virt_barray = request_virt_barray;
  mem->pub.realize_virt_arrays = realize_virt_arrays;
  mem->pub.access_virt_sarray = access_virt_sarray;
  mem->pub.access_virt_barray = access_virt_barray;
  mem->pub.free_pool = free_pool;
  mem->pub.self_destruct = self_destruct;

  /* Make MAX_ALLOC_CHUNK accessible to other modules */
  mem->pub.max_alloc_chunk = MAX_ALLOC_CHUNK;

  /* Initialize working state */
  mem->pub.max_memory_to_use = max_to_use;

  for (pool = JPOOL_NUMPOOLS - 1; pool >= JPOOL_PERMANENT; pool--) {
    mem->small_list[pool] = NULL;
    mem->large_list[pool] = NULL;
  }
  mem->virt_sarray_list = NULL;
  mem->virt_barray_list = NULL;

  mem->total_space_allocated = sizeof(my_memory_mgr);

  /* Declare ourselves open for business */
  cinfo->mem = &mem->pub;

  /* Check for an environment variable JPEGMEM; if found, override the
   * default max_memory setting from jpeg_mem_init.  Note that the
   * surrounding application may again override this value.
   * If your system doesn't support getenv(), define NO_GETENV to disable
   * this feature.
   */
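  /* As the parsing below shows, JPEGMEM is expressed in thousands of bytes,
   * and an 'm' or 'M' suffix multiplies by a further 1000.  For example,
   * from a POSIX shell (file names are illustrative):
   *
   *   JPEGMEM=10000 djpeg in.jpg > out.ppm    (limit of about 10 MB)
   *   JPEGMEM=10M   djpeg in.jpg > out.ppm    (same limit, 10 million bytes)
   */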
#ifndef NO_GETENV
  {
    char *memenv;

    if ((memenv = getenv("JPEGMEM")) != NULL) {
      char ch = 'x';

      if (sscanf(memenv, "%ld%c", &max_to_use, &ch) > 0) {
        if (ch == 'm' || ch == 'M')
          max_to_use *= 1000L;
        mem->pub.max_memory_to_use = max_to_use * 1000L;
      }
    }
  }
#endif

}