/* audioopmodule - Module to perform useful operations on sound fragments */

#define PY_SSIZE_T_CLEAN

#include "Python.h"

static const int maxvals[] = {0, 0x7F, 0x7FFF, 0x7FFFFF, 0x7FFFFFFF};
/* -1 trick is needed on Windows to support -0x80000000 without a warning */
static const int minvals[] = {0, -0x80, -0x8000, -0x800000, -0x7FFFFFFF-1};
static const unsigned int masks[] = {0, 0xFF, 0xFFFF, 0xFFFFFF, 0xFFFFFFFF};
12
static int
fbound(double val, double minval, double maxval)
{
    if (val > maxval) {
        val = maxval;
    }
    else if (val < minval + 1.0) {
        val = minval;
    }

    /* Round towards minus infinity (-inf) */
    val = floor(val);

    /* Cast double to integer: round towards zero */
    return (int)val;
}
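
/* Example (illustrative): with the bounds the callers pass for 16-bit
 * samples, minval = (double)minvals[2] = -32768.0 and
 * maxval = (double)maxvals[2] = 32767.0:
 *
 *     fbound( 40000.3, -32768.0, 32767.0) == 32767    (clamped to maxval)
 *     fbound(    -0.5, -32768.0, 32767.0) == -1       (floor rounds toward -inf)
 *     fbound(-40000.0, -32768.0, 32767.0) == -32768   (clamped to minval)
 */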
29
30
/* Code shamelessly stolen from sox, 12.17.7, g711.c
** (c) Craig Reese, Joe Campbell and Jeff Poskanzer 1989 */

/* From g711.c:
 *
 * December 30, 1994:
 * Functions linear2alaw, linear2ulaw have been updated to correctly
 * convert unquantized 16 bit values.
 * Tables for direct u- to A-law and A- to u-law conversions have been
 * corrected.
 * Borge Lindberg, Center for PersonKommunikation, Aalborg University.
 * [email protected]
 *
 */
#define BIAS 0x84   /* define the add-in bias for 16 bit samples */
#define CLIP 32635
#define SIGN_BIT    (0x80)      /* Sign bit for an A-law byte. */
#define QUANT_MASK  (0xf)       /* Quantization field mask. */
#define SEG_SHIFT   (4)         /* Left shift for segment number. */
#define SEG_MASK    (0x70)      /* Segment field mask. */
51
52static const int16_t seg_aend[8] = {
53 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF
54};
55static const int16_t seg_uend[8] = {
56 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF
57};
58
59static int16_t
60search(int16_t val, const int16_t *table, int size)
61{
62 int i;
63
64 for (i = 0; i < size; i++) {
65 if (val <= *table++)
66 return (i);
67 }
68 return (size);
69}
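
/* Example (illustrative): search() returns the index of the first table
 * entry that is >= val, or `size` if val is larger than every entry.
 * With seg_uend = {0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF}:
 *
 *     search(200, seg_uend, 8)  == 2    (200 <= 0xFF)
 *     search(9000, seg_uend, 8) == 8    (past the last entry)
 */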
70#define st_ulaw2linear16(uc) (_st_ulaw2linear16[uc])
71#define st_alaw2linear16(uc) (_st_alaw2linear16[uc])
72
73static const int16_t _st_ulaw2linear16[256] = {
74 -32124, -31100, -30076, -29052, -28028, -27004, -25980,
75 -24956, -23932, -22908, -21884, -20860, -19836, -18812,
76 -17788, -16764, -15996, -15484, -14972, -14460, -13948,
77 -13436, -12924, -12412, -11900, -11388, -10876, -10364,
78 -9852, -9340, -8828, -8316, -7932, -7676, -7420,
79 -7164, -6908, -6652, -6396, -6140, -5884, -5628,
80 -5372, -5116, -4860, -4604, -4348, -4092, -3900,
81 -3772, -3644, -3516, -3388, -3260, -3132, -3004,
82 -2876, -2748, -2620, -2492, -2364, -2236, -2108,
83 -1980, -1884, -1820, -1756, -1692, -1628, -1564,
84 -1500, -1436, -1372, -1308, -1244, -1180, -1116,
85 -1052, -988, -924, -876, -844, -812, -780,
86 -748, -716, -684, -652, -620, -588, -556,
87 -524, -492, -460, -428, -396, -372, -356,
88 -340, -324, -308, -292, -276, -260, -244,
89 -228, -212, -196, -180, -164, -148, -132,
90 -120, -112, -104, -96, -88, -80, -72,
91 -64, -56, -48, -40, -32, -24, -16,
92 -8, 0, 32124, 31100, 30076, 29052, 28028,
93 27004, 25980, 24956, 23932, 22908, 21884, 20860,
94 19836, 18812, 17788, 16764, 15996, 15484, 14972,
95 14460, 13948, 13436, 12924, 12412, 11900, 11388,
96 10876, 10364, 9852, 9340, 8828, 8316, 7932,
97 7676, 7420, 7164, 6908, 6652, 6396, 6140,
98 5884, 5628, 5372, 5116, 4860, 4604, 4348,
99 4092, 3900, 3772, 3644, 3516, 3388, 3260,
100 3132, 3004, 2876, 2748, 2620, 2492, 2364,
101 2236, 2108, 1980, 1884, 1820, 1756, 1692,
102 1628, 1564, 1500, 1436, 1372, 1308, 1244,
103 1180, 1116, 1052, 988, 924, 876, 844,
104 812, 780, 748, 716, 684, 652, 620,
105 588, 556, 524, 492, 460, 428, 396,
106 372, 356, 340, 324, 308, 292, 276,
107 260, 244, 228, 212, 196, 180, 164,
108 148, 132, 120, 112, 104, 96, 88,
109 80, 72, 64, 56, 48, 40, 32,
110 24, 16, 8, 0
111};
112
/*
 * linear2ulaw() accepts a 14-bit signed integer and encodes it as u-law data
 * stored in an unsigned char.  This function should only be called with
 * the data shifted such that it only contains information in the lower
 * 14-bits.
 *
 * In order to simplify the encoding process, the original linear magnitude
 * is biased by adding 33 which shifts the encoding range from (0 - 8158) to
 * (33 - 8191). The result can be seen in the following encoding table:
 *
 *      Biased Linear Input Code        Compressed Code
 *      ------------------------        ---------------
 *      00000001wxyza                   000wxyz
 *      0000001wxyzab                   001wxyz
 *      000001wxyzabc                   010wxyz
 *      00001wxyzabcd                   011wxyz
 *      0001wxyzabcde                   100wxyz
 *      001wxyzabcdef                   101wxyz
 *      01wxyzabcdefg                   110wxyz
 *      1wxyzabcdefgh                   111wxyz
 *
 * Each biased linear code has a leading 1 which identifies the segment
 * number.  The value of the segment number is equal to 7 minus the number
 * of leading 0's.  The quantization interval is directly available as the
 * four bits wxyz.  The trailing bits (a - h) are ignored.
 *
 * Ordinarily the complement of the resulting code word is used for
 * transmission, and so the code word is complemented before it is returned.
 *
 * For further information see John C. Bellamy's Digital Telephony, 1982,
 * John Wiley & Sons, pps 98-111 and 472-476.
 */
145static unsigned char
146st_14linear2ulaw(int16_t pcm_val) /* 2's complement (14-bit range) */
147{
148 int16_t mask;
149 int16_t seg;
150 unsigned char uval;
151
152 /* u-law inverts all bits */
153 /* Get the sign and the magnitude of the value. */
154 if (pcm_val < 0) {
155 pcm_val = -pcm_val;
156 mask = 0x7F;
157 } else {
158 mask = 0xFF;
159 }
160 if ( pcm_val > CLIP ) pcm_val = CLIP; /* clip the magnitude */
161 pcm_val += (BIAS >> 2);
162
163 /* Convert the scaled magnitude to segment number. */
164 seg = search(pcm_val, seg_uend, 8);
165
166 /*
167 * Combine the sign, segment, quantization bits;
168 * and complement the code word.
169 */
170 if (seg >= 8) /* out of range, return maximum value. */
171 return (unsigned char) (0x7F ^ mask);
172 else {
173 uval = (unsigned char) (seg << 4) | ((pcm_val >> (seg + 1)) & 0xF);
174 return (uval ^ mask);
175 }
176
177}
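
/* Worked example (illustrative), in the 14-bit domain this function expects
 * (audioop.lin2ulaw below passes the 32-bit sample shifted right by 18):
 *
 *     st_14linear2ulaw(1000)
 *       -> mask = 0xFF, biased value 1000 + 33 = 1033
 *       -> segment 5 (1033 <= 0x7FF), quantization bits (1033 >> 6) & 0xF = 0
 *       -> code (5 << 4) | 0 = 0x50, complemented: 0x50 ^ 0xFF == 0xAF
 *
 * Decoding the result, st_ulaw2linear16(0xAF) == 4092, i.e. approximately
 * 1000 << 2 after quantization.
 */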
178
179static const int16_t _st_alaw2linear16[256] = {
180 -5504, -5248, -6016, -5760, -4480, -4224, -4992,
181 -4736, -7552, -7296, -8064, -7808, -6528, -6272,
182 -7040, -6784, -2752, -2624, -3008, -2880, -2240,
183 -2112, -2496, -2368, -3776, -3648, -4032, -3904,
184 -3264, -3136, -3520, -3392, -22016, -20992, -24064,
185 -23040, -17920, -16896, -19968, -18944, -30208, -29184,
186 -32256, -31232, -26112, -25088, -28160, -27136, -11008,
187 -10496, -12032, -11520, -8960, -8448, -9984, -9472,
188 -15104, -14592, -16128, -15616, -13056, -12544, -14080,
189 -13568, -344, -328, -376, -360, -280, -264,
190 -312, -296, -472, -456, -504, -488, -408,
191 -392, -440, -424, -88, -72, -120, -104,
192 -24, -8, -56, -40, -216, -200, -248,
193 -232, -152, -136, -184, -168, -1376, -1312,
194 -1504, -1440, -1120, -1056, -1248, -1184, -1888,
195 -1824, -2016, -1952, -1632, -1568, -1760, -1696,
196 -688, -656, -752, -720, -560, -528, -624,
197 -592, -944, -912, -1008, -976, -816, -784,
198 -880, -848, 5504, 5248, 6016, 5760, 4480,
199 4224, 4992, 4736, 7552, 7296, 8064, 7808,
200 6528, 6272, 7040, 6784, 2752, 2624, 3008,
201 2880, 2240, 2112, 2496, 2368, 3776, 3648,
202 4032, 3904, 3264, 3136, 3520, 3392, 22016,
203 20992, 24064, 23040, 17920, 16896, 19968, 18944,
204 30208, 29184, 32256, 31232, 26112, 25088, 28160,
205 27136, 11008, 10496, 12032, 11520, 8960, 8448,
206 9984, 9472, 15104, 14592, 16128, 15616, 13056,
207 12544, 14080, 13568, 344, 328, 376, 360,
208 280, 264, 312, 296, 472, 456, 504,
209 488, 408, 392, 440, 424, 88, 72,
210 120, 104, 24, 8, 56, 40, 216,
211 200, 248, 232, 152, 136, 184, 168,
212 1376, 1312, 1504, 1440, 1120, 1056, 1248,
213 1184, 1888, 1824, 2016, 1952, 1632, 1568,
214 1760, 1696, 688, 656, 752, 720, 560,
215 528, 624, 592, 944, 912, 1008, 976,
216 816, 784, 880, 848
217};
218
/*
 * linear2alaw() accepts a 13-bit signed integer and encodes it as A-law data
 * stored in an unsigned char.  This function should only be called with
 * the data shifted such that it only contains information in the lower
 * 13-bits.
 *
 *      Linear Input Code        Compressed Code
 *      -----------------        ---------------
 *      0000000wxyza             000wxyz
 *      0000001wxyza             001wxyz
 *      000001wxyzab             010wxyz
 *      00001wxyzabc             011wxyz
 *      0001wxyzabcd             100wxyz
 *      001wxyzabcde             101wxyz
 *      01wxyzabcdef             110wxyz
 *      1wxyzabcdefg             111wxyz
 *
 * For further information see John C. Bellamy's Digital Telephony, 1982,
 * John Wiley & Sons, pps 98-111 and 472-476.
 */
239static unsigned char
240st_linear2alaw(int16_t pcm_val) /* 2's complement (13-bit range) */
241{
242 int16_t mask;
243 int16_t seg;
244 unsigned char aval;
245
246 /* A-law using even bit inversion */
247 if (pcm_val >= 0) {
248 mask = 0xD5; /* sign (7th) bit = 1 */
249 } else {
250 mask = 0x55; /* sign bit = 0 */
251 pcm_val = -pcm_val - 1;
252 }
253
254 /* Convert the scaled magnitude to segment number. */
255 seg = search(pcm_val, seg_aend, 8);
256
257 /* Combine the sign, segment, and quantization bits. */
258
259 if (seg >= 8) /* out of range, return maximum value. */
260 return (unsigned char) (0x7F ^ mask);
261 else {
262 aval = (unsigned char) seg << SEG_SHIFT;
263 if (seg < 2)
264 aval |= (pcm_val >> 1) & QUANT_MASK;
265 else
266 aval |= (pcm_val >> seg) & QUANT_MASK;
267 return (aval ^ mask);
268 }
269}
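
/* Worked example (illustrative), in the 13-bit domain this function expects
 * (audioop.lin2alaw below passes the 32-bit sample shifted right by 19):
 *
 *     st_linear2alaw(1000)
 *       -> mask = 0xD5, segment 5 (1000 <= 0x3FF)
 *       -> quantization bits (1000 >> 5) & 0xF = 0xF
 *       -> code (5 << SEG_SHIFT) | 0xF = 0x5F, inverted: 0x5F ^ 0xD5 == 0x8A
 *
 * Decoding the result, st_alaw2linear16(0x8A) == 8064, i.e. approximately
 * 1000 << 3 after quantization.
 */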
270/* End of code taken from sox */
271
272/* Intel ADPCM step variation table */
273static const int indexTable[16] = {
274 -1, -1, -1, -1, 2, 4, 6, 8,
275 -1, -1, -1, -1, 2, 4, 6, 8,
276};
277
278static const int stepsizeTable[89] = {
279 7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
280 19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
281 50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
282 130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
283 337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
284 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
285 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
286 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
287 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
288};
289
290#define GETINTX(T, cp, i) (*(T *)((unsigned char *)(cp) + (i)))
291#define SETINTX(T, cp, i, val) do { \
292 *(T *)((unsigned char *)(cp) + (i)) = (T)(val); \
293 } while (0)
294
295
296#define GETINT8(cp, i) GETINTX(signed char, (cp), (i))
297#define GETINT16(cp, i) GETINTX(int16_t, (cp), (i))
298#define GETINT32(cp, i) GETINTX(int32_t, (cp), (i))
299
300#if WORDS_BIGENDIAN
301#define GETINT24(cp, i) ( \
302 ((unsigned char *)(cp) + (i))[2] + \
303 (((unsigned char *)(cp) + (i))[1] << 8) + \
304 (((signed char *)(cp) + (i))[0] << 16) )
305#else
306#define GETINT24(cp, i) ( \
307 ((unsigned char *)(cp) + (i))[0] + \
308 (((unsigned char *)(cp) + (i))[1] << 8) + \
309 (((signed char *)(cp) + (i))[2] << 16) )
310#endif
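
/* Note (illustrative): in GETINT24 the most significant byte is read through
 * a pointer to signed char, so the reconstructed 24-bit value is
 * sign-extended when it is promoted to int, while the two lower bytes are
 * read as unsigned char and contribute their plain magnitudes.  For example
 * (little-endian), the bytes {0x00, 0x00, 0x80} read back as -0x800000.
 */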
311
312
313#define SETINT8(cp, i, val) SETINTX(signed char, (cp), (i), (val))
314#define SETINT16(cp, i, val) SETINTX(int16_t, (cp), (i), (val))
315#define SETINT32(cp, i, val) SETINTX(int32_t, (cp), (i), (val))
316
317#if WORDS_BIGENDIAN
318#define SETINT24(cp, i, val) do { \
319 ((unsigned char *)(cp) + (i))[2] = (int)(val); \
320 ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
321 ((signed char *)(cp) + (i))[0] = (int)(val) >> 16; \
322 } while (0)
323#else
324#define SETINT24(cp, i, val) do { \
325 ((unsigned char *)(cp) + (i))[0] = (int)(val); \
326 ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
327 ((signed char *)(cp) + (i))[2] = (int)(val) >> 16; \
328 } while (0)
329#endif
330
331
332#define GETRAWSAMPLE(size, cp, i) ( \
333 (size == 1) ? (int)GETINT8((cp), (i)) : \
334 (size == 2) ? (int)GETINT16((cp), (i)) : \
335 (size == 3) ? (int)GETINT24((cp), (i)) : \
336 (int)GETINT32((cp), (i)))
337
338#define SETRAWSAMPLE(size, cp, i, val) do { \
339 if (size == 1) \
340 SETINT8((cp), (i), (val)); \
341 else if (size == 2) \
342 SETINT16((cp), (i), (val)); \
343 else if (size == 3) \
344 SETINT24((cp), (i), (val)); \
345 else \
346 SETINT32((cp), (i), (val)); \
347 } while(0)
348
349
350#define GETSAMPLE32(size, cp, i) ( \
351 (size == 1) ? (int)GETINT8((cp), (i)) << 24 : \
352 (size == 2) ? (int)GETINT16((cp), (i)) << 16 : \
353 (size == 3) ? (int)GETINT24((cp), (i)) << 8 : \
354 (int)GETINT32((cp), (i)))
355
356#define SETSAMPLE32(size, cp, i, val) do { \
357 if (size == 1) \
358 SETINT8((cp), (i), (val) >> 24); \
359 else if (size == 2) \
360 SETINT16((cp), (i), (val) >> 16); \
361 else if (size == 3) \
362 SETINT24((cp), (i), (val) >> 8); \
363 else \
364 SETINT32((cp), (i), (val)); \
365 } while(0)
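
/* Example (illustrative): GETSAMPLE32/SETSAMPLE32 put every width on a
 * common 32-bit scale so the same arithmetic works for 1-, 2-, 3- and
 * 4-byte samples.  For a width-2 fragment whose first sample is 0x1234:
 *
 *     GETSAMPLE32(2, cp, 0) == 0x12340000
 *     SETSAMPLE32(1, ncp, 0, 0x12340000)    stores the single byte 0x12
 *
 * i.e. converting to a narrower width simply drops the low-order bits.
 */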
366
367static PyModuleDef audioopmodule;
368
369typedef struct {
370 PyObject *AudioopError;
371} audioop_state;
372
373static inline audioop_state *
374get_audioop_state(PyObject *module)
375{
376 void *state = PyModule_GetState(module);
377 assert(state != NULL);
378 return (audioop_state *)state;
379}
380
381static int
382audioop_check_size(PyObject *module, int size)
383{
384 if (size < 1 || size > 4) {
385 PyErr_SetString(get_audioop_state(module)->AudioopError,
386 "Size should be 1, 2, 3 or 4");
387 return 0;
388 }
389 else
390 return 1;
391}
392
393static int
394audioop_check_parameters(PyObject *module, Py_ssize_t len, int size)
395{
396 if (!audioop_check_size(module, size))
397 return 0;
398 if (len % size != 0) {
399 PyErr_SetString(get_audioop_state(module)->AudioopError,
400 "not a whole number of frames");
401 return 0;
402 }
403 return 1;
404}
405
406/*[clinic input]
407module audioop
408[clinic start generated code]*/
409/*[clinic end generated code: output=da39a3ee5e6b4b0d input=8fa8f6611be3591a]*/
410
411/*[clinic input]
412audioop.getsample
413
414 fragment: Py_buffer
415 width: int
416 index: Py_ssize_t
417 /
418
419Return the value of sample index from the fragment.
420[clinic start generated code]*/
421
422static PyObject *
423audioop_getsample_impl(PyObject *module, Py_buffer *fragment, int width,
424 Py_ssize_t index)
425/*[clinic end generated code: output=8fe1b1775134f39a input=88edbe2871393549]*/
426{
427 int val;
428
429 if (!audioop_check_parameters(module, fragment->len, width))
430 return NULL;
431 if (index < 0 || index >= fragment->len/width) {
432 PyErr_SetString(get_audioop_state(module)->AudioopError,
433 "Index out of range");
434 return NULL;
435 }
436 val = GETRAWSAMPLE(width, fragment->buf, index*width);
437 return PyLong_FromLong(val);
438}
439
440/*[clinic input]
441audioop.max
442
443 fragment: Py_buffer
444 width: int
445 /
446
447Return the maximum of the absolute value of all samples in a fragment.
448[clinic start generated code]*/
449
450static PyObject *
451audioop_max_impl(PyObject *module, Py_buffer *fragment, int width)
452/*[clinic end generated code: output=e6c5952714f1c3f0 input=32bea5ea0ac8c223]*/
453{
454 Py_ssize_t i;
455 unsigned int absval, max = 0;
456
457 if (!audioop_check_parameters(module, fragment->len, width))
458 return NULL;
459 for (i = 0; i < fragment->len; i += width) {
460 int val = GETRAWSAMPLE(width, fragment->buf, i);
461 /* Cast to unsigned before negating. Unsigned overflow is well-
462 defined, but signed overflow is not. */
463 if (val < 0) absval = (unsigned int)-(int64_t)val;
464 else absval = val;
465 if (absval > max) max = absval;
466 }
467 return PyLong_FromUnsignedLong(max);
468}
469
470/*[clinic input]
471audioop.minmax
472
473 fragment: Py_buffer
474 width: int
475 /
476
477Return the minimum and maximum values of all samples in the sound fragment.
478[clinic start generated code]*/
479
480static PyObject *
481audioop_minmax_impl(PyObject *module, Py_buffer *fragment, int width)
482/*[clinic end generated code: output=473fda66b15c836e input=89848e9b927a0696]*/
483{
484 Py_ssize_t i;
485 /* -1 trick below is needed on Windows to support -0x80000000 without
486 a warning */
487 int min = 0x7fffffff, max = -0x7FFFFFFF-1;
488
489 if (!audioop_check_parameters(module, fragment->len, width))
490 return NULL;
491 for (i = 0; i < fragment->len; i += width) {
492 int val = GETRAWSAMPLE(width, fragment->buf, i);
493 if (val > max) max = val;
494 if (val < min) min = val;
495 }
496 return Py_BuildValue("(ii)", min, max);
497}
498
499/*[clinic input]
500audioop.avg
501
502 fragment: Py_buffer
503 width: int
504 /
505
506Return the average over all samples in the fragment.
507[clinic start generated code]*/
508
509static PyObject *
510audioop_avg_impl(PyObject *module, Py_buffer *fragment, int width)
511/*[clinic end generated code: output=4410a4c12c3586e6 input=1114493c7611334d]*/
512{
513 Py_ssize_t i;
514 int avg;
515 double sum = 0.0;
516
517 if (!audioop_check_parameters(module, fragment->len, width))
518 return NULL;
519 for (i = 0; i < fragment->len; i += width)
520 sum += GETRAWSAMPLE(width, fragment->buf, i);
521 if (fragment->len == 0)
522 avg = 0;
523 else
524 avg = (int)floor(sum / (double)(fragment->len/width));
525 return PyLong_FromLong(avg);
526}
527
528/*[clinic input]
529audioop.rms
530
531 fragment: Py_buffer
532 width: int
533 /
534
535Return the root-mean-square of the fragment, i.e. sqrt(sum(S_i^2)/n).
536[clinic start generated code]*/
537
538static PyObject *
539audioop_rms_impl(PyObject *module, Py_buffer *fragment, int width)
540/*[clinic end generated code: output=1e7871c826445698 input=4cc57c6c94219d78]*/
541{
542 Py_ssize_t i;
543 unsigned int res;
544 double sum_squares = 0.0;
545
546 if (!audioop_check_parameters(module, fragment->len, width))
547 return NULL;
548 for (i = 0; i < fragment->len; i += width) {
549 double val = GETRAWSAMPLE(width, fragment->buf, i);
550 sum_squares += val*val;
551 }
552 if (fragment->len == 0)
553 res = 0;
554 else
555 res = (unsigned int)sqrt(sum_squares / (double)(fragment->len/width));
556 return PyLong_FromUnsignedLong(res);
557}
558
559static double _sum2(const int16_t *a, const int16_t *b, Py_ssize_t len)
560{
561 Py_ssize_t i;
562 double sum = 0.0;
563
564 for( i=0; i<len; i++) {
565 sum = sum + (double)a[i]*(double)b[i];
566 }
567 return sum;
568}
569
/*
** Findfit tries to locate a sample within another sample.  Its main use
** is in echo-cancellation (to find the feedback of the output signal in
** the input signal).
** The method used is as follows:
**
** let R be the reference signal (length n) and A the input signal (length N)
** with N > n, and let all sums be over i from 0 to n-1.
**
** Now, for each j in {0..N-n} we compute a factor fj so that fj*R matches A
** as well as possible, i.e. sum( (A[j+i]-fj*R[i])^2 ) is minimal.  This
** equation gives fj = sum( A[j+i]R[i] ) / sum(R[i]^2).
**
** Next, we compute the relative distance between the original signal and
** the modified signal and minimize that over j:
** vj = sum( (A[j+i]-fj*R[i])^2 ) / sum( A[j+i]^2 ) =>
** vj = ( sum(A[j+i]^2)*sum(R[i]^2) - sum(A[j+i]R[i])^2 ) / sum( A[j+i]^2 )
**
** In the code variables correspond as follows:
** cp1          A
** cp2          R
** len1         N
** len2         n
** aj_m1        A[j-1]
** aj_lm1       A[j+n-1]
** sum_ri_2     sum(R[i]^2)
** sum_aij_2    sum(A[i+j]^2)
** sum_aij_ri   sum(A[i+j]R[i])
**
** sum_ri_2 is calculated once, sum_aij_2 is updated each step and sum_aij_ri
** is completely recalculated each step.
*/
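/* A compact restatement (illustrative) of the least-squares step above:
** minimizing E(f) = sum( (A[j+i] - f*R[i])^2 ) over f gives
** dE/df = -2 * sum( R[i]*(A[j+i] - f*R[i]) ) = 0, hence
** f = sum( A[j+i]*R[i] ) / sum( R[i]^2 ), which is exactly the `factor`
** returned below; substituting f back in yields the residual vj that is
** minimized to pick best_j.
*/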
602/*[clinic input]
603audioop.findfit
604
605 fragment: Py_buffer
606 reference: Py_buffer
607 /
608
609Try to match reference as well as possible to a portion of fragment.
610[clinic start generated code]*/
611
612static PyObject *
613audioop_findfit_impl(PyObject *module, Py_buffer *fragment,
614 Py_buffer *reference)
615/*[clinic end generated code: output=5752306d83cbbada input=62c305605e183c9a]*/
616{
617 const int16_t *cp1, *cp2;
618 Py_ssize_t len1, len2;
619 Py_ssize_t j, best_j;
620 double aj_m1, aj_lm1;
621 double sum_ri_2, sum_aij_2, sum_aij_ri, result, best_result, factor;
622
623 if (fragment->len & 1 || reference->len & 1) {
624 PyErr_SetString(get_audioop_state(module)->AudioopError,
625 "Strings should be even-sized");
626 return NULL;
627 }
628 cp1 = (const int16_t *)fragment->buf;
629 len1 = fragment->len >> 1;
630 cp2 = (const int16_t *)reference->buf;
631 len2 = reference->len >> 1;
632
633 if (len1 < len2) {
634 PyErr_SetString(get_audioop_state(module)->AudioopError,
635 "First sample should be longer");
636 return NULL;
637 }
638 sum_ri_2 = _sum2(cp2, cp2, len2);
639 sum_aij_2 = _sum2(cp1, cp1, len2);
640 sum_aij_ri = _sum2(cp1, cp2, len2);
641
642 result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri) / sum_aij_2;
643
644 best_result = result;
645 best_j = 0;
646
647 for ( j=1; j<=len1-len2; j++) {
648 aj_m1 = (double)cp1[j-1];
649 aj_lm1 = (double)cp1[j+len2-1];
650
651 sum_aij_2 = sum_aij_2 + aj_lm1*aj_lm1 - aj_m1*aj_m1;
652 sum_aij_ri = _sum2(cp1+j, cp2, len2);
653
654 result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri)
655 / sum_aij_2;
656
657 if ( result < best_result ) {
658 best_result = result;
659 best_j = j;
660 }
661
662 }
663
664 factor = _sum2(cp1+best_j, cp2, len2) / sum_ri_2;
665
666 return Py_BuildValue("(nf)", best_j, factor);
667}
668
669/*
670** findfactor finds a factor f so that the energy in A-fB is minimal.
671** See the comment for findfit for details.
672*/
673/*[clinic input]
674audioop.findfactor
675
676 fragment: Py_buffer
677 reference: Py_buffer
678 /
679
680Return a factor F such that rms(add(fragment, mul(reference, -F))) is minimal.
681[clinic start generated code]*/
682
683static PyObject *
684audioop_findfactor_impl(PyObject *module, Py_buffer *fragment,
685 Py_buffer *reference)
686/*[clinic end generated code: output=14ea95652c1afcf8 input=816680301d012b21]*/
687{
688 const int16_t *cp1, *cp2;
689 Py_ssize_t len;
690 double sum_ri_2, sum_aij_ri, result;
691
692 if (fragment->len & 1 || reference->len & 1) {
693 PyErr_SetString(get_audioop_state(module)->AudioopError,
694 "Strings should be even-sized");
695 return NULL;
696 }
697 if (fragment->len != reference->len) {
698 PyErr_SetString(get_audioop_state(module)->AudioopError,
699 "Samples should be same size");
700 return NULL;
701 }
702 cp1 = (const int16_t *)fragment->buf;
703 cp2 = (const int16_t *)reference->buf;
704 len = fragment->len >> 1;
705 sum_ri_2 = _sum2(cp2, cp2, len);
706 sum_aij_ri = _sum2(cp1, cp2, len);
707
708 result = sum_aij_ri / sum_ri_2;
709
710 return PyFloat_FromDouble(result);
711}
712
713/*
714** findmax returns the index of the n-sized segment of the input sample
715** that contains the most energy.
716*/
717/*[clinic input]
718audioop.findmax
719
720 fragment: Py_buffer
721 length: Py_ssize_t
722 /
723
724Search fragment for a slice of specified number of samples with maximum energy.
725[clinic start generated code]*/
726
727static PyObject *
728audioop_findmax_impl(PyObject *module, Py_buffer *fragment,
729 Py_ssize_t length)
730/*[clinic end generated code: output=f008128233523040 input=2f304801ed42383c]*/
731{
732 const int16_t *cp1;
733 Py_ssize_t len1;
734 Py_ssize_t j, best_j;
735 double aj_m1, aj_lm1;
736 double result, best_result;
737
738 if (fragment->len & 1) {
739 PyErr_SetString(get_audioop_state(module)->AudioopError,
740 "Strings should be even-sized");
741 return NULL;
742 }
743 cp1 = (const int16_t *)fragment->buf;
744 len1 = fragment->len >> 1;
745
746 if (length < 0 || len1 < length) {
747 PyErr_SetString(get_audioop_state(module)->AudioopError,
748 "Input sample should be longer");
749 return NULL;
750 }
751
752 result = _sum2(cp1, cp1, length);
753
754 best_result = result;
755 best_j = 0;
756
757 for ( j=1; j<=len1-length; j++) {
758 aj_m1 = (double)cp1[j-1];
759 aj_lm1 = (double)cp1[j+length-1];
760
761 result = result + aj_lm1*aj_lm1 - aj_m1*aj_m1;
762
763 if ( result > best_result ) {
764 best_result = result;
765 best_j = j;
766 }
767
768 }
769
770 return PyLong_FromSsize_t(best_j);
771}
772
773/*[clinic input]
774audioop.avgpp
775
776 fragment: Py_buffer
777 width: int
778 /
779
780Return the average peak-peak value over all samples in the fragment.
781[clinic start generated code]*/
782
783static PyObject *
784audioop_avgpp_impl(PyObject *module, Py_buffer *fragment, int width)
785/*[clinic end generated code: output=269596b0d5ae0b2b input=0b3cceeae420a7d9]*/
786{
787 Py_ssize_t i;
788 int prevval, prevextremevalid = 0, prevextreme = 0;
789 double sum = 0.0;
790 unsigned int avg;
791 int diff, prevdiff, nextreme = 0;
792
793 if (!audioop_check_parameters(module, fragment->len, width))
794 return NULL;
795 if (fragment->len <= width)
796 return PyLong_FromLong(0);
797 prevval = GETRAWSAMPLE(width, fragment->buf, 0);
798 prevdiff = 17; /* Anything != 0, 1 */
799 for (i = width; i < fragment->len; i += width) {
800 int val = GETRAWSAMPLE(width, fragment->buf, i);
801 if (val != prevval) {
802 diff = val < prevval;
803 if (prevdiff == !diff) {
804 /* Derivative changed sign. Compute difference to last
805 ** extreme value and remember.
806 */
807 if (prevextremevalid) {
808 if (prevval < prevextreme)
809 sum += (double)((unsigned int)prevextreme -
810 (unsigned int)prevval);
811 else
812 sum += (double)((unsigned int)prevval -
813 (unsigned int)prevextreme);
814 nextreme++;
815 }
816 prevextremevalid = 1;
817 prevextreme = prevval;
818 }
819 prevval = val;
820 prevdiff = diff;
821 }
822 }
823 if ( nextreme == 0 )
824 avg = 0;
825 else
826 avg = (unsigned int)(sum / (double)nextreme);
827 return PyLong_FromUnsignedLong(avg);
828}
829
830/*[clinic input]
831audioop.maxpp
832
833 fragment: Py_buffer
834 width: int
835 /
836
837Return the maximum peak-peak value in the sound fragment.
838[clinic start generated code]*/
839
840static PyObject *
841audioop_maxpp_impl(PyObject *module, Py_buffer *fragment, int width)
842/*[clinic end generated code: output=5b918ed5dbbdb978 input=671a13e1518f80a1]*/
843{
844 Py_ssize_t i;
845 int prevval, prevextremevalid = 0, prevextreme = 0;
846 unsigned int max = 0, extremediff;
847 int diff, prevdiff;
848
849 if (!audioop_check_parameters(module, fragment->len, width))
850 return NULL;
851 if (fragment->len <= width)
852 return PyLong_FromLong(0);
853 prevval = GETRAWSAMPLE(width, fragment->buf, 0);
854 prevdiff = 17; /* Anything != 0, 1 */
855 for (i = width; i < fragment->len; i += width) {
856 int val = GETRAWSAMPLE(width, fragment->buf, i);
857 if (val != prevval) {
858 diff = val < prevval;
859 if (prevdiff == !diff) {
860 /* Derivative changed sign. Compute difference to
861 ** last extreme value and remember.
862 */
863 if (prevextremevalid) {
864 if (prevval < prevextreme)
865 extremediff = (unsigned int)prevextreme -
866 (unsigned int)prevval;
867 else
868 extremediff = (unsigned int)prevval -
869 (unsigned int)prevextreme;
870 if ( extremediff > max )
871 max = extremediff;
872 }
873 prevextremevalid = 1;
874 prevextreme = prevval;
875 }
876 prevval = val;
877 prevdiff = diff;
878 }
879 }
880 return PyLong_FromUnsignedLong(max);
881}
882
883/*[clinic input]
884audioop.cross
885
886 fragment: Py_buffer
887 width: int
888 /
889
890Return the number of zero crossings in the fragment passed as an argument.
891[clinic start generated code]*/
892
893static PyObject *
894audioop_cross_impl(PyObject *module, Py_buffer *fragment, int width)
895/*[clinic end generated code: output=5938dcdd74a1f431 input=b1b3f15b83f6b41a]*/
896{
897 Py_ssize_t i;
898 int prevval;
899 Py_ssize_t ncross;
900
901 if (!audioop_check_parameters(module, fragment->len, width))
902 return NULL;
903 ncross = -1;
904 prevval = 17; /* Anything <> 0,1 */
905 for (i = 0; i < fragment->len; i += width) {
906 int val = GETRAWSAMPLE(width, fragment->buf, i) < 0;
907 if (val != prevval) ncross++;
908 prevval = val;
909 }
910 return PyLong_FromSsize_t(ncross);
911}
912
913/*[clinic input]
914audioop.mul
915
916 fragment: Py_buffer
917 width: int
918 factor: double
919 /
920
921Return a fragment that has all samples in the original fragment multiplied by the floating-point value factor.
922[clinic start generated code]*/
923
924static PyObject *
925audioop_mul_impl(PyObject *module, Py_buffer *fragment, int width,
926 double factor)
927/*[clinic end generated code: output=6cd48fe796da0ea4 input=c726667baa157d3c]*/
928{
929 signed char *ncp;
930 Py_ssize_t i;
931 double maxval, minval;
932 PyObject *rv;
933
934 if (!audioop_check_parameters(module, fragment->len, width))
935 return NULL;
936
937 maxval = (double) maxvals[width];
938 minval = (double) minvals[width];
939
940 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
941 if (rv == NULL)
942 return NULL;
943 ncp = (signed char *)PyBytes_AsString(rv);
944
945 for (i = 0; i < fragment->len; i += width) {
946 double val = GETRAWSAMPLE(width, fragment->buf, i);
947 int ival = fbound(val * factor, minval, maxval);
948 SETRAWSAMPLE(width, ncp, i, ival);
949 }
950 return rv;
951}
952
953/*[clinic input]
954audioop.tomono
955
956 fragment: Py_buffer
957 width: int
958 lfactor: double
959 rfactor: double
960 /
961
962Convert a stereo fragment to a mono fragment.
963[clinic start generated code]*/
964
965static PyObject *
966audioop_tomono_impl(PyObject *module, Py_buffer *fragment, int width,
967 double lfactor, double rfactor)
968/*[clinic end generated code: output=235c8277216d4e4e input=c4ec949b3f4dddfa]*/
969{
970 signed char *cp, *ncp;
971 Py_ssize_t len, i;
972 double maxval, minval;
973 PyObject *rv;
974
975 cp = fragment->buf;
976 len = fragment->len;
977 if (!audioop_check_parameters(module, len, width))
978 return NULL;
979 if (((len / width) & 1) != 0) {
980 PyErr_SetString(get_audioop_state(module)->AudioopError,
981 "not a whole number of frames");
982 return NULL;
983 }
984
985 maxval = (double) maxvals[width];
986 minval = (double) minvals[width];
987
988 rv = PyBytes_FromStringAndSize(NULL, len/2);
989 if (rv == NULL)
990 return NULL;
991 ncp = (signed char *)PyBytes_AsString(rv);
992
993 for (i = 0; i < len; i += width*2) {
994 double val1 = GETRAWSAMPLE(width, cp, i);
995 double val2 = GETRAWSAMPLE(width, cp, i + width);
996 double val = val1 * lfactor + val2 * rfactor;
997 int ival = fbound(val, minval, maxval);
998 SETRAWSAMPLE(width, ncp, i/2, ival);
999 }
1000 return rv;
1001}
1002
1003/*[clinic input]
1004audioop.tostereo
1005
1006 fragment: Py_buffer
1007 width: int
1008 lfactor: double
1009 rfactor: double
1010 /
1011
1012Generate a stereo fragment from a mono fragment.
1013[clinic start generated code]*/
1014
1015static PyObject *
1016audioop_tostereo_impl(PyObject *module, Py_buffer *fragment, int width,
1017 double lfactor, double rfactor)
1018/*[clinic end generated code: output=046f13defa5f1595 input=27b6395ebfdff37a]*/
1019{
1020 signed char *ncp;
1021 Py_ssize_t i;
1022 double maxval, minval;
1023 PyObject *rv;
1024
1025 if (!audioop_check_parameters(module, fragment->len, width))
1026 return NULL;
1027
1028 maxval = (double) maxvals[width];
1029 minval = (double) minvals[width];
1030
1031 if (fragment->len > PY_SSIZE_T_MAX/2) {
1032 PyErr_SetString(PyExc_MemoryError,
1033 "not enough memory for output buffer");
1034 return NULL;
1035 }
1036
1037 rv = PyBytes_FromStringAndSize(NULL, fragment->len*2);
1038 if (rv == NULL)
1039 return NULL;
1040 ncp = (signed char *)PyBytes_AsString(rv);
1041
1042 for (i = 0; i < fragment->len; i += width) {
1043 double val = GETRAWSAMPLE(width, fragment->buf, i);
1044 int val1 = fbound(val * lfactor, minval, maxval);
1045 int val2 = fbound(val * rfactor, minval, maxval);
1046 SETRAWSAMPLE(width, ncp, i*2, val1);
1047 SETRAWSAMPLE(width, ncp, i*2 + width, val2);
1048 }
1049 return rv;
1050}
1051
1052/*[clinic input]
1053audioop.add
1054
1055 fragment1: Py_buffer
1056 fragment2: Py_buffer
1057 width: int
1058 /
1059
1060Return a fragment which is the addition of the two samples passed as parameters.
1061[clinic start generated code]*/
1062
1063static PyObject *
1064audioop_add_impl(PyObject *module, Py_buffer *fragment1,
1065 Py_buffer *fragment2, int width)
1066/*[clinic end generated code: output=60140af4d1aab6f2 input=4a8d4bae4c1605c7]*/
1067{
1068 signed char *ncp;
1069 Py_ssize_t i;
1070 int minval, maxval, newval;
1071 PyObject *rv;
1072
1073 if (!audioop_check_parameters(module, fragment1->len, width))
1074 return NULL;
1075 if (fragment1->len != fragment2->len) {
1076 PyErr_SetString(get_audioop_state(module)->AudioopError,
1077 "Lengths should be the same");
1078 return NULL;
1079 }
1080
1081 maxval = maxvals[width];
1082 minval = minvals[width];
1083
1084 rv = PyBytes_FromStringAndSize(NULL, fragment1->len);
1085 if (rv == NULL)
1086 return NULL;
1087 ncp = (signed char *)PyBytes_AsString(rv);
1088
1089 for (i = 0; i < fragment1->len; i += width) {
1090 int val1 = GETRAWSAMPLE(width, fragment1->buf, i);
1091 int val2 = GETRAWSAMPLE(width, fragment2->buf, i);
1092
1093 if (width < 4) {
1094 newval = val1 + val2;
1095 /* truncate in case of overflow */
1096 if (newval > maxval)
1097 newval = maxval;
1098 else if (newval < minval)
1099 newval = minval;
1100 }
1101 else {
1102 double fval = (double)val1 + (double)val2;
1103 /* truncate in case of overflow */
1104 newval = fbound(fval, minval, maxval);
1105 }
1106
1107 SETRAWSAMPLE(width, ncp, i, newval);
1108 }
1109 return rv;
1110}
1111
1112/*[clinic input]
1113audioop.bias
1114
1115 fragment: Py_buffer
1116 width: int
1117 bias: int
1118 /
1119
1120Return a fragment that is the original fragment with a bias added to each sample.
1121[clinic start generated code]*/
1122
1123static PyObject *
1124audioop_bias_impl(PyObject *module, Py_buffer *fragment, int width, int bias)
1125/*[clinic end generated code: output=6e0aa8f68f045093 input=2b5cce5c3bb4838c]*/
1126{
1127 signed char *ncp;
1128 Py_ssize_t i;
1129 unsigned int val = 0, mask;
1130 PyObject *rv;
1131
1132 if (!audioop_check_parameters(module, fragment->len, width))
1133 return NULL;
1134
1135 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1136 if (rv == NULL)
1137 return NULL;
1138 ncp = (signed char *)PyBytes_AsString(rv);
1139
1140 mask = masks[width];
1141
1142 for (i = 0; i < fragment->len; i += width) {
1143 if (width == 1)
1144 val = GETINTX(unsigned char, fragment->buf, i);
1145 else if (width == 2)
1146 val = GETINTX(uint16_t, fragment->buf, i);
1147 else if (width == 3)
1148 val = ((unsigned int)GETINT24(fragment->buf, i)) & 0xffffffu;
1149 else {
1150 assert(width == 4);
1151 val = GETINTX(uint32_t, fragment->buf, i);
1152 }
1153
1154 val += (unsigned int)bias;
1155 /* wrap around in case of overflow */
1156 val &= mask;
1157
1158 if (width == 1)
1159 SETINTX(unsigned char, ncp, i, val);
1160 else if (width == 2)
1161 SETINTX(uint16_t, ncp, i, val);
1162 else if (width == 3)
1163 SETINT24(ncp, i, (int)val);
1164 else {
1165 assert(width == 4);
1166 SETINTX(uint32_t, ncp, i, val);
1167 }
1168 }
1169 return rv;
1170}
1171
1172/*[clinic input]
1173audioop.reverse
1174
1175 fragment: Py_buffer
1176 width: int
1177 /
1178
Reverse the samples in a fragment and return the modified fragment.
1180[clinic start generated code]*/
1181
1182static PyObject *
1183audioop_reverse_impl(PyObject *module, Py_buffer *fragment, int width)
1184/*[clinic end generated code: output=b44135698418da14 input=668f890cf9f9d225]*/
1185{
1186 unsigned char *ncp;
1187 Py_ssize_t i;
1188 PyObject *rv;
1189
1190 if (!audioop_check_parameters(module, fragment->len, width))
1191 return NULL;
1192
1193 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1194 if (rv == NULL)
1195 return NULL;
1196 ncp = (unsigned char *)PyBytes_AsString(rv);
1197
1198 for (i = 0; i < fragment->len; i += width) {
1199 int val = GETRAWSAMPLE(width, fragment->buf, i);
1200 SETRAWSAMPLE(width, ncp, fragment->len - i - width, val);
1201 }
1202 return rv;
1203}
1204
1205/*[clinic input]
1206audioop.byteswap
1207
1208 fragment: Py_buffer
1209 width: int
1210 /
1211
1212Convert big-endian samples to little-endian and vice versa.
1213[clinic start generated code]*/
1214
1215static PyObject *
1216audioop_byteswap_impl(PyObject *module, Py_buffer *fragment, int width)
1217/*[clinic end generated code: output=50838a9e4b87cd4d input=fae7611ceffa5c82]*/
1218{
1219 unsigned char *ncp;
1220 Py_ssize_t i;
1221 PyObject *rv;
1222
1223 if (!audioop_check_parameters(module, fragment->len, width))
1224 return NULL;
1225
1226 rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1227 if (rv == NULL)
1228 return NULL;
1229 ncp = (unsigned char *)PyBytes_AsString(rv);
1230
1231 for (i = 0; i < fragment->len; i += width) {
1232 int j;
1233 for (j = 0; j < width; j++)
1234 ncp[i + width - 1 - j] = ((unsigned char *)fragment->buf)[i + j];
1235 }
1236 return rv;
1237}
1238
1239/*[clinic input]
1240audioop.lin2lin
1241
1242 fragment: Py_buffer
1243 width: int
1244 newwidth: int
1245 /
1246
1247Convert samples between 1-, 2-, 3- and 4-byte formats.
1248[clinic start generated code]*/
1249
1250static PyObject *
1251audioop_lin2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1252 int newwidth)
1253/*[clinic end generated code: output=17b14109248f1d99 input=5ce08c8aa2f24d96]*/
1254{
1255 unsigned char *ncp;
1256 Py_ssize_t i, j;
1257 PyObject *rv;
1258
1259 if (!audioop_check_parameters(module, fragment->len, width))
1260 return NULL;
1261 if (!audioop_check_size(module, newwidth))
1262 return NULL;
1263
1264 if (fragment->len/width > PY_SSIZE_T_MAX/newwidth) {
1265 PyErr_SetString(PyExc_MemoryError,
1266 "not enough memory for output buffer");
1267 return NULL;
1268 }
1269 rv = PyBytes_FromStringAndSize(NULL, (fragment->len/width)*newwidth);
1270 if (rv == NULL)
1271 return NULL;
1272 ncp = (unsigned char *)PyBytes_AsString(rv);
1273
1274 for (i = j = 0; i < fragment->len; i += width, j += newwidth) {
1275 int val = GETSAMPLE32(width, fragment->buf, i);
1276 SETSAMPLE32(newwidth, ncp, j, val);
1277 }
1278 return rv;
1279}
1280
1281static int
1282gcd(int a, int b)
1283{
1284 while (b > 0) {
1285 int tmp = a % b;
1286 a = b;
1287 b = tmp;
1288 }
1289 return a;
1290}
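
/* Example (illustrative): ratecv() uses gcd() to reduce the rate and weight
 * ratios before resampling, e.g. gcd(8000, 44100) == 100, so an
 * 8000 -> 44100 Hz conversion is carried out with inrate = 80 and
 * outrate = 441.
 */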
1291
1292/*[clinic input]
1293audioop.ratecv
1294
1295 fragment: Py_buffer
1296 width: int
1297 nchannels: int
1298 inrate: int
1299 outrate: int
1300 state: object
1301 weightA: int = 1
1302 weightB: int = 0
1303 /
1304
1305Convert the frame rate of the input fragment.
1306[clinic start generated code]*/
1307
1308static PyObject *
1309audioop_ratecv_impl(PyObject *module, Py_buffer *fragment, int width,
1310 int nchannels, int inrate, int outrate, PyObject *state,
1311 int weightA, int weightB)
1312/*[clinic end generated code: output=624038e843243139 input=aff3acdc94476191]*/
1313{
1314 char *cp, *ncp;
1315 Py_ssize_t len;
1316 int chan, d, *prev_i, *cur_i, cur_o;
1317 PyObject *samps, *str, *rv = NULL, *channel;
1318 int bytes_per_frame;
1319
1320 if (!audioop_check_size(module, width))
1321 return NULL;
1322 if (nchannels < 1) {
1323 PyErr_SetString(get_audioop_state(module)->AudioopError,
1324 "# of channels should be >= 1");
1325 return NULL;
1326 }
1327 if (width > INT_MAX / nchannels) {
1328 /* This overflow test is rigorously correct because
1329 both multiplicands are >= 1. Use the argument names
1330 from the docs for the error msg. */
1331 PyErr_SetString(PyExc_OverflowError,
1332 "width * nchannels too big for a C int");
1333 return NULL;
1334 }
1335 bytes_per_frame = width * nchannels;
1336 if (weightA < 1 || weightB < 0) {
1337 PyErr_SetString(get_audioop_state(module)->AudioopError,
1338 "weightA should be >= 1, weightB should be >= 0");
1339 return NULL;
1340 }
1341 assert(fragment->len >= 0);
1342 if (fragment->len % bytes_per_frame != 0) {
1343 PyErr_SetString(get_audioop_state(module)->AudioopError,
1344 "not a whole number of frames");
1345 return NULL;
1346 }
1347 if (inrate <= 0 || outrate <= 0) {
1348 PyErr_SetString(get_audioop_state(module)->AudioopError,
1349 "sampling rate not > 0");
1350 return NULL;
1351 }
1352 /* divide inrate and outrate by their greatest common divisor */
1353 d = gcd(inrate, outrate);
1354 inrate /= d;
1355 outrate /= d;
1356 /* divide weightA and weightB by their greatest common divisor */
1357 d = gcd(weightA, weightB);
1358 weightA /= d;
1359 weightB /= d;
1360
1361 if ((size_t)nchannels > SIZE_MAX/sizeof(int)) {
1362 PyErr_SetString(PyExc_MemoryError,
1363 "not enough memory for output buffer");
1364 return NULL;
1365 }
1366 prev_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1367 cur_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1368 if (prev_i == NULL || cur_i == NULL) {
1369 (void) PyErr_NoMemory();
1370 goto exit;
1371 }
1372
1373 len = fragment->len / bytes_per_frame; /* # of frames */
1374
1375 if (state == Py_None) {
1376 d = -outrate;
1377 for (chan = 0; chan < nchannels; chan++)
1378 prev_i[chan] = cur_i[chan] = 0;
1379 }
1380 else {
1381 if (!PyTuple_Check(state)) {
1382 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1383 goto exit;
1384 }
1385 if (!PyArg_ParseTuple(state,
1386 "iO!;ratecv(): illegal state argument",
1387 &d, &PyTuple_Type, &samps))
1388 goto exit;
1389 if (PyTuple_Size(samps) != nchannels) {
1390 PyErr_SetString(get_audioop_state(module)->AudioopError,
1391 "illegal state argument");
1392 goto exit;
1393 }
1394 for (chan = 0; chan < nchannels; chan++) {
1395 channel = PyTuple_GetItem(samps, chan);
1396 if (!PyTuple_Check(channel)) {
1397 PyErr_SetString(PyExc_TypeError,
1398 "ratecv(): illegal state argument");
1399 goto exit;
1400 }
1401 if (!PyArg_ParseTuple(channel,
1402 "ii;ratecv(): illegal state argument",
1403 &prev_i[chan], &cur_i[chan]))
1404 {
1405 goto exit;
1406 }
1407 }
1408 }
1409
1410 /* str <- Space for the output buffer. */
1411 if (len == 0)
1412 str = PyBytes_FromStringAndSize(NULL, 0);
1413 else {
1414 /* There are len input frames, so we need (mathematically)
1415 ceiling(len*outrate/inrate) output frames, and each frame
1416 requires bytes_per_frame bytes. Computing this
1417 without spurious overflow is the challenge; we can
1418 settle for a reasonable upper bound, though, in this
1419 case ceiling(len/inrate) * outrate. */
1420
1421 /* compute ceiling(len/inrate) without overflow */
1422 Py_ssize_t q = 1 + (len - 1) / inrate;
1423 if (outrate > PY_SSIZE_T_MAX / q / bytes_per_frame)
1424 str = NULL;
1425 else
1426 str = PyBytes_FromStringAndSize(NULL,
1427 q * outrate * bytes_per_frame);
1428 }
1429 if (str == NULL) {
1430 PyErr_SetString(PyExc_MemoryError,
1431 "not enough memory for output buffer");
1432 goto exit;
1433 }
1434 ncp = PyBytes_AsString(str);
1435 cp = fragment->buf;
1436
1437 for (;;) {
1438 while (d < 0) {
1439 if (len == 0) {
1440 samps = PyTuple_New(nchannels);
1441 if (samps == NULL)
1442 goto exit;
1443 for (chan = 0; chan < nchannels; chan++)
1444 PyTuple_SetItem(samps, chan,
1445 Py_BuildValue("(ii)",
1446 prev_i[chan],
1447 cur_i[chan]));
1448 if (PyErr_Occurred())
1449 goto exit;
1450 /* We have checked before that the length
1451 * of the string fits into int. */
1452 len = (Py_ssize_t)(ncp - PyBytes_AsString(str));
1453 rv = PyBytes_FromStringAndSize
1454 (PyBytes_AsString(str), len);
1455 Py_DECREF(str);
1456 str = rv;
1457 if (str == NULL)
1458 goto exit;
1459 rv = Py_BuildValue("(O(iO))", str, d, samps);
1460 Py_DECREF(samps);
1461 Py_DECREF(str);
1462 goto exit; /* return rv */
1463 }
1464 for (chan = 0; chan < nchannels; chan++) {
1465 prev_i[chan] = cur_i[chan];
1466 cur_i[chan] = GETSAMPLE32(width, cp, 0);
1467 cp += width;
1468 /* implements a simple digital filter */
1469 cur_i[chan] = (int)(
1470 ((double)weightA * (double)cur_i[chan] +
1471 (double)weightB * (double)prev_i[chan]) /
1472 ((double)weightA + (double)weightB));
1473 }
1474 len--;
1475 d += outrate;
1476 }
1477 while (d >= 0) {
1478 for (chan = 0; chan < nchannels; chan++) {
1479 cur_o = (int)(((double)prev_i[chan] * (double)d +
1480 (double)cur_i[chan] * (double)(outrate - d)) /
1481 (double)outrate);
1482 SETSAMPLE32(width, ncp, 0, cur_o);
1483 ncp += width;
1484 }
1485 d -= inrate;
1486 }
1487 }
1488 exit:
1489 PyMem_Free(prev_i);
1490 PyMem_Free(cur_i);
1491 return rv;
1492}
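
/* Example (illustrative) of the `d` counter above: after reduction, an
 * 8000 -> 6000 Hz conversion runs with inrate = 4 and outrate = 3.
 * Starting from d = -outrate = -3, the loop consumes an input frame whenever
 * d < 0 (d += 3) and emits an interpolated output frame whenever d >= 0
 * (d -= 4), so every 4 frames read produce 3 frames written; the leftover d
 * is handed back to the caller inside the returned state tuple.
 */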
1493
1494/*[clinic input]
1495audioop.lin2ulaw
1496
1497 fragment: Py_buffer
1498 width: int
1499 /
1500
1501Convert samples in the audio fragment to u-LAW encoding.
1502[clinic start generated code]*/
1503
1504static PyObject *
1505audioop_lin2ulaw_impl(PyObject *module, Py_buffer *fragment, int width)
1506/*[clinic end generated code: output=14fb62b16fe8ea8e input=2450d1b870b6bac2]*/
1507{
1508 unsigned char *ncp;
1509 Py_ssize_t i;
1510 PyObject *rv;
1511
1512 if (!audioop_check_parameters(module, fragment->len, width))
1513 return NULL;
1514
1515 rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1516 if (rv == NULL)
1517 return NULL;
1518 ncp = (unsigned char *)PyBytes_AsString(rv);
1519
1520 for (i = 0; i < fragment->len; i += width) {
1521 int val = GETSAMPLE32(width, fragment->buf, i);
1522 *ncp++ = st_14linear2ulaw(val >> 18);
1523 }
1524 return rv;
1525}
1526
1527/*[clinic input]
1528audioop.ulaw2lin
1529
1530 fragment: Py_buffer
1531 width: int
1532 /
1533
1534Convert sound fragments in u-LAW encoding to linearly encoded sound fragments.
1535[clinic start generated code]*/
1536
1537static PyObject *
1538audioop_ulaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1539/*[clinic end generated code: output=378356b047521ba2 input=45d53ddce5be7d06]*/
1540{
1541 unsigned char *cp;
1542 signed char *ncp;
1543 Py_ssize_t i;
1544 PyObject *rv;
1545
1546 if (!audioop_check_size(module, width))
1547 return NULL;
1548
1549 if (fragment->len > PY_SSIZE_T_MAX/width) {
1550 PyErr_SetString(PyExc_MemoryError,
1551 "not enough memory for output buffer");
1552 return NULL;
1553 }
1554 rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1555 if (rv == NULL)
1556 return NULL;
1557 ncp = (signed char *)PyBytes_AsString(rv);
1558
1559 cp = fragment->buf;
1560 for (i = 0; i < fragment->len*width; i += width) {
1561 int val = st_ulaw2linear16(*cp++) << 16;
1562 SETSAMPLE32(width, ncp, i, val);
1563 }
1564 return rv;
1565}
1566
1567/*[clinic input]
1568audioop.lin2alaw
1569
1570 fragment: Py_buffer
1571 width: int
1572 /
1573
1574Convert samples in the audio fragment to a-LAW encoding.
1575[clinic start generated code]*/
1576
1577static PyObject *
1578audioop_lin2alaw_impl(PyObject *module, Py_buffer *fragment, int width)
1579/*[clinic end generated code: output=d076f130121a82f0 input=ffb1ef8bb39da945]*/
1580{
1581 unsigned char *ncp;
1582 Py_ssize_t i;
1583 PyObject *rv;
1584
1585 if (!audioop_check_parameters(module, fragment->len, width))
1586 return NULL;
1587
1588 rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1589 if (rv == NULL)
1590 return NULL;
1591 ncp = (unsigned char *)PyBytes_AsString(rv);
1592
1593 for (i = 0; i < fragment->len; i += width) {
1594 int val = GETSAMPLE32(width, fragment->buf, i);
1595 *ncp++ = st_linear2alaw(val >> 19);
1596 }
1597 return rv;
1598}
1599
1600/*[clinic input]
1601audioop.alaw2lin
1602
1603 fragment: Py_buffer
1604 width: int
1605 /
1606
1607Convert sound fragments in a-LAW encoding to linearly encoded sound fragments.
1608[clinic start generated code]*/
1609
1610static PyObject *
1611audioop_alaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1612/*[clinic end generated code: output=85c365ec559df647 input=4140626046cd1772]*/
1613{
1614 unsigned char *cp;
1615 signed char *ncp;
1616 Py_ssize_t i;
1617 int val;
1618 PyObject *rv;
1619
1620 if (!audioop_check_size(module, width))
1621 return NULL;
1622
1623 if (fragment->len > PY_SSIZE_T_MAX/width) {
1624 PyErr_SetString(PyExc_MemoryError,
1625 "not enough memory for output buffer");
1626 return NULL;
1627 }
1628 rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1629 if (rv == NULL)
1630 return NULL;
1631 ncp = (signed char *)PyBytes_AsString(rv);
1632 cp = fragment->buf;
1633
1634 for (i = 0; i < fragment->len*width; i += width) {
1635 val = st_alaw2linear16(*cp++) << 16;
1636 SETSAMPLE32(width, ncp, i, val);
1637 }
1638 return rv;
1639}
1640
1641/*[clinic input]
1642audioop.lin2adpcm
1643
1644 fragment: Py_buffer
1645 width: int
1646 state: object
1647 /
1648
1649Convert samples to 4 bit Intel/DVI ADPCM encoding.
1650[clinic start generated code]*/
1651
1652static PyObject *
1653audioop_lin2adpcm_impl(PyObject *module, Py_buffer *fragment, int width,
1654 PyObject *state)
1655/*[clinic end generated code: output=cc19f159f16c6793 input=12919d549b90c90a]*/
1656{
1657 signed char *ncp;
1658 Py_ssize_t i;
1659 int step, valpred, delta,
1660 index, sign, vpdiff, diff;
1661 PyObject *rv = NULL, *str;
1662 int outputbuffer = 0, bufferstep;
1663
1664 if (!audioop_check_parameters(module, fragment->len, width))
1665 return NULL;
1666
1667 /* Decode state, should have (value, step) */
1668 if ( state == Py_None ) {
1669 /* First time, it seems. Set defaults */
1670 valpred = 0;
1671 index = 0;
1672 }
1673 else if (!PyTuple_Check(state)) {
1674 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1675 return NULL;
1676 }
1677 else if (!PyArg_ParseTuple(state, "ii;lin2adpcm(): illegal state argument",
1678 &valpred, &index))
1679 {
1680 return NULL;
1681 }
1682 else if (valpred >= 0x8000 || valpred < -0x8000 ||
1683 (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1684 PyErr_SetString(PyExc_ValueError, "bad state");
1685 return NULL;
1686 }
1687
1688 str = PyBytes_FromStringAndSize(NULL, fragment->len/(width*2));
1689 if (str == NULL)
1690 return NULL;
1691 ncp = (signed char *)PyBytes_AsString(str);
1692
1693 step = stepsizeTable[index];
1694 bufferstep = 1;
1695
1696 for (i = 0; i < fragment->len; i += width) {
1697 int val = GETSAMPLE32(width, fragment->buf, i) >> 16;
1698
1699 /* Step 1 - compute difference with previous value */
1700 if (val < valpred) {
1701 diff = valpred - val;
1702 sign = 8;
1703 }
1704 else {
1705 diff = val - valpred;
1706 sign = 0;
1707 }
1708
1709 /* Step 2 - Divide and clamp */
1710 /* Note:
1711 ** This code *approximately* computes:
1712 ** delta = diff*4/step;
1713 ** vpdiff = (delta+0.5)*step/4;
1714 ** but in shift step bits are dropped. The net result of this
1715 ** is that even if you have fast mul/div hardware you cannot
1716 ** put it to good use since the fixup would be too expensive.
1717 */
1718 delta = 0;
1719 vpdiff = (step >> 3);
1720
1721 if ( diff >= step ) {
1722 delta = 4;
1723 diff -= step;
1724 vpdiff += step;
1725 }
1726 step >>= 1;
1727 if ( diff >= step ) {
1728 delta |= 2;
1729 diff -= step;
1730 vpdiff += step;
1731 }
1732 step >>= 1;
1733 if ( diff >= step ) {
1734 delta |= 1;
1735 vpdiff += step;
1736 }
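
        /* Worked example (illustrative): starting this pass with step = 307
         * and diff = 500, the three tests above leave delta = 6 and
         * vpdiff = 38 + 307 + 153 = 498, matching the approximations
         * diff*4/step = 6.5 -> 6 and (delta+0.5)*step/4 = 498.9 -> 498. */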
1737
1738 /* Step 3 - Update previous value */
1739 if ( sign )
1740 valpred -= vpdiff;
1741 else
1742 valpred += vpdiff;
1743
1744 /* Step 4 - Clamp previous value to 16 bits */
1745 if ( valpred > 32767 )
1746 valpred = 32767;
1747 else if ( valpred < -32768 )
1748 valpred = -32768;
1749
1750 /* Step 5 - Assemble value, update index and step values */
1751 delta |= sign;
1752
1753 index += indexTable[delta];
1754 if ( index < 0 ) index = 0;
1755 if ( index > 88 ) index = 88;
1756 step = stepsizeTable[index];
1757
1758 /* Step 6 - Output value */
1759 if ( bufferstep ) {
1760 outputbuffer = (delta << 4) & 0xf0;
1761 } else {
1762 *ncp++ = (delta & 0x0f) | outputbuffer;
1763 }
1764 bufferstep = !bufferstep;
1765 }
1766 rv = Py_BuildValue("(O(ii))", str, valpred, index);
1767 Py_DECREF(str);
1768 return rv;
1769}
1770
1771/*[clinic input]
1772audioop.adpcm2lin
1773
1774 fragment: Py_buffer
1775 width: int
1776 state: object
1777 /
1778
1779Decode an Intel/DVI ADPCM coded fragment to a linear fragment.
1780[clinic start generated code]*/
1781
1782static PyObject *
1783audioop_adpcm2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1784 PyObject *state)
1785/*[clinic end generated code: output=3440ea105acb3456 input=f5221144f5ca9ef0]*/
1786{
1787 signed char *cp;
1788 signed char *ncp;
1789 Py_ssize_t i, outlen;
1790 int valpred, step, delta, index, sign, vpdiff;
1791 PyObject *rv, *str;
1792 int inputbuffer = 0, bufferstep;
1793
1794 if (!audioop_check_size(module, width))
1795 return NULL;
1796
1797 /* Decode state, should have (value, step) */
1798 if ( state == Py_None ) {
1799 /* First time, it seems. Set defaults */
1800 valpred = 0;
1801 index = 0;
1802 }
1803 else if (!PyTuple_Check(state)) {
1804 PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1805 return NULL;
1806 }
1807 else if (!PyArg_ParseTuple(state, "ii;adpcm2lin(): illegal state argument",
1808 &valpred, &index))
1809 {
1810 return NULL;
1811 }
1812 else if (valpred >= 0x8000 || valpred < -0x8000 ||
1813 (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1814 PyErr_SetString(PyExc_ValueError, "bad state");
1815 return NULL;
1816 }
1817
1818 if (fragment->len > (PY_SSIZE_T_MAX/2)/width) {
1819 PyErr_SetString(PyExc_MemoryError,
1820 "not enough memory for output buffer");
1821 return NULL;
1822 }
1823 outlen = fragment->len*width*2;
1824 str = PyBytes_FromStringAndSize(NULL, outlen);
1825 if (str == NULL)
1826 return NULL;
1827 ncp = (signed char *)PyBytes_AsString(str);
1828 cp = fragment->buf;
1829
1830 step = stepsizeTable[index];
1831 bufferstep = 0;
1832
1833 for (i = 0; i < outlen; i += width) {
1834 /* Step 1 - get the delta value and compute next index */
1835 if ( bufferstep ) {
1836 delta = inputbuffer & 0xf;
1837 } else {
1838 inputbuffer = *cp++;
1839 delta = (inputbuffer >> 4) & 0xf;
1840 }
1841
1842 bufferstep = !bufferstep;
1843
1844 /* Step 2 - Find new index value (for later) */
1845 index += indexTable[delta];
1846 if ( index < 0 ) index = 0;
1847 if ( index > 88 ) index = 88;
1848
1849 /* Step 3 - Separate sign and magnitude */
1850 sign = delta & 8;
1851 delta = delta & 7;
1852
1853 /* Step 4 - Compute difference and new predicted value */
1854 /*
1855 ** Computes 'vpdiff = (delta+0.5)*step/4', but see comment
1856 ** in adpcm_coder.
1857 */
1858 vpdiff = step >> 3;
1859 if ( delta & 4 ) vpdiff += step;
1860 if ( delta & 2 ) vpdiff += step>>1;
1861 if ( delta & 1 ) vpdiff += step>>2;
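
        /* Illustrative check: for delta = 6 and step = 307 this rebuilds
         * vpdiff = 38 + 307 + 153 = 498, the same value the coder in
         * lin2adpcm produced for that (step, delta) pair. */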
1862
1863 if ( sign )
1864 valpred -= vpdiff;
1865 else
1866 valpred += vpdiff;
1867
1868 /* Step 5 - clamp output value */
1869 if ( valpred > 32767 )
1870 valpred = 32767;
1871 else if ( valpred < -32768 )
1872 valpred = -32768;
1873
1874 /* Step 6 - Update step value */
1875 step = stepsizeTable[index];
1876
1877 /* Step 6 - Output value */
1878 SETSAMPLE32(width, ncp, i, valpred << 16);
1879 }
1880
1881 rv = Py_BuildValue("(O(ii))", str, valpred, index);
1882 Py_DECREF(str);
1883 return rv;
1884}
1885
1886#include "clinic/audioop.c.h"
1887
1888static PyMethodDef audioop_methods[] = {
1889 AUDIOOP_MAX_METHODDEF
1890 AUDIOOP_MINMAX_METHODDEF
1891 AUDIOOP_AVG_METHODDEF
1892 AUDIOOP_MAXPP_METHODDEF
1893 AUDIOOP_AVGPP_METHODDEF
1894 AUDIOOP_RMS_METHODDEF
1895 AUDIOOP_FINDFIT_METHODDEF
1896 AUDIOOP_FINDMAX_METHODDEF
1897 AUDIOOP_FINDFACTOR_METHODDEF
1898 AUDIOOP_CROSS_METHODDEF
1899 AUDIOOP_MUL_METHODDEF
1900 AUDIOOP_ADD_METHODDEF
1901 AUDIOOP_BIAS_METHODDEF
1902 AUDIOOP_ULAW2LIN_METHODDEF
1903 AUDIOOP_LIN2ULAW_METHODDEF
1904 AUDIOOP_ALAW2LIN_METHODDEF
1905 AUDIOOP_LIN2ALAW_METHODDEF
1906 AUDIOOP_LIN2LIN_METHODDEF
1907 AUDIOOP_ADPCM2LIN_METHODDEF
1908 AUDIOOP_LIN2ADPCM_METHODDEF
1909 AUDIOOP_TOMONO_METHODDEF
1910 AUDIOOP_TOSTEREO_METHODDEF
1911 AUDIOOP_GETSAMPLE_METHODDEF
1912 AUDIOOP_REVERSE_METHODDEF
1913 AUDIOOP_BYTESWAP_METHODDEF
1914 AUDIOOP_RATECV_METHODDEF
1915 { 0, 0 }
1916};
1917
1918static int
1919audioop_traverse(PyObject *module, visitproc visit, void *arg)
1920{
1921 audioop_state *state = get_audioop_state(module);
1922 Py_VISIT(state->AudioopError);
1923 return 0;
1924}
1925
1926static int
1927audioop_clear(PyObject *module)
1928{
1929 audioop_state *state = get_audioop_state(module);
1930 Py_CLEAR(state->AudioopError);
1931 return 0;
1932}
1933
1934static void
1935audioop_free(void *module) {
1936 audioop_clear((PyObject *)module);
1937}
1938
1939static int
1940audioop_exec(PyObject* module)
1941{
1942 audioop_state *state = get_audioop_state(module);
1943
1944 state->AudioopError = PyErr_NewException("audioop.error", NULL, NULL);
1945 if (state->AudioopError == NULL) {
1946 return -1;
1947 }
1948
1949 Py_INCREF(state->AudioopError);
1950 if (PyModule_AddObject(module, "error", state->AudioopError) < 0) {
1951 Py_DECREF(state->AudioopError);
1952 return -1;
1953 }
1954
1955 return 0;
1956}
1957
1958static PyModuleDef_Slot audioop_slots[] = {
1959 {Py_mod_exec, audioop_exec},
1960 {0, NULL}
1961};
1962
1963static struct PyModuleDef audioopmodule = {
1964 PyModuleDef_HEAD_INIT,
1965 "audioop",
1966 NULL,
1967 sizeof(audioop_state),
1968 audioop_methods,
1969 audioop_slots,
1970 audioop_traverse,
1971 audioop_clear,
1972 audioop_free
1973};
1974
1975PyMODINIT_FUNC
1976PyInit_audioop(void)
1977{
1978 return PyModuleDef_Init(&audioopmodule);
1979}
1980