Line data Source code
1 :
2 : /* audioopmodule - Module with useful operations on sound fragments */
3 :
4 : #define PY_SSIZE_T_CLEAN
5 :
6 : #include "Python.h"
7 :
8 : static const int maxvals[] = {0, 0x7F, 0x7FFF, 0x7FFFFF, 0x7FFFFFFF};
9 : /* -1 trick is needed on Windows to support -0x80000000 without a warning */
10 : static const int minvals[] = {0, -0x80, -0x8000, -0x800000, -0x7FFFFFFF-1};
11 : static const unsigned int masks[] = {0, 0xFF, 0xFFFF, 0xFFFFFF, 0xFFFFFFFF};
12 :
13 : static int
14 518 : fbound(double val, double minval, double maxval)
15 : {
16 518 : if (val > maxval) {
17 10 : val = maxval;
18 : }
19 508 : else if (val < minval + 1.0) {
20 59 : val = minval;
21 : }
22 :
23 : /* Round towards minus infinity (-inf) */
24 518 : val = floor(val);
25 :
26 : /* Cast double to integer: round towards zero */
27 518 : return (int)val;
28 : }
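
/*
 * Editorial worked example (not part of the original module): how fbound()
 * behaves at the 16-bit limits most callers below pass in; the values are
 * traced by hand from the code above.
 *
 *   fbound( 40000.0, -32768.0, 32767.0) ->  32767   (clipped at maxval)
 *   fbound(-40000.0, -32768.0, 32767.0) -> -32768   (clipped at minval)
 *   fbound(    -1.5, -32768.0, 32767.0) ->     -2   (floor(), not truncation)
 */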
29 :
30 :
31 : /* Code shamelessly stolen from sox, 12.17.7, g711.c
32 : ** (c) Craig Reese, Joe Campbell and Jeff Poskanzer 1989 */
33 :
34 : /* From g711.c:
35 : *
36 : * December 30, 1994:
37 : * Functions linear2alaw, linear2ulaw have been updated to correctly
38 : * convert unquantized 16 bit values.
39 : * Tables for direct u- to A-law and A- to u-law conversions have been
40 : * corrected.
41 : * Borge Lindberg, Center for PersonKommunikation, Aalborg University.
42 : * bli@cpk.auc.dk
43 : *
44 : */
45 : #define BIAS 0x84 /* define the add-in bias for 16 bit samples */
46 : #define CLIP 32635
47 : #define SIGN_BIT (0x80) /* Sign bit for an A-law byte. */
48 : #define QUANT_MASK (0xf) /* Quantization field mask. */
49 : #define SEG_SHIFT (4) /* Left shift for segment number. */
50 : #define SEG_MASK (0x70) /* Segment field mask. */
51 :
52 : static const int16_t seg_aend[8] = {
53 : 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF
54 : };
55 : static const int16_t seg_uend[8] = {
56 : 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF
57 : };
58 :
59 : static int16_t
60 24723 : search(int16_t val, const int16_t *table, int size)
61 : {
62 : int i;
63 :
64 112307 : for (i = 0; i < size; i++) {
65 112298 : if (val <= *table++)
66 24714 : return (i);
67 : }
68 9 : return (size);
69 : }
70 : #define st_ulaw2linear16(uc) (_st_ulaw2linear16[uc])
71 : #define st_alaw2linear16(uc) (_st_alaw2linear16[uc])
72 :
73 : static const int16_t _st_ulaw2linear16[256] = {
74 : -32124, -31100, -30076, -29052, -28028, -27004, -25980,
75 : -24956, -23932, -22908, -21884, -20860, -19836, -18812,
76 : -17788, -16764, -15996, -15484, -14972, -14460, -13948,
77 : -13436, -12924, -12412, -11900, -11388, -10876, -10364,
78 : -9852, -9340, -8828, -8316, -7932, -7676, -7420,
79 : -7164, -6908, -6652, -6396, -6140, -5884, -5628,
80 : -5372, -5116, -4860, -4604, -4348, -4092, -3900,
81 : -3772, -3644, -3516, -3388, -3260, -3132, -3004,
82 : -2876, -2748, -2620, -2492, -2364, -2236, -2108,
83 : -1980, -1884, -1820, -1756, -1692, -1628, -1564,
84 : -1500, -1436, -1372, -1308, -1244, -1180, -1116,
85 : -1052, -988, -924, -876, -844, -812, -780,
86 : -748, -716, -684, -652, -620, -588, -556,
87 : -524, -492, -460, -428, -396, -372, -356,
88 : -340, -324, -308, -292, -276, -260, -244,
89 : -228, -212, -196, -180, -164, -148, -132,
90 : -120, -112, -104, -96, -88, -80, -72,
91 : -64, -56, -48, -40, -32, -24, -16,
92 : -8, 0, 32124, 31100, 30076, 29052, 28028,
93 : 27004, 25980, 24956, 23932, 22908, 21884, 20860,
94 : 19836, 18812, 17788, 16764, 15996, 15484, 14972,
95 : 14460, 13948, 13436, 12924, 12412, 11900, 11388,
96 : 10876, 10364, 9852, 9340, 8828, 8316, 7932,
97 : 7676, 7420, 7164, 6908, 6652, 6396, 6140,
98 : 5884, 5628, 5372, 5116, 4860, 4604, 4348,
99 : 4092, 3900, 3772, 3644, 3516, 3388, 3260,
100 : 3132, 3004, 2876, 2748, 2620, 2492, 2364,
101 : 2236, 2108, 1980, 1884, 1820, 1756, 1692,
102 : 1628, 1564, 1500, 1436, 1372, 1308, 1244,
103 : 1180, 1116, 1052, 988, 924, 876, 844,
104 : 812, 780, 748, 716, 684, 652, 620,
105 : 588, 556, 524, 492, 460, 428, 396,
106 : 372, 356, 340, 324, 308, 292, 276,
107 : 260, 244, 228, 212, 196, 180, 164,
108 : 148, 132, 120, 112, 104, 96, 88,
109 : 80, 72, 64, 56, 48, 40, 32,
110 : 24, 16, 8, 0
111 : };
112 :
113 : /*
114 : * linear2ulaw() accepts a 14-bit signed integer and encodes it as u-law data
115 : * stored in an unsigned char. This function should only be called with
116 : * the data shifted such that it only contains information in the lower
117 : * 14-bits.
118 : *
119 : * In order to simplify the encoding process, the original linear magnitude
120 : * is biased by adding 33 which shifts the encoding range from (0 - 8158) to
121 : * (33 - 8191). The result can be seen in the following encoding table:
122 : *
123 : * Biased Linear Input Code Compressed Code
124 : * ------------------------ ---------------
125 : * 00000001wxyza 000wxyz
126 : * 0000001wxyzab 001wxyz
127 : * 000001wxyzabc 010wxyz
128 : * 00001wxyzabcd 011wxyz
129 : * 0001wxyzabcde 100wxyz
130 : * 001wxyzabcdef 101wxyz
131 : * 01wxyzabcdefg 110wxyz
132 : * 1wxyzabcdefgh 111wxyz
133 : *
134 : * Each biased linear code has a leading 1 which identifies the segment
135 : * number. The value of the segment number is equal to 7 minus the number
136 : * of leading 0's. The quantization interval is directly available as the
137 : * four bits wxyz. The trailing bits (a - h) are ignored.
138 : *
139 : * Ordinarily the complement of the resulting code word is used for
140 : * transmission, and so the code word is complemented before it is returned.
141 : *
142 : * For further information see John C. Bellamy's Digital Telephony, 1982,
143 : * John Wiley & Sons, pp. 98-111 and 472-476.
144 : */
145 : static unsigned char
146 16243 : st_14linear2ulaw(int16_t pcm_val) /* 2's complement (14-bit range) */
147 : {
148 : int16_t mask;
149 : int16_t seg;
150 : unsigned char uval;
151 :
152 : /* u-law inverts all bits */
153 : /* Get the sign and the magnitude of the value. */
154 16243 : if (pcm_val < 0) {
155 7545 : pcm_val = -pcm_val;
156 7545 : mask = 0x7F;
157 : } else {
158 8698 : mask = 0xFF;
159 : }
160 16243 : if ( pcm_val > CLIP ) pcm_val = CLIP; /* clip the magnitude */
161 16243 : pcm_val += (BIAS >> 2);
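    /* Editorial note, not in the original source: (BIAS >> 2) is
       0x84 >> 2 == 33, the bias described in the block comment above. */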
162 :
163 : /* Convert the scaled magnitude to segment number. */
164 16243 : seg = search(pcm_val, seg_uend, 8);
165 :
166 : /*
167 : * Combine the sign, segment, quantization bits;
168 : * and complement the code word.
169 : */
170 16243 : if (seg >= 8) /* out of range, return maximum value. */
171 9 : return (unsigned char) (0x7F ^ mask);
172 : else {
173 16234 : uval = (unsigned char) (seg << 4) | ((pcm_val >> (seg + 1)) & 0xF);
174 16234 : return (uval ^ mask);
175 : }
176 :
177 : }
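
/*
 * Editorial sketch, not part of the module and not from the sox sources
 * above: encoding one plain 16-bit sample the way audioop.lin2ulaw() does
 * further below.  lin2ulaw shifts the 32-bit normalised sample right by 18,
 * which for 16-bit input is the same as shifting right by 2 into the 14-bit
 * range st_14linear2ulaw() expects.  The helper name is hypothetical and
 * unused by the module.
 */
static unsigned char
example_encode_ulaw16(int16_t sample)
{
    return st_14linear2ulaw(sample >> 2);
}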
178 :
179 : static const int16_t _st_alaw2linear16[256] = {
180 : -5504, -5248, -6016, -5760, -4480, -4224, -4992,
181 : -4736, -7552, -7296, -8064, -7808, -6528, -6272,
182 : -7040, -6784, -2752, -2624, -3008, -2880, -2240,
183 : -2112, -2496, -2368, -3776, -3648, -4032, -3904,
184 : -3264, -3136, -3520, -3392, -22016, -20992, -24064,
185 : -23040, -17920, -16896, -19968, -18944, -30208, -29184,
186 : -32256, -31232, -26112, -25088, -28160, -27136, -11008,
187 : -10496, -12032, -11520, -8960, -8448, -9984, -9472,
188 : -15104, -14592, -16128, -15616, -13056, -12544, -14080,
189 : -13568, -344, -328, -376, -360, -280, -264,
190 : -312, -296, -472, -456, -504, -488, -408,
191 : -392, -440, -424, -88, -72, -120, -104,
192 : -24, -8, -56, -40, -216, -200, -248,
193 : -232, -152, -136, -184, -168, -1376, -1312,
194 : -1504, -1440, -1120, -1056, -1248, -1184, -1888,
195 : -1824, -2016, -1952, -1632, -1568, -1760, -1696,
196 : -688, -656, -752, -720, -560, -528, -624,
197 : -592, -944, -912, -1008, -976, -816, -784,
198 : -880, -848, 5504, 5248, 6016, 5760, 4480,
199 : 4224, 4992, 4736, 7552, 7296, 8064, 7808,
200 : 6528, 6272, 7040, 6784, 2752, 2624, 3008,
201 : 2880, 2240, 2112, 2496, 2368, 3776, 3648,
202 : 4032, 3904, 3264, 3136, 3520, 3392, 22016,
203 : 20992, 24064, 23040, 17920, 16896, 19968, 18944,
204 : 30208, 29184, 32256, 31232, 26112, 25088, 28160,
205 : 27136, 11008, 10496, 12032, 11520, 8960, 8448,
206 : 9984, 9472, 15104, 14592, 16128, 15616, 13056,
207 : 12544, 14080, 13568, 344, 328, 376, 360,
208 : 280, 264, 312, 296, 472, 456, 504,
209 : 488, 408, 392, 440, 424, 88, 72,
210 : 120, 104, 24, 8, 56, 40, 216,
211 : 200, 248, 232, 152, 136, 184, 168,
212 : 1376, 1312, 1504, 1440, 1120, 1056, 1248,
213 : 1184, 1888, 1824, 2016, 1952, 1632, 1568,
214 : 1760, 1696, 688, 656, 752, 720, 560,
215 : 528, 624, 592, 944, 912, 1008, 976,
216 : 816, 784, 880, 848
217 : };
218 :
219 : /*
220 : * linear2alaw() accepts a 13-bit signed integer and encodes it as A-law data
221 : * stored in an unsigned char. This function should only be called with
222 : * the data shifted such that it only contains information in the lower
223 : * 13-bits.
224 : *
225 : * Linear Input Code Compressed Code
226 : * ------------------------ ---------------
227 : * 0000000wxyza 000wxyz
228 : * 0000001wxyza 001wxyz
229 : * 000001wxyzab 010wxyz
230 : * 00001wxyzabc 011wxyz
231 : * 0001wxyzabcd 100wxyz
232 : * 001wxyzabcde 101wxyz
233 : * 01wxyzabcdef 110wxyz
234 : * 1wxyzabcdefg 111wxyz
235 : *
236 : * For further information see John C. Bellamy's Digital Telephony, 1982,
237 : * John Wiley & Sons, pp. 98-111 and 472-476.
238 : */
239 : static unsigned char
240 8480 : st_linear2alaw(int16_t pcm_val) /* 2's complement (13-bit range) */
241 : {
242 : int16_t mask;
243 : int16_t seg;
244 : unsigned char aval;
245 :
246 : /* A-law using even bit inversion */
247 8480 : if (pcm_val >= 0) {
248 4523 : mask = 0xD5; /* sign (7th) bit = 1 */
249 : } else {
250 3957 : mask = 0x55; /* sign bit = 0 */
251 3957 : pcm_val = -pcm_val - 1;
252 : }
253 :
254 : /* Convert the scaled magnitude to segment number. */
255 8480 : seg = search(pcm_val, seg_aend, 8);
256 :
257 : /* Combine the sign, segment, and quantization bits. */
258 :
259 8480 : if (seg >= 8) /* out of range, return maximum value. */
260 0 : return (unsigned char) (0x7F ^ mask);
261 : else {
262 8480 : aval = (unsigned char) seg << SEG_SHIFT;
263 8480 : if (seg < 2)
264 1370 : aval |= (pcm_val >> 1) & QUANT_MASK;
265 : else
266 7110 : aval |= (pcm_val >> seg) & QUANT_MASK;
267 8480 : return (aval ^ mask);
268 : }
269 : }
270 : /* End of code taken from sox */
271 :
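/*
 * Editorial sketch, not part of the module: the A-law counterpart of the
 * sketch above.  audioop.lin2alaw() feeds st_linear2alaw() the 32-bit
 * normalised sample shifted right by 19, i.e. a plain 16-bit sample shifted
 * right by 3 into the 13-bit range.  The helper name is hypothetical and
 * unused by the module.
 */
static unsigned char
example_encode_alaw16(int16_t sample)
{
    return st_linear2alaw(sample >> 3);
}
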
272 : /* Intel ADPCM step variation table */
273 : static const int indexTable[16] = {
274 : -1, -1, -1, -1, 2, 4, 6, 8,
275 : -1, -1, -1, -1, 2, 4, 6, 8,
276 : };
277 :
278 : static const int stepsizeTable[89] = {
279 : 7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
280 : 19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
281 : 50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
282 : 130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
283 : 337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
284 : 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
285 : 2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
286 : 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
287 : 15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
288 : };
289 :
290 : #define GETINTX(T, cp, i) (*(T *)((unsigned char *)(cp) + (i)))
291 : #define SETINTX(T, cp, i, val) do { \
292 : *(T *)((unsigned char *)(cp) + (i)) = (T)(val); \
293 : } while (0)
294 :
295 :
296 : #define GETINT8(cp, i) GETINTX(signed char, (cp), (i))
297 : #define GETINT16(cp, i) GETINTX(int16_t, (cp), (i))
298 : #define GETINT32(cp, i) GETINTX(int32_t, (cp), (i))
299 :
300 : #ifdef WORDS_BIGENDIAN
301 : #define GETINT24(cp, i) ( \
302 : ((unsigned char *)(cp) + (i))[2] + \
303 : (((unsigned char *)(cp) + (i))[1] << 8) + \
304 : (((signed char *)(cp) + (i))[0] << 16) )
305 : #else
306 : #define GETINT24(cp, i) ( \
307 : ((unsigned char *)(cp) + (i))[0] + \
308 : (((unsigned char *)(cp) + (i))[1] << 8) + \
309 : (((signed char *)(cp) + (i))[2] << 16) )
310 : #endif
311 :
312 :
313 : #define SETINT8(cp, i, val) SETINTX(signed char, (cp), (i), (val))
314 : #define SETINT16(cp, i, val) SETINTX(int16_t, (cp), (i), (val))
315 : #define SETINT32(cp, i, val) SETINTX(int32_t, (cp), (i), (val))
316 :
317 : #ifdef WORDS_BIGENDIAN
318 : #define SETINT24(cp, i, val) do { \
319 : ((unsigned char *)(cp) + (i))[2] = (int)(val); \
320 : ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
321 : ((signed char *)(cp) + (i))[0] = (int)(val) >> 16; \
322 : } while (0)
323 : #else
324 : #define SETINT24(cp, i, val) do { \
325 : ((unsigned char *)(cp) + (i))[0] = (int)(val); \
326 : ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
327 : ((signed char *)(cp) + (i))[2] = (int)(val) >> 16; \
328 : } while (0)
329 : #endif
330 :
331 :
332 : #define GETRAWSAMPLE(size, cp, i) ( \
333 : (size == 1) ? (int)GETINT8((cp), (i)) : \
334 : (size == 2) ? (int)GETINT16((cp), (i)) : \
335 : (size == 3) ? (int)GETINT24((cp), (i)) : \
336 : (int)GETINT32((cp), (i)))
337 :
338 : #define SETRAWSAMPLE(size, cp, i, val) do { \
339 : if (size == 1) \
340 : SETINT8((cp), (i), (val)); \
341 : else if (size == 2) \
342 : SETINT16((cp), (i), (val)); \
343 : else if (size == 3) \
344 : SETINT24((cp), (i), (val)); \
345 : else \
346 : SETINT32((cp), (i), (val)); \
347 : } while(0)
348 :
349 :
350 : #define GETSAMPLE32(size, cp, i) ( \
351 : (size == 1) ? (int)GETINT8((cp), (i)) << 24 : \
352 : (size == 2) ? (int)GETINT16((cp), (i)) << 16 : \
353 : (size == 3) ? (int)GETINT24((cp), (i)) << 8 : \
354 : (int)GETINT32((cp), (i)))
355 :
356 : #define SETSAMPLE32(size, cp, i, val) do { \
357 : if (size == 1) \
358 : SETINT8((cp), (i), (val) >> 24); \
359 : else if (size == 2) \
360 : SETINT16((cp), (i), (val) >> 16); \
361 : else if (size == 3) \
362 : SETINT24((cp), (i), (val) >> 8); \
363 : else \
364 : SETINT32((cp), (i), (val)); \
365 : } while(0)
366 :
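/*
 * Editorial note, not in the original source: GETSAMPLE32() left-justifies a
 * sample of any supported width into a signed 32-bit value (for example, the
 * 8-bit sample 0x7F reads as 0x7F000000 and the 16-bit sample 0x7FFF reads
 * as 0x7FFF0000), so the same arithmetic works for every width.
 * SETSAMPLE32() is the inverse and simply drops the low-order bits when
 * writing back to a narrower format.
 */
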
367 : static PyModuleDef audioopmodule;
368 :
369 : typedef struct {
370 : PyObject *AudioopError;
371 : } audioop_state;
372 :
373 : static inline audioop_state *
374 183 : get_audioop_state(PyObject *module)
375 : {
376 183 : void *state = PyModule_GetState(module);
377 183 : assert(state != NULL);
378 183 : return (audioop_state *)state;
379 : }
380 :
381 : static int
382 1322 : audioop_check_size(PyObject *module, int size)
383 : {
384 1322 : if (size < 1 || size > 4) {
385 31 : PyErr_SetString(get_audioop_state(module)->AudioopError,
386 : "Size should be 1, 2, 3 or 4");
387 31 : return 0;
388 : }
389 : else
390 1291 : return 1;
391 : }
392 :
393 : static int
394 866 : audioop_check_parameters(PyObject *module, Py_ssize_t len, int size)
395 : {
396 866 : if (!audioop_check_size(module, size))
397 18 : return 0;
398 848 : if (len % size != 0) {
399 54 : PyErr_SetString(get_audioop_state(module)->AudioopError,
400 : "not a whole number of frames");
401 54 : return 0;
402 : }
403 794 : return 1;
404 : }
405 :
406 : /*[clinic input]
407 : module audioop
408 : [clinic start generated code]*/
409 : /*[clinic end generated code: output=da39a3ee5e6b4b0d input=8fa8f6611be3591a]*/
410 :
411 : /*[clinic input]
412 : audioop.getsample
413 :
414 : fragment: Py_buffer
415 : width: int
416 : index: Py_ssize_t
417 : /
418 :
419 : Return the value of sample index from the fragment.
420 : [clinic start generated code]*/
421 :
422 : static PyObject *
423 32 : audioop_getsample_impl(PyObject *module, Py_buffer *fragment, int width,
424 : Py_ssize_t index)
425 : /*[clinic end generated code: output=8fe1b1775134f39a input=88edbe2871393549]*/
426 : {
427 : int val;
428 :
429 32 : if (!audioop_check_parameters(module, fragment->len, width))
430 4 : return NULL;
431 28 : if (index < 0 || index >= fragment->len/width) {
432 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
433 : "Index out of range");
434 0 : return NULL;
435 : }
436 28 : val = GETRAWSAMPLE(width, fragment->buf, index*width);
437 28 : return PyLong_FromLong(val);
438 : }
439 :
440 : /*[clinic input]
441 : audioop.max
442 :
443 : fragment: Py_buffer
444 : width: int
445 : /
446 :
447 : Return the maximum of the absolute value of all samples in a fragment.
448 : [clinic start generated code]*/
449 :
450 : static PyObject *
451 36 : audioop_max_impl(PyObject *module, Py_buffer *fragment, int width)
452 : /*[clinic end generated code: output=e6c5952714f1c3f0 input=32bea5ea0ac8c223]*/
453 : {
454 : Py_ssize_t i;
455 36 : unsigned int absval, max = 0;
456 :
457 36 : if (!audioop_check_parameters(module, fragment->len, width))
458 4 : return NULL;
459 84 : for (i = 0; i < fragment->len; i += width) {
460 52 : int val = GETRAWSAMPLE(width, fragment->buf, i);
461 : /* Widen to int64_t before negating so that negating INT_MIN is
462 : well-defined, then convert the result to unsigned. */
463 52 : if (val < 0) absval = (unsigned int)-(int64_t)val;
464 28 : else absval = val;
465 52 : if (absval > max) max = absval;
466 : }
467 32 : return PyLong_FromUnsignedLong(max);
468 : }
469 :
470 : /*[clinic input]
471 : audioop.minmax
472 :
473 : fragment: Py_buffer
474 : width: int
475 : /
476 :
477 : Return the minimum and maximum values of all samples in the sound fragment.
478 : [clinic start generated code]*/
479 :
480 : static PyObject *
481 36 : audioop_minmax_impl(PyObject *module, Py_buffer *fragment, int width)
482 : /*[clinic end generated code: output=473fda66b15c836e input=89848e9b927a0696]*/
483 : {
484 : Py_ssize_t i;
485 : /* -1 trick below is needed on Windows to support -0x80000000 without
486 : a warning */
487 36 : int min = 0x7fffffff, max = -0x7FFFFFFF-1;
488 :
489 36 : if (!audioop_check_parameters(module, fragment->len, width))
490 4 : return NULL;
491 84 : for (i = 0; i < fragment->len; i += width) {
492 52 : int val = GETRAWSAMPLE(width, fragment->buf, i);
493 52 : if (val > max) max = val;
494 52 : if (val < min) min = val;
495 : }
496 32 : return Py_BuildValue("(ii)", min, max);
497 : }
498 :
499 : /*[clinic input]
500 : audioop.avg
501 :
502 : fragment: Py_buffer
503 : width: int
504 : /
505 :
506 : Return the average over all samples in the fragment.
507 : [clinic start generated code]*/
508 :
509 : static PyObject *
510 38 : audioop_avg_impl(PyObject *module, Py_buffer *fragment, int width)
511 : /*[clinic end generated code: output=4410a4c12c3586e6 input=1114493c7611334d]*/
512 : {
513 : Py_ssize_t i;
514 : int avg;
515 38 : double sum = 0.0;
516 :
517 38 : if (!audioop_check_parameters(module, fragment->len, width))
518 4 : return NULL;
519 74 : for (i = 0; i < fragment->len; i += width)
520 40 : sum += GETRAWSAMPLE(width, fragment->buf, i);
521 34 : if (fragment->len == 0)
522 12 : avg = 0;
523 : else
524 22 : avg = (int)floor(sum / (double)(fragment->len/width));
525 34 : return PyLong_FromLong(avg);
526 : }
527 :
528 : /*[clinic input]
529 : audioop.rms
530 :
531 : fragment: Py_buffer
532 : width: int
533 : /
534 :
535 : Return the root-mean-square of the fragment, i.e. sqrt(sum(S_i^2)/n).
536 : [clinic start generated code]*/
537 :
538 : static PyObject *
539 32 : audioop_rms_impl(PyObject *module, Py_buffer *fragment, int width)
540 : /*[clinic end generated code: output=1e7871c826445698 input=4cc57c6c94219d78]*/
541 : {
542 : Py_ssize_t i;
543 : unsigned int res;
544 32 : double sum_squares = 0.0;
545 :
546 32 : if (!audioop_check_parameters(module, fragment->len, width))
547 4 : return NULL;
548 496 : for (i = 0; i < fragment->len; i += width) {
549 468 : double val = GETRAWSAMPLE(width, fragment->buf, i);
550 468 : sum_squares += val*val;
551 : }
552 28 : if (fragment->len == 0)
553 12 : res = 0;
554 : else
555 16 : res = (unsigned int)sqrt(sum_squares / (double)(fragment->len/width));
556 28 : return PyLong_FromUnsignedLong(res);
557 : }
558 :
559 65 : static double _sum2(const int16_t *a, const int16_t *b, Py_ssize_t len)
560 : {
561 : Py_ssize_t i;
562 65 : double sum = 0.0;
563 :
564 470 : for( i=0; i<len; i++) {
565 405 : sum = sum + (double)a[i]*(double)b[i];
566 : }
567 65 : return sum;
568 : }
569 :
570 : /*
571 : ** Findfit tries to locate a sample within another sample. Its main use
572 : ** is in echo-cancellation (to find the feedback of the output signal in
573 : ** the input signal).
574 : ** The method used is as follows:
575 : **
576 : ** let R be the reference signal (length n) and A the input signal (length N)
577 : ** with N > n, and let all sums be over i from 0 to n-1.
578 : **
579 : ** Now, for each j in {0..N-n} we compute a factor fj so that fj*R matches A
580 : ** as well as possible, i.e. sum( (A[j+i]-fj*R[i])^2 ) is minimal. This
581 : ** equation gives fj = sum( A[j+i]R[i] ) / sum(R[i]^2).
582 : **
583 : ** Next, we compute the relative distance between the original signal and
584 : ** the modified signal and minimize that over j:
585 : ** vj = sum( (A[j+i]-fj*R[i])^2 ) / sum( A[j+i]^2 ), which up to the constant
586 : ** factor sum(R[i]^2) equals ( sum(A[j+i]^2)*sum(R[i]^2) - sum(A[j+i]R[i])^2 ) / sum( A[j+i]^2 )
587 : **
588 : ** In the code variables correspond as follows:
589 : ** cp1 A
590 : ** cp2 R
591 : ** len1 N
592 : ** len2 n
593 : ** aj_m1 A[j-1]
594 : ** aj_lm1 A[j+n-1]
595 : ** sum_ri_2 sum(R[i]^2)
596 : ** sum_aij_2 sum(A[i+j]^2)
597 : ** sum_aij_ri sum(A[i+j]R[i])
598 : **
599 : ** sum_ri_2 is calculated once, sum_aij_2 is updated each step and sum_aij_ri
600 : ** is completely recalculated each step.
601 : */
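/*
 * Editorial derivation, not part of the original comment: setting the
 * derivative of sum( (A[j+i]-fj*R[i])^2 ) with respect to fj to zero gives
 *     -2 * sum( R[i]*(A[j+i] - fj*R[i]) ) = 0
 *  => fj = sum( A[j+i]*R[i] ) / sum( R[i]^2 ),
 * which is what _sum2(cp1+best_j, cp2, len2) / sum_ri_2 computes at the end
 * of the function below.  The loop minimises vj scaled by the constant
 * sum(R[i]^2), which leaves the minimising j unchanged.
 */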
602 : /*[clinic input]
603 : audioop.findfit
604 :
605 : fragment: Py_buffer
606 : reference: Py_buffer
607 : /
608 :
609 : Try to match reference as well as possible to a portion of fragment.
610 : [clinic start generated code]*/
611 :
612 : static PyObject *
613 5 : audioop_findfit_impl(PyObject *module, Py_buffer *fragment,
614 : Py_buffer *reference)
615 : /*[clinic end generated code: output=5752306d83cbbada input=62c305605e183c9a]*/
616 : {
617 : const int16_t *cp1, *cp2;
618 : Py_ssize_t len1, len2;
619 : Py_ssize_t j, best_j;
620 : double aj_m1, aj_lm1;
621 : double sum_ri_2, sum_aij_2, sum_aij_ri, result, best_result, factor;
622 :
623 5 : if (fragment->len & 1 || reference->len & 1) {
624 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
625 : "Strings should be even-sized");
626 0 : return NULL;
627 : }
628 5 : cp1 = (const int16_t *)fragment->buf;
629 5 : len1 = fragment->len >> 1;
630 5 : cp2 = (const int16_t *)reference->buf;
631 5 : len2 = reference->len >> 1;
632 :
633 5 : if (len1 < len2) {
634 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
635 : "First sample should be longer");
636 0 : return NULL;
637 : }
638 5 : sum_ri_2 = _sum2(cp2, cp2, len2);
639 5 : sum_aij_2 = _sum2(cp1, cp1, len2);
640 5 : sum_aij_ri = _sum2(cp1, cp2, len2);
641 :
642 5 : result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri) / sum_aij_2;
643 :
644 5 : best_result = result;
645 5 : best_j = 0;
646 :
647 39 : for ( j=1; j<=len1-len2; j++) {
648 34 : aj_m1 = (double)cp1[j-1];
649 34 : aj_lm1 = (double)cp1[j+len2-1];
650 :
651 34 : sum_aij_2 = sum_aij_2 + aj_lm1*aj_lm1 - aj_m1*aj_m1;
652 34 : sum_aij_ri = _sum2(cp1+j, cp2, len2);
653 :
654 34 : result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri)
655 : / sum_aij_2;
656 :
657 34 : if ( result < best_result ) {
658 2 : best_result = result;
659 2 : best_j = j;
660 : }
661 :
662 : }
663 :
664 5 : factor = _sum2(cp1+best_j, cp2, len2) / sum_ri_2;
665 :
666 5 : return Py_BuildValue("(nf)", best_j, factor);
667 : }
668 :
669 : /*
670 : ** findfactor finds a factor f so that the energy in A-fB is minimal.
671 : ** See the comment for findfit for details.
672 : */
673 : /*[clinic input]
674 : audioop.findfactor
675 :
676 : fragment: Py_buffer
677 : reference: Py_buffer
678 : /
679 :
680 : Return a factor F such that rms(add(fragment, mul(reference, -F))) is minimal.
681 : [clinic start generated code]*/
682 :
683 : static PyObject *
684 4 : audioop_findfactor_impl(PyObject *module, Py_buffer *fragment,
685 : Py_buffer *reference)
686 : /*[clinic end generated code: output=14ea95652c1afcf8 input=816680301d012b21]*/
687 : {
688 : const int16_t *cp1, *cp2;
689 : Py_ssize_t len;
690 : double sum_ri_2, sum_aij_ri, result;
691 :
692 4 : if (fragment->len & 1 || reference->len & 1) {
693 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
694 : "Strings should be even-sized");
695 0 : return NULL;
696 : }
697 4 : if (fragment->len != reference->len) {
698 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
699 : "Samples should be same size");
700 0 : return NULL;
701 : }
702 4 : cp1 = (const int16_t *)fragment->buf;
703 4 : cp2 = (const int16_t *)reference->buf;
704 4 : len = fragment->len >> 1;
705 4 : sum_ri_2 = _sum2(cp2, cp2, len);
706 4 : sum_aij_ri = _sum2(cp1, cp2, len);
707 :
708 4 : result = sum_aij_ri / sum_ri_2;
709 :
710 4 : return PyFloat_FromDouble(result);
711 : }
712 :
713 : /*
714 : ** findmax returns the index of the n-sized segment of the input sample
715 : ** that contains the most energy.
716 : */
717 : /*[clinic input]
718 : audioop.findmax
719 :
720 : fragment: Py_buffer
721 : length: Py_ssize_t
722 : /
723 :
724 : Search fragment for a slice of the specified number of samples with maximum energy.
725 : [clinic start generated code]*/
726 :
727 : static PyObject *
728 4 : audioop_findmax_impl(PyObject *module, Py_buffer *fragment,
729 : Py_ssize_t length)
730 : /*[clinic end generated code: output=f008128233523040 input=2f304801ed42383c]*/
731 : {
732 : const int16_t *cp1;
733 : Py_ssize_t len1;
734 : Py_ssize_t j, best_j;
735 : double aj_m1, aj_lm1;
736 : double result, best_result;
737 :
738 4 : if (fragment->len & 1) {
739 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
740 : "Strings should be even-sized");
741 0 : return NULL;
742 : }
743 4 : cp1 = (const int16_t *)fragment->buf;
744 4 : len1 = fragment->len >> 1;
745 :
746 4 : if (length < 0 || len1 < length) {
747 1 : PyErr_SetString(get_audioop_state(module)->AudioopError,
748 : "Input sample should be longer");
749 1 : return NULL;
750 : }
751 :
752 3 : result = _sum2(cp1, cp1, length);
753 :
754 3 : best_result = result;
755 3 : best_j = 0;
756 :
757 21 : for ( j=1; j<=len1-length; j++) {
758 18 : aj_m1 = (double)cp1[j-1];
759 18 : aj_lm1 = (double)cp1[j+length-1];
760 :
761 18 : result = result + aj_lm1*aj_lm1 - aj_m1*aj_m1;
762 :
763 18 : if ( result > best_result ) {
764 12 : best_result = result;
765 12 : best_j = j;
766 : }
767 :
768 : }
769 :
770 3 : return PyLong_FromSsize_t(best_j);
771 : }
772 :
773 : /*[clinic input]
774 : audioop.avgpp
775 :
776 : fragment: Py_buffer
777 : width: int
778 : /
779 :
780 : Return the average peak-peak value over all samples in the fragment.
781 : [clinic start generated code]*/
782 :
783 : static PyObject *
784 28 : audioop_avgpp_impl(PyObject *module, Py_buffer *fragment, int width)
785 : /*[clinic end generated code: output=269596b0d5ae0b2b input=0b3cceeae420a7d9]*/
786 : {
787 : Py_ssize_t i;
788 28 : int prevval, prevextremevalid = 0, prevextreme = 0;
789 28 : double sum = 0.0;
790 : unsigned int avg;
791 28 : int diff, prevdiff, nextreme = 0;
792 :
793 28 : if (!audioop_check_parameters(module, fragment->len, width))
794 4 : return NULL;
795 24 : if (fragment->len <= width)
796 12 : return PyLong_FromLong(0);
797 12 : prevval = GETRAWSAMPLE(width, fragment->buf, 0);
798 12 : prevdiff = 17; /* Anything != 0, 1 */
799 452 : for (i = width; i < fragment->len; i += width) {
800 440 : int val = GETRAWSAMPLE(width, fragment->buf, i);
801 440 : if (val != prevval) {
802 436 : diff = val < prevval;
803 436 : if (prevdiff == !diff) {
804 : /* Derivative changed sign. Compute difference to last
805 : ** extreme value and remember.
806 : */
807 24 : if (prevextremevalid) {
808 16 : if (prevval < prevextreme)
809 12 : sum += (double)((unsigned int)prevextreme -
810 12 : (unsigned int)prevval);
811 : else
812 4 : sum += (double)((unsigned int)prevval -
813 4 : (unsigned int)prevextreme);
814 16 : nextreme++;
815 : }
816 24 : prevextremevalid = 1;
817 24 : prevextreme = prevval;
818 : }
819 436 : prevval = val;
820 436 : prevdiff = diff;
821 : }
822 : }
823 12 : if ( nextreme == 0 )
824 4 : avg = 0;
825 : else
826 8 : avg = (unsigned int)(sum / (double)nextreme);
827 12 : return PyLong_FromUnsignedLong(avg);
828 : }
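
/*
 * Editorial worked example, not part of the module, traced by hand through
 * the loop above: for the sample values 0, 4, 2, 6, 1 the detected extremes
 * are 4 (local max), 2 (local min) and 6 (local max), giving peak-peak
 * differences |4-2| = 2 and |2-6| = 4, so avgpp() returns (2+4)/2 = 3.
 * maxpp() below returns 4 for the same data.
 */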
829 :
830 : /*[clinic input]
831 : audioop.maxpp
832 :
833 : fragment: Py_buffer
834 : width: int
835 : /
836 :
837 : Return the maximum peak-peak value in the sound fragment.
838 : [clinic start generated code]*/
839 :
840 : static PyObject *
841 28 : audioop_maxpp_impl(PyObject *module, Py_buffer *fragment, int width)
842 : /*[clinic end generated code: output=5b918ed5dbbdb978 input=671a13e1518f80a1]*/
843 : {
844 : Py_ssize_t i;
845 28 : int prevval, prevextremevalid = 0, prevextreme = 0;
846 28 : unsigned int max = 0, extremediff;
847 : int diff, prevdiff;
848 :
849 28 : if (!audioop_check_parameters(module, fragment->len, width))
850 4 : return NULL;
851 24 : if (fragment->len <= width)
852 12 : return PyLong_FromLong(0);
853 12 : prevval = GETRAWSAMPLE(width, fragment->buf, 0);
854 12 : prevdiff = 17; /* Anything != 0, 1 */
855 452 : for (i = width; i < fragment->len; i += width) {
856 440 : int val = GETRAWSAMPLE(width, fragment->buf, i);
857 440 : if (val != prevval) {
858 436 : diff = val < prevval;
859 436 : if (prevdiff == !diff) {
860 : /* Derivative changed sign. Compute difference to
861 : ** last extreme value and remember.
862 : */
863 24 : if (prevextremevalid) {
864 16 : if (prevval < prevextreme)
865 12 : extremediff = (unsigned int)prevextreme -
866 12 : (unsigned int)prevval;
867 : else
868 4 : extremediff = (unsigned int)prevval -
869 4 : (unsigned int)prevextreme;
870 16 : if ( extremediff > max )
871 16 : max = extremediff;
872 : }
873 24 : prevextremevalid = 1;
874 24 : prevextreme = prevval;
875 : }
876 436 : prevval = val;
877 436 : prevdiff = diff;
878 : }
879 : }
880 12 : return PyLong_FromUnsignedLong(max);
881 : }
882 :
883 : /*[clinic input]
884 : audioop.cross
885 :
886 : fragment: Py_buffer
887 : width: int
888 : /
889 :
890 : Return the number of zero crossings in the fragment passed as an argument.
891 : [clinic start generated code]*/
892 :
893 : static PyObject *
894 36 : audioop_cross_impl(PyObject *module, Py_buffer *fragment, int width)
895 : /*[clinic end generated code: output=5938dcdd74a1f431 input=b1b3f15b83f6b41a]*/
896 : {
897 : Py_ssize_t i;
898 : int prevval;
899 : Py_ssize_t ncross;
900 :
901 36 : if (!audioop_check_parameters(module, fragment->len, width))
902 4 : return NULL;
903 32 : ncross = -1;
904 32 : prevval = 17; /* Anything != 0, 1 */
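    /* Editorial note, not in the original source: the sentinel 17 makes the
       first sample always count as a "change", which cancels the initial
       ncross = -1, so only genuine sign changes are counted after that. */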
905 92 : for (i = 0; i < fragment->len; i += width) {
906 60 : int val = GETRAWSAMPLE(width, fragment->buf, i) < 0;
907 60 : if (val != prevval) ncross++;
908 60 : prevval = val;
909 : }
910 32 : return PyLong_FromSsize_t(ncross);
911 : }
912 :
913 : /*[clinic input]
914 : audioop.mul
915 :
916 : fragment: Py_buffer
917 : width: int
918 : factor: double
919 : /
920 :
921 : Return a fragment that has all samples in the original fragment multiplied by the floating-point value factor.
922 : [clinic start generated code]*/
923 :
924 : static PyObject *
925 28 : audioop_mul_impl(PyObject *module, Py_buffer *fragment, int width,
926 : double factor)
927 : /*[clinic end generated code: output=6cd48fe796da0ea4 input=c726667baa157d3c]*/
928 : {
929 : signed char *ncp;
930 : Py_ssize_t i;
931 : double maxval, minval;
932 : PyObject *rv;
933 :
934 28 : if (!audioop_check_parameters(module, fragment->len, width))
935 4 : return NULL;
936 :
937 24 : maxval = (double) maxvals[width];
938 24 : minval = (double) minvals[width];
939 :
940 24 : rv = PyBytes_FromStringAndSize(NULL, fragment->len);
941 24 : if (rv == NULL)
942 0 : return NULL;
943 24 : ncp = (signed char *)PyBytes_AsString(rv);
944 :
945 108 : for (i = 0; i < fragment->len; i += width) {
946 84 : double val = GETRAWSAMPLE(width, fragment->buf, i);
947 84 : int ival = fbound(val * factor, minval, maxval);
948 84 : SETRAWSAMPLE(width, ncp, i, ival);
949 : }
950 24 : return rv;
951 : }
952 :
953 : /*[clinic input]
954 : audioop.tomono
955 :
956 : fragment: Py_buffer
957 : width: int
958 : lfactor: double
959 : rfactor: double
960 : /
961 :
962 : Convert a stereo fragment to a mono fragment.
963 : [clinic start generated code]*/
964 :
965 : static PyObject *
966 24 : audioop_tomono_impl(PyObject *module, Py_buffer *fragment, int width,
967 : double lfactor, double rfactor)
968 : /*[clinic end generated code: output=235c8277216d4e4e input=c4ec949b3f4dddfa]*/
969 : {
970 : signed char *cp, *ncp;
971 : Py_ssize_t len, i;
972 : double maxval, minval;
973 : PyObject *rv;
974 :
975 24 : cp = fragment->buf;
976 24 : len = fragment->len;
977 24 : if (!audioop_check_parameters(module, len, width))
978 4 : return NULL;
979 20 : if (((len / width) & 1) != 0) {
980 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
981 : "not a whole number of frames");
982 0 : return NULL;
983 : }
984 :
985 20 : maxval = (double) maxvals[width];
986 20 : minval = (double) minvals[width];
987 :
988 20 : rv = PyBytes_FromStringAndSize(NULL, len/2);
989 20 : if (rv == NULL)
990 0 : return NULL;
991 20 : ncp = (signed char *)PyBytes_AsString(rv);
992 :
993 160 : for (i = 0; i < len; i += width*2) {
994 140 : double val1 = GETRAWSAMPLE(width, cp, i);
995 140 : double val2 = GETRAWSAMPLE(width, cp, i + width);
996 140 : double val = val1 * lfactor + val2 * rfactor;
997 140 : int ival = fbound(val, minval, maxval);
998 140 : SETRAWSAMPLE(width, ncp, i/2, ival);
999 : }
1000 20 : return rv;
1001 : }
1002 :
1003 : /*[clinic input]
1004 : audioop.tostereo
1005 :
1006 : fragment: Py_buffer
1007 : width: int
1008 : lfactor: double
1009 : rfactor: double
1010 : /
1011 :
1012 : Generate a stereo fragment from a mono fragment.
1013 : [clinic start generated code]*/
1014 :
1015 : static PyObject *
1016 24 : audioop_tostereo_impl(PyObject *module, Py_buffer *fragment, int width,
1017 : double lfactor, double rfactor)
1018 : /*[clinic end generated code: output=046f13defa5f1595 input=27b6395ebfdff37a]*/
1019 : {
1020 : signed char *ncp;
1021 : Py_ssize_t i;
1022 : double maxval, minval;
1023 : PyObject *rv;
1024 :
1025 24 : if (!audioop_check_parameters(module, fragment->len, width))
1026 4 : return NULL;
1027 :
1028 20 : maxval = (double) maxvals[width];
1029 20 : minval = (double) minvals[width];
1030 :
1031 20 : if (fragment->len > PY_SSIZE_T_MAX/2) {
1032 0 : PyErr_SetString(PyExc_MemoryError,
1033 : "not enough memory for output buffer");
1034 0 : return NULL;
1035 : }
1036 :
1037 20 : rv = PyBytes_FromStringAndSize(NULL, fragment->len*2);
1038 20 : if (rv == NULL)
1039 0 : return NULL;
1040 20 : ncp = (signed char *)PyBytes_AsString(rv);
1041 :
1042 160 : for (i = 0; i < fragment->len; i += width) {
1043 140 : double val = GETRAWSAMPLE(width, fragment->buf, i);
1044 140 : int val1 = fbound(val * lfactor, minval, maxval);
1045 140 : int val2 = fbound(val * rfactor, minval, maxval);
1046 140 : SETRAWSAMPLE(width, ncp, i*2, val1);
1047 140 : SETRAWSAMPLE(width, ncp, i*2 + width, val2);
1048 : }
1049 20 : return rv;
1050 : }
1051 :
1052 : /*[clinic input]
1053 : audioop.add
1054 :
1055 : fragment1: Py_buffer
1056 : fragment2: Py_buffer
1057 : width: int
1058 : /
1059 :
1060 : Return a fragment which is the addition of the two samples passed as parameters.
1061 : [clinic start generated code]*/
1062 :
1063 : static PyObject *
1064 24 : audioop_add_impl(PyObject *module, Py_buffer *fragment1,
1065 : Py_buffer *fragment2, int width)
1066 : /*[clinic end generated code: output=60140af4d1aab6f2 input=4a8d4bae4c1605c7]*/
1067 : {
1068 : signed char *ncp;
1069 : Py_ssize_t i;
1070 : int minval, maxval, newval;
1071 : PyObject *rv;
1072 :
1073 24 : if (!audioop_check_parameters(module, fragment1->len, width))
1074 4 : return NULL;
1075 20 : if (fragment1->len != fragment2->len) {
1076 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
1077 : "Lengths should be the same");
1078 0 : return NULL;
1079 : }
1080 :
1081 20 : maxval = maxvals[width];
1082 20 : minval = minvals[width];
1083 :
1084 20 : rv = PyBytes_FromStringAndSize(NULL, fragment1->len);
1085 20 : if (rv == NULL)
1086 0 : return NULL;
1087 20 : ncp = (signed char *)PyBytes_AsString(rv);
1088 :
1089 76 : for (i = 0; i < fragment1->len; i += width) {
1090 56 : int val1 = GETRAWSAMPLE(width, fragment1->buf, i);
1091 56 : int val2 = GETRAWSAMPLE(width, fragment2->buf, i);
1092 :
1093 56 : if (width < 4) {
1094 42 : newval = val1 + val2;
1095 : /* truncate in case of overflow */
1096 42 : if (newval > maxval)
1097 6 : newval = maxval;
1098 36 : else if (newval < minval)
1099 6 : newval = minval;
1100 : }
1101 : else {
1102 14 : double fval = (double)val1 + (double)val2;
1103 : /* truncate in case of overflow */
1104 14 : newval = fbound(fval, minval, maxval);
1105 : }
1106 :
1107 56 : SETRAWSAMPLE(width, ncp, i, newval);
1108 : }
1109 20 : return rv;
1110 : }
1111 :
1112 : /*[clinic input]
1113 : audioop.bias
1114 :
1115 : fragment: Py_buffer
1116 : width: int
1117 : bias: int
1118 : /
1119 :
1120 : Return a fragment that is the original fragment with a bias added to each sample.
1121 : [clinic start generated code]*/
1122 :
1123 : static PyObject *
1124 104 : audioop_bias_impl(PyObject *module, Py_buffer *fragment, int width, int bias)
1125 : /*[clinic end generated code: output=6e0aa8f68f045093 input=2b5cce5c3bb4838c]*/
1126 : {
1127 : signed char *ncp;
1128 : Py_ssize_t i;
1129 104 : unsigned int val = 0, mask;
1130 : PyObject *rv;
1131 :
1132 104 : if (!audioop_check_parameters(module, fragment->len, width))
1133 4 : return NULL;
1134 :
1135 100 : rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1136 100 : if (rv == NULL)
1137 0 : return NULL;
1138 100 : ncp = (signed char *)PyBytes_AsString(rv);
1139 :
1140 100 : mask = masks[width];
1141 :
1142 212 : for (i = 0; i < fragment->len; i += width) {
1143 112 : if (width == 1)
1144 28 : val = GETINTX(unsigned char, fragment->buf, i);
1145 84 : else if (width == 2)
1146 28 : val = GETINTX(uint16_t, fragment->buf, i);
1147 56 : else if (width == 3)
1148 28 : val = ((unsigned int)GETINT24(fragment->buf, i)) & 0xffffffu;
1149 : else {
1150 28 : assert(width == 4);
1151 28 : val = GETINTX(uint32_t, fragment->buf, i);
1152 : }
1153 :
1154 112 : val += (unsigned int)bias;
1155 : /* wrap around in case of overflow */
1156 112 : val &= mask;
1157 :
1158 112 : if (width == 1)
1159 28 : SETINTX(unsigned char, ncp, i, val);
1160 84 : else if (width == 2)
1161 28 : SETINTX(uint16_t, ncp, i, val);
1162 56 : else if (width == 3)
1163 28 : SETINT24(ncp, i, (int)val);
1164 : else {
1165 28 : assert(width == 4);
1166 28 : SETINTX(uint32_t, ncp, i, val);
1167 : }
1168 : }
1169 100 : return rv;
1170 : }
1171 :
1172 : /*[clinic input]
1173 : audioop.reverse
1174 :
1175 : fragment: Py_buffer
1176 : width: int
1177 : /
1178 :
1179 : Reverse the samples in a fragment and return the modified fragment.
1180 : [clinic start generated code]*/
1181 :
1182 : static PyObject *
1183 20 : audioop_reverse_impl(PyObject *module, Py_buffer *fragment, int width)
1184 : /*[clinic end generated code: output=b44135698418da14 input=668f890cf9f9d225]*/
1185 : {
1186 : unsigned char *ncp;
1187 : Py_ssize_t i;
1188 : PyObject *rv;
1189 :
1190 20 : if (!audioop_check_parameters(module, fragment->len, width))
1191 4 : return NULL;
1192 :
1193 16 : rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1194 16 : if (rv == NULL)
1195 0 : return NULL;
1196 16 : ncp = (unsigned char *)PyBytes_AsString(rv);
1197 :
1198 28 : for (i = 0; i < fragment->len; i += width) {
1199 12 : int val = GETRAWSAMPLE(width, fragment->buf, i);
1200 12 : SETRAWSAMPLE(width, ncp, fragment->len - i - width, val);
1201 : }
1202 16 : return rv;
1203 : }
1204 :
1205 : /*[clinic input]
1206 : audioop.byteswap
1207 :
1208 : fragment: Py_buffer
1209 : width: int
1210 : /
1211 :
1212 : Convert big-endian samples to little-endian and vice versa.
1213 : [clinic start generated code]*/
1214 :
1215 : static PyObject *
1216 23 : audioop_byteswap_impl(PyObject *module, Py_buffer *fragment, int width)
1217 : /*[clinic end generated code: output=50838a9e4b87cd4d input=fae7611ceffa5c82]*/
1218 : {
1219 : unsigned char *ncp;
1220 : Py_ssize_t i;
1221 : PyObject *rv;
1222 :
1223 23 : if (!audioop_check_parameters(module, fragment->len, width))
1224 0 : return NULL;
1225 :
1226 23 : rv = PyBytes_FromStringAndSize(NULL, fragment->len);
1227 23 : if (rv == NULL)
1228 0 : return NULL;
1229 23 : ncp = (unsigned char *)PyBytes_AsString(rv);
1230 :
1231 423 : for (i = 0; i < fragment->len; i += width) {
1232 : int j;
1233 1256 : for (j = 0; j < width; j++)
1234 856 : ncp[i + width - 1 - j] = ((unsigned char *)fragment->buf)[i + j];
1235 : }
1236 23 : return rv;
1237 : }
1238 :
1239 : /*[clinic input]
1240 : audioop.lin2lin
1241 :
1242 : fragment: Py_buffer
1243 : width: int
1244 : newwidth: int
1245 : /
1246 :
1247 : Convert samples between 1-, 2-, 3- and 4-byte formats.
1248 : [clinic start generated code]*/
1249 :
1250 : static PyObject *
1251 28 : audioop_lin2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1252 : int newwidth)
1253 : /*[clinic end generated code: output=17b14109248f1d99 input=5ce08c8aa2f24d96]*/
1254 : {
1255 : unsigned char *ncp;
1256 : Py_ssize_t i, j;
1257 : PyObject *rv;
1258 :
1259 28 : if (!audioop_check_parameters(module, fragment->len, width))
1260 4 : return NULL;
1261 24 : if (!audioop_check_size(module, newwidth))
1262 0 : return NULL;
1263 :
1264 24 : if (fragment->len/width > PY_SSIZE_T_MAX/newwidth) {
1265 0 : PyErr_SetString(PyExc_MemoryError,
1266 : "not enough memory for output buffer");
1267 0 : return NULL;
1268 : }
1269 24 : rv = PyBytes_FromStringAndSize(NULL, (fragment->len/width)*newwidth);
1270 24 : if (rv == NULL)
1271 0 : return NULL;
1272 24 : ncp = (unsigned char *)PyBytes_AsString(rv);
1273 :
1274 192 : for (i = j = 0; i < fragment->len; i += width, j += newwidth) {
1275 168 : int val = GETSAMPLE32(width, fragment->buf, i);
1276 168 : SETSAMPLE32(newwidth, ncp, j, val);
1277 : }
1278 24 : return rv;
1279 : }
1280 :
1281 : static int
1282 144 : gcd(int a, int b)
1283 : {
1284 262 : while (b > 0) {
1285 118 : int tmp = a % b;
1286 118 : a = b;
1287 118 : b = tmp;
1288 : }
1289 144 : return a;
1290 : }
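
/*
 * Editorial note, not in the original source: gcd() is used below to reduce
 * the rate and weight pairs, e.g. gcd(8000, 44100) == 100, so an
 * 8000 Hz -> 44100 Hz conversion runs internally with the reduced ratio
 * 80 : 441.
 */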
1291 :
1292 : /*[clinic input]
1293 : audioop.ratecv
1294 :
1295 : fragment: Py_buffer
1296 : width: int
1297 : nchannels: int
1298 : inrate: int
1299 : outrate: int
1300 : state: object
1301 : weightA: int = 1
1302 : weightB: int = 0
1303 : /
1304 :
1305 : Convert the frame rate of the input fragment.
1306 : [clinic start generated code]*/
1307 :
1308 : static PyObject *
1309 76 : audioop_ratecv_impl(PyObject *module, Py_buffer *fragment, int width,
1310 : int nchannels, int inrate, int outrate, PyObject *state,
1311 : int weightA, int weightB)
1312 : /*[clinic end generated code: output=624038e843243139 input=aff3acdc94476191]*/
1313 : {
1314 : char *cp, *ncp;
1315 : Py_ssize_t len;
1316 : int chan, d, *prev_i, *cur_i, cur_o;
1317 76 : PyObject *samps, *str, *rv = NULL, *channel;
1318 : int bytes_per_frame;
1319 :
1320 76 : if (!audioop_check_size(module, width))
1321 1 : return NULL;
1322 75 : if (nchannels < 1) {
1323 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
1324 : "# of channels should be >= 1");
1325 0 : return NULL;
1326 : }
1327 75 : if (width > INT_MAX / nchannels) {
1328 : /* This overflow test is rigorously correct because
1329 : both multiplicands are >= 1. Use the argument names
1330 : from the docs for the error msg. */
1331 0 : PyErr_SetString(PyExc_OverflowError,
1332 : "width * nchannels too big for a C int");
1333 0 : return NULL;
1334 : }
1335 75 : bytes_per_frame = width * nchannels;
1336 75 : if (weightA < 1 || weightB < 0) {
1337 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
1338 : "weightA should be >= 1, weightB should be >= 0");
1339 0 : return NULL;
1340 : }
1341 75 : assert(fragment->len >= 0);
1342 75 : if (fragment->len % bytes_per_frame != 0) {
1343 3 : PyErr_SetString(get_audioop_state(module)->AudioopError,
1344 : "not a whole number of frames");
1345 3 : return NULL;
1346 : }
1347 72 : if (inrate <= 0 || outrate <= 0) {
1348 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
1349 : "sampling rate not > 0");
1350 0 : return NULL;
1351 : }
1352 : /* divide inrate and outrate by their greatest common divisor */
1353 72 : d = gcd(inrate, outrate);
1354 72 : inrate /= d;
1355 72 : outrate /= d;
1356 : /* divide weightA and weightB by their greatest common divisor */
1357 72 : d = gcd(weightA, weightB);
1358 72 : weightA /= d;
1359 72 : weightB /= d;
1360 :
1361 72 : if ((size_t)nchannels > SIZE_MAX/sizeof(int)) {
1362 0 : PyErr_SetString(PyExc_MemoryError,
1363 : "not enough memory for output buffer");
1364 0 : return NULL;
1365 : }
1366 72 : prev_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1367 72 : cur_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1368 72 : if (prev_i == NULL || cur_i == NULL) {
1369 0 : (void) PyErr_NoMemory();
1370 0 : goto exit;
1371 : }
1372 :
1373 72 : len = fragment->len / bytes_per_frame; /* # of frames */
1374 :
1375 72 : if (state == Py_None) {
1376 45 : d = -outrate;
1377 106 : for (chan = 0; chan < nchannels; chan++)
1378 61 : prev_i[chan] = cur_i[chan] = 0;
1379 : }
1380 : else {
1381 27 : if (!PyTuple_Check(state)) {
1382 1 : PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1383 1 : goto exit;
1384 : }
1385 26 : if (!PyArg_ParseTuple(state,
1386 : "iO!;ratecv(): illegal state argument",
1387 : &d, &PyTuple_Type, &samps))
1388 0 : goto exit;
1389 26 : if (PyTuple_Size(samps) != nchannels) {
1390 0 : PyErr_SetString(get_audioop_state(module)->AudioopError,
1391 : "illegal state argument");
1392 0 : goto exit;
1393 : }
1394 51 : for (chan = 0; chan < nchannels; chan++) {
1395 26 : channel = PyTuple_GetItem(samps, chan);
1396 26 : if (!PyTuple_Check(channel)) {
1397 1 : PyErr_SetString(PyExc_TypeError,
1398 : "ratecv(): illegal state argument");
1399 1 : goto exit;
1400 : }
1401 25 : if (!PyArg_ParseTuple(channel,
1402 : "ii;ratecv(): illegal state argument",
1403 25 : &prev_i[chan], &cur_i[chan]))
1404 : {
1405 0 : goto exit;
1406 : }
1407 : }
1408 : }
1409 :
1410 : /* str <- Space for the output buffer. */
1411 70 : if (len == 0)
1412 20 : str = PyBytes_FromStringAndSize(NULL, 0);
1413 : else {
1414 : /* There are len input frames, so we need (mathematically)
1415 : ceiling(len*outrate/inrate) output frames, and each frame
1416 : requires bytes_per_frame bytes. Computing this
1417 : without spurious overflow is the challenge; we can
1418 : settle for a reasonable upper bound, though, in this
1419 : case ceiling(len/inrate) * outrate. */
1420 :
1421 : /* compute ceiling(len/inrate) without overflow */
1422 50 : Py_ssize_t q = 1 + (len - 1) / inrate;
1423 50 : if (outrate > PY_SSIZE_T_MAX / q / bytes_per_frame)
1424 0 : str = NULL;
1425 : else
1426 50 : str = PyBytes_FromStringAndSize(NULL,
1427 50 : q * outrate * bytes_per_frame);
1428 : }
1429 70 : if (str == NULL) {
1430 0 : PyErr_SetString(PyExc_MemoryError,
1431 : "not enough memory for output buffer");
1432 0 : goto exit;
1433 : }
1434 70 : ncp = PyBytes_AsString(str);
1435 70 : cp = fragment->buf;
1436 :
1437 : for (;;) {
1438 418 : while (d < 0) {
1439 244 : if (len == 0) {
1440 70 : samps = PyTuple_New(nchannels);
1441 70 : if (samps == NULL)
1442 0 : goto exit;
1443 156 : for (chan = 0; chan < nchannels; chan++)
1444 86 : PyTuple_SetItem(samps, chan,
1445 : Py_BuildValue("(ii)",
1446 86 : prev_i[chan],
1447 86 : cur_i[chan]));
1448 70 : if (PyErr_Occurred())
1449 0 : goto exit;
1450 : /* We have checked before that the length
1451 : * of the string fits into int. */
1452 70 : len = (Py_ssize_t)(ncp - PyBytes_AsString(str));
1453 70 : rv = PyBytes_FromStringAndSize
1454 70 : (PyBytes_AsString(str), len);
1455 70 : Py_DECREF(str);
1456 70 : str = rv;
1457 70 : if (str == NULL)
1458 0 : goto exit;
1459 70 : rv = Py_BuildValue("(O(iO))", str, d, samps);
1460 70 : Py_DECREF(samps);
1461 70 : Py_DECREF(str);
1462 70 : goto exit; /* return rv */
1463 : }
1464 348 : for (chan = 0; chan < nchannels; chan++) {
1465 174 : prev_i[chan] = cur_i[chan];
1466 174 : cur_i[chan] = GETSAMPLE32(width, cp, 0);
1467 174 : cp += width;
1468 : /* implements a simple digital filter */
1469 174 : cur_i[chan] = (int)(
1470 174 : ((double)weightA * (double)cur_i[chan] +
1471 174 : (double)weightB * (double)prev_i[chan]) /
1472 174 : ((double)weightA + (double)weightB));
1473 : }
1474 174 : len--;
1475 174 : d += outrate;
1476 : }
1477 401 : while (d >= 0) {
1478 454 : for (chan = 0; chan < nchannels; chan++) {
1479 227 : cur_o = (int)(((double)prev_i[chan] * (double)d +
1480 227 : (double)cur_i[chan] * (double)(outrate - d)) /
1481 227 : (double)outrate);
1482 227 : SETSAMPLE32(width, ncp, 0, cur_o);
1483 227 : ncp += width;
1484 : }
1485 227 : d -= inrate;
1486 : }
1487 : }
1488 72 : exit:
1489 72 : PyMem_Free(prev_i);
1490 72 : PyMem_Free(cur_i);
1491 72 : return rv;
1492 : }
1493 :
1494 : /*[clinic input]
1495 : audioop.lin2ulaw
1496 :
1497 : fragment: Py_buffer
1498 : width: int
1499 : /
1500 :
1501 : Convert samples in the audio fragment to u-LAW encoding.
1502 : [clinic start generated code]*/
1503 :
1504 : static PyObject *
1505 200 : audioop_lin2ulaw_impl(PyObject *module, Py_buffer *fragment, int width)
1506 : /*[clinic end generated code: output=14fb62b16fe8ea8e input=2450d1b870b6bac2]*/
1507 : {
1508 : unsigned char *ncp;
1509 : Py_ssize_t i;
1510 : PyObject *rv;
1511 :
1512 200 : if (!audioop_check_parameters(module, fragment->len, width))
1513 4 : return NULL;
1514 :
1515 196 : rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1516 196 : if (rv == NULL)
1517 0 : return NULL;
1518 196 : ncp = (unsigned char *)PyBytes_AsString(rv);
1519 :
1520 16439 : for (i = 0; i < fragment->len; i += width) {
1521 16243 : int val = GETSAMPLE32(width, fragment->buf, i);
1522 16243 : *ncp++ = st_14linear2ulaw(val >> 18);
1523 : }
1524 196 : return rv;
1525 : }
1526 :
1527 : /*[clinic input]
1528 : audioop.ulaw2lin
1529 :
1530 : fragment: Py_buffer
1531 : width: int
1532 : /
1533 :
1534 : Convert sound fragments in u-LAW encoding to linearly encoded sound fragments.
1535 : [clinic start generated code]*/
1536 :
1537 : static PyObject *
1538 219 : audioop_ulaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1539 : /*[clinic end generated code: output=378356b047521ba2 input=45d53ddce5be7d06]*/
1540 : {
1541 : unsigned char *cp;
1542 : signed char *ncp;
1543 : Py_ssize_t i;
1544 : PyObject *rv;
1545 :
1546 219 : if (!audioop_check_size(module, width))
1547 4 : return NULL;
1548 :
1549 215 : if (fragment->len > PY_SSIZE_T_MAX/width) {
1550 0 : PyErr_SetString(PyExc_MemoryError,
1551 : "not enough memory for output buffer");
1552 0 : return NULL;
1553 : }
1554 215 : rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1555 215 : if (rv == NULL)
1556 0 : return NULL;
1557 215 : ncp = (signed char *)PyBytes_AsString(rv);
1558 :
1559 215 : cp = fragment->buf;
1560 43148 : for (i = 0; i < fragment->len*width; i += width) {
1561 42933 : int val = st_ulaw2linear16(*cp++) << 16;
1562 42933 : SETSAMPLE32(width, ncp, i, val);
1563 : }
1564 215 : return rv;
1565 : }
1566 :
1567 : /*[clinic input]
1568 : audioop.lin2alaw
1569 :
1570 : fragment: Py_buffer
1571 : width: int
1572 : /
1573 :
1574 : Convert samples in the audio fragment to a-LAW encoding.
1575 : [clinic start generated code]*/
1576 :
1577 : static PyObject *
1578 106 : audioop_lin2alaw_impl(PyObject *module, Py_buffer *fragment, int width)
1579 : /*[clinic end generated code: output=d076f130121a82f0 input=ffb1ef8bb39da945]*/
1580 : {
1581 : unsigned char *ncp;
1582 : Py_ssize_t i;
1583 : PyObject *rv;
1584 :
1585 106 : if (!audioop_check_parameters(module, fragment->len, width))
1586 4 : return NULL;
1587 :
1588 102 : rv = PyBytes_FromStringAndSize(NULL, fragment->len/width);
1589 102 : if (rv == NULL)
1590 0 : return NULL;
1591 102 : ncp = (unsigned char *)PyBytes_AsString(rv);
1592 :
1593 8582 : for (i = 0; i < fragment->len; i += width) {
1594 8480 : int val = GETSAMPLE32(width, fragment->buf, i);
1595 8480 : *ncp++ = st_linear2alaw(val >> 19);
1596 : }
1597 102 : return rv;
1598 : }
1599 :
1600 : /*[clinic input]
1601 : audioop.alaw2lin
1602 :
1603 : fragment: Py_buffer
1604 : width: int
1605 : /
1606 :
1607 : Convert sound fragments in a-LAW encoding to linearly encoded sound fragments.
1608 : [clinic start generated code]*/
1609 :
1610 : static PyObject *
1611 118 : audioop_alaw2lin_impl(PyObject *module, Py_buffer *fragment, int width)
1612 : /*[clinic end generated code: output=85c365ec559df647 input=4140626046cd1772]*/
1613 : {
1614 : unsigned char *cp;
1615 : signed char *ncp;
1616 : Py_ssize_t i;
1617 : int val;
1618 : PyObject *rv;
1619 :
1620 118 : if (!audioop_check_size(module, width))
1621 4 : return NULL;
1622 :
1623 114 : if (fragment->len > PY_SSIZE_T_MAX/width) {
1624 0 : PyErr_SetString(PyExc_MemoryError,
1625 : "not enough memory for output buffer");
1626 0 : return NULL;
1627 : }
1628 114 : rv = PyBytes_FromStringAndSize(NULL, fragment->len*width);
1629 114 : if (rv == NULL)
1630 0 : return NULL;
1631 114 : ncp = (signed char *)PyBytes_AsString(rv);
1632 114 : cp = fragment->buf;
1633 :
1634 22062 : for (i = 0; i < fragment->len*width; i += width) {
1635 21948 : val = st_alaw2linear16(*cp++) << 16;
1636 21948 : SETSAMPLE32(width, ncp, i, val);
1637 : }
1638 114 : return rv;
1639 : }
1640 :
1641 : /*[clinic input]
1642 : audioop.lin2adpcm
1643 :
1644 : fragment: Py_buffer
1645 : width: int
1646 : state: object
1647 : /
1648 :
1649 : Convert samples to 4 bit Intel/DVI ADPCM encoding.
1650 : [clinic start generated code]*/
1651 :
1652 : static PyObject *
1653 19 : audioop_lin2adpcm_impl(PyObject *module, Py_buffer *fragment, int width,
1654 : PyObject *state)
1655 : /*[clinic end generated code: output=cc19f159f16c6793 input=12919d549b90c90a]*/
1656 : {
1657 : signed char *ncp;
1658 : Py_ssize_t i;
1659 : int step, valpred, delta,
1660 : index, sign, vpdiff, diff;
1661 19 : PyObject *rv = NULL, *str;
1662 19 : int outputbuffer = 0, bufferstep;
1663 :
1664 19 : if (!audioop_check_parameters(module, fragment->len, width))
1665 4 : return NULL;
1666 :
1667 : /* Decode state, should have (value, index) */
1668 15 : if ( state == Py_None ) {
1669 : /* First time, it seems. Set defaults */
1670 10 : valpred = 0;
1671 10 : index = 0;
1672 : }
1673 5 : else if (!PyTuple_Check(state)) {
1674 1 : PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1675 1 : return NULL;
1676 : }
1677 4 : else if (!PyArg_ParseTuple(state, "ii;lin2adpcm(): illegal state argument",
1678 : &valpred, &index))
1679 : {
1680 0 : return NULL;
1681 : }
1682 4 : else if (valpred >= 0x8000 || valpred < -0x8000 ||
1683 2 : (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1684 4 : PyErr_SetString(PyExc_ValueError, "bad state");
1685 4 : return NULL;
1686 : }
1687 :
1688 10 : str = PyBytes_FromStringAndSize(NULL, fragment->len/(width*2));
1689 10 : if (str == NULL)
1690 0 : return NULL;
1691 10 : ncp = (signed char *)PyBytes_AsString(str);
1692 :
1693 10 : step = stepsizeTable[index];
1694 10 : bufferstep = 1;
1695 :
1696 92 : for (i = 0; i < fragment->len; i += width) {
1697 82 : int val = GETSAMPLE32(width, fragment->buf, i) >> 16;
1698 :
1699 : /* Step 1 - compute difference with previous value */
1700 82 : if (val < valpred) {
1701 15 : diff = valpred - val;
1702 15 : sign = 8;
1703 : }
1704 : else {
1705 67 : diff = val - valpred;
1706 67 : sign = 0;
1707 : }
1708 :
1709 : /* Step 2 - Divide and clamp */
1710 : /* Note:
1711 : ** This code *approximately* computes:
1712 : ** delta = diff*4/step;
1713 : ** vpdiff = (delta+0.5)*step/4;
1714 : ** but in shift step bits are dropped. The net result of this
1715 : ** is that even if you have fast mul/div hardware you cannot
1716 : ** put it to good use since the fixup would be too expensive.
1717 : */
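        /* Editorial worked example, not from the original source: with
           step = 16 and diff = 13 the three tests below produce
           delta = 3 and vpdiff = 2 + 8 + 4 = 14, matching the idealised
           formulas delta = 13*4/16 = 3 and vpdiff = (3+0.5)*16/4 = 14. */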
1718 82 : delta = 0;
1719 82 : vpdiff = (step >> 3);
1720 :
1721 82 : if ( diff >= step ) {
1722 30 : delta = 4;
1723 30 : diff -= step;
1724 30 : vpdiff += step;
1725 : }
1726 82 : step >>= 1;
1727 82 : if ( diff >= step ) {
1728 33 : delta |= 2;
1729 33 : diff -= step;
1730 33 : vpdiff += step;
1731 : }
1732 82 : step >>= 1;
1733 82 : if ( diff >= step ) {
1734 30 : delta |= 1;
1735 30 : vpdiff += step;
1736 : }
1737 :
1738 : /* Step 3 - Update previous value */
1739 82 : if ( sign )
1740 15 : valpred -= vpdiff;
1741 : else
1742 67 : valpred += vpdiff;
1743 :
1744 : /* Step 4 - Clamp previous value to 16 bits */
1745 82 : if ( valpred > 32767 )
1746 0 : valpred = 32767;
1747 82 : else if ( valpred < -32768 )
1748 0 : valpred = -32768;
1749 :
1750 : /* Step 5 - Assemble value, update index and step values */
1751 82 : delta |= sign;
1752 :
1753 82 : index += indexTable[delta];
1754 82 : if ( index < 0 ) index = 0;
1755 82 : if ( index > 88 ) index = 88;
1756 82 : step = stepsizeTable[index];
1757 :
1758 : /* Step 6 - Output value */
1759 82 : if ( bufferstep ) {
1760 44 : outputbuffer = (delta << 4) & 0xf0;
1761 : } else {
1762 38 : *ncp++ = (delta & 0x0f) | outputbuffer;
1763 : }
1764 82 : bufferstep = !bufferstep;
1765 : }
1766 10 : rv = Py_BuildValue("(O(ii))", str, valpred, index);
1767 10 : Py_DECREF(str);
1768 10 : return rv;
1769 : }
1770 :
1771 : /*[clinic input]
1772 : audioop.adpcm2lin
1773 :
1774 : fragment: Py_buffer
1775 : width: int
1776 : state: object
1777 : /
1778 :
1779 : Decode an Intel/DVI ADPCM coded fragment to a linear fragment.
1780 : [clinic start generated code]*/
1781 :
1782 : static PyObject *
1783 19 : audioop_adpcm2lin_impl(PyObject *module, Py_buffer *fragment, int width,
1784 : PyObject *state)
1785 : /*[clinic end generated code: output=3440ea105acb3456 input=f5221144f5ca9ef0]*/
1786 : {
1787 : signed char *cp;
1788 : signed char *ncp;
1789 : Py_ssize_t i, outlen;
1790 : int valpred, step, delta, index, sign, vpdiff;
1791 : PyObject *rv, *str;
1792 19 : int inputbuffer = 0, bufferstep;
1793 :
1794 19 : if (!audioop_check_size(module, width))
1795 4 : return NULL;
1796 :
1797 : /* Decode state, should have (value, index) */
1798 15 : if ( state == Py_None ) {
1799 : /* First time, it seems. Set defaults */
1800 10 : valpred = 0;
1801 10 : index = 0;
1802 : }
1803 5 : else if (!PyTuple_Check(state)) {
1804 1 : PyErr_SetString(PyExc_TypeError, "state must be a tuple or None");
1805 1 : return NULL;
1806 : }
1807 4 : else if (!PyArg_ParseTuple(state, "ii;adpcm2lin(): illegal state argument",
1808 : &valpred, &index))
1809 : {
1810 0 : return NULL;
1811 : }
1812 4 : else if (valpred >= 0x8000 || valpred < -0x8000 ||
1813 2 : (size_t)index >= Py_ARRAY_LENGTH(stepsizeTable)) {
1814 4 : PyErr_SetString(PyExc_ValueError, "bad state");
1815 4 : return NULL;
1816 : }
1817 :
1818 10 : if (fragment->len > (PY_SSIZE_T_MAX/2)/width) {
1819 0 : PyErr_SetString(PyExc_MemoryError,
1820 : "not enough memory for output buffer");
1821 0 : return NULL;
1822 : }
1823 10 : outlen = fragment->len*width*2;
1824 10 : str = PyBytes_FromStringAndSize(NULL, outlen);
1825 10 : if (str == NULL)
1826 0 : return NULL;
1827 10 : ncp = (signed char *)PyBytes_AsString(str);
1828 10 : cp = fragment->buf;
1829 :
1830 10 : step = stepsizeTable[index];
1831 10 : bufferstep = 0;
1832 :
1833 86 : for (i = 0; i < outlen; i += width) {
1834 : /* Step 1 - get the delta value and compute next index */
1835 76 : if ( bufferstep ) {
1836 38 : delta = inputbuffer & 0xf;
1837 : } else {
1838 38 : inputbuffer = *cp++;
1839 38 : delta = (inputbuffer >> 4) & 0xf;
1840 : }
1841 :
1842 76 : bufferstep = !bufferstep;
1843 :
1844 : /* Step 2 - Find new index value (for later) */
1845 76 : index += indexTable[delta];
1846 76 : if ( index < 0 ) index = 0;
1847 76 : if ( index > 88 ) index = 88;
1848 :
1849 : /* Step 3 - Separate sign and magnitude */
1850 76 : sign = delta & 8;
1851 76 : delta = delta & 7;
1852 :
1853 : /* Step 4 - Compute difference and new predicted value */
1854 : /*
1855 : ** Computes 'vpdiff = (delta+0.5)*step/4', but see comment
1856 : ** in adpcm_coder.
1857 : */
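        /* Editorial note, not from the original source: this mirrors the
           encoder above; e.g. delta = 3 with step = 16 rebuilds
           vpdiff = 2 + 8 + 4 = 14, the same value the encoder added. */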
1858 76 : vpdiff = step >> 3;
1859 76 : if ( delta & 4 ) vpdiff += step;
1860 76 : if ( delta & 2 ) vpdiff += step>>1;
1861 76 : if ( delta & 1 ) vpdiff += step>>2;
1862 :
1863 76 : if ( sign )
1864 12 : valpred -= vpdiff;
1865 : else
1866 64 : valpred += vpdiff;
1867 :
1868 : /* Step 5 - clamp output value */
1869 76 : if ( valpred > 32767 )
1870 0 : valpred = 32767;
1871 76 : else if ( valpred < -32768 )
1872 0 : valpred = -32768;
1873 :
1874 : /* Step 6 - Update step value */
1875 76 : step = stepsizeTable[index];
1876 :
1877 : /* Step 7 - Output value */
1878 76 : SETSAMPLE32(width, ncp, i, valpred << 16);
1879 : }
1880 :
1881 10 : rv = Py_BuildValue("(O(ii))", str, valpred, index);
1882 10 : Py_DECREF(str);
1883 10 : return rv;
1884 : }
1885 :
1886 : #include "clinic/audioop.c.h"
1887 :
1888 : static PyMethodDef audioop_methods[] = {
1889 : AUDIOOP_MAX_METHODDEF
1890 : AUDIOOP_MINMAX_METHODDEF
1891 : AUDIOOP_AVG_METHODDEF
1892 : AUDIOOP_MAXPP_METHODDEF
1893 : AUDIOOP_AVGPP_METHODDEF
1894 : AUDIOOP_RMS_METHODDEF
1895 : AUDIOOP_FINDFIT_METHODDEF
1896 : AUDIOOP_FINDMAX_METHODDEF
1897 : AUDIOOP_FINDFACTOR_METHODDEF
1898 : AUDIOOP_CROSS_METHODDEF
1899 : AUDIOOP_MUL_METHODDEF
1900 : AUDIOOP_ADD_METHODDEF
1901 : AUDIOOP_BIAS_METHODDEF
1902 : AUDIOOP_ULAW2LIN_METHODDEF
1903 : AUDIOOP_LIN2ULAW_METHODDEF
1904 : AUDIOOP_ALAW2LIN_METHODDEF
1905 : AUDIOOP_LIN2ALAW_METHODDEF
1906 : AUDIOOP_LIN2LIN_METHODDEF
1907 : AUDIOOP_ADPCM2LIN_METHODDEF
1908 : AUDIOOP_LIN2ADPCM_METHODDEF
1909 : AUDIOOP_TOMONO_METHODDEF
1910 : AUDIOOP_TOSTEREO_METHODDEF
1911 : AUDIOOP_GETSAMPLE_METHODDEF
1912 : AUDIOOP_REVERSE_METHODDEF
1913 : AUDIOOP_BYTESWAP_METHODDEF
1914 : AUDIOOP_RATECV_METHODDEF
1915 : { 0, 0 }
1916 : };
1917 :
1918 : static int
1919 82 : audioop_traverse(PyObject *module, visitproc visit, void *arg)
1920 : {
1921 82 : audioop_state *state = get_audioop_state(module);
1922 82 : Py_VISIT(state->AudioopError);
1923 82 : return 0;
1924 : }
1925 :
1926 : static int
1927 8 : audioop_clear(PyObject *module)
1928 : {
1929 8 : audioop_state *state = get_audioop_state(module);
1930 8 : Py_CLEAR(state->AudioopError);
1931 8 : return 0;
1932 : }
1933 :
1934 : static void
1935 4 : audioop_free(void *module) {
1936 4 : audioop_clear((PyObject *)module);
1937 4 : }
1938 :
1939 : static int
1940 4 : audioop_exec(PyObject* module)
1941 : {
1942 4 : audioop_state *state = get_audioop_state(module);
1943 :
1944 4 : state->AudioopError = PyErr_NewException("audioop.error", NULL, NULL);
1945 4 : if (state->AudioopError == NULL) {
1946 0 : return -1;
1947 : }
1948 :
1949 4 : Py_INCREF(state->AudioopError);
1950 4 : if (PyModule_AddObject(module, "error", state->AudioopError) < 0) {
1951 0 : Py_DECREF(state->AudioopError);
1952 0 : return -1;
1953 : }
1954 :
1955 4 : return 0;
1956 : }
1957 :
1958 : static PyModuleDef_Slot audioop_slots[] = {
1959 : {Py_mod_exec, audioop_exec},
1960 : {0, NULL}
1961 : };
1962 :
1963 : static struct PyModuleDef audioopmodule = {
1964 : PyModuleDef_HEAD_INIT,
1965 : "audioop",
1966 : NULL,
1967 : sizeof(audioop_state),
1968 : audioop_methods,
1969 : audioop_slots,
1970 : audioop_traverse,
1971 : audioop_clear,
1972 : audioop_free
1973 : };
1974 :
1975 : PyMODINIT_FUNC
1976 4 : PyInit_audioop(void)
1977 : {
1978 4 : if (PyErr_WarnEx(PyExc_DeprecationWarning,
1979 : "'audioop' is deprecated and slated for removal in "
1980 : "Python 3.13",
1981 : 7)) {
1982 0 : return NULL;
1983 : }
1984 :
1985 4 : return PyModuleDef_Init(&audioopmodule);
1986 : }
|