/* Copyright (C) 2013 Xiph.Org Foundation and contributors */
/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef FIXED_ARMv4_H
#define FIXED_ARMv4_H
/** 16x32 multiplication, followed by a 16-bit shift right. Result fits in 32 bits */
#undef MULT16_32_Q16
static OPUS_INLINE opus_val32 MULT16_32_Q16_armv4(opus_val16 a, opus_val32 b)
{
  unsigned rd_lo;
  int rd_hi;
  __asm__(
      "#MULT16_32_Q16\n\t"
      "smull %0, %1, %2, %3\n\t"
      : "=&r"(rd_lo), "=&r"(rd_hi)
      : "%r"(b), "r"(a<<16)
  );
  return rd_hi;
}
#define MULT16_32_Q16(a, b) (MULT16_32_Q16_armv4(a, b))
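/* Illustrative sketch, not part of the original header: SMULL forms the full
   64-bit product of b and (a<<16), so keeping only the high word rd_hi is the
   same as shifting the 16x32 product right by 16, without needing a 64-bit
   shift.  Assuming the opus_int64 type from the surrounding fixed-point
   headers, a hypothetical portable equivalent would be: */
static OPUS_INLINE opus_val32 MULT16_32_Q16_portable_sketch(opus_val16 a, opus_val32 b)
{
  /* a*b needs at most 48 bits, so the 64-bit product and shift are exact. */
  return (opus_val32)(((opus_int64)a*(opus_int64)b)>>16);
}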
/** 16x32 multiplication, followed by a 15-bit shift right. Result fits in 32 bits */
#undef MULT16_32_Q15
static OPUS_INLINE opus_val32 MULT16_32_Q15_armv4(opus_val16 a, opus_val32 b)
{
  unsigned rd_lo;
  int rd_hi;
  __asm__(
      "#MULT16_32_Q15\n\t"
      "smull %0, %1, %2, %3\n\t"
      : "=&r"(rd_lo), "=&r"(rd_hi)
      : "%r"(b), "r"(a<<16)
  );
  /* We intentionally don't OR in the high bit of rd_lo for speed. */
  return rd_hi<<1;
}
#define MULT16_32_Q15(a, b) (MULT16_32_Q15_armv4(a, b))
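/* Illustrative sketch, not part of the original header: rd_hi already holds
   (a*b)>>16, so returning rd_hi<<1 gives (a*b)>>15 with the least significant
   bit dropped (that bit sits at the top of rd_lo, which the comment above
   intentionally ignores for speed).  A hypothetical exact reference, for
   comparison only, would be: */
static OPUS_INLINE opus_val32 MULT16_32_Q15_portable_sketch(opus_val16 a, opus_val32 b)
{
  /* Exact Q15 product; the armv4 routine above may differ in the last bit. */
  return (opus_val32)(((opus_int64)a*(opus_int64)b)>>15);
}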
/** 16x32 multiply, followed by a 15-bit shift right and 32-bit add.
    b must fit in 31 bits.
    Result fits in 32 bits. */
#undef MAC16_32_Q15
#define MAC16_32_Q15(c, a, b) ADD32(c, MULT16_32_Q15(a, b))
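/* Usage sketch, not part of the original header: MAC16_32_Q15 accumulates
   Q15-scaled products, e.g. a hypothetical dot product of 16-bit coefficients
   against 32-bit state.  ADD32 is assumed to be in scope from fixed_generic.h,
   which is included before this header in the fixed-point build: */
static OPUS_INLINE opus_val32 dot_q15_sketch(const opus_val16 *x, const opus_val32 *y, int N)
{
  int i;
  opus_val32 sum = 0;
  for (i=0;i<N;i++)
    sum = MAC16_32_Q15(sum, x[i], y[i]);  /* sum += (x[i]*y[i])>>15 */
  return sum;
}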
/** 16x32 multiply, followed by a 16-bit shift right and 32-bit add.
    Result fits in 32 bits. */
#undef MAC16_32_Q16
#define MAC16_32_Q16(c, a, b) ADD32(c, MULT16_32_Q16(a, b))
/** 32x32 multiplication, followed by a 31-bit shift right. Result fits in 32 bits */
#undef MULT32_32_Q31
#define MULT32_32_Q31(a,b) (opus_val32)((((opus_int64)(a)) * ((opus_int64)(b)))>>31)
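/* Illustrative note, not part of the original header: unlike the 16x32 helpers
   above, this macro leaves the full 64-bit multiply to the compiler, which on
   ARM can typically emit SMULL plus a shift on its own.  A hypothetical use
   would be squaring a 32-bit value while keeping a 32-bit result: */
static OPUS_INLINE opus_val32 square_q31_sketch(opus_val32 x)
{
  /* ((opus_int64)x*x)>>31 */
  return MULT32_32_Q31(x, x);
}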
#endif