/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: arm7 and later wide math functions

 ********************************************************************/
#ifndef __ASM_ARM_H__
#define __ASM_ARM_H__

#include <stdint.h> /* int32_t, int64_t */

#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
#define _V_WIDE_MATH

/* 32x32->64 signed multiply; returns the high 32 bits of the product. */
static inline int32_t MULT32(int32_t x, int32_t y) {
  int lo, hi;
  asm volatile("smull\t%0, %1, %2, %3"
               : "=&r"(lo), "=&r"(hi)
               : "%r"(x), "r"(y)
               : "cc");
  return(hi);
}

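/* For reference, a portable C sketch of what the smull above computes
   (Tremor's generic build expresses MULT32 this way):

     static inline int32_t MULT32(int32_t x, int32_t y) {
       return (int32_t)(((int64_t)x * y) >> 32);
     }
*/
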
/* Q31 multiply: take the high word of the 64-bit product and shift up
   one bit to restore Q31 scaling. */
static inline int32_t MULT31(int32_t x, int32_t y) {
  return MULT32(x, y) << 1;
}

/* (x*y)>>15 with rounding: movs drops bit 14 of the low word into the
   carry flag, and adc folds it back in while splicing hi and lo. */
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  int lo, hi;
  asm volatile("smull %0, %1, %2, %3\n\t"
               "movs  %0, %0, lsr #15\n\t"
               "adc   %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo), "=&r"(hi)
               : "%r"(x), "r"(y)
               : "cc");
  return(hi);
}

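/* Portable sketch of the same operation; Tremor's generic build uses
   the unrounded form, while the asm above additionally rounds via the
   carry bit:

     static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
       return (int32_t)(((int64_t)x * y) >> 15);
     }
*/
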
/* Compiler-only memory barrier: the "memory" clobber stops the compiler
   from reordering loads/stores across it, presumably because the x and
   y outputs below may alias their inputs. It emits no instructions. */
#define MB() asm volatile ("" : : : "memory")

#define XPROD32(a, b, t, v, x, y)                    \
{                                                    \
  long l;                                            \
  asm( "smull %0, %1, %4, %6\n\t"                    \
       "smlal %0, %1, %5, %7\n\t"                    \
       "rsb   %3, %4, #0\n\t"                        \
       "smull %0, %2, %5, %6\n\t"                    \
       "smlal %0, %2, %3, %7"                        \
       : "=&r" (l), "=&r" (x), "=&r" (y), "=r" ((a)) \
       : "3" ((a)), "r" ((b)), "r" ((t)), "r" ((v))  \
       : "cc" );                                     \
}

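/* In portable terms, XPROD32 is roughly the following (note that a is
   clobbered by the rsb that negates it):

     x = (int32_t)(((int64_t)a*t + (int64_t)b*v) >> 32);
     y = (int32_t)(((int64_t)b*t - (int64_t)a*v) >> 32);
*/
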
/* Q31 cross product: *x = a*t + b*v, *y = b*t - a*v. */
static inline void XPROD31(int32_t a, int32_t b,
                           int32_t t, int32_t v,
                           int32_t *x, int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"
       "smlal %0, %1, %5, %7\n\t"
       "rsb   %3, %4, #0\n\t"
       "smull %0, %2, %5, %6\n\t"
       "smlal %0, %2, %3, %7"
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}

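/* Portable sketch of XPROD31, as in Tremor's generic build:

     *x = MULT31(a, t) + MULT31(b, v);
     *y = MULT31(b, t) - MULT31(a, v);
*/
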
/* As XPROD31 with the signs flipped: *x = a*t - b*v, *y = b*t + a*v. */
static inline void XNPROD31(int32_t a, int32_t b,
                            int32_t t, int32_t v,
                            int32_t *x, int32_t *y)
{
  int x1, y1, l;
  asm( "rsb   %2, %4, #0\n\t"
       "smull %0, %1, %3, %5\n\t"
       "smlal %0, %1, %2, %6\n\t"
       "smull %0, %2, %4, %5\n\t"
       "smlal %0, %2, %3, %6"
       : "=&r" (l), "=&r" (x1), "=&r" (y1)
       : "r" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}

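/* Portable sketch of XNPROD31:

     *x = MULT31(a, t) - MULT31(b, v);
     *y = MULT31(b, t) + MULT31(a, v);
*/
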
#ifndef _V_VECT_OPS
#define _V_VECT_OPS

/* asm versions of vector operations for block.c, window.c */

/* x[i] += y[i] for i in [0,n): four elements per LDM/STM iteration,
   with a scalar loop for the remainder. */
static inline
void vect_add(int32_t *x, int32_t *y, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[x],  {r0, r1, r2, r3};"
                  "ldmia %[y]!, {r4, r5, r6, r7};"
                  "add r0, r0, r4;"
                  "add r1, r1, r5;"
                  "add r2, r2, r6;"
                  "add r3, r3, r7;"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7",
                      "memory");
    n -= 4;
  }
  /* add final elements */
  while (n>0) {
    *x++ += *y++;
    n--;
  }
}

/* x[i] = y[i] for i in [0,n): four elements per LDM/STM iteration. */
static inline
void vect_copy(int32_t *x, int32_t *y, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[y]!, {r0, r1, r2, r3};"
                  "stmia %[x]!, {r0, r1, r2, r3};"
                  : [x] "+r" (x), [y] "+r" (y)
                  : : "r0", "r1", "r2", "r3",
                      "memory");
    n -= 4;
  }
  /* copy final elements */
  while (n>0) {
    *x++ = *y++;
    n--;
  }
}

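/* For non-overlapping buffers this is equivalent to
   memcpy(x, y, n * sizeof(*x)); open-coding it presumably avoids a
   call into a generic memcpy on these targets. */
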
/* data[i] = MULT31(data[i], window[i]), window read forwards; each
   smull/mov pair is an open-coded MULT31. */
static inline
void vect_mult_fw(int32_t *data, int32_t *window, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[d],  {r0, r1, r2, r3};"
                  "ldmia %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r4;"
                  "mov   r0, r9, lsl #1;"
                  "smull r8, r9, r1, r5;"
                  "mov   r1, r9, lsl #1;"
                  "smull r8, r9, r2, r6;"
                  "mov   r2, r9, lsl #1;"
                  "smull r8, r9, r3, r7;"
                  "mov   r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8", "r9",
                      "memory", "cc");
    n -= 4;
  }
  while (n>0) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
}

/* data[i] = MULT31(data[i], window[-i]), window read backwards:
   ldmda (decrement after) fetches the four words ending at *window,
   which are then consumed in reverse register order (r7..r4). */
static inline
void vect_mult_bw(int32_t *data, int32_t *window, int n)
{
  while (n>=4) {
    asm volatile ("ldmia %[d],  {r0, r1, r2, r3};"
                  "ldmda %[w]!, {r4, r5, r6, r7};"
                  "smull r8, r9, r0, r7;"
                  "mov   r0, r9, lsl #1;"
                  "smull r8, r9, r1, r6;"
                  "mov   r1, r9, lsl #1;"
                  "smull r8, r9, r2, r5;"
                  "mov   r2, r9, lsl #1;"
                  "smull r8, r9, r3, r4;"
                  "mov   r3, r9, lsl #1;"
                  "stmia %[d]!, {r0, r1, r2, r3};"
                  : [d] "+r" (data), [w] "+r" (window)
                  : : "r0", "r1", "r2", "r3",
                      "r4", "r5", "r6", "r7", "r8", "r9",
                      "memory", "cc");
    n -= 4;
  }
  while (n>0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}

#endif /* _V_VECT_OPS */
#endif /* _V_WIDE_MATH */

#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

/* Saturate x to the signed 16-bit range using conditional execution
   rather than branches. Note the underflow case writes 0x8000, which
   reads as -32768 once the result is stored as a 16-bit sample. */
static inline int32_t CLIP_TO_15(int32_t x) {
  int tmp;
  asm volatile("subs  %1, %0, #32768\n\t"
               "movpl %0, #0x7f00\n\t"
               "orrpl %0, %0, #0xff\n"
               "adds  %1, %0, #32768\n\t"
               "movmi %0, #0x8000"
               : "+r"(x), "=r"(tmp)
               :
               : "cc");
  return(x);
}

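/* Branchy sketch of the intended saturation:

     static inline int32_t CLIP_TO_15(int32_t x) {
       if (x >  32767) return  32767;
       if (x < -32768) return -32768;
       return x;
     }
*/
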
#endif /* _V_CLIP_MATH */

#ifndef _V_LSP_MATH_ASM
#define _V_LSP_MATH_ASM
/* Guard defined, but this port supplies no LSP assembly here. */
#endif

#endif /* __ASM_ARM_H__ */