csi_rv64_gcc.h 115 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
7327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340
  1. /*
  2. * Copyright (C) 2017-2019 Alibaba Group Holding Limited
  3. */
  4. /******************************************************************************
  5. * @file csi_rv64_gcc.h
  6. * @brief CSI Header File for GCC.
  7. * @version V1.0
  8. * @date 01. Sep 2018
  9. ******************************************************************************/
  10. #ifndef _CSI_RV64_GCC_H_
  11. #define _CSI_RV64_GCC_H_
#include <stdint.h>
#include <stdlib.h>
#ifndef __ASM
#define __ASM __asm /*!< asm keyword for GNU Compiler */
#endif
#ifndef __INLINE
#define __INLINE inline /*!< inline keyword for GNU Compiler */
#endif
#ifndef __ALWAYS_STATIC_INLINE
/*!< force inlining regardless of optimization level */
#define __ALWAYS_STATIC_INLINE __attribute__((always_inline)) static inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __NO_RETURN
/*!< marks functions that never return to the caller */
#define __NO_RETURN __attribute__((__noreturn__))
#endif
#ifndef __USED
/*!< keep the symbol even if it appears unreferenced */
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
/*!< definition may be overridden by a strong symbol elsewhere */
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
/*!< byte-packed layout, no inter-member padding */
#define __PACKED __attribute__((packed, aligned(1)))
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
#endif
#ifndef __PACKED_UNION
#define __PACKED_UNION union __attribute__((packed, aligned(1)))
#endif
  43. /* ########################### Core Function Access ########################### */
  44. /** \ingroup CSI_Core_FunctionInterface
  45. \defgroup CSI_Core_RegAccFunctions CSI Core Register Access Functions
  46. @{
  47. */
  48. /**
  49. \brief Enable IRQ Interrupts
  50. \details Enables IRQ interrupts by setting the IE-bit in the PSR.
  51. Can only be executed in Privileged modes.
  52. */
  53. __ALWAYS_STATIC_INLINE void __enable_irq(void)
  54. {
  55. #if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
  56. __ASM volatile("csrs sstatus, 2");
  57. __ASM volatile("li a0, 0x222");
  58. __ASM volatile("csrs sie, a0");
  59. #else
  60. __ASM volatile("csrs mstatus, 8");
  61. __ASM volatile("li a0, 0x888");
  62. __ASM volatile("csrs mie, a0");
  63. #endif
  64. }
  65. /**
  66. \brief Enable supervisor IRQ Interrupts
  67. \details Enables IRQ interrupts by setting the IE-bit in the PSR.
  68. Can only be executed in Privileged modes.
  69. */
  70. __ALWAYS_STATIC_INLINE void __enable_supervisor_irq(void)
  71. {
  72. __ASM volatile("csrs sstatus, 2");
  73. __ASM volatile("li a0, 0x222");
  74. __ASM volatile("csrs sie, a0");
  75. }
/**
  \brief Disable IRQ Interrupts
  \details Disables IRQ interrupts by clearing the IE-bit in the status CSR.
           Can only be executed in Privileged modes.
*/
__ALWAYS_STATIC_INLINE void __disable_irq(void)
{
#if defined(CONFIG_RISCV_SMODE) && CONFIG_RISCV_SMODE
    /* Clear SIE (bit 1) in sstatus. */
    __ASM volatile("csrc sstatus, 2");
#else
    /* Clear MIE (bit 3) in mstatus. */
    __ASM volatile("csrc mstatus, 8");
#endif
}
/**
  \brief Disable supervisor IRQ Interrupts
  \details Disables supervisor IRQ interrupts by clearing the SIE-bit
           (bit 1) in sstatus. Can only be executed in Privileged modes.
*/
__ALWAYS_STATIC_INLINE void __disable_supervisor_irq(void)
{
    __ASM volatile("csrc sstatus, 2");
}
  98. /**
  99. \brief Get MXSTATUS
  100. \details Returns the content of the MXSTATUS Register.
  101. \return MXSTATUS Register value
  102. */
  103. __ALWAYS_STATIC_INLINE uint64_t __get_MXSTATUS(void)
  104. {
  105. uint64_t result;
  106. __ASM volatile("csrr %0, mxstatus" : "=r"(result));
  107. return (result);
  108. }
  109. /**
  110. \brief Get SXSTATUS
  111. \details Returns the content of the SXSTATUS Register.
  112. \return SXSTATUS Register value
  113. */
  114. __ALWAYS_STATIC_INLINE uint64_t __get_SXSTATUS(void)
  115. {
  116. uint64_t result;
  117. __ASM volatile("csrr %0, sxstatus" : "=r"(result));
  118. return (result);
  119. }
  120. /**
  121. \brief Get CPU WORK MODE
  122. \details Returns CPU WORK MODE.
  123. \return CPU WORK MODE
  124. */
  125. __ALWAYS_STATIC_INLINE uint64_t __get_CPU_WORK_MODE(void)
  126. {
  127. uint64_t result;
  128. __ASM volatile("csrr %0, sxstatus" : "=r"(result));
  129. return ((result >> 30U) & 0x3U);
  130. }
  131. /**
  132. \brief Get SATP
  133. \details Returns the content of the SATP Register.
  134. \return SATP Register value
  135. */
  136. __ALWAYS_STATIC_INLINE uint64_t __get_SATP(void)
  137. {
  138. uint64_t result;
  139. __ASM volatile("csrr %0, satp" : "=r"(result));
  140. return (result);
  141. }
/**
  \brief Set SATP
  \details Writes the given value to the SATP Register.
  \param [in] satp  SATP Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_SATP(uint64_t satp)
{
    __ASM volatile("csrw satp, %0" : : "r"(satp));
}
/**
  \brief Set MEPC
  \details Writes the given value to the MEPC Register.
  \param [in] mepc  MEPC Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MEPC(uint64_t mepc)
{
    __ASM volatile("csrw mepc, %0" : : "r"(mepc));
}
/**
  \brief Set MXSTATUS
  \details Writes the given value to the MXSTATUS Register
           (vendor-extension CSR).
  \param [in] mxstatus  MXSTATUS Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MXSTATUS(uint64_t mxstatus)
{
    __ASM volatile("csrw mxstatus, %0" : : "r"(mxstatus));
}
  169. /**
  170. \brief Get MSTATUS
  171. \details Returns the content of the MSTATUS Register.
  172. \return MSTATUS Register value
  173. */
  174. __ALWAYS_STATIC_INLINE uint64_t __get_MSTATUS(void)
  175. {
  176. uint64_t result;
  177. __ASM volatile("csrr %0, mstatus" : "=r"(result));
  178. return (result);
  179. }
/**
  \brief Set MSTATUS
  \details Writes the given value to the MSTATUS Register.
  \param [in] mstatus  MSTATUS Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MSTATUS(uint64_t mstatus)
{
    __ASM volatile("csrw mstatus, %0" : : "r"(mstatus));
}
  189. /**
  190. \brief Get MCOR
  191. \details Returns the content of the MCOR Register.
  192. \return MCOR Register value
  193. */
  194. __ALWAYS_STATIC_INLINE uint64_t __get_MCOR(void)
  195. {
  196. uint64_t result;
  197. __ASM volatile("csrr %0, mcor" : "=r"(result));
  198. return (result);
  199. }
/**
  \brief Set MCOR
  \details Writes the given value to the MCOR Register
           (vendor-extension CSR).
  \param [in] mcor  MCOR Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MCOR(uint64_t mcor)
{
    __ASM volatile("csrw mcor, %0" : : "r"(mcor));
}
  209. /**
  210. \brief Get MHCR
  211. \details Returns the content of the MHCR Register.
  212. \return MHCR Register value
  213. */
  214. __ALWAYS_STATIC_INLINE uint64_t __get_MHCR(void)
  215. {
  216. uint64_t result;
  217. __ASM volatile("csrr %0, mhcr" : "=r"(result));
  218. return (result);
  219. }
/**
  \brief Set MHCR
  \details Writes the given value to the MHCR Register
           (vendor-extension CSR).
  \param [in] mhcr  MHCR Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MHCR(uint64_t mhcr)
{
    __ASM volatile("csrw mhcr, %0" : : "r"(mhcr));
}
  229. /**
  230. \brief Get MHINT
  231. \details Returns the content of the MHINT Register.
  232. \return MHINT Register value
  233. */
  234. __ALWAYS_STATIC_INLINE uint64_t __get_MHINT(void)
  235. {
  236. uint64_t result;
  237. __ASM volatile("csrr %0, mhint" : "=r"(result));
  238. return (result);
  239. }
/**
  \brief Set MHINT
  \details Writes the given value to the MHINT Register
           (vendor-extension CSR).
  \param [in] mhint  MHINT Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MHINT(uint64_t mhint)
{
    __ASM volatile("csrw mhint, %0" : : "r"(mhint));
}
  249. /**
  250. \brief Get MCCR2
  251. \details Returns the content of the MCCR2 Register.
  252. \return MCCR2 Register value
  253. */
  254. __ALWAYS_STATIC_INLINE uint64_t __get_MCCR2(void)
  255. {
  256. uint64_t result;
  257. __ASM volatile("csrr %0, mccr2" : "=r"(result));
  258. return (result);
  259. }
/**
  \brief Set MCCR2
  \details Writes the given value to the MCCR2 Register
           (vendor-extension CSR).
  \param [in] mccr2  MCCR2 Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MCCR2(uint64_t mccr2)
{
    __ASM volatile("csrw mccr2, %0" : : "r"(mccr2));
}
  269. /**
  270. \brief Get MISA Register
  271. \details Returns the content of the MISA Register.
  272. \return MISA Register value
  273. */
  274. __ALWAYS_STATIC_INLINE uint64_t __get_MISA(void)
  275. {
  276. uint64_t result;
  277. __ASM volatile("csrr %0, misa" : "=r"(result));
  278. return (result);
  279. }
/**
  \brief Set MISA
  \details Writes the given value to the MISA Register.
  \param [in] misa  MISA Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MISA(uint64_t misa)
{
    __ASM volatile("csrw misa, %0" : : "r"(misa));
}
  289. /**
  290. \brief Get MIE Register
  291. \details Returns the content of the MIE Register.
  292. \return MIE Register value
  293. */
  294. __ALWAYS_STATIC_INLINE uint64_t __get_MIE(void)
  295. {
  296. uint64_t result;
  297. __ASM volatile("csrr %0, mie" : "=r"(result));
  298. return (result);
  299. }
/**
  \brief Set MIE
  \details Writes the given value to the MIE Register.
  \param [in] mie  MIE Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MIE(uint64_t mie)
{
    __ASM volatile("csrw mie, %0" : : "r"(mie));
}
  309. /**
  310. \brief Get MTVEC Register
  311. \details Returns the content of the MTVEC Register.
  312. \return MTVEC Register value
  313. */
  314. __ALWAYS_STATIC_INLINE uint64_t __get_MTVEC(void)
  315. {
  316. uint64_t result;
  317. __ASM volatile("csrr %0, mtvec" : "=r"(result));
  318. return (result);
  319. }
/**
  \brief Set MTVEC
  \details Writes the given value to the MTVEC Register.
  \param [in] mtvec  MTVEC Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MTVEC(uint64_t mtvec)
{
    __ASM volatile("csrw mtvec, %0" : : "r"(mtvec));
}
/**
  \brief Set MTVT
  \details Writes the given value to the MTVT Register
           (CLIC trap-vector table base, extension CSR).
  \param [in] mtvt  MTVT Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MTVT(uint64_t mtvt)
{
    __ASM volatile("csrw mtvt, %0" : : "r"(mtvt));
}
  338. /**
  339. \brief Get MTVT Register
  340. \details Returns the content of the MTVT Register.
  341. \return MTVT Register value
  342. */
  343. __ALWAYS_STATIC_INLINE uint64_t __get_MTVT(void)
  344. {
  345. uint64_t result;
  346. __ASM volatile("csrr %0, mtvt" : "=r"(result));
  347. return (result);
  348. }
  349. /**
  350. \brief Get MTIME
  351. \details Returns the content of the MTIME Register.
  352. \return MTIME Register value
  353. */
  354. __ALWAYS_STATIC_INLINE uint64_t __get_MTIME(void)
  355. {
  356. uint64_t result;
  357. __ASM volatile("rdtime %0" : "=r"(result));
  358. //__ASM volatile("csrr %0, 0xc01" : "=r"(result));
  359. return (result);
  360. }
  361. /**
  362. \brief Get SP
  363. \details Returns the content of the SP Register.
  364. \return SP Register value
  365. */
  366. __ALWAYS_STATIC_INLINE uint64_t __get_SP(void)
  367. {
  368. uint64_t result;
  369. __ASM volatile("mv %0, sp" : "=r"(result));
  370. return (result);
  371. }
/**
  \brief Set SP
  \details Writes the given value to the SP Register.
           NOTE(review): rewriting the stack pointer invalidates any
           stack-relative addressing the compiler has emitted; callers
           must ensure this is done at a safe point (e.g. context switch).
  \param [in] sp  SP Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_SP(uint64_t sp)
{
    /* "sp" clobber tells the compiler the stack pointer changes. */
    __ASM volatile("mv sp, %0" : : "r"(sp): "sp");
}
  381. /**
  382. \brief Get MSCRATCH Register
  383. \details Returns the content of the MSCRATCH Register.
  384. \return MSCRATCH Register value
  385. */
  386. __ALWAYS_STATIC_INLINE uint64_t __get_MSCRATCH(void)
  387. {
  388. uint64_t result;
  389. __ASM volatile("csrr %0, mscratch" : "=r"(result));
  390. return (result);
  391. }
/**
  \brief Set MSCRATCH
  \details Writes the given value to the MSCRATCH Register.
  \param [in] mscratch  MSCRATCH Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MSCRATCH(uint64_t mscratch)
{
    __ASM volatile("csrw mscratch, %0" : : "r"(mscratch));
}
  401. /**
  402. \brief Get MCAUSE Register
  403. \details Returns the content of the MCAUSE Register.
  404. \return MCAUSE Register value
  405. */
  406. __ALWAYS_STATIC_INLINE uint64_t __get_MCAUSE(void)
  407. {
  408. uint64_t result;
  409. __ASM volatile("csrr %0, mcause" : "=r"(result));
  410. return (result);
  411. }
  412. /**
  413. \brief Get SCAUSE Register
  414. \details Returns the content of the SCAUSE Register.
  415. \return SCAUSE Register value
  416. */
  417. __ALWAYS_STATIC_INLINE uint64_t __get_SCAUSE(void)
  418. {
  419. uint64_t result;
  420. __ASM volatile("csrr %0, scause" : "=r"(result));
  421. return (result);
  422. }
  423. /**
  424. \brief Get MNXTI Register
  425. \details Returns the content of the MNXTI Register.
  426. \return MNXTI Register value
  427. */
  428. __ALWAYS_STATIC_INLINE uint64_t __get_MNXTI(void)
  429. {
  430. uint64_t result;
  431. __ASM volatile("csrr %0, mnxti" : "=r"(result));
  432. return (result);
  433. }
/**
  \brief Set MNXTI
  \details Writes the given value to the MNXTI Register
           (CLIC next-interrupt register, extension CSR).
  \param [in] mnxti  MNXTI Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MNXTI(uint64_t mnxti)
{
    __ASM volatile("csrw mnxti, %0" : : "r"(mnxti));
}
  443. /**
  444. \brief Get MINTSTATUS Register
  445. \details Returns the content of the MINTSTATUS Register.
  446. \return MINTSTATUS Register value
  447. */
  448. __ALWAYS_STATIC_INLINE uint64_t __get_MINTSTATUS(void)
  449. {
  450. uint64_t result;
  451. __ASM volatile("csrr %0, mintstatus" : "=r"(result));
  452. return (result);
  453. }
  454. /**
  455. \brief Get MTVAL Register
  456. \details Returns the content of the MTVAL Register.
  457. \return MTVAL Register value
  458. */
  459. __ALWAYS_STATIC_INLINE uint64_t __get_MTVAL(void)
  460. {
  461. uint64_t result;
  462. __ASM volatile("csrr %0, mtval" : "=r"(result));
  463. return (result);
  464. }
  465. /**
  466. \brief Get MIP Register
  467. \details Returns the content of the MIP Register.
  468. \return MIP Register value
  469. */
  470. __ALWAYS_STATIC_INLINE uint64_t __get_MIP(void)
  471. {
  472. uint64_t result;
  473. __ASM volatile("csrr %0, mip" : "=r"(result));
  474. return (result);
  475. }
/**
  \brief Set MIP
  \details Writes the given value to the MIP Register.
  \param [in] mip  MIP Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_MIP(uint64_t mip)
{
    __ASM volatile("csrw mip, %0" : : "r"(mip));
}
  485. /**
  486. \brief Get MCYCLEL Register
  487. \details Returns the content of the MCYCLEL Register.
  488. \return MCYCLE Register value
  489. */
  490. __ALWAYS_STATIC_INLINE uint64_t __get_MCYCLE(void)
  491. {
  492. uint64_t result;
  493. __ASM volatile("csrr %0, mcycle" : "=r"(result));
  494. return (result);
  495. }
  496. /**
  497. \brief Get MCYCLEH Register
  498. \details Returns the content of the MCYCLEH Register.
  499. \return MCYCLEH Register value
  500. */
  501. __ALWAYS_STATIC_INLINE uint64_t __get_MCYCLEH(void)
  502. {
  503. uint64_t result;
  504. __ASM volatile("csrr %0, mcycleh" : "=r"(result));
  505. return (result);
  506. }
  507. /**
  508. \brief Get MINSTRET Register
  509. \details Returns the content of the MINSTRET Register.
  510. \return MINSTRET Register value
  511. */
  512. __ALWAYS_STATIC_INLINE uint64_t __get_MINSTRET(void)
  513. {
  514. uint64_t result;
  515. __ASM volatile("csrr %0, minstret" : "=r"(result));
  516. return (result);
  517. }
  518. /**
  519. \brief Get MINSTRETH Register
  520. \details Returns the content of the MINSTRETH Register.
  521. \return MINSTRETH Register value
  522. */
  523. __ALWAYS_STATIC_INLINE uint64_t __get_MINSTRETH(void)
  524. {
  525. uint64_t result;
  526. __ASM volatile("csrr %0, minstreth" : "=r"(result));
  527. return (result);
  528. }
  529. /**
  530. \brief Get MVENDORID Register
  531. \details Returns the content of the MVENDROID Register.
  532. \return MVENDORID Register value
  533. */
  534. __ALWAYS_STATIC_INLINE uint64_t __get_MVENDORID(void)
  535. {
  536. uint64_t result;
  537. __ASM volatile("csrr %0, mvendorid" : "=r"(result));
  538. return (result);
  539. }
  540. /**
  541. \brief Get MARCHID Register
  542. \details Returns the content of the MARCHID Register.
  543. \return MARCHID Register value
  544. */
  545. __ALWAYS_STATIC_INLINE uint64_t __get_MARCHID(void)
  546. {
  547. uint64_t result;
  548. __ASM volatile("csrr %0, marchid" : "=r"(result));
  549. return (result);
  550. }
  551. /**
  552. \brief Get MIMPID Register
  553. \details Returns the content of the MIMPID Register.
  554. \return MIMPID Register value
  555. */
  556. __ALWAYS_STATIC_INLINE uint64_t __get_MIMPID(void)
  557. {
  558. uint64_t result;
  559. __ASM volatile("csrr %0, mimpid" : "=r"(result));
  560. return (result);
  561. }
  562. /**
  563. \brief Get MHARTID Register
  564. \details Returns the content of the MHARTID Register.
  565. \return MHARTID Register value
  566. */
  567. __ALWAYS_STATIC_INLINE uint64_t __get_MHARTID(void)
  568. {
  569. uint64_t result;
  570. __ASM volatile("csrr %0, mhartid" : "=r"(result));
  571. return (result);
  572. }
  573. /**
  574. \brief Get PMPCFGx Register
  575. \details Returns the content of the PMPCFGx Register.
  576. \return PMPCFGx Register value
  577. */
  578. __ALWAYS_STATIC_INLINE uint64_t __get_PMPCFG0(void)
  579. {
  580. uint64_t result;
  581. __ASM volatile("csrr %0, pmpcfg0" : "=r"(result));
  582. return (result);
  583. }
  584. __ALWAYS_STATIC_INLINE uint64_t __get_PMPCFG1(void)
  585. {
  586. uint64_t result;
  587. __ASM volatile("csrr %0, pmpcfg1" : "=r"(result));
  588. return (result);
  589. }
  590. __ALWAYS_STATIC_INLINE uint64_t __get_PMPCFG2(void)
  591. {
  592. uint64_t result;
  593. __ASM volatile("csrr %0, pmpcfg2" : "=r"(result));
  594. return (result);
  595. }
  596. __ALWAYS_STATIC_INLINE uint64_t __get_PMPCFG3(void)
  597. {
  598. uint64_t result;
  599. __ASM volatile("csrr %0, pmpcfg3" : "=r"(result));
  600. return (result);
  601. }
  602. /**
  603. \brief Get PMPxCFG Register by index
  604. \details Returns the content of the PMPxCFG Register.
  605. \param [in] idx PMP region index
  606. \return PMPxCFG Register value
  607. */
  608. __STATIC_INLINE uint8_t __get_PMPxCFG(uint64_t idx)
  609. {
  610. uint64_t pmpcfgx = 0;
  611. if (idx < 4) {
  612. pmpcfgx = __get_PMPCFG0();
  613. } else if (idx >= 4 && idx < 8) {
  614. idx -= 4;
  615. pmpcfgx = __get_PMPCFG1();
  616. } else if (idx >= 8 && idx < 12) {
  617. idx -= 8;
  618. pmpcfgx = __get_PMPCFG2();
  619. } else if (idx >= 12 && idx < 16) {
  620. idx -= 12;
  621. pmpcfgx = __get_PMPCFG3();
  622. } else {
  623. return 0;
  624. }
  625. return (uint8_t)((pmpcfgx & (0xFF << (idx << 3))) >> (idx << 3));
  626. }
/**
  \brief Set PMPCFGx
  \details Writes the given value to the PMPCFGx Register.
  \param [in] pmpcfg  PMPCFGx Register value to set
*/
__ALWAYS_STATIC_INLINE void __set_PMPCFG0(uint64_t pmpcfg)
{
    __ASM volatile("csrw pmpcfg0, %0" : : "r"(pmpcfg));
}
  636. __ALWAYS_STATIC_INLINE void __set_PMPCFG1(uint64_t pmpcfg)
  637. {
  638. __ASM volatile("csrw pmpcfg1, %0" : : "r"(pmpcfg));
  639. }
  640. __ALWAYS_STATIC_INLINE void __set_PMPCFG2(uint64_t pmpcfg)
  641. {
  642. __ASM volatile("csrw pmpcfg2, %0" : : "r"(pmpcfg));
  643. }
  644. __ALWAYS_STATIC_INLINE void __set_PMPCFG3(uint64_t pmpcfg)
  645. {
  646. __ASM volatile("csrw pmpcfg3, %0" : : "r"(pmpcfg));
  647. }
  648. /**
  649. \brief Set PMPxCFG by index
  650. \details Writes the given value to the PMPxCFG Register.
  651. \param [in] idx PMPx region index
  652. \param [in] pmpxcfg PMPxCFG Register value to set
  653. */
  654. __STATIC_INLINE void __set_PMPxCFG(uint64_t idx, uint8_t pmpxcfg)
  655. {
  656. uint64_t pmpcfgx = 0;
  657. if (idx < 4) {
  658. pmpcfgx = __get_PMPCFG0();
  659. pmpcfgx = (pmpcfgx & ~(0xFF << (idx << 3))) | ((uint64_t)(pmpxcfg) << (idx << 3));
  660. __set_PMPCFG0(pmpcfgx);
  661. } else if (idx >= 4 && idx < 8) {
  662. idx -= 4;
  663. pmpcfgx = __get_PMPCFG1();
  664. pmpcfgx = (pmpcfgx & ~(0xFF << (idx << 3))) | ((uint64_t)(pmpxcfg) << (idx << 3));
  665. __set_PMPCFG1(pmpcfgx);
  666. } else if (idx >= 8 && idx < 12) {
  667. idx -= 8;
  668. pmpcfgx = __get_PMPCFG2();
  669. pmpcfgx = (pmpcfgx & ~(0xFF << (idx << 3))) | ((uint64_t)(pmpxcfg) << (idx << 3));
  670. __set_PMPCFG2(pmpcfgx);
  671. } else if (idx >= 12 && idx < 16) {
  672. idx -= 12;
  673. pmpcfgx = __get_PMPCFG3();
  674. pmpcfgx = (pmpcfgx & ~(0xFF << (idx << 3))) | ((uint64_t)(pmpxcfg) << (idx << 3));
  675. __set_PMPCFG3(pmpcfgx);
  676. } else {
  677. return;
  678. }
  679. }
  680. /**
  681. \brief Get PMPADDRx Register
  682. \details Returns the content of the PMPADDRx Register.
  683. \return PMPADDRx Register value
  684. */
  685. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR0(void)
  686. {
  687. uint64_t result;
  688. __ASM volatile("csrr %0, pmpaddr0" : "=r"(result));
  689. return (result);
  690. }
  691. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR1(void)
  692. {
  693. uint64_t result;
  694. __ASM volatile("csrr %0, pmpaddr1" : "=r"(result));
  695. return (result);
  696. }
  697. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR2(void)
  698. {
  699. uint64_t result;
  700. __ASM volatile("csrr %0, pmpaddr2" : "=r"(result));
  701. return (result);
  702. }
  703. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR3(void)
  704. {
  705. uint64_t result;
  706. __ASM volatile("csrr %0, pmpaddr3" : "=r"(result));
  707. return (result);
  708. }
  709. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR4(void)
  710. {
  711. uint64_t result;
  712. __ASM volatile("csrr %0, pmpaddr4" : "=r"(result));
  713. return (result);
  714. }
  715. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR5(void)
  716. {
  717. uint64_t result;
  718. __ASM volatile("csrr %0, pmpaddr5" : "=r"(result));
  719. return (result);
  720. }
  721. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR6(void)
  722. {
  723. uint64_t result;
  724. __ASM volatile("csrr %0, pmpaddr6" : "=r"(result));
  725. return (result);
  726. }
  727. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR7(void)
  728. {
  729. uint64_t result;
  730. __ASM volatile("csrr %0, pmpaddr7" : "=r"(result));
  731. return (result);
  732. }
  733. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR8(void)
  734. {
  735. uint64_t result;
  736. __ASM volatile("csrr %0, pmpaddr8" : "=r"(result));
  737. return (result);
  738. }
  739. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR9(void)
  740. {
  741. uint64_t result;
  742. __ASM volatile("csrr %0, pmpaddr9" : "=r"(result));
  743. return (result);
  744. }
  745. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR10(void)
  746. {
  747. uint64_t result;
  748. __ASM volatile("csrr %0, pmpaddr10" : "=r"(result));
  749. return (result);
  750. }
  751. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR11(void)
  752. {
  753. uint64_t result;
  754. __ASM volatile("csrr %0, pmpaddr11" : "=r"(result));
  755. return (result);
  756. }
  757. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR12(void)
  758. {
  759. uint64_t result;
  760. __ASM volatile("csrr %0, pmpaddr12" : "=r"(result));
  761. return (result);
  762. }
  763. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR13(void)
  764. {
  765. uint64_t result;
  766. __ASM volatile("csrr %0, pmpaddr13" : "=r"(result));
  767. return (result);
  768. }
  769. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR14(void)
  770. {
  771. uint64_t result;
  772. __ASM volatile("csrr %0, pmpaddr14" : "=r"(result));
  773. return (result);
  774. }
  775. __ALWAYS_STATIC_INLINE uint64_t __get_PMPADDR15(void)
  776. {
  777. uint64_t result;
  778. __ASM volatile("csrr %0, pmpaddr15" : "=r"(result));
  779. return (result);
  780. }
  781. /**
  782. \brief Get PMPADDRx Register by index
  783. \details Returns the content of the PMPADDRx Register.
  784. \param [in] idx PMP region index
  785. \return PMPADDRx Register value
  786. */
  787. __STATIC_INLINE uint64_t __get_PMPADDRx(uint64_t idx)
  788. {
  789. switch (idx) {
  790. case 0:
  791. return __get_PMPADDR0();
  792. case 1:
  793. return __get_PMPADDR1();
  794. case 2:
  795. return __get_PMPADDR2();
  796. case 3:
  797. return __get_PMPADDR3();
  798. case 4:
  799. return __get_PMPADDR4();
  800. case 5:
  801. return __get_PMPADDR5();
  802. case 6:
  803. return __get_PMPADDR6();
  804. case 7:
  805. return __get_PMPADDR7();
  806. case 8:
  807. return __get_PMPADDR8();
  808. case 9:
  809. return __get_PMPADDR9();
  810. case 10:
  811. return __get_PMPADDR10();
  812. case 11:
  813. return __get_PMPADDR11();
  814. case 12:
  815. return __get_PMPADDR12();
  816. case 13:
  817. return __get_PMPADDR13();
  818. case 14:
  819. return __get_PMPADDR14();
  820. case 15:
  821. return __get_PMPADDR15();
  822. default:
  823. return 0;
  824. }
  825. }
  826. /**
  827. \brief Set PMPADDRx
  828. \details Writes the given value to the PMPADDRx Register.
  829. \param [in] pmpaddr PMPADDRx Register value to set
  830. */
  831. __ALWAYS_STATIC_INLINE void __set_PMPADDR0(uint64_t pmpaddr)
  832. {
  833. __ASM volatile("csrw pmpaddr0, %0" : : "r"(pmpaddr));
  834. }
  835. __ALWAYS_STATIC_INLINE void __set_PMPADDR1(uint64_t pmpaddr)
  836. {
  837. __ASM volatile("csrw pmpaddr1, %0" : : "r"(pmpaddr));
  838. }
  839. __ALWAYS_STATIC_INLINE void __set_PMPADDR2(uint64_t pmpaddr)
  840. {
  841. __ASM volatile("csrw pmpaddr2, %0" : : "r"(pmpaddr));
  842. }
  843. __ALWAYS_STATIC_INLINE void __set_PMPADDR3(uint64_t pmpaddr)
  844. {
  845. __ASM volatile("csrw pmpaddr3, %0" : : "r"(pmpaddr));
  846. }
  847. __ALWAYS_STATIC_INLINE void __set_PMPADDR4(uint64_t pmpaddr)
  848. {
  849. __ASM volatile("csrw pmpaddr4, %0" : : "r"(pmpaddr));
  850. }
  851. __ALWAYS_STATIC_INLINE void __set_PMPADDR5(uint64_t pmpaddr)
  852. {
  853. __ASM volatile("csrw pmpaddr5, %0" : : "r"(pmpaddr));
  854. }
  855. __ALWAYS_STATIC_INLINE void __set_PMPADDR6(uint64_t pmpaddr)
  856. {
  857. __ASM volatile("csrw pmpaddr6, %0" : : "r"(pmpaddr));
  858. }
  859. __ALWAYS_STATIC_INLINE void __set_PMPADDR7(uint64_t pmpaddr)
  860. {
  861. __ASM volatile("csrw pmpaddr7, %0" : : "r"(pmpaddr));
  862. }
  863. __ALWAYS_STATIC_INLINE void __set_PMPADDR8(uint64_t pmpaddr)
  864. {
  865. __ASM volatile("csrw pmpaddr8, %0" : : "r"(pmpaddr));
  866. }
  867. __ALWAYS_STATIC_INLINE void __set_PMPADDR9(uint64_t pmpaddr)
  868. {
  869. __ASM volatile("csrw pmpaddr9, %0" : : "r"(pmpaddr));
  870. }
  871. __ALWAYS_STATIC_INLINE void __set_PMPADDR10(uint64_t pmpaddr)
  872. {
  873. __ASM volatile("csrw pmpaddr10, %0" : : "r"(pmpaddr));
  874. }
  875. __ALWAYS_STATIC_INLINE void __set_PMPADDR11(uint64_t pmpaddr)
  876. {
  877. __ASM volatile("csrw pmpaddr11, %0" : : "r"(pmpaddr));
  878. }
  879. __ALWAYS_STATIC_INLINE void __set_PMPADDR12(uint64_t pmpaddr)
  880. {
  881. __ASM volatile("csrw pmpaddr12, %0" : : "r"(pmpaddr));
  882. }
  883. __ALWAYS_STATIC_INLINE void __set_PMPADDR13(uint64_t pmpaddr)
  884. {
  885. __ASM volatile("csrw pmpaddr13, %0" : : "r"(pmpaddr));
  886. }
  887. __ALWAYS_STATIC_INLINE void __set_PMPADDR14(uint64_t pmpaddr)
  888. {
  889. __ASM volatile("csrw pmpaddr14, %0" : : "r"(pmpaddr));
  890. }
  891. __ALWAYS_STATIC_INLINE void __set_PMPADDR15(uint64_t pmpaddr)
  892. {
  893. __ASM volatile("csrw pmpaddr15, %0" : : "r"(pmpaddr));
  894. }
  895. /**
  896. \brief Set PMPADDRx by index
  897. \details Writes the given value to the PMPADDRx Register.
  898. \param [in] idx PMP region index
  899. \param [in] pmpaddr PMPADDRx Register value to set
  900. */
  901. __STATIC_INLINE void __set_PMPADDRx(uint64_t idx, uint64_t pmpaddr)
  902. {
  903. switch (idx) {
  904. case 0:
  905. __set_PMPADDR0(pmpaddr);
  906. break;
  907. case 1:
  908. __set_PMPADDR1(pmpaddr);
  909. break;
  910. case 2:
  911. __set_PMPADDR2(pmpaddr);
  912. break;
  913. case 3:
  914. __set_PMPADDR3(pmpaddr);
  915. break;
  916. case 4:
  917. __set_PMPADDR4(pmpaddr);
  918. break;
  919. case 5:
  920. __set_PMPADDR5(pmpaddr);
  921. break;
  922. case 6:
  923. __set_PMPADDR6(pmpaddr);
  924. break;
  925. case 7:
  926. __set_PMPADDR7(pmpaddr);
  927. break;
  928. case 8:
  929. __set_PMPADDR8(pmpaddr);
  930. break;
  931. case 9:
  932. __set_PMPADDR9(pmpaddr);
  933. break;
  934. case 10:
  935. __set_PMPADDR10(pmpaddr);
  936. break;
  937. case 11:
  938. __set_PMPADDR11(pmpaddr);
  939. break;
  940. case 12:
  941. __set_PMPADDR12(pmpaddr);
  942. break;
  943. case 13:
  944. __set_PMPADDR13(pmpaddr);
  945. break;
  946. case 14:
  947. __set_PMPADDR14(pmpaddr);
  948. break;
  949. case 15:
  950. __set_PMPADDR15(pmpaddr);
  951. break;
  952. default:
  953. return;
  954. }
  955. }
  956. /**
  957. \brief Get MCOUNTEREN
  958. \details Returns the content of the MCOUNTEREN Register.
  959. \return MCOUNTEREN Register value
  960. */
  961. __ALWAYS_STATIC_INLINE uint64_t __get_MCOUNTEREN(void)
  962. {
  963. uint32_t result;
  964. __ASM volatile("csrr %0, mcounteren" : "=r"(result));
  965. return (result);
  966. }
  967. /**
  968. \brief Set MCOUNTEREN
  969. \details Writes the given value to the MCOUNTEREN Register.
  970. \param [in] mcounteren MCOUNTEREN Register value to set
  971. */
  972. __ALWAYS_STATIC_INLINE void __set_MCOUNTEREN(uint32_t mcounteren)
  973. {
  974. __ASM volatile("csrw mcounteren, %0" : : "r"(mcounteren));
  975. }
  976. /**
  977. \brief Get MCOUNTERWEN
  978. \details Returns the content of the MCOUNTERWEN Register.
  979. \return MCOUNTERWEN Register value
  980. */
  981. __ALWAYS_STATIC_INLINE uint64_t __get_MCOUNTERWEN(void)
  982. {
  983. uint32_t result;
  984. __ASM volatile("csrr %0, mcounterwen" : "=r"(result));
  985. return (result);
  986. }
  987. /**
  988. \brief Set MCOUNTERWEN
  989. \details Writes the given value to the MCOUNTERWEN Register.
  990. \param [in] mcounterwen MCOUNTERWEN Register value to set
  991. */
  992. __ALWAYS_STATIC_INLINE void __set_MCOUNTERWEN(uint32_t mcounterwen)
  993. {
  994. __ASM volatile("csrw mcounterwen, %0" : : "r"(mcounterwen));
  995. }
  996. /**
  997. \brief Set MEDELEG Register
  998. \details Writes the given value to the MEDELEG Register.
  999. */
  1000. __ALWAYS_STATIC_INLINE void __set_MEDELEG(uint64_t x)
  1001. {
  1002. asm volatile("csrw medeleg, %0"::"r"(x));
  1003. }
  1004. /**
  1005. \brief Set MEDELEG Register
  1006. \details Writes the given value to the MEDELEG Register.
  1007. */
  1008. __ALWAYS_STATIC_INLINE uint64_t __get_MEDELEG(void)
  1009. {
  1010. uint64_t x;
  1011. asm volatile("csrr %0, medeleg":"=r"(x));
  1012. return x;
  1013. }
  1014. /**
  1015. \brief Set MIDELEG Register
  1016. \details Writes the given value to the MIDELEG Register.
  1017. */
  1018. __ALWAYS_STATIC_INLINE void __set_MIDELEG(uint64_t x)
  1019. {
  1020. asm volatile("csrw mideleg, %0"::"r"(x));
  1021. }
  1022. /**
  1023. \brief Get MIDELEG Register
  1024. \details Returns the content of the MIDELEG Register.
  1025. \return MIDELEG Register value
  1026. */
  1027. __ALWAYS_STATIC_INLINE uint64_t __get_MIDELEG(void)
  1028. {
  1029. uint64_t x;
  1030. asm volatile("csrr %0, mideleg":"=r"(x));
  1031. return x;
  1032. }
  1033. /**
  1034. \brief Set SSTATUS Register
  1035. \details Writes the given value to the SSTATUS Register.
  1036. */
  1037. __ALWAYS_STATIC_INLINE void __set_SSTATUS(uint64_t x)
  1038. {
  1039. asm volatile("csrw sstatus, %0"::"r"(x));
  1040. }
  1041. /**
  1042. \brief Get SSTATUS Register
  1043. \details Returns the content of the SSTATUS Register.
  1044. \return SSTATUS Register value
  1045. */
  1046. __ALWAYS_STATIC_INLINE uint64_t __get_SSTATUS(void)
  1047. {
  1048. uint64_t x;
  1049. asm volatile("csrr %0, sstatus":"=r"(x));
  1050. return x;
  1051. }
  1052. /**
  1053. \brief Set SXSTATUS Register
  1054. \details Writes the given value to the SXSTATUS Register.
  1055. */
  1056. __ALWAYS_STATIC_INLINE void __set_SXSTATUS(uint64_t x)
  1057. {
  1058. asm volatile("csrw sxstatus, %0"::"r"(x));
  1059. }
  1060. /**
  1061. \brief Get SXSTATUS Register
  1062. \details Returns the content of the SXSTATUS Register.
  1063. \return SXSTATUS Register value
  1064. */
  1065. __ALWAYS_STATIC_INLINE uint64_t __get__SXSTATUS(void)
  1066. {
  1067. uint64_t x;
  1068. asm volatile("csrr %0, sxstatus":"=r"(x));
  1069. return x;
  1070. }
  1071. /**
  1072. \brief Set SIE Register
  1073. \details Writes the given value to the SIE Register.
  1074. */
  1075. __ALWAYS_STATIC_INLINE void __set_SIE(uint64_t x)
  1076. {
  1077. asm volatile("csrw sie, %0"::"r"(x));
  1078. }
  1079. /**
  1080. \brief Get SIE Register
  1081. \details Returns the content of the SIE Register.
  1082. \return SIE Register value
  1083. */
  1084. __ALWAYS_STATIC_INLINE uint64_t __get_SIE(void)
  1085. {
  1086. uint64_t x;
  1087. asm volatile("csrr %0, sie":"=r"(x));
  1088. return x;
  1089. }
  1090. /**
  1091. \brief Set STVAC Register
  1092. \details Writes the given value to the STVEC Register.
  1093. */
  1094. __ALWAYS_STATIC_INLINE void __set_STVEC(uint64_t x)
  1095. {
  1096. asm volatile("csrw stvec, %0"::"r"(x));
  1097. }
  1098. /**
  1099. \brief Get STVAC Register
  1100. \details Returns the content of the STVAC Register.
  1101. \return STVAC Register value
  1102. */
  1103. __ALWAYS_STATIC_INLINE uint64_t __get_STVEC(void)
  1104. {
  1105. uint64_t x;
  1106. asm volatile("csrr %0, stvec":"=r"(x));
  1107. return x;
  1108. }
  1109. /**
  1110. \brief Enable interrupts and exceptions
  1111. \details Enables interrupts and exceptions by setting the IE-bit and EE-bit in the PSR.
  1112. Can only be executed in Privileged modes.
  1113. */
  1114. __ALWAYS_STATIC_INLINE void __enable_excp_irq(void)
  1115. {
  1116. #ifdef CONFIG_MMU
  1117. __enable_supervisor_irq();
  1118. #else
  1119. __enable_irq();
  1120. #endif
  1121. }
  1122. /**
  1123. \brief Disable interrupts and exceptions
  1124. \details Disables interrupts and exceptions by clearing the IE-bit and EE-bit in the PSR.
  1125. Can only be executed in Privileged modes.
  1126. */
  1127. __ALWAYS_STATIC_INLINE void __disable_excp_irq(void)
  1128. {
  1129. #ifdef CONFIG_MMU
  1130. __disable_supervisor_irq();
  1131. #else
  1132. __disable_irq();
  1133. #endif
  1134. }
/* Helpers for building GCC inline-assembly operand lists:
   output ("=r") and input ("r") register constraints. */
#define __CSI_GCC_OUT_REG(r) "=r" (r)
#define __CSI_GCC_USE_REG(r) "r" (r)
  1137. /**
  1138. \brief No Operation
  1139. \details No Operation does nothing. This instruction can be used for code alignment purposes.
  1140. */
  1141. __ALWAYS_STATIC_INLINE void __NOP(void)
  1142. {
  1143. __ASM volatile("nop");
  1144. }
  1145. /**
  1146. \brief return from M-MODE
  1147. \details return from M-MODE.
  1148. */
  1149. __ALWAYS_STATIC_INLINE void __MRET(void)
  1150. {
  1151. __ASM volatile("mret");
  1152. }
  1153. /**
  1154. \brief Wait For Interrupt
  1155. \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
  1156. */
  1157. __ALWAYS_STATIC_INLINE void __WFI(void)
  1158. {
  1159. __ASM volatile("wfi");
  1160. }
  1161. /**
  1162. \brief Wait For Interrupt
  1163. \details Wait For Interrupt is a hint instruction that suspends execution until one interrupt occurs.
  1164. */
  1165. __ALWAYS_STATIC_INLINE void __WAIT(void)
  1166. {
  1167. __ASM volatile("wfi");
  1168. }
  1169. /**
  1170. \brief Doze For Interrupt
  1171. \details Doze For Interrupt is a hint instruction that suspends execution until one interrupt occurs.
  1172. */
  1173. __ALWAYS_STATIC_INLINE void __DOZE(void)
  1174. {
  1175. __ASM volatile("wfi");
  1176. }
  1177. /**
  1178. \brief Stop For Interrupt
  1179. \details Stop For Interrupt is a hint instruction that suspends execution until one interrupt occurs.
  1180. */
  1181. __ALWAYS_STATIC_INLINE void __STOP(void)
  1182. {
  1183. __ASM volatile("wfi");
  1184. }
  1185. /**
  1186. \brief Instruction Synchronization Barrier
  1187. \details Instruction Synchronization Barrier flushes the pipeline in the processor,
  1188. so that all instructions following the ISB are fetched from cache or memory,
  1189. after the instruction has been completed.
  1190. */
  1191. __ALWAYS_STATIC_INLINE void __ISB(void)
  1192. {
  1193. __ASM volatile("fence.i");
  1194. __ASM volatile("fence r, r");
  1195. }
  1196. /**
  1197. \brief Data Synchronization Barrier
  1198. \details Acts as a special kind of Data Memory Barrier.
  1199. It completes when all explicit memory accesses before this instruction complete.
  1200. */
  1201. __ALWAYS_STATIC_INLINE void __DSB(void)
  1202. {
  1203. __ASM volatile("fence iorw, iorw");
  1204. __ASM volatile("sync");
  1205. }
  1206. /**
  1207. \brief Data Memory Barrier
  1208. \details Ensures the apparent order of the explicit memory operations before
  1209. and after the instruction, without ensuring their completion.
  1210. */
  1211. __ALWAYS_STATIC_INLINE void __DMB(void)
  1212. {
  1213. __ASM volatile("fence rw, rw");
  1214. }
  1215. /**
  1216. \brief Data Synchronization Barrier
  1217. \details Acts as a special kind of Data Memory Barrier.
  1218. It completes when all explicit memory accesses before this instruction complete.
  1219. */
  1220. __ALWAYS_STATIC_INLINE void __SYNC_IS(void)
  1221. {
  1222. __ASM volatile("sync.is");
  1223. }
  1224. /**
  1225. \brief Invalid all icache
  1226. \details invalid all icache.
  1227. */
  1228. __ALWAYS_STATIC_INLINE void __ICACHE_IALL(void)
  1229. {
  1230. __ASM volatile("icache.iall");
  1231. }
  1232. /**
  1233. \brief Invalid all cpu icache
  1234. \details invalid all cpu icache.
  1235. */
  1236. __ALWAYS_STATIC_INLINE void __ICACHE_IALLS(void)
  1237. {
  1238. __ASM volatile("icache.ialls");
  1239. }
  1240. /**
  1241. \brief Invalid Icache by phy addr
  1242. \details Invalid Icache by phy addr.
  1243. \param [in] addr operate addr
  1244. */
  1245. __ALWAYS_STATIC_INLINE void __ICACHE_IPA(uint64_t addr)
  1246. {
  1247. __ASM volatile("icache.ipa %0" : : "r"(addr));
  1248. }
  1249. /**
  1250. \brief Invalid Icache by virt address
  1251. \details Invalid Icache by virt address
  1252. \param [in] addr operate addr
  1253. */
  1254. __ALWAYS_STATIC_INLINE void __ICACHE_IVA(uint64_t addr)
  1255. {
  1256. __ASM volatile("icache.iva %0" : : "r"(addr));
  1257. }
  1258. /**
  1259. \brief Invalid all dcache
  1260. \details invalid all dcache.
  1261. */
  1262. __ALWAYS_STATIC_INLINE void __DCACHE_IALL(void)
  1263. {
  1264. __ASM volatile("dcache.iall");
  1265. }
  1266. /**
  1267. \brief Clear all dcache
  1268. \details clear all dcache.
  1269. */
  1270. __ALWAYS_STATIC_INLINE void __DCACHE_CALL(void)
  1271. {
  1272. __ASM volatile("dcache.call");
  1273. }
  1274. /**
  1275. \brief Clear&invalid all dcache
  1276. \details clear & invalid all dcache.
  1277. */
  1278. __ALWAYS_STATIC_INLINE void __DCACHE_CIALL(void)
  1279. {
  1280. __ASM volatile("dcache.ciall");
  1281. }
#if (__L2CACHE_PRESENT == 1U)
/* L2 maintenance ops, only compiled when the device header declares an L2
   cache. l2cache.* are vendor extension opcodes. */
/**
  \brief   Invalid L2 cache
  \details invalid L2 cache.
 */
__ALWAYS_STATIC_INLINE void __L2CACHE_IALL(void)
{
    __ASM volatile("l2cache.iall");
}
/**
  \brief   Clear L2cache
  \details clear (write back) L2cache.
 */
__ALWAYS_STATIC_INLINE void __L2CACHE_CALL(void)
{
    __ASM volatile("l2cache.call");
}
/**
  \brief   Clear&invalid L2cache
  \details clear (write back) & invalid L2cache.
 */
__ALWAYS_STATIC_INLINE void __L2CACHE_CIALL(void)
{
    __ASM volatile("l2cache.ciall");
}
#endif
  1308. /**
  1309. \brief Invalid Dcache by addr
  1310. \details Invalid Dcache by addr.
  1311. \param [in] addr operate addr
  1312. */
  1313. __ALWAYS_STATIC_INLINE void __DCACHE_IPA(uint64_t addr)
  1314. {
  1315. __ASM volatile("dcache.ipa %0" : : "r"(addr));
  1316. }
  1317. /**
  1318. \brief Invalid Dcache by virt addr
  1319. \details Invalid Dcache by virt addr.
  1320. \param [in] addr operate addr
  1321. */
  1322. __ALWAYS_STATIC_INLINE void __DCACHE_IVA(uint64_t addr)
  1323. {
  1324. __ASM volatile("dcache.iva %0" : : "r"(addr));
  1325. }
  1326. /**
  1327. \brief Clear Dcache by addr
  1328. \details Clear Dcache by addr.
  1329. \param [in] addr operate addr
  1330. */
  1331. __ALWAYS_STATIC_INLINE void __DCACHE_CPA(uint64_t addr)
  1332. {
  1333. __ASM volatile("dcache.cpa %0" : : "r"(addr));
  1334. }
  1335. /**
  1336. \brief Clear Dcache by virt addr
  1337. \details Clear Dcache by virt addr.
  1338. \param [in] addr operate addr
  1339. */
  1340. __ALWAYS_STATIC_INLINE void __DCACHE_CVA(uint64_t addr)
  1341. {
  1342. __ASM volatile("dcache.cva %0" : : "r"(addr));
  1343. }
  1344. /**
  1345. \brief Clear & Invalid Dcache by addr
  1346. \details Clear & Invalid Dcache by addr.
  1347. \param [in] addr operate addr
  1348. */
  1349. __ALWAYS_STATIC_INLINE void __DCACHE_CIPA(uint64_t addr)
  1350. {
  1351. __ASM volatile("dcache.cipa %0" : : "r"(addr));
  1352. }
  1353. /**
  1354. \brief Clear & Invalid Dcache by virt addr
  1355. \details Clear & Invalid Dcache by virt addr.
  1356. \param [in] addr operate addr
  1357. */
  1358. __ALWAYS_STATIC_INLINE void __DCACHE_CIVA(uint64_t addr)
  1359. {
  1360. __ASM volatile("dcache.civa %0" : : "r"(addr));
  1361. }
  1362. /**
  1363. \brief Reverse byte order (32 bit)
  1364. \details Reverses the byte order in integer value.
  1365. \param [in] value Value to reverse
  1366. \return Reversed value
  1367. */
  1368. __ALWAYS_STATIC_INLINE uint64_t __REV(uint64_t value)
  1369. {
  1370. return __builtin_bswap32(value);
  1371. }
  1372. /**
  1373. \brief Reverse byte order (16 bit)
  1374. \details Reverses the byte order in two unsigned short values.
  1375. \param [in] value Value to reverse
  1376. \return Reversed value
  1377. */
  1378. __ALWAYS_STATIC_INLINE uint32_t __REV16(uint32_t value)
  1379. {
  1380. uint32_t result;
  1381. result = ((value & 0xFF000000) >> 8) | ((value & 0x00FF0000) << 8) |
  1382. ((value & 0x0000FF00) >> 8) | ((value & 0x000000FF) << 8);
  1383. return (result);
  1384. }
  1385. /**
  1386. \brief Reverse byte order in signed short value
  1387. \details Reverses the byte order in a signed short value with sign extension to integer.
  1388. \param [in] value Value to reverse
  1389. \return Reversed value
  1390. */
  1391. __ALWAYS_STATIC_INLINE int32_t __REVSH(int32_t value)
  1392. {
  1393. return (short)(((value & 0xFF00) >> 8) | ((value & 0x00FF) << 8));
  1394. }
  1395. /**
  1396. \brief Rotate Right in unsigned value (32 bit)
  1397. \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
  1398. \param [in] op1 Value to rotate
  1399. \param [in] op2 Number of Bits to rotate
  1400. \return Rotated value
  1401. */
  1402. __ALWAYS_STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
  1403. {
  1404. return (op1 >> op2) | (op1 << (32U - op2));
  1405. }
  1406. /**
  1407. \brief Breakpoint
  1408. \details Causes the processor to enter Debug state
  1409. Debug tools can use this to investigate system state when the instruction at a particular address is reached.
  1410. */
  1411. __ALWAYS_STATIC_INLINE void __BKPT(void)
  1412. {
  1413. __ASM volatile("ebreak");
  1414. }
  1415. /**
  1416. \brief Reverse bit order of value
  1417. \details Reverses the bit order of the given value.
  1418. \param [in] value Value to reverse
  1419. \return Reversed value
  1420. */
  1421. __ALWAYS_STATIC_INLINE uint32_t __RBIT(uint32_t value)
  1422. {
  1423. uint32_t result;
  1424. int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */
  1425. result = value; /* r will be reversed bits of v; first get LSB of v */
  1426. for (value >>= 1U; value; value >>= 1U) {
  1427. result <<= 1U;
  1428. result |= value & 1U;
  1429. s--;
  1430. }
  1431. result <<= s; /* shift when v's highest bits are zero */
  1432. return (result);
  1433. }
  1434. /**
  1435. \brief Count leading zeros
  1436. \details Counts the number of leading zeros of a data value.
  1437. \param [in] value Value to count the leading zeros
  1438. \return number of leading zeros in value
  1439. */
  1440. #define __CLZ __builtin_clz
  1441. /**
  1442. \details This function saturates a signed value.
  1443. \param [in] x Value to be saturated
  1444. \param [in] y Bit position to saturate to [1..32]
  1445. \return Saturated value.
  1446. */
  1447. __ALWAYS_STATIC_INLINE int32_t __SSAT(int32_t x, uint32_t y)
  1448. {
  1449. int32_t posMax, negMin;
  1450. uint32_t i;
  1451. posMax = 1;
  1452. for (i = 0; i < (y - 1); i++) {
  1453. posMax = posMax * 2;
  1454. }
  1455. if (x > 0) {
  1456. posMax = (posMax - 1);
  1457. if (x > posMax) {
  1458. x = posMax;
  1459. }
  1460. // x &= (posMax * 2 + 1);
  1461. } else {
  1462. negMin = -posMax;
  1463. if (x < negMin) {
  1464. x = negMin;
  1465. }
  1466. // x &= (posMax * 2 - 1);
  1467. }
  1468. return (x);
  1469. }
  1470. /**
  1471. \brief Unsigned Saturate
  1472. \details Saturates an unsigned value.
  1473. \param [in] value Value to be saturated
  1474. \param [in] sat Bit position to saturate to (0..31)
  1475. \return Saturated value
  1476. */
  1477. __ALWAYS_STATIC_INLINE uint32_t __USAT(uint32_t value, uint32_t sat)
  1478. {
  1479. uint32_t result;
  1480. if ((((0xFFFFFFFF >> sat) << sat) & value) != 0) {
  1481. result = 0xFFFFFFFF >> (32 - sat);
  1482. } else {
  1483. result = value;
  1484. }
  1485. return (result);
  1486. }
  1487. /**
  1488. \brief Unsigned Saturate for internal use
  1489. \details Saturates an unsigned value, should not call directly.
  1490. \param [in] value Value to be saturated
  1491. \param [in] sat Bit position to saturate to (0..31)
  1492. \return Saturated value
  1493. */
  1494. __ALWAYS_STATIC_INLINE uint32_t __IUSAT(uint32_t value, uint32_t sat)
  1495. {
  1496. uint32_t result;
  1497. if (value & 0x80000000) { /* only overflow set bit-31 */
  1498. result = 0;
  1499. } else if ((((0xFFFFFFFF >> sat) << sat) & value) != 0) {
  1500. result = 0xFFFFFFFF >> (32 - sat);
  1501. } else {
  1502. result = value;
  1503. }
  1504. return (result);
  1505. }
  1506. /**
  1507. \brief Rotate Right with Extend
  1508. \details This function moves each bit of a bitstring right by one bit.
  1509. The carry input is shifted in at the left end of the bitstring.
  1510. \note carry input will always 0.
  1511. \param [in] op1 Value to rotate
  1512. \return Rotated value
  1513. */
  1514. __ALWAYS_STATIC_INLINE uint32_t __RRX(uint32_t op1)
  1515. {
  1516. return 0;
  1517. }
  1518. /**
  1519. \brief LDRT Unprivileged (8 bit)
  1520. \details Executes a Unprivileged LDRT instruction for 8 bit value.
  1521. \param [in] addr Pointer to location
  1522. \return value of type uint8_t at (*ptr)
  1523. */
  1524. __ALWAYS_STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
  1525. {
  1526. uint32_t result;
  1527. __ASM volatile("lb %0, 0(%1)" : "=r"(result) : "r"(addr));
  1528. return ((uint8_t) result); /* Add explicit type cast here */
  1529. }
  1530. /**
  1531. \brief LDRT Unprivileged (16 bit)
  1532. \details Executes a Unprivileged LDRT instruction for 16 bit values.
  1533. \param [in] addr Pointer to location
  1534. \return value of type uint16_t at (*ptr)
  1535. */
  1536. __ALWAYS_STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
  1537. {
  1538. uint32_t result;
  1539. __ASM volatile("lh %0, 0(%1)" : "=r"(result) : "r"(addr));
  1540. return ((uint16_t) result); /* Add explicit type cast here */
  1541. }
  1542. /**
  1543. \brief LDRT Unprivileged (32 bit)
  1544. \details Executes a Unprivileged LDRT instruction for 32 bit values.
  1545. \param [in] addr Pointer to location
  1546. \return value of type uint32_t at (*ptr)
  1547. */
  1548. __ALWAYS_STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
  1549. {
  1550. uint32_t result;
  1551. __ASM volatile("lw %0, 0(%1)" : "=r"(result) : "r"(addr));
  1552. return (result);
  1553. }
  1554. /**
  1555. \brief STRT Unprivileged (8 bit)
  1556. \details Executes a Unprivileged STRT instruction for 8 bit values.
  1557. \param [in] value Value to store
  1558. \param [in] addr Pointer to location
  1559. */
  1560. __ALWAYS_STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
  1561. {
  1562. __ASM volatile("sb %1, 0(%0)" :: "r"(addr), "r"((uint32_t)value) : "memory");
  1563. }
  1564. /**
  1565. \brief STRT Unprivileged (16 bit)
  1566. \details Executes a Unprivileged STRT instruction for 16 bit values.
  1567. \param [in] value Value to store
  1568. \param [in] addr Pointer to location
  1569. */
  1570. __ALWAYS_STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
  1571. {
  1572. __ASM volatile("sh %1, 0(%0)" :: "r"(addr), "r"((uint32_t)value) : "memory");
  1573. }
  1574. /**
  1575. \brief STRT Unprivileged (32 bit)
  1576. \details Executes a Unprivileged STRT instruction for 32 bit values.
  1577. \param [in] value Value to store
  1578. \param [in] addr Pointer to location
  1579. */
  1580. __ALWAYS_STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
  1581. {
  1582. __ASM volatile("sw %1, 0(%0)" :: "r"(addr), "r"(value) : "memory");
  1583. }
  1584. /*@}*/ /* end of group CSI_Core_InstructionInterface */
  1585. /* ################### Compiler specific Intrinsics ########################### */
  1586. /** \defgroup CSI_SIMD_intrinsics CSI SIMD Intrinsics
  1587. Access to dedicated SIMD instructions \n
  1588. Single Instruction Multiple Data (SIMD) extensions are provided to simplify development of application software. SIMD extensions increase the processing capability without materially increasing the power consumption. The SIMD extensions are completely transparent to the operating system (OS), allowing existing OS ports to be used.
  1589. @{
  1590. */
  1591. /**
  1592. \brief Halfword packing instruction. Combines bits[15:0] of val1 with bits[31:16]
  1593. of val2 levitated with the val3.
  1594. \details Combine a halfword from one register with a halfword from another register.
  1595. The second argument can be left-shifted before extraction of the halfword.
  1596. \param [in] val1 first 16-bit operands
  1597. \param [in] val2 second 16-bit operands
  1598. \param [in] val3 value for left-shifting val2. Value range [0..31].
  1599. \return the combination of halfwords.
  1600. \remark
  1601. res[15:0] = val1[15:0] \n
  1602. res[31:16] = val2[31:16] << val3
  1603. */
  1604. __ALWAYS_STATIC_INLINE uint32_t __PKHBT(uint32_t val1, uint32_t val2, uint32_t val3)
  1605. {
  1606. return ((((int32_t)(val1) << 0) & (int32_t)0x0000FFFF) | (((int32_t)(val2) << val3) & (int32_t)0xFFFF0000));
  1607. }
  1608. /**
  1609. \brief Halfword packing instruction. Combines bits[31:16] of val1 with bits[15:0]
  1610. of val2 right-shifted with the val3.
  1611. \details Combine a halfword from one register with a halfword from another register.
  1612. The second argument can be right-shifted before extraction of the halfword.
  1613. \param [in] val1 first 16-bit operands
  1614. \param [in] val2 second 16-bit operands
  1615. \param [in] val3 value for right-shifting val2. Value range [1..32].
  1616. \return the combination of halfwords.
  1617. \remark
  1618. res[15:0] = val2[15:0] >> val3 \n
  1619. res[31:16] = val1[31:16]
  1620. */
  1621. __ALWAYS_STATIC_INLINE uint32_t __PKHTB(uint32_t val1, uint32_t val2, uint32_t val3)
  1622. {
  1623. return ((((int32_t)(val1) << 0) & (int32_t)0xFFFF0000) | (((int32_t)(val2) >> val3) & (int32_t)0x0000FFFF));
  1624. }
/**
  \brief   Dual 16-bit signed saturate.
  \details This function saturates two signed 16-bit values.
  \param [in] x  two signed 16-bit values to be saturated.
  \param [in] y  bit position for saturation, an integral constant expression in the range 1 to 16.
  \return  the signed saturation of the low halfword in x, saturated to the bit position
           specified in y and returned in the low halfword of the return value.\n
           the signed saturation of the high halfword in x, saturated to the bit position
           specified in y and returned in the high halfword of the return value.
 */
__ALWAYS_STATIC_INLINE uint32_t __SSAT16(int32_t x, const uint32_t y)
{
    int32_t r = 0, s = 0;
    /* Sign-extend each halfword (shift pair), saturate it to y bits,
       then mask back to its 16-bit lane. */
    r = __SSAT((((int32_t)x << 16) >> 16), y) & (int32_t)0x0000FFFF;
    s = __SSAT((((int32_t)x) >> 16), y) & (int32_t)0x0000FFFF;
    return ((uint32_t)((s << 16) | (r)));
}
  1643. /**
  1644. \brief Dual 16-bit unsigned saturate.
  1645. \details This function enables you to saturate two signed 16-bit values to a selected unsigned range.
  1646. \param [in] x two signed 16-bit values to be saturated.
  1647. \param [in] y bit position for saturation, an integral constant expression in the range 1 to 16.
  1648. \return the saturation of the two signed 16-bit values, as non-negative values:
  1649. the saturation of the low halfword in val1, saturated to the bit position specified in
  1650. val2 and returned in the low halfword of the return value.\n
  1651. the saturation of the high halfword in val1, saturated to the bit position specified in
  1652. val2 and returned in the high halfword of the return value.
  1653. */
  1654. __ALWAYS_STATIC_INLINE uint32_t __USAT16(uint32_t x, const uint32_t y)
  1655. {
  1656. int32_t r = 0, s = 0;
  1657. r = __IUSAT(((x << 16) >> 16), y) & 0x0000FFFF;
  1658. s = __IUSAT(((x) >> 16), y) & 0x0000FFFF;
  1659. return ((s << 16) | (r));
  1660. }
  1661. /**
  1662. \brief Quad 8-bit saturating addition.
  1663. \details This function enables you to perform four 8-bit integer additions,
  1664. saturating the results to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
  1665. \param [in] x first four 8-bit summands.
  1666. \param [in] y second four 8-bit summands.
  1667. \return the saturated addition of the first byte of each operand in the first byte of the return value.\n
  1668. the saturated addition of the second byte of each operand in the second byte of the return value.\n
  1669. the saturated addition of the third byte of each operand in the third byte of the return value.\n
  1670. the saturated addition of the fourth byte of each operand in the fourth byte of the return value.\n
  1671. The returned results are saturated to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
  1672. \remark
  1673. res[7:0] = val1[7:0] + val2[7:0] \n
  1674. res[15:8] = val1[15:8] + val2[15:8] \n
  1675. res[23:16] = val1[23:16] + val2[23:16] \n
  1676. res[31:24] = val1[31:24] + val2[31:24]
  1677. */
  1678. __ALWAYS_STATIC_INLINE uint32_t __QADD8(uint32_t x, uint32_t y)
  1679. {
  1680. int32_t r, s, t, u;
  1681. r = __SSAT(((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF;
  1682. s = __SSAT(((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF;
  1683. t = __SSAT(((((int32_t)x << 8) >> 24) + (((int32_t)y << 8) >> 24)), 8) & (int32_t)0x000000FF;
  1684. u = __SSAT(((((int32_t)x) >> 24) + (((int32_t)y) >> 24)), 8) & (int32_t)0x000000FF;
  1685. return ((uint32_t)((u << 24) | (t << 16) | (s << 8) | (r)));
  1686. }
  1687. /**
  1688. \brief Quad 8-bit unsigned saturating addition.
  1689. \details This function enables you to perform four unsigned 8-bit integer additions,
  1690. saturating the results to the 8-bit unsigned integer range 0 < x < 2^8 - 1.
  1691. \param [in] x first four 8-bit summands.
  1692. \param [in] y second four 8-bit summands.
  1693. \return the saturated addition of the first byte of each operand in the first byte of the return value.\n
  1694. the saturated addition of the second byte of each operand in the second byte of the return value.\n
  1695. the saturated addition of the third byte of each operand in the third byte of the return value.\n
  1696. the saturated addition of the fourth byte of each operand in the fourth byte of the return value.\n
  1697. The returned results are saturated to the 8-bit signed integer range 0 <= x <= 2^8 - 1.
  1698. \remark
  1699. res[7:0] = val1[7:0] + val2[7:0] \n
  1700. res[15:8] = val1[15:8] + val2[15:8] \n
  1701. res[23:16] = val1[23:16] + val2[23:16] \n
  1702. res[31:24] = val1[31:24] + val2[31:24]
  1703. */
  1704. __ALWAYS_STATIC_INLINE uint32_t __UQADD8(uint32_t x, uint32_t y)
  1705. {
  1706. int32_t r, s, t, u;
  1707. r = __IUSAT((((x << 24) >> 24) + ((y << 24) >> 24)), 8) & 0x000000FF;
  1708. s = __IUSAT((((x << 16) >> 24) + ((y << 16) >> 24)), 8) & 0x000000FF;
  1709. t = __IUSAT((((x << 8) >> 24) + ((y << 8) >> 24)), 8) & 0x000000FF;
  1710. u = __IUSAT((((x) >> 24) + ((y) >> 24)), 8) & 0x000000FF;
  1711. return ((u << 24) | (t << 16) | (s << 8) | (r));
  1712. }
  1713. /**
  1714. \brief Quad 8-bit signed addition.
  1715. \details This function performs four 8-bit signed integer additions.
  1716. \param [in] x first four 8-bit summands.
  1717. \param [in] y second four 8-bit summands.
  1718. \return the addition of the first bytes from each operand, in the first byte of the return value.\n
  1719. the addition of the second bytes of each operand, in the second byte of the return value.\n
  1720. the addition of the third bytes of each operand, in the third byte of the return value.\n
  1721. the addition of the fourth bytes of each operand, in the fourth byte of the return value.
  1722. \remark
  1723. res[7:0] = val1[7:0] + val2[7:0] \n
  1724. res[15:8] = val1[15:8] + val2[15:8] \n
  1725. res[23:16] = val1[23:16] + val2[23:16] \n
  1726. res[31:24] = val1[31:24] + val2[31:24]
  1727. */
  1728. __ALWAYS_STATIC_INLINE uint32_t __SADD8(uint32_t x, uint32_t y)
  1729. {
  1730. int32_t r, s, t, u;
  1731. r = ((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)) & (int32_t)0x000000FF;
  1732. s = ((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)) & (int32_t)0x000000FF;
  1733. t = ((((int32_t)x << 8) >> 24) + (((int32_t)y << 8) >> 24)) & (int32_t)0x000000FF;
  1734. u = ((((int32_t)x) >> 24) + (((int32_t)y) >> 24)) & (int32_t)0x000000FF;
  1735. return ((uint32_t)((u << 24) | (t << 16) | (s << 8) | (r)));
  1736. }
  1737. /**
  1738. \brief Quad 8-bit unsigned addition.
  1739. \details This function performs four unsigned 8-bit integer additions.
  1740. \param [in] x first four 8-bit summands.
  1741. \param [in] y second four 8-bit summands.
  1742. \return the addition of the first bytes from each operand, in the first byte of the return value.\n
  1743. the addition of the second bytes of each operand, in the second byte of the return value.\n
  1744. the addition of the third bytes of each operand, in the third byte of the return value.\n
  1745. the addition of the fourth bytes of each operand, in the fourth byte of the return value.
  1746. \remark
  1747. res[7:0] = val1[7:0] + val2[7:0] \n
  1748. res[15:8] = val1[15:8] + val2[15:8] \n
  1749. res[23:16] = val1[23:16] + val2[23:16] \n
  1750. res[31:24] = val1[31:24] + val2[31:24]
  1751. */
  1752. __ALWAYS_STATIC_INLINE uint32_t __UADD8(uint32_t x, uint32_t y)
  1753. {
  1754. int32_t r, s, t, u;
  1755. r = (((x << 24) >> 24) + ((y << 24) >> 24)) & 0x000000FF;
  1756. s = (((x << 16) >> 24) + ((y << 16) >> 24)) & 0x000000FF;
  1757. t = (((x << 8) >> 24) + ((y << 8) >> 24)) & 0x000000FF;
  1758. u = (((x) >> 24) + ((y) >> 24)) & 0x000000FF;
  1759. return ((u << 24) | (t << 16) | (s << 8) | (r));
  1760. }
  1761. /**
  1762. \brief Quad 8-bit saturating subtract.
  1763. \details This function enables you to perform four 8-bit integer subtractions,
  1764. saturating the results to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
  1765. \param [in] x first four 8-bit summands.
  1766. \param [in] y second four 8-bit summands.
  1767. \return the subtraction of the first byte of each operand in the first byte of the return value.\n
  1768. the subtraction of the second byte of each operand in the second byte of the return value.\n
  1769. the subtraction of the third byte of each operand in the third byte of the return value.\n
  1770. the subtraction of the fourth byte of each operand in the fourth byte of the return value.\n
  1771. The returned results are saturated to the 8-bit signed integer range -2^7 <= x <= 2^7 - 1.
  1772. \remark
  1773. res[7:0] = val1[7:0] - val2[7:0] \n
  1774. res[15:8] = val1[15:8] - val2[15:8] \n
  1775. res[23:16] = val1[23:16] - val2[23:16] \n
  1776. res[31:24] = val1[31:24] - val2[31:24]
  1777. */
  1778. __ALWAYS_STATIC_INLINE uint32_t __QSUB8(uint32_t x, uint32_t y)
  1779. {
  1780. int32_t r, s, t, u;
  1781. r = __SSAT(((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)), 8) & (int32_t)0x000000FF;
  1782. s = __SSAT(((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)), 8) & (int32_t)0x000000FF;
  1783. t = __SSAT(((((int32_t)x << 8) >> 24) - (((int32_t)y << 8) >> 24)), 8) & (int32_t)0x000000FF;
  1784. u = __SSAT(((((int32_t)x) >> 24) - (((int32_t)y) >> 24)), 8) & (int32_t)0x000000FF;
  1785. return ((uint32_t)((u << 24) | (t << 16) | (s << 8) | (r)));
  1786. }
  1787. /**
  1788. \brief Quad 8-bit unsigned saturating subtraction.
  1789. \details This function enables you to perform four unsigned 8-bit integer subtractions,
  1790. saturating the results to the 8-bit unsigned integer range 0 < x < 2^8 - 1.
  1791. \param [in] x first four 8-bit summands.
  1792. \param [in] y second four 8-bit summands.
  1793. \return the subtraction of the first byte of each operand in the first byte of the return value.\n
  1794. the subtraction of the second byte of each operand in the second byte of the return value.\n
  1795. the subtraction of the third byte of each operand in the third byte of the return value.\n
  1796. the subtraction of the fourth byte of each operand in the fourth byte of the return value.\n
  1797. The returned results are saturated to the 8-bit unsigned integer range 0 <= x <= 2^8 - 1.
  1798. \remark
  1799. res[7:0] = val1[7:0] - val2[7:0] \n
  1800. res[15:8] = val1[15:8] - val2[15:8] \n
  1801. res[23:16] = val1[23:16] - val2[23:16] \n
  1802. res[31:24] = val1[31:24] - val2[31:24]
  1803. */
  1804. __ALWAYS_STATIC_INLINE uint32_t __UQSUB8(uint32_t x, uint32_t y)
  1805. {
  1806. int32_t r, s, t, u;
  1807. r = __IUSAT((((x << 24) >> 24) - ((y << 24) >> 24)), 8) & 0x000000FF;
  1808. s = __IUSAT((((x << 16) >> 24) - ((y << 16) >> 24)), 8) & 0x000000FF;
  1809. t = __IUSAT((((x << 8) >> 24) - ((y << 8) >> 24)), 8) & 0x000000FF;
  1810. u = __IUSAT((((x) >> 24) - ((y) >> 24)), 8) & 0x000000FF;
  1811. return ((u << 24) | (t << 16) | (s << 8) | (r));
  1812. }
  1813. /**
  1814. \brief Quad 8-bit signed subtraction.
  1815. \details This function enables you to perform four 8-bit signed integer subtractions.
  1816. \param [in] x first four 8-bit operands of each subtraction.
  1817. \param [in] y second four 8-bit operands of each subtraction.
  1818. \return the subtraction of the first bytes from each operand, in the first byte of the return value.\n
  1819. the subtraction of the second bytes of each operand, in the second byte of the return value.\n
  1820. the subtraction of the third bytes of each operand, in the third byte of the return value.\n
  1821. the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.
  1822. \remark
  1823. res[7:0] = val1[7:0] - val2[7:0] \n
  1824. res[15:8] = val1[15:8] - val2[15:8] \n
  1825. res[23:16] = val1[23:16] - val2[23:16] \n
  1826. res[31:24] = val1[31:24] - val2[31:24]
  1827. */
  1828. __ALWAYS_STATIC_INLINE uint32_t __SSUB8(uint32_t x, uint32_t y)
  1829. {
  1830. int32_t r, s, t, u;
  1831. r = ((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)) & (int32_t)0x000000FF;
  1832. s = ((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)) & (int32_t)0x000000FF;
  1833. t = ((((int32_t)x << 8) >> 24) - (((int32_t)y << 8) >> 24)) & (int32_t)0x000000FF;
  1834. u = ((((int32_t)x) >> 24) - (((int32_t)y) >> 24)) & (int32_t)0x000000FF;
  1835. return ((uint32_t)((u << 24) | (t << 16) | (s << 8) | (r)));
  1836. }
  1837. /**
  1838. \brief Quad 8-bit unsigned subtract.
  1839. \details This function enables you to perform four 8-bit unsigned integer subtractions.
  1840. \param [in] x first four 8-bit operands of each subtraction.
  1841. \param [in] y second four 8-bit operands of each subtraction.
  1842. \return the subtraction of the first bytes from each operand, in the first byte of the return value.\n
  1843. the subtraction of the second bytes of each operand, in the second byte of the return value.\n
  1844. the subtraction of the third bytes of each operand, in the third byte of the return value.\n
  1845. the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.
  1846. \remark
  1847. res[7:0] = val1[7:0] - val2[7:0] \n
  1848. res[15:8] = val1[15:8] - val2[15:8] \n
  1849. res[23:16] = val1[23:16] - val2[23:16] \n
  1850. res[31:24] = val1[31:24] - val2[31:24]
  1851. */
  1852. __ALWAYS_STATIC_INLINE uint32_t __USUB8(uint32_t x, uint32_t y)
  1853. {
  1854. int32_t r, s, t, u;
  1855. r = (((x << 24) >> 24) - ((y << 24) >> 24)) & 0x000000FF;
  1856. s = (((x << 16) >> 24) - ((y << 16) >> 24)) & 0x000000FF;
  1857. t = (((x << 8) >> 24) - ((y << 8) >> 24)) & 0x000000FF;
  1858. u = (((x) >> 24) - ((y) >> 24)) & 0x000000FF;
  1859. return ((u << 24) | (t << 16) | (s << 8) | (r));
  1860. }
  1861. /**
  1862. \brief Unsigned sum of quad 8-bit unsigned absolute difference.
  1863. \details This function enables you to perform four unsigned 8-bit subtractions, and add the absolute values
  1864. of the differences together, returning the result as a single unsigned integer.
  1865. \param [in] x first four 8-bit operands of each subtraction.
  1866. \param [in] y second four 8-bit operands of each subtraction.
  1867. \return the subtraction of the first bytes from each operand, in the first byte of the return value.\n
  1868. the subtraction of the second bytes of each operand, in the second byte of the return value.\n
  1869. the subtraction of the third bytes of each operand, in the third byte of the return value.\n
  1870. the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.\n
  1871. The sum is returned as a single unsigned integer.
  1872. \remark
  1873. absdiff1 = val1[7:0] - val2[7:0] \n
  1874. absdiff2 = val1[15:8] - val2[15:8] \n
  1875. absdiff3 = val1[23:16] - val2[23:16] \n
  1876. absdiff4 = val1[31:24] - val2[31:24] \n
  1877. res[31:0] = absdiff1 + absdiff2 + absdiff3 + absdiff4
  1878. */
  1879. __ALWAYS_STATIC_INLINE uint32_t __USAD8(uint32_t x, uint32_t y)
  1880. {
  1881. int32_t r, s, t, u;
  1882. r = (((x << 24) >> 24) - ((y << 24) >> 24)) & 0x000000FF;
  1883. s = (((x << 16) >> 24) - ((y << 16) >> 24)) & 0x000000FF;
  1884. t = (((x << 8) >> 24) - ((y << 8) >> 24)) & 0x000000FF;
  1885. u = (((x) >> 24) - ((y) >> 24)) & 0x000000FF;
  1886. return (u + t + s + r);
  1887. }
  1888. /**
  1889. \brief Unsigned sum of quad 8-bit unsigned absolute difference with 32-bit accumulate.
  1890. \details This function enables you to perform four unsigned 8-bit subtractions, and add the absolute values
  1891. of the differences to a 32-bit accumulate operand.
  1892. \param [in] x first four 8-bit operands of each subtraction.
  1893. \param [in] y second four 8-bit operands of each subtraction.
  1894. \param [in] sum accumulation value.
  1895. \return the sum of the absolute differences of the following bytes, added to the accumulation value:
  1896. the subtraction of the first bytes from each operand, in the first byte of the return value.\n
  1897. the subtraction of the second bytes of each operand, in the second byte of the return value.\n
  1898. the subtraction of the third bytes of each operand, in the third byte of the return value.\n
  1899. the subtraction of the fourth bytes of each operand, in the fourth byte of the return value.
  1900. \remark
  1901. absdiff1 = val1[7:0] - val2[7:0] \n
  1902. absdiff2 = val1[15:8] - val2[15:8] \n
  1903. absdiff3 = val1[23:16] - val2[23:16] \n
  1904. absdiff4 = val1[31:24] - val2[31:24] \n
  1905. sum = absdiff1 + absdiff2 + absdiff3 + absdiff4 \n
  1906. res[31:0] = sum[31:0] + val3[31:0]
  1907. */
  1908. __ALWAYS_STATIC_INLINE uint32_t __USADA8(uint32_t x, uint32_t y, uint32_t sum)
  1909. {
  1910. int32_t r, s, t, u;
  1911. #ifdef __cplusplus
  1912. r = (abs((long long)((x << 24) >> 24) - ((y << 24) >> 24))) & 0x000000FF;
  1913. s = (abs((long long)((x << 16) >> 24) - ((y << 16) >> 24))) & 0x000000FF;
  1914. t = (abs((long long)((x << 8) >> 24) - ((y << 8) >> 24))) & 0x000000FF;
  1915. u = (abs((long long)((x) >> 24) - ((y) >> 24))) & 0x000000FF;
  1916. #else
  1917. r = (abs(((x << 24) >> 24) - ((y << 24) >> 24))) & 0x000000FF;
  1918. s = (abs(((x << 16) >> 24) - ((y << 16) >> 24))) & 0x000000FF;
  1919. t = (abs(((x << 8) >> 24) - ((y << 8) >> 24))) & 0x000000FF;
  1920. u = (abs(((x) >> 24) - ((y) >> 24))) & 0x000000FF;
  1921. #endif
  1922. return (u + t + s + r + sum);
  1923. }
  1924. /**
  1925. \brief Dual 16-bit saturating addition.
  1926. \details This function enables you to perform two 16-bit integer arithmetic additions in parallel,
  1927. saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
  1928. \param [in] x first two 16-bit summands.
  1929. \param [in] y second two 16-bit summands.
  1930. \return the saturated addition of the low halfwords, in the low halfword of the return value.\n
  1931. the saturated addition of the high halfwords, in the high halfword of the return value.\n
  1932. The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
  1933. \remark
  1934. res[15:0] = val1[15:0] + val2[15:0] \n
  1935. res[31:16] = val1[31:16] + val2[31:16]
  1936. */
  1937. __ALWAYS_STATIC_INLINE uint32_t __QADD16(uint32_t x, uint32_t y)
  1938. {
  1939. int32_t r = 0, s = 0;
  1940. r = __SSAT(((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
  1941. s = __SSAT(((((int32_t)x) >> 16) + (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
  1942. return ((uint32_t)((s << 16) | (r)));
  1943. }
  1944. /**
  1945. \brief Dual 16-bit unsigned saturating addition.
  1946. \details This function enables you to perform two unsigned 16-bit integer additions, saturating
  1947. the results to the 16-bit unsigned integer range 0 < x < 2^16 - 1.
  1948. \param [in] x first two 16-bit summands.
  1949. \param [in] y second two 16-bit summands.
  1950. \return the saturated addition of the low halfwords, in the low halfword of the return value.\n
  1951. the saturated addition of the high halfwords, in the high halfword of the return value.\n
  1952. The results are saturated to the 16-bit unsigned integer range 0 < x < 2^16 - 1.
  1953. \remark
  1954. res[15:0] = val1[15:0] + val2[15:0] \n
  1955. res[31:16] = val1[31:16] + val2[31:16]
  1956. */
  1957. __ALWAYS_STATIC_INLINE uint32_t __UQADD16(uint32_t x, uint32_t y)
  1958. {
  1959. int32_t r = 0, s = 0;
  1960. r = __IUSAT((((x << 16) >> 16) + ((y << 16) >> 16)), 16) & 0x0000FFFF;
  1961. s = __IUSAT((((x) >> 16) + ((y) >> 16)), 16) & 0x0000FFFF;
  1962. return ((s << 16) | (r));
  1963. }
  1964. /**
  1965. \brief Dual 16-bit signed addition.
  1966. \details This function enables you to perform two 16-bit signed integer additions.
  1967. \param [in] x first two 16-bit summands.
  1968. \param [in] y second two 16-bit summands.
  1969. \return the addition of the low halfwords in the low halfword of the return value.\n
  1970. the addition of the high halfwords in the high halfword of the return value.
  1971. \remark
  1972. res[15:0] = val1[15:0] + val2[15:0] \n
  1973. res[31:16] = val1[31:16] + val2[31:16]
  1974. */
  1975. __ALWAYS_STATIC_INLINE uint32_t __SADD16(uint32_t x, uint32_t y)
  1976. {
  1977. int32_t r = 0, s = 0;
  1978. r = ((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
  1979. s = ((((int32_t)x) >> 16) + (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
  1980. return ((uint32_t)((s << 16) | (r)));
  1981. }
  1982. /**
  1983. \brief Dual 16-bit unsigned addition
  1984. \details This function enables you to perform two 16-bit unsigned integer additions.
  1985. \param [in] x first two 16-bit summands for each addition.
  1986. \param [in] y second two 16-bit summands for each addition.
  1987. \return the addition of the low halfwords in the low halfword of the return value.\n
  1988. the addition of the high halfwords in the high halfword of the return value.
  1989. \remark
  1990. res[15:0] = val1[15:0] + val2[15:0] \n
  1991. res[31:16] = val1[31:16] + val2[31:16]
  1992. */
  1993. __ALWAYS_STATIC_INLINE uint32_t __UADD16(uint32_t x, uint32_t y)
  1994. {
  1995. int32_t r = 0, s = 0;
  1996. r = (((x << 16) >> 16) + ((y << 16) >> 16)) & 0x0000FFFF;
  1997. s = (((x) >> 16) + ((y) >> 16)) & 0x0000FFFF;
  1998. return ((s << 16) | (r));
  1999. }
  2000. /**
  2001. \brief Dual 16-bit signed addition with halved results.
  2002. \details This function enables you to perform two signed 16-bit integer additions, halving the results.
  2003. \param [in] x first two 16-bit summands.
  2004. \param [in] y second two 16-bit summands.
  2005. \return the halved addition of the low halfwords, in the low halfword of the return value.\n
  2006. the halved addition of the high halfwords, in the high halfword of the return value.
  2007. \remark
  2008. res[15:0] = (val1[15:0] + val2[15:0]) >> 1 \n
  2009. res[31:16] = (val1[31:16] + val2[31:16]) >> 1
  2010. */
  2011. __ALWAYS_STATIC_INLINE uint32_t __SHADD16(uint32_t x, uint32_t y)
  2012. {
  2013. int32_t r, s;
  2014. r = (((((int32_t)x << 16) >> 16) + (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
  2015. s = (((((int32_t)x) >> 16) + (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
  2016. return ((uint32_t)((s << 16) | (r)));
  2017. }
  2018. /**
  2019. \brief Dual 16-bit unsigned addition with halved results.
  2020. \details This function enables you to perform two unsigned 16-bit integer additions, halving the results.
  2021. \param [in] x first two 16-bit summands.
  2022. \param [in] y second two 16-bit summands.
  2023. \return the halved addition of the low halfwords, in the low halfword of the return value.\n
  2024. the halved addition of the high halfwords, in the high halfword of the return value.
  2025. \remark
  2026. res[15:0] = (val1[15:0] + val2[15:0]) >> 1 \n
  2027. res[31:16] = (val1[31:16] + val2[31:16]) >> 1
  2028. */
  2029. __ALWAYS_STATIC_INLINE uint32_t __UHADD16(uint32_t x, uint32_t y)
  2030. {
  2031. int32_t r, s;
  2032. r = ((((x << 16) >> 16) + ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
  2033. s = ((((x) >> 16) + ((y) >> 16)) >> 1) & 0x0000FFFF;
  2034. return ((s << 16) | (r));
  2035. }
  2036. /**
  2037. \brief Quad 8-bit signed addition with halved results.
  2038. \details This function enables you to perform four signed 8-bit integer additions, halving the results.
  2039. \param [in] x first four 8-bit summands.
  2040. \param [in] y second four 8-bit summands.
  2041. \return the halved addition of the first bytes from each operand, in the first byte of the return value.\n
  2042. the halved addition of the second bytes from each operand, in the second byte of the return value.\n
  2043. the halved addition of the third bytes from each operand, in the third byte of the return value.\n
  2044. the halved addition of the fourth bytes from each operand, in the fourth byte of the return value.
  2045. \remark
  2046. res[7:0] = (val1[7:0] + val2[7:0] ) >> 1 \n
  2047. res[15:8] = (val1[15:8] + val2[15:8] ) >> 1 \n
  2048. res[23:16] = (val1[23:16] + val2[23:16]) >> 1 \n
  2049. res[31:24] = (val1[31:24] + val2[31:24]) >> 1
  2050. */
  2051. __ALWAYS_STATIC_INLINE uint32_t __SHADD8(uint32_t x, uint32_t y)
  2052. {
  2053. int32_t r, s, t, u;
  2054. r = (((((int32_t)x << 24) >> 24) + (((int32_t)y << 24) >> 24)) >> 1) & (int32_t)0x000000FF;
  2055. s = (((((int32_t)x << 16) >> 24) + (((int32_t)y << 16) >> 24)) >> 1) & (int32_t)0x000000FF;
  2056. t = (((((int32_t)x << 8) >> 24) + (((int32_t)y << 8) >> 24)) >> 1) & (int32_t)0x000000FF;
  2057. u = (((((int32_t)x) >> 24) + (((int32_t)y) >> 24)) >> 1) & (int32_t)0x000000FF;
  2058. return ((uint32_t)((u << 24) | (t << 16) | (s << 8) | (r)));
  2059. }
  2060. /**
  2061. \brief Quad 8-bit unsigned addition with halved results.
  2062. \details This function enables you to perform four unsigned 8-bit integer additions, halving the results.
  2063. \param [in] x first four 8-bit summands.
  2064. \param [in] y second four 8-bit summands.
  2065. \return the halved addition of the first bytes from each operand, in the first byte of the return value.\n
  2066. the halved addition of the second bytes from each operand, in the second byte of the return value.\n
  2067. the halved addition of the third bytes from each operand, in the third byte of the return value.\n
  2068. the halved addition of the fourth bytes from each operand, in the fourth byte of the return value.
  2069. \remark
  2070. res[7:0] = (val1[7:0] + val2[7:0] ) >> 1 \n
  2071. res[15:8] = (val1[15:8] + val2[15:8] ) >> 1 \n
  2072. res[23:16] = (val1[23:16] + val2[23:16]) >> 1 \n
  2073. res[31:24] = (val1[31:24] + val2[31:24]) >> 1
  2074. */
  2075. __ALWAYS_STATIC_INLINE uint32_t __UHADD8(uint32_t x, uint32_t y)
  2076. {
  2077. int32_t r, s, t, u;
  2078. r = ((((x << 24) >> 24) + ((y << 24) >> 24)) >> 1) & 0x000000FF;
  2079. s = ((((x << 16) >> 24) + ((y << 16) >> 24)) >> 1) & 0x000000FF;
  2080. t = ((((x << 8) >> 24) + ((y << 8) >> 24)) >> 1) & 0x000000FF;
  2081. u = ((((x) >> 24) + ((y) >> 24)) >> 1) & 0x000000FF;
  2082. return ((u << 24) | (t << 16) | (s << 8) | (r));
  2083. }
  2084. /**
  2085. \brief Dual 16-bit saturating subtract.
  2086. \details This function enables you to perform two 16-bit integer subtractions in parallel,
  2087. saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
  2088. \param [in] x first two 16-bit summands.
  2089. \param [in] y second two 16-bit summands.
  2090. \return the saturated subtraction of the low halfwords, in the low halfword of the return value.\n
  2091. the saturated subtraction of the high halfwords, in the high halfword of the return value.\n
  2092. The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
  2093. \remark
  2094. res[15:0] = val1[15:0] - val2[15:0] \n
  2095. res[31:16] = val1[31:16] - val2[31:16]
  2096. */
  2097. __ALWAYS_STATIC_INLINE uint32_t __QSUB16(uint32_t x, uint32_t y)
  2098. {
  2099. int32_t r, s;
  2100. r = __SSAT(((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
  2101. s = __SSAT(((((int32_t)x) >> 16) - (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
  2102. return ((uint32_t)((s << 16) | (r)));
  2103. }
  2104. /**
  2105. \brief Dual 16-bit unsigned saturating subtraction.
  2106. \details This function enables you to perform two unsigned 16-bit integer subtractions,
  2107. saturating the results to the 16-bit unsigned integer range 0 < x < 2^16 - 1.
  2108. \param [in] x first two 16-bit operands for each subtraction.
  2109. \param [in] y second two 16-bit operands for each subtraction.
  2110. \return the saturated subtraction of the low halfwords, in the low halfword of the return value.\n
  2111. the saturated subtraction of the high halfwords, in the high halfword of the return value.\n
  2112. The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
  2113. \remark
  2114. res[15:0] = val1[15:0] - val2[15:0] \n
  2115. res[31:16] = val1[31:16] - val2[31:16]
  2116. */
  2117. __ALWAYS_STATIC_INLINE uint32_t __UQSUB16(uint32_t x, uint32_t y)
  2118. {
  2119. int32_t r, s;
  2120. r = __IUSAT((((x << 16) >> 16) - ((y << 16) >> 16)), 16) & 0x0000FFFF;
  2121. s = __IUSAT((((x) >> 16) - ((y) >> 16)), 16) & 0x0000FFFF;
  2122. return ((s << 16) | (r));
  2123. }
  2124. /**
  2125. \brief Dual 16-bit signed subtraction.
  2126. \details This function enables you to perform two 16-bit signed integer subtractions.
  2127. \param [in] x first two 16-bit operands of each subtraction.
  2128. \param [in] y second two 16-bit operands of each subtraction.
  2129. \return the subtraction of the low halfword in the second operand from the low
  2130. halfword in the first operand, in the low halfword of the return value. \n
  2131. the subtraction of the high halfword in the second operand from the high
  2132. halfword in the first operand, in the high halfword of the return value.
  2133. \remark
  2134. res[15:0] = val1[15:0] - val2[15:0] \n
  2135. res[31:16] = val1[31:16] - val2[31:16]
  2136. */
  2137. __ALWAYS_STATIC_INLINE uint32_t __SSUB16(uint32_t x, uint32_t y)
  2138. {
  2139. int32_t r, s;
  2140. r = ((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
  2141. s = ((((int32_t)x) >> 16) - (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
  2142. return ((uint32_t)((s << 16) | (r)));
  2143. }
  2144. /**
  2145. \brief Dual 16-bit unsigned subtract.
  2146. \details This function enables you to perform two 16-bit unsigned integer subtractions.
  2147. \param [in] x first two 16-bit operands of each subtraction.
  2148. \param [in] y second two 16-bit operands of each subtraction.
  2149. \return the subtraction of the low halfword in the second operand from the low
  2150. halfword in the first operand, in the low halfword of the return value. \n
  2151. the subtraction of the high halfword in the second operand from the high
  2152. halfword in the first operand, in the high halfword of the return value.
  2153. \remark
  2154. res[15:0] = val1[15:0] - val2[15:0] \n
  2155. res[31:16] = val1[31:16] - val2[31:16]
  2156. */
  2157. __ALWAYS_STATIC_INLINE uint32_t __USUB16(uint32_t x, uint32_t y)
  2158. {
  2159. int32_t r, s;
  2160. r = (((x << 16) >> 16) - ((y << 16) >> 16)) & 0x0000FFFF;
  2161. s = (((x) >> 16) - ((y) >> 16)) & 0x0000FFFF;
  2162. return ((s << 16) | (r));
  2163. }
  2164. /**
  2165. \brief Dual 16-bit signed subtraction with halved results.
  2166. \details This function enables you to perform two signed 16-bit integer subtractions, halving the results.
  2167. \param [in] x first two 16-bit summands.
  2168. \param [in] y second two 16-bit summands.
  2169. \return the halved subtraction of the low halfwords, in the low halfword of the return value.\n
  2170. the halved subtraction of the high halfwords, in the high halfword of the return value.
  2171. \remark
  2172. res[15:0] = (val1[15:0] - val2[15:0]) >> 1 \n
  2173. res[31:16] = (val1[31:16] - val2[31:16]) >> 1
  2174. */
  2175. __ALWAYS_STATIC_INLINE uint32_t __SHSUB16(uint32_t x, uint32_t y)
  2176. {
  2177. int32_t r, s;
  2178. r = (((((int32_t)x << 16) >> 16) - (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
  2179. s = (((((int32_t)x) >> 16) - (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
  2180. return ((uint32_t)((s << 16) | (r)));
  2181. }
  2182. /**
  2183. \brief Dual 16-bit unsigned subtraction with halved results.
  2184. \details This function enables you to perform two unsigned 16-bit integer subtractions, halving the results.
  2185. \param [in] x first two 16-bit summands.
  2186. \param [in] y second two 16-bit summands.
  2187. \return the halved subtraction of the low halfwords, in the low halfword of the return value.\n
  2188. the halved subtraction of the high halfwords, in the high halfword of the return value.
  2189. \remark
  2190. res[15:0] = (val1[15:0] - val2[15:0]) >> 1 \n
  2191. res[31:16] = (val1[31:16] - val2[31:16]) >> 1
  2192. */
  2193. __ALWAYS_STATIC_INLINE uint32_t __UHSUB16(uint32_t x, uint32_t y)
  2194. {
  2195. int32_t r, s;
  2196. r = ((((x << 16) >> 16) - ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
  2197. s = ((((x) >> 16) - ((y) >> 16)) >> 1) & 0x0000FFFF;
  2198. return ((s << 16) | (r));
  2199. }
  2200. /**
2201. \brief Quad 8-bit signed subtraction with halved results.
  2202. \details This function enables you to perform four signed 8-bit integer subtractions, halving the results.
  2203. \param [in] x first four 8-bit summands.
  2204. \param [in] y second four 8-bit summands.
  2205. \return the halved subtraction of the first bytes from each operand, in the first byte of the return value.\n
  2206. the halved subtraction of the second bytes from each operand, in the second byte of the return value.\n
  2207. the halved subtraction of the third bytes from each operand, in the third byte of the return value.\n
  2208. the halved subtraction of the fourth bytes from each operand, in the fourth byte of the return value.
  2209. \remark
  2210. res[7:0] = (val1[7:0] - val2[7:0] ) >> 1 \n
  2211. res[15:8] = (val1[15:8] - val2[15:8] ) >> 1 \n
  2212. res[23:16] = (val1[23:16] - val2[23:16]) >> 1 \n
  2213. res[31:24] = (val1[31:24] - val2[31:24]) >> 1
  2214. */
  2215. __ALWAYS_STATIC_INLINE uint32_t __SHSUB8(uint32_t x, uint32_t y)
  2216. {
  2217. int32_t r, s, t, u;
  2218. r = (((((int32_t)x << 24) >> 24) - (((int32_t)y << 24) >> 24)) >> 1) & (int32_t)0x000000FF;
  2219. s = (((((int32_t)x << 16) >> 24) - (((int32_t)y << 16) >> 24)) >> 1) & (int32_t)0x000000FF;
  2220. t = (((((int32_t)x << 8) >> 24) - (((int32_t)y << 8) >> 24)) >> 1) & (int32_t)0x000000FF;
  2221. u = (((((int32_t)x) >> 24) - (((int32_t)y) >> 24)) >> 1) & (int32_t)0x000000FF;
  2222. return ((uint32_t)((u << 24) | (t << 16) | (s << 8) | (r)));
  2223. }
  2224. /**
  2225. \brief Quad 8-bit unsigned subtraction with halved results.
  2226. \details This function enables you to perform four unsigned 8-bit integer subtractions, halving the results.
  2227. \param [in] x first four 8-bit summands.
  2228. \param [in] y second four 8-bit summands.
  2229. \return the halved subtraction of the first bytes from each operand, in the first byte of the return value.\n
  2230. the halved subtraction of the second bytes from each operand, in the second byte of the return value.\n
  2231. the halved subtraction of the third bytes from each operand, in the third byte of the return value.\n
  2232. the halved subtraction of the fourth bytes from each operand, in the fourth byte of the return value.
  2233. \remark
  2234. res[7:0] = (val1[7:0] - val2[7:0] ) >> 1 \n
  2235. res[15:8] = (val1[15:8] - val2[15:8] ) >> 1 \n
  2236. res[23:16] = (val1[23:16] - val2[23:16]) >> 1 \n
  2237. res[31:24] = (val1[31:24] - val2[31:24]) >> 1
  2238. */
  2239. __ALWAYS_STATIC_INLINE uint32_t __UHSUB8(uint32_t x, uint32_t y)
  2240. {
  2241. int32_t r, s, t, u;
  2242. r = ((((x << 24) >> 24) - ((y << 24) >> 24)) >> 1) & 0x000000FF;
  2243. s = ((((x << 16) >> 24) - ((y << 16) >> 24)) >> 1) & 0x000000FF;
  2244. t = ((((x << 8) >> 24) - ((y << 8) >> 24)) >> 1) & 0x000000FF;
  2245. u = ((((x) >> 24) - ((y) >> 24)) >> 1) & 0x000000FF;
  2246. return ((u << 24) | (t << 16) | (s << 8) | (r));
  2247. }
  2248. /**
  2249. \brief Dual 16-bit add and subtract with exchange.
  2250. \details This function enables you to exchange the halfwords of the one operand,
  2251. then add the high halfwords and subtract the low halfwords,
  2252. saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
  2253. \param [in] x first operand for the subtraction in the low halfword,
  2254. and the first operand for the addition in the high halfword.
  2255. \param [in] y second operand for the subtraction in the high halfword,
  2256. and the second operand for the addition in the low halfword.
  2257. \return the saturated subtraction of the high halfword in the second operand from the
  2258. low halfword in the first operand, in the low halfword of the return value.\n
  2259. the saturated addition of the high halfword in the first operand and the
  2260. low halfword in the second operand, in the high halfword of the return value.\n
  2261. The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
  2262. \remark
  2263. res[15:0] = val1[15:0] - val2[31:16] \n
  2264. res[31:16] = val1[31:16] + val2[15:0]
  2265. */
  2266. __ALWAYS_STATIC_INLINE uint32_t __QASX(uint32_t x, uint32_t y)
  2267. {
  2268. int32_t r, s;
  2269. r = __SSAT(((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
  2270. s = __SSAT(((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
  2271. return ((uint32_t)((s << 16) | (r)));
  2272. }
  2273. /**
  2274. \brief Dual 16-bit unsigned saturating addition and subtraction with exchange.
  2275. \details This function enables you to exchange the halfwords of the second operand and
  2276. perform one unsigned 16-bit integer addition and one unsigned 16-bit subtraction,
  2277. saturating the results to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
  2278. \param [in] x first operand for the subtraction in the low halfword,
  2279. and the first operand for the addition in the high halfword.
  2280. \param [in] y second operand for the subtraction in the high halfword,
  2281. and the second operand for the addition in the low halfword.
  2282. \return the saturated subtraction of the high halfword in the second operand from the
  2283. low halfword in the first operand, in the low halfword of the return value.\n
  2284. the saturated addition of the high halfword in the first operand and the
  2285. low halfword in the second operand, in the high halfword of the return value.\n
  2286. The returned results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
  2287. \remark
  2288. res[15:0] = val1[15:0] - val2[31:16] \n
  2289. res[31:16] = val1[31:16] + val2[15:0]
  2290. */
  2291. __ALWAYS_STATIC_INLINE uint32_t __UQASX(uint32_t x, uint32_t y)
  2292. {
  2293. int32_t r, s;
  2294. r = __IUSAT((((x << 16) >> 16) - ((y) >> 16)), 16) & 0x0000FFFF;
  2295. s = __IUSAT((((x) >> 16) + ((y << 16) >> 16)), 16) & 0x0000FFFF;
  2296. return ((s << 16) | (r));
  2297. }
  2298. /**
  2299. \brief Dual 16-bit addition and subtraction with exchange.
  2300. \details It enables you to exchange the halfwords of the second operand, add the high halfwords
  2301. and subtract the low halfwords.
  2302. \param [in] x first operand for the subtraction in the low halfword,
  2303. and the first operand for the addition in the high halfword.
  2304. \param [in] y second operand for the subtraction in the high halfword,
  2305. and the second operand for the addition in the low halfword.
  2306. \return the subtraction of the high halfword in the second operand from the
  2307. low halfword in the first operand, in the low halfword of the return value.\n
  2308. the addition of the high halfword in the first operand and the
  2309. low halfword in the second operand, in the high halfword of the return value.
  2310. \remark
  2311. res[15:0] = val1[15:0] - val2[31:16] \n
  2312. res[31:16] = val1[31:16] + val2[15:0]
  2313. */
  2314. __ALWAYS_STATIC_INLINE uint32_t __SASX(uint32_t x, uint32_t y)
  2315. {
  2316. int32_t r, s;
  2317. r = ((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
  2318. s = ((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
  2319. return ((uint32_t)((s << 16) | (r)));
  2320. }
  2321. /**
  2322. \brief Dual 16-bit unsigned addition and subtraction with exchange.
  2323. \details This function enables you to exchange the two halfwords of the second operand,
  2324. add the high halfwords and subtract the low halfwords.
  2325. \param [in] x first operand for the subtraction in the low halfword,
  2326. and the first operand for the addition in the high halfword.
  2327. \param [in] y second operand for the subtraction in the high halfword,
  2328. and the second operand for the addition in the low halfword.
  2329. \return the subtraction of the high halfword in the second operand from the
  2330. low halfword in the first operand, in the low halfword of the return value.\n
  2331. the addition of the high halfword in the first operand and the
  2332. low halfword in the second operand, in the high halfword of the return value.
  2333. \remark
  2334. res[15:0] = val1[15:0] - val2[31:16] \n
  2335. res[31:16] = val1[31:16] + val2[15:0]
  2336. */
  2337. __ALWAYS_STATIC_INLINE uint32_t __UASX(uint32_t x, uint32_t y)
  2338. {
  2339. int32_t r, s;
  2340. r = (((x << 16) >> 16) - ((y) >> 16)) & 0x0000FFFF;
  2341. s = (((x) >> 16) + ((y << 16) >> 16)) & 0x0000FFFF;
  2342. return ((s << 16) | (r));
  2343. }
  2344. /**
  2345. \brief Dual 16-bit signed addition and subtraction with halved results.
  2346. \details This function enables you to exchange the two halfwords of one operand, perform one
  2347. signed 16-bit integer addition and one signed 16-bit subtraction, and halve the results.
  2348. \param [in] x first 16-bit operands.
  2349. \param [in] y second 16-bit operands.
  2350. \return the halved subtraction of the high halfword in the second operand from the
  2351. low halfword in the first operand, in the low halfword of the return value.\n
  2352. the halved addition of the low halfword in the second operand from the high
  2353. halfword in the first operand, in the high halfword of the return value.
  2354. \remark
  2355. res[15:0] = (val1[15:0] - val2[31:16]) >> 1 \n
  2356. res[31:16] = (val1[31:16] + val2[15:0]) >> 1
  2357. */
  2358. __ALWAYS_STATIC_INLINE uint32_t __SHASX(uint32_t x, uint32_t y)
  2359. {
  2360. int32_t r, s;
  2361. r = (((((int32_t)x << 16) >> 16) - (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
  2362. s = (((((int32_t)x) >> 16) + (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
  2363. return ((uint32_t)((s << 16) | (r)));
  2364. }
  2365. /**
  2366. \brief Dual 16-bit unsigned addition and subtraction with halved results and exchange.
  2367. \details This function enables you to exchange the halfwords of the second operand,
  2368. add the high halfwords and subtract the low halfwords, halving the results.
  2369. \param [in] x first operand for the subtraction in the low halfword, and
  2370. the first operand for the addition in the high halfword.
  2371. \param [in] y second operand for the subtraction in the high halfword, and
  2372. the second operand for the addition in the low halfword.
  2373. \return the halved subtraction of the high halfword in the second operand from the
  2374. low halfword in the first operand, in the low halfword of the return value.\n
  2375. the halved addition of the low halfword in the second operand from the high
  2376. halfword in the first operand, in the high halfword of the return value.
  2377. \remark
  2378. res[15:0] = (val1[15:0] - val2[31:16]) >> 1 \n
  2379. res[31:16] = (val1[31:16] + val2[15:0]) >> 1
  2380. */
  2381. __ALWAYS_STATIC_INLINE uint32_t __UHASX(uint32_t x, uint32_t y)
  2382. {
  2383. int32_t r, s;
  2384. r = ((((x << 16) >> 16) - ((y) >> 16)) >> 1) & 0x0000FFFF;
  2385. s = ((((x) >> 16) + ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
  2386. return ((s << 16) | (r));
  2387. }
  2388. /**
  2389. \brief Dual 16-bit subtract and add with exchange.
  2390. \details This function enables you to exchange the halfwords of one operand,
  2391. then subtract the high halfwords and add the low halfwords,
  2392. saturating the results to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
  2393. \param [in] x first operand for the addition in the low halfword,
  2394. and the first operand for the subtraction in the high halfword.
  2395. \param [in] y second operand for the addition in the high halfword,
  2396. and the second operand for the subtraction in the low halfword.
  2397. \return the saturated addition of the low halfword of the first operand and the high
  2398. halfword of the second operand, in the low halfword of the return value.\n
  2399. the saturated subtraction of the low halfword of the second operand from the
  2400. high halfword of the first operand, in the high halfword of the return value.\n
  2401. The returned results are saturated to the 16-bit signed integer range -2^15 <= x <= 2^15 - 1.
  2402. \remark
  2403. res[15:0] = val1[15:0] + val2[31:16] \n
  2404. res[31:16] = val1[31:16] - val2[15:0]
  2405. */
  2406. __ALWAYS_STATIC_INLINE uint32_t __QSAX(uint32_t x, uint32_t y)
  2407. {
  2408. int32_t r, s;
  2409. r = __SSAT(((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)), 16) & (int32_t)0x0000FFFF;
  2410. s = __SSAT(((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)), 16) & (int32_t)0x0000FFFF;
  2411. return ((uint32_t)((s << 16) | (r)));
  2412. }
  2413. /**
  2414. \brief Dual 16-bit unsigned saturating subtraction and addition with exchange.
  2415. \details This function enables you to exchange the halfwords of the second operand and perform
  2416. one unsigned 16-bit integer subtraction and one unsigned 16-bit addition, saturating
  2417. the results to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
  2418. \param [in] x first operand for the addition in the low halfword,
  2419. and the first operand for the subtraction in the high halfword.
  2420. \param [in] y second operand for the addition in the high halfword,
  2421. and the second operand for the subtraction in the low halfword.
  2422. \return the saturated addition of the low halfword of the first operand and the high
  2423. halfword of the second operand, in the low halfword of the return value.\n
  2424. the saturated subtraction of the low halfword of the second operand from the
  2425. high halfword of the first operand, in the high halfword of the return value.\n
  2426. The returned results are saturated to the 16-bit unsigned integer range 0 <= x <= 2^16 - 1.
  2427. \remark
  2428. res[15:0] = val1[15:0] + val2[31:16] \n
  2429. res[31:16] = val1[31:16] - val2[15:0]
  2430. */
  2431. __ALWAYS_STATIC_INLINE uint32_t __UQSAX(uint32_t x, uint32_t y)
  2432. {
  2433. int32_t r, s;
  2434. r = __IUSAT((((x << 16) >> 16) + ((y) >> 16)), 16) & 0x0000FFFF;
  2435. s = __IUSAT((((x) >> 16) - ((y << 16) >> 16)), 16) & 0x0000FFFF;
  2436. return ((s << 16) | (r));
  2437. }
  2438. /**
  2439. \brief Dual 16-bit unsigned subtract and add with exchange.
  2440. \details This function enables you to exchange the halfwords of the second operand,
  2441. subtract the high halfwords and add the low halfwords.
  2442. \param [in] x first operand for the addition in the low halfword,
  2443. and the first operand for the subtraction in the high halfword.
  2444. \param [in] y second operand for the addition in the high halfword,
  2445. and the second operand for the subtraction in the low halfword.
  2446. \return the addition of the low halfword of the first operand and the high
  2447. halfword of the second operand, in the low halfword of the return value.\n
  2448. the subtraction of the low halfword of the second operand from the
  2449. high halfword of the first operand, in the high halfword of the return value.\n
  2450. \remark
  2451. res[15:0] = val1[15:0] + val2[31:16] \n
  2452. res[31:16] = val1[31:16] - val2[15:0]
  2453. */
  2454. __ALWAYS_STATIC_INLINE uint32_t __USAX(uint32_t x, uint32_t y)
  2455. {
  2456. int32_t r, s;
  2457. r = (((x << 16) >> 16) + ((y) >> 16)) & 0x0000FFFF;
  2458. s = (((x) >> 16) - ((y << 16) >> 16)) & 0x0000FFFF;
  2459. return ((s << 16) | (r));
  2460. }
  2461. /**
  2462. \brief Dual 16-bit signed subtraction and addition with exchange.
  2463. \details This function enables you to exchange the two halfwords of one operand and perform one
  2464. 16-bit integer subtraction and one 16-bit addition.
  2465. \param [in] x first operand for the addition in the low halfword, and the first operand
  2466. for the subtraction in the high halfword.
  2467. \param [in] y second operand for the addition in the high halfword, and the second
  2468. operand for the subtraction in the low halfword.
  2469. \return the addition of the low halfword of the first operand and the high
  2470. halfword of the second operand, in the low halfword of the return value.\n
  2471. the subtraction of the low halfword of the second operand from the
  2472. high halfword of the first operand, in the high halfword of the return value.\n
  2473. \remark
  2474. res[15:0] = val1[15:0] + val2[31:16] \n
  2475. res[31:16] = val1[31:16] - val2[15:0]
  2476. */
  2477. __ALWAYS_STATIC_INLINE uint32_t __SSAX(uint32_t x, uint32_t y)
  2478. {
  2479. int32_t r, s;
  2480. r = ((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)) & (int32_t)0x0000FFFF;
  2481. s = ((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)) & (int32_t)0x0000FFFF;
  2482. return ((uint32_t)((s << 16) | (r)));
  2483. }
  2484. /**
  2485. \brief Dual 16-bit signed subtraction and addition with halved results.
  2486. \details This function enables you to exchange the two halfwords of one operand, perform one signed
  2487. 16-bit integer subtraction and one signed 16-bit addition, and halve the results.
  2488. \param [in] x first 16-bit operands.
  2489. \param [in] y second 16-bit operands.
  2490. \return the halved addition of the low halfword in the first operand and the
  2491. high halfword in the second operand, in the low halfword of the return value.\n
  2492. the halved subtraction of the low halfword in the second operand from the
  2493. high halfword in the first operand, in the high halfword of the return value.
  2494. \remark
  2495. res[15:0] = (val1[15:0] + val2[31:16]) >> 1 \n
  2496. res[31:16] = (val1[31:16] - val2[15:0]) >> 1
  2497. */
  2498. __ALWAYS_STATIC_INLINE uint32_t __SHSAX(uint32_t x, uint32_t y)
  2499. {
  2500. int32_t r, s;
  2501. r = (((((int32_t)x << 16) >> 16) + (((int32_t)y) >> 16)) >> 1) & (int32_t)0x0000FFFF;
  2502. s = (((((int32_t)x) >> 16) - (((int32_t)y << 16) >> 16)) >> 1) & (int32_t)0x0000FFFF;
  2503. return ((uint32_t)((s << 16) | (r)));
  2504. }
  2505. /**
  2506. \brief Dual 16-bit unsigned subtraction and addition with halved results and exchange.
  2507. \details This function enables you to exchange the halfwords of the second operand,
  2508. subtract the high halfwords and add the low halfwords, halving the results.
  2509. \param [in] x first operand for the addition in the low halfword, and
  2510. the first operand for the subtraction in the high halfword.
  2511. \param [in] y second operand for the addition in the high halfword, and
  2512. the second operand for the subtraction in the low halfword.
  2513. \return the halved addition of the low halfword in the first operand and the
  2514. high halfword in the second operand, in the low halfword of the return value.\n
  2515. the halved subtraction of the low halfword in the second operand from the
  2516. high halfword in the first operand, in the high halfword of the return value.
  2517. \remark
  2518. res[15:0] = (val1[15:0] + val2[31:16]) >> 1 \n
  2519. res[31:16] = (val1[31:16] - val2[15:0]) >> 1
  2520. */
  2521. __ALWAYS_STATIC_INLINE uint32_t __UHSAX(uint32_t x, uint32_t y)
  2522. {
  2523. int32_t r, s;
  2524. r = ((((x << 16) >> 16) + ((y) >> 16)) >> 1) & 0x0000FFFF;
  2525. s = ((((x) >> 16) - ((y << 16) >> 16)) >> 1) & 0x0000FFFF;
  2526. return ((s << 16) | (r));
  2527. }
  2528. /**
  2529. \brief Dual 16-bit signed multiply with exchange returning difference.
  2530. \details This function enables you to perform two 16-bit signed multiplications, subtracting
  2531. one of the products from the other. The halfwords of the second operand are exchanged
  2532. before performing the arithmetic. This produces top * bottom and bottom * top multiplication.
  2533. \param [in] x first 16-bit operands for each multiplication.
  2534. \param [in] y second 16-bit operands for each multiplication.
  2535. \return the difference of the products of the two 16-bit signed multiplications.
  2536. \remark
  2537. p1 = val1[15:0] * val2[31:16] \n
  2538. p2 = val1[31:16] * val2[15:0] \n
  2539. res[31:0] = p1 - p2
  2540. */
  2541. __ALWAYS_STATIC_INLINE uint32_t __SMUSDX(uint32_t x, uint32_t y)
  2542. {
  2543. return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
  2544. ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16))));
  2545. }
  2546. /**
  2547. \brief Sum of dual 16-bit signed multiply with exchange.
  2548. \details This function enables you to perform two 16-bit signed multiplications with exchanged
  2549. halfwords of the second operand, adding the products together.
  2550. \param [in] x first 16-bit operands for each multiplication.
  2551. \param [in] y second 16-bit operands for each multiplication.
  2552. \return the sum of the products of the two 16-bit signed multiplications with exchanged halfwords of the second operand.
  2553. \remark
  2554. p1 = val1[15:0] * val2[31:16] \n
  2555. p2 = val1[31:16] * val2[15:0] \n
  2556. res[31:0] = p1 + p2
  2557. */
  2558. __ALWAYS_STATIC_INLINE uint32_t __SMUADX(uint32_t x, uint32_t y)
  2559. {
  2560. return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
  2561. ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16))));
  2562. }
  2563. /**
  2564. \brief Saturating add.
  2565. \details This function enables you to obtain the saturating add of two integers.
  2566. \param [in] x first summand of the saturating add operation.
  2567. \param [in] y second summand of the saturating add operation.
  2568. \return the saturating addition of val1 and val2.
  2569. \remark
  2570. res[31:0] = SAT(val1 + SAT(val2))
  2571. */
  2572. __ALWAYS_STATIC_INLINE int32_t __QADD(int32_t x, int32_t y)
  2573. {
  2574. int32_t result;
  2575. if (y >= 0) {
  2576. if ((int32_t)((uint32_t)x + (uint32_t)y) >= x) {
  2577. result = x + y;
  2578. } else {
  2579. result = 0x7FFFFFFF;
  2580. }
  2581. } else {
  2582. if ((int32_t)((uint32_t)x + (uint32_t)y) < x) {
  2583. result = x + y;
  2584. } else {
  2585. result = 0x80000000;
  2586. }
  2587. }
  2588. return result;
  2589. }
  2590. /**
  2591. \brief Saturating subtract.
2592. \details This function enables you to obtain the saturating subtraction of two integers.
2593. \param [in] x first operand of the saturating subtraction.
2594. \param [in] y second operand of the saturating subtraction.
2595. \return the saturating subtraction of val1 and val2.
  2596. \remark
  2597. res[31:0] = SAT(val1 - SAT(val2))
  2598. */
  2599. __ALWAYS_STATIC_INLINE int32_t __QSUB(int32_t x, int32_t y)
  2600. {
  2601. int64_t tmp;
  2602. int32_t result;
  2603. tmp = (int64_t)x - (int64_t)y;
  2604. if (tmp > 0x7fffffff) {
  2605. tmp = 0x7fffffff;
  2606. } else if (tmp < (-2147483647 - 1)) {
  2607. tmp = -2147483647 - 1;
  2608. }
  2609. result = tmp;
  2610. return result;
  2611. }
  2612. /**
  2613. \brief Dual 16-bit signed multiply with single 32-bit accumulator.
  2614. \details This function enables you to perform two signed 16-bit multiplications,
  2615. adding both results to a 32-bit accumulate operand.
  2616. \param [in] x first 16-bit operands for each multiplication.
  2617. \param [in] y second 16-bit operands for each multiplication.
  2618. \param [in] sum accumulate value.
  2619. \return the product of each multiplication added to the accumulate value, as a 32-bit integer.
  2620. \remark
  2621. p1 = val1[15:0] * val2[15:0] \n
  2622. p2 = val1[31:16] * val2[31:16] \n
  2623. res[31:0] = p1 + p2 + val3[31:0]
  2624. */
  2625. __ALWAYS_STATIC_INLINE uint32_t __SMLAD(uint32_t x, uint32_t y, uint32_t sum)
  2626. {
  2627. return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
  2628. ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
  2629. (((int32_t)sum))));
  2630. }
  2631. /**
  2632. \brief Pre-exchanged dual 16-bit signed multiply with single 32-bit accumulator.
  2633. \details This function enables you to perform two signed 16-bit multiplications with exchanged
  2634. halfwords of the second operand, adding both results to a 32-bit accumulate operand.
  2635. \param [in] x first 16-bit operands for each multiplication.
  2636. \param [in] y second 16-bit operands for each multiplication.
  2637. \param [in] sum accumulate value.
  2638. \return the product of each multiplication with exchanged halfwords of the second
  2639. operand added to the accumulate value, as a 32-bit integer.
  2640. \remark
  2641. p1 = val1[15:0] * val2[31:16] \n
  2642. p2 = val1[31:16] * val2[15:0] \n
  2643. res[31:0] = p1 + p2 + val3[31:0]
  2644. */
  2645. __ALWAYS_STATIC_INLINE uint32_t __SMLADX(uint32_t x, uint32_t y, uint32_t sum)
  2646. {
  2647. return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
  2648. ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
  2649. (((int32_t)sum))));
  2650. }
  2651. /**
  2652. \brief Dual 16-bit signed multiply with exchange subtract with 32-bit accumulate.
  2653. \details This function enables you to perform two 16-bit signed multiplications, take the
  2654. difference of the products, subtracting the high halfword product from the low
  2655. halfword product, and add the difference to a 32-bit accumulate operand.
  2656. \param [in] x first 16-bit operands for each multiplication.
  2657. \param [in] y second 16-bit operands for each multiplication.
  2658. \param [in] sum accumulate value.
  2659. \return the difference of the product of each multiplication, added to the accumulate value.
  2660. \remark
  2661. p1 = val1[15:0] * val2[15:0] \n
  2662. p2 = val1[31:16] * val2[31:16] \n
  2663. res[31:0] = p1 - p2 + val3[31:0]
  2664. */
  2665. __ALWAYS_STATIC_INLINE uint32_t __SMLSD(uint32_t x, uint32_t y, uint32_t sum)
  2666. {
  2667. return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
  2668. ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
  2669. (((int32_t)sum))));
  2670. }
  2671. /**
  2672. \brief Dual 16-bit signed multiply with exchange subtract with 32-bit accumulate.
  2673. \details This function enables you to exchange the halfwords in the second operand, then perform two 16-bit
  2674. signed multiplications. The difference of the products is added to a 32-bit accumulate operand.
  2675. \param [in] x first 16-bit operands for each multiplication.
  2676. \param [in] y second 16-bit operands for each multiplication.
  2677. \param [in] sum accumulate value.
  2678. \return the difference of the product of each multiplication, added to the accumulate value.
  2679. \remark
  2680. p1 = val1[15:0] * val2[31:16] \n
  2681. p2 = val1[31:16] * val2[15:0] \n
  2682. res[31:0] = p1 - p2 + val3[31:0]
  2683. */
  2684. __ALWAYS_STATIC_INLINE uint32_t __SMLSDX(uint32_t x, uint32_t y, uint32_t sum)
  2685. {
  2686. return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
  2687. ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
  2688. (((int32_t)sum))));
  2689. }
  2690. /**
  2691. \brief Dual 16-bit signed multiply with single 64-bit accumulator.
  2692. \details This function enables you to perform two signed 16-bit multiplications, adding both results
  2693. to a 64-bit accumulate operand. Overflow is only possible as a result of the 64-bit addition.
  2694. This overflow is not detected if it occurs. Instead, the result wraps around modulo2^64.
  2695. \param [in] x first 16-bit operands for each multiplication.
  2696. \param [in] y second 16-bit operands for each multiplication.
  2697. \param [in] sum accumulate value.
  2698. \return the product of each multiplication added to the accumulate value.
  2699. \remark
  2700. p1 = val1[15:0] * val2[15:0] \n
  2701. p2 = val1[31:16] * val2[31:16] \n
  2702. sum = p1 + p2 + val3[63:32][31:0] \n
  2703. res[63:32] = sum[63:32] \n
  2704. res[31:0] = sum[31:0]
  2705. */
  2706. __ALWAYS_STATIC_INLINE uint64_t __SMLALD(uint32_t x, uint32_t y, uint64_t sum)
  2707. {
  2708. return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
  2709. ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
  2710. (((uint64_t)sum))));
  2711. }
  2712. /**
  2713. \brief Dual 16-bit signed multiply with exchange with single 64-bit accumulator.
  2714. \details This function enables you to exchange the halfwords of the second operand, and perform two
  2715. signed 16-bit multiplications, adding both results to a 64-bit accumulate operand. Overflow
  2716. is only possible as a result of the 64-bit addition. This overflow is not detected if it occurs.
  2717. Instead, the result wraps around modulo2^64.
  2718. \param [in] x first 16-bit operands for each multiplication.
  2719. \param [in] y second 16-bit operands for each multiplication.
  2720. \param [in] sum accumulate value.
  2721. \return the product of each multiplication added to the accumulate value.
  2722. \remark
  2723. p1 = val1[15:0] * val2[31:16] \n
  2724. p2 = val1[31:16] * val2[15:0] \n
  2725. sum = p1 + p2 + val3[63:32][31:0] \n
  2726. res[63:32] = sum[63:32] \n
  2727. res[31:0] = sum[31:0]
  2728. */
  2729. __ALWAYS_STATIC_INLINE uint64_t __SMLALDX(uint32_t x, uint32_t y, uint64_t sum)
  2730. {
  2731. return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) +
  2732. ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
  2733. (((uint64_t)sum))));
  2734. }
  2735. /**
  2736. \brief dual 16-bit signed multiply subtract with 64-bit accumulate.
  2737. \details This function It enables you to perform two 16-bit signed multiplications, take the difference
  2738. of the products, subtracting the high halfword product from the low halfword product, and add the
  2739. difference to a 64-bit accumulate operand. Overflow cannot occur during the multiplications or the
  2740. subtraction. Overflow can occur as a result of the 64-bit addition, and this overflow is not
  2741. detected. Instead, the result wraps round to modulo2^64.
  2742. \param [in] x first 16-bit operands for each multiplication.
  2743. \param [in] y second 16-bit operands for each multiplication.
  2744. \param [in] sum accumulate value.
  2745. \return the difference of the product of each multiplication, added to the accumulate value.
  2746. \remark
  2747. p1 = val1[15:0] * val2[15:0] \n
  2748. p2 = val1[31:16] * val2[31:16] \n
  2749. res[63:32][31:0] = p1 - p2 + val3[63:32][31:0]
  2750. */
  2751. __ALWAYS_STATIC_INLINE uint64_t __SMLSLD(uint32_t x, uint32_t y, uint64_t sum)
  2752. {
  2753. return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
  2754. ((((int32_t)x) >> 16) * (((int32_t)y) >> 16)) +
  2755. (((uint64_t)sum))));
  2756. }
  2757. /**
  2758. \brief Dual 16-bit signed multiply with exchange subtract with 64-bit accumulate.
  2759. \details This function enables you to exchange the halfwords of the second operand, perform two 16-bit multiplications,
  2760. adding the difference of the products to a 64-bit accumulate operand. Overflow cannot occur during the
  2761. multiplications or the subtraction. Overflow can occur as a result of the 64-bit addition, and this overflow
2762. is not detected. Instead, the result wraps round modulo 2^64.
  2763. \param [in] x first 16-bit operands for each multiplication.
  2764. \param [in] y second 16-bit operands for each multiplication.
  2765. \param [in] sum accumulate value.
  2766. \return the difference of the product of each multiplication, added to the accumulate value.
  2767. \remark
  2768. p1 = val1[15:0] * val2[31:16] \n
  2769. p2 = val1[31:16] * val2[15:0] \n
  2770. res[63:32][31:0] = p1 - p2 + val3[63:32][31:0]
  2771. */
  2772. __ALWAYS_STATIC_INLINE uint64_t __SMLSLDX(uint32_t x, uint32_t y, uint64_t sum)
  2773. {
  2774. return ((uint64_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y) >> 16)) -
  2775. ((((int32_t)x) >> 16) * (((int32_t)y << 16) >> 16)) +
  2776. (((uint64_t)sum))));
  2777. }
  2778. /**
  2779. \brief 32-bit signed multiply with 32-bit truncated accumulator.
  2780. \details This function enables you to perform a signed 32-bit multiplications, adding the most
  2781. significant 32 bits of the 64-bit result to a 32-bit accumulate operand.
  2782. \param [in] x first operand for multiplication.
  2783. \param [in] y second operand for multiplication.
  2784. \param [in] sum accumulate value.
  2785. \return the product of multiplication (most significant 32 bits) is added to the accumulate value, as a 32-bit integer.
  2786. \remark
  2787. p = val1 * val2 \n
  2788. res[31:0] = p[63:32] + val3[31:0]
  2789. */
  2790. __ALWAYS_STATIC_INLINE uint32_t __SMMLA(int32_t x, int32_t y, int32_t sum)
  2791. {
  2792. return (uint32_t)((int32_t)((int64_t)((int64_t)x * (int64_t)y) >> 32) + sum);
  2793. }
  2794. /**
  2795. \brief Sum of dual 16-bit signed multiply.
  2796. \details This function enables you to perform two 16-bit signed multiplications, adding the products together.
  2797. \param [in] x first 16-bit operands for each multiplication.
  2798. \param [in] y second 16-bit operands for each multiplication.
  2799. \return the sum of the products of the two 16-bit signed multiplications.
  2800. \remark
  2801. p1 = val1[15:0] * val2[15:0] \n
  2802. p2 = val1[31:16] * val2[31:16] \n
  2803. res[31:0] = p1 + p2
  2804. */
  2805. __ALWAYS_STATIC_INLINE uint32_t __SMUAD(uint32_t x, uint32_t y)
  2806. {
  2807. return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) +
  2808. ((((int32_t)x) >> 16) * (((int32_t)y) >> 16))));
  2809. }
  2810. /**
  2811. \brief Dual 16-bit signed multiply returning difference.
  2812. \details This function enables you to perform two 16-bit signed multiplications, taking the difference
  2813. of the products by subtracting the high halfword product from the low halfword product.
  2814. \param [in] x first 16-bit operands for each multiplication.
  2815. \param [in] y second 16-bit operands for each multiplication.
  2816. \return the difference of the products of the two 16-bit signed multiplications.
  2817. \remark
  2818. p1 = val1[15:0] * val2[15:0] \n
  2819. p2 = val1[31:16] * val2[31:16] \n
  2820. res[31:0] = p1 - p2
  2821. */
  2822. __ALWAYS_STATIC_INLINE uint32_t __SMUSD(uint32_t x, uint32_t y)
  2823. {
  2824. return ((uint32_t)(((((int32_t)x << 16) >> 16) * (((int32_t)y << 16) >> 16)) -
  2825. ((((int32_t)x) >> 16) * (((int32_t)y) >> 16))));
  2826. }
  2827. /**
  2828. \brief Dual extracted 8-bit to 16-bit signed addition.
  2829. \details This function enables you to extract two 8-bit values from the second operand (at bit positions
  2830. [7:0] and [23:16]), sign-extend them to 16-bits each, and add the results to the first operand.
  2831. \param [in] x values added to the sign-extended to 16-bit values.
  2832. \param [in] y two 8-bit values to be extracted and sign-extended.
  2833. \return the addition of val1 and val2, where the 8-bit values in val2[7:0] and
  2834. val2[23:16] have been extracted and sign-extended prior to the addition.
  2835. \remark
  2836. res[15:0] = val1[15:0] + SignExtended(val2[7:0]) \n
  2837. res[31:16] = val1[31:16] + SignExtended(val2[23:16])
  2838. */
  2839. __ALWAYS_STATIC_INLINE uint32_t __SXTAB16(uint32_t x, uint32_t y)
  2840. {
  2841. return ((uint32_t)((((((int32_t)y << 24) >> 24) + (((int32_t)x << 16) >> 16)) & (int32_t)0x0000FFFF) |
  2842. (((((int32_t)y << 8) >> 8) + (((int32_t)x >> 16) << 16)) & (int32_t)0xFFFF0000)));
  2843. }
  2844. /**
2845. \brief Dual extracted 8-bit to 16-bit unsigned addition.
  2846. \details This function enables you to extract two 8-bit values from one operand, zero-extend
  2847. them to 16 bits each, and add the results to two 16-bit values from another operand.
  2848. \param [in] x values added to the zero-extended to 16-bit values.
  2849. \param [in] y two 8-bit values to be extracted and zero-extended.
  2850. \return the addition of val1 and val2, where the 8-bit values in val2[7:0] and
  2851. val2[23:16] have been extracted and zero-extended prior to the addition.
  2852. \remark
  2853. res[15:0] = ZeroExt(val2[7:0] to 16 bits) + val1[15:0] \n
2854. res[31:16] = ZeroExt(val2[23:16] to 16 bits) + val1[31:16]
  2855. */
  2856. __ALWAYS_STATIC_INLINE uint32_t __UXTAB16(uint32_t x, uint32_t y)
  2857. {
  2858. return ((uint32_t)(((((y << 24) >> 24) + ((x << 16) >> 16)) & 0x0000FFFF) |
  2859. ((((y << 8) >> 8) + ((x >> 16) << 16)) & 0xFFFF0000)));
  2860. }
  2861. /**
  2862. \brief Dual extract 8-bits and sign extend each to 16-bits.
  2863. \details This function enables you to extract two 8-bit values from an operand and sign-extend them to 16 bits each.
  2864. \param [in] x two 8-bit values in val[7:0] and val[23:16] to be sign-extended.
  2865. \return the 8-bit values sign-extended to 16-bit values.\n
  2866. sign-extended value of val[7:0] in the low halfword of the return value.\n
  2867. sign-extended value of val[23:16] in the high halfword of the return value.
  2868. \remark
  2869. res[15:0] = SignExtended(val[7:0]) \n
  2870. res[31:16] = SignExtended(val[23:16])
  2871. */
  2872. __ALWAYS_STATIC_INLINE uint32_t __SXTB16(uint32_t x)
  2873. {
  2874. return ((uint32_t)(((((int32_t)x << 24) >> 24) & (int32_t)0x0000FFFF) |
  2875. ((((int32_t)x << 8) >> 8) & (int32_t)0xFFFF0000)));
  2876. }
  2877. /**
  2878. \brief Dual extract 8-bits and zero-extend to 16-bits.
  2879. \details This function enables you to extract two 8-bit values from an operand and zero-extend them to 16 bits each.
  2880. \param [in] x two 8-bit values in val[7:0] and val[23:16] to be zero-extended.
2881. \return the 8-bit values zero-extended to 16-bit values.\n
2882. zero-extended value of val[7:0] in the low halfword of the return value.\n
2883. zero-extended value of val[23:16] in the high halfword of the return value.
2884. \remark
2885. res[15:0] = ZeroExtended(val[7:0]) \n
2886. res[31:16] = ZeroExtended(val[23:16])
  2887. */
  2888. __ALWAYS_STATIC_INLINE uint32_t __UXTB16(uint32_t x)
  2889. {
  2890. return ((uint32_t)((((x << 24) >> 24) & 0x0000FFFF) |
  2891. (((x << 8) >> 8) & 0xFFFF0000)));
  2892. }
  2893. #endif /* _CSI_RV32_GCC_H_ */