0001-RTT-VMM-implement-dual-system-running-on-realview-pb.patch

From d001bd8483c805c45a42d9bd0468a96722e72875 Mon Sep 17 00:00:00 2001
From: Grissiom <chaos.proton@gmail.com>
Date: Thu, 1 Aug 2013 14:59:56 +0800
Subject: [PATCH 1/2] RTT-VMM: implement dual system running on realview-pb-a8

Signed-off-by: Grissiom <chaos.proton@gmail.com>
Signed-off-by: Bernard.Xiong <bernard.xiong@gmail.com>
---
arch/arm/Kconfig | 1 +
arch/arm/Makefile | 1 +
arch/arm/common/gic.c | 67 +++++++++++++-
arch/arm/include/asm/assembler.h | 8 +-
arch/arm/include/asm/domain.h | 7 ++
arch/arm/include/asm/irqflags.h | 84 ++++++++++++-----
arch/arm/include/asm/mach/map.h | 5 +
arch/arm/include/vmm/vmm.h | 35 +++++++
arch/arm/include/vmm/vmm_config.h | 7 ++
arch/arm/kernel/entry-armv.S | 30 +++++-
arch/arm/kernel/entry-common.S | 3 +
arch/arm/kernel/entry-header.S | 15 ++-
arch/arm/mach-omap2/irq.c | 12 +++
arch/arm/mm/fault.c | 9 ++
arch/arm/mm/init.c | 8 ++
arch/arm/mm/mmu.c | 44 +++++++++
arch/arm/vmm/Kconfig | 49 ++++++++++
arch/arm/vmm/Makefile | 10 ++
arch/arm/vmm/README | 1 +
arch/arm/vmm/am33xx/intc.h | 13 +++
arch/arm/vmm/am33xx/softirq.c | 14 +++
arch/arm/vmm/am33xx/virq.c | 48 ++++++++++
arch/arm/vmm/realview_a8/softirq.c | 12 +++
arch/arm/vmm/vmm.c | 32 +++++++
arch/arm/vmm/vmm_traps.c | 37 ++++++++
arch/arm/vmm/vmm_virhw.h | 59 ++++++++++++
arch/arm/vmm/vmm_virq.c | 183 +++++++++++++++++++++++++++++++++++++
27 files changed, 767 insertions(+), 27 deletions(-)
create mode 100644 arch/arm/include/vmm/vmm.h
create mode 100644 arch/arm/include/vmm/vmm_config.h
create mode 100644 arch/arm/vmm/Kconfig
create mode 100644 arch/arm/vmm/Makefile
create mode 100644 arch/arm/vmm/README
create mode 100644 arch/arm/vmm/am33xx/intc.h
create mode 100644 arch/arm/vmm/am33xx/softirq.c
create mode 100644 arch/arm/vmm/am33xx/virq.c
create mode 100644 arch/arm/vmm/realview_a8/softirq.c
create mode 100644 arch/arm/vmm/vmm.c
create mode 100644 arch/arm/vmm/vmm_traps.c
create mode 100644 arch/arm/vmm/vmm_virhw.h
create mode 100644 arch/arm/vmm/vmm_virq.c
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 67874b8..eb82cd6 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1164,6 +1164,7 @@ config ARM_TIMER_SP804
select HAVE_SCHED_CLOCK
source arch/arm/mm/Kconfig
+source arch/arm/vmm/Kconfig
config ARM_NR_BANKS
int
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30c443c..262c8e2 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -252,6 +252,7 @@ core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/
core-$(CONFIG_FPE_FASTFPE) += $(FASTFPE_OBJ)
core-$(CONFIG_VFP) += arch/arm/vfp/
core-$(CONFIG_XEN) += arch/arm/xen/
+core-$(CONFIG_ARM_VMM) += arch/arm/vmm/
# If we have a machine-specific directory, then include it in the build.
core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 87dfa90..a9d7357 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -45,6 +45,11 @@
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>
+#ifdef CONFIG_ARM_VMM
+#include <vmm/vmm.h>
+#include "../vmm/vmm_virhw.h"
+#endif
+
union gic_base {
void __iomem *common_base;
void __percpu __iomem **percpu_base;
@@ -276,12 +281,72 @@ static int gic_set_wake(struct irq_data *d, unsigned int on)
#define gic_set_wake NULL
#endif
+#ifdef CONFIG_ARM_VMM
+void vmm_irq_handle(struct gic_chip_data *gic, struct pt_regs *regs)
+{
+ unsigned long flags;
+ struct vmm_context* _vmm_context;
+
+ _vmm_context = vmm_context_get();
+
+ while (_vmm_context->virq_pended) {
+ int index;
+
+ flags = vmm_irq_save();
+ _vmm_context->virq_pended = 0;
+ vmm_irq_restore(flags);
+
+ /* get the pending interrupt */
+ for (index = 0; index < IRQS_NR_32; index++) {
+ int pdbit;
+
+ for (pdbit = __builtin_ffs(_vmm_context->virq_pending[index]);
+ pdbit != 0;
+ pdbit = __builtin_ffs(_vmm_context->virq_pending[index])) {
+ unsigned long inner_flag;
+ int irqnr, oirqnr;
+
+ pdbit--;
+
+ inner_flag = vmm_irq_save();
+ _vmm_context->virq_pending[index] &= ~(1 << pdbit);
+ vmm_irq_restore(inner_flag);
+
+ oirqnr = pdbit + index * 32;
+ if (likely(oirqnr > 15 && oirqnr < 1021)) {
+ irqnr = irq_find_mapping(gic->domain, oirqnr);
+ handle_IRQ(irqnr, regs);
+ } else if (oirqnr < 16) {
+ /* soft IRQs are EOIed by the host. */
#ifdef CONFIG_SMP
+ handle_IPI(oirqnr, regs);
#endif
+ }
+ /* unmask the interrupt */
+ /* FIXME: maybe we don't need this */
+ writel_relaxed(1 << (oirqnr % 32),
+ gic_data_dist_base(gic)
+ + GIC_DIST_ENABLE_SET
+ + (oirqnr / 32) * 4);
+
+ }
+ }
+ }
+}
+#endif
+
asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
u32 irqstat, irqnr;
struct gic_chip_data *gic = &gic_data[0];
void __iomem *cpu_base = gic_data_cpu_base(gic);
+#ifdef CONFIG_ARM_VMM
+ if (vmm_get_status()) {
+ vmm_irq_handle(gic, regs);
+ return;
+ }
+#endif
do {
irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
irqnr = irqstat & ~0x1c00;
@@ -777,7 +842,7 @@ void __cpuinit gic_secondary_init(unsigned int gic_nr)
gic_cpu_init(&gic_data[gic_nr]);
}
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_ARM_VMM)
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
int cpu;
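
Note how the drain loop above clears virq_pended before it scans virq_pending[]: a bit the host posts mid-scan re-arms the outer while loop rather than being lost. A minimal user-space model of just that drain ordering (the field names and IRQS_NR_32 follow vmm_virhw.h, introduced later in this patch; C11 atomics stand in for the vmm_irq_save()/vmm_irq_restore() critical sections):

    /* drain.c -- user-space sketch of the vmm_irq_handle() drain loop */
    #include <stdatomic.h>
    #include <stdio.h>

    #define IRQS_NR_32 3                /* (96 + 31) / 32, as in vmm_virhw.h */

    static atomic_ulong virq_pended;
    static atomic_ulong virq_pending[IRQS_NR_32];

    static void drain(void)
    {
        while (atomic_load(&virq_pended)) {
            atomic_store(&virq_pended, 0);       /* clear *before* scanning */
            for (int i = 0; i < IRQS_NR_32; i++) {
                int b;
                while ((b = __builtin_ffsl(atomic_load(&virq_pending[i]))) != 0) {
                    b--;
                    atomic_fetch_and(&virq_pending[i], ~(1UL << b));
                    printf("dispatch vIRQ %d\n", i * 32 + b);
                }
            }
        }
    }

    int main(void)
    {
        virq_pending[1] = 1UL << 5;     /* pretend the host posted vIRQ 37 */
        virq_pended = 1;
        drain();
        return 0;
    }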
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index eb87200..b646fa7 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -82,11 +82,15 @@
*/
#if __LINUX_ARM_ARCH__ >= 6
.macro disable_irq_notrace
- cpsid i
+ stmdb sp!, {r0-r3, ip, lr}
+ bl irq_disable_asm
+ ldmia sp!, {r0-r3, ip, lr}
.endm
.macro enable_irq_notrace
- cpsie i
+ stmdb sp!, {r0-r3, ip, lr}
+ bl irq_enable_asm
+ ldmia sp!, {r0-r3, ip, lr}
.endm
#else
.macro disable_irq_notrace
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..bbc4470 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -44,6 +44,13 @@
#define DOMAIN_IO 0
#endif
+#ifdef CONFIG_ARM_VMM
+/* RT-Thread VMM memory space */
+#define DOMAIN_RTVMM 3
+/* shared memory between VMM and Linux */
+#define DOMAIN_RTVMM_SHR 4
+#endif
+
/*
* Domain types
*/
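
The two domain IDs above plug into the existing DACR encoding. Purely as an illustration of that encoding (whether Linux should get client or manager access to RT-Thread's regions is an assumption here, not something this hunk decides):

    /* domain_val(dom, type) is the existing helper in asm/domain.h:
     *   domain_val(dom, type) == (type) << (2 * (dom))
     * so granting Linux client access to both VMM domains would add: */
    #define EXAMPLE_VMM_DACR_BITS \
            (domain_val(DOMAIN_RTVMM,     DOMAIN_CLIENT) | \
             domain_val(DOMAIN_RTVMM_SHR, DOMAIN_CLIENT))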
diff --git a/arch/arm/include/asm/irqflags.h b/arch/arm/include/asm/irqflags.h
index 1e6cca5..bfaedff 100644
--- a/arch/arm/include/asm/irqflags.h
+++ b/arch/arm/include/asm/irqflags.h
@@ -9,34 +9,56 @@
* CPU interrupt mask handling.
*/
#if __LINUX_ARM_ARCH__ >= 6
+#include <vmm/vmm.h> /* VMM only supports ARMv7 right now */
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
- asm volatile(
- " mrs %0, cpsr @ arch_local_irq_save\n"
- " cpsid i"
- : "=r" (flags) : : "memory", "cc");
+ if (vmm_status)
+ {
+ flags = vmm_save_virq();
+ }
+ else
+ {
+ asm volatile(
+ " mrs %0, cpsr @ arch_local_irq_save\n"
+ " cpsid i"
+ : "=r" (flags) : : "memory", "cc");
+ }
return flags;
}
static inline void arch_local_irq_enable(void)
{
- asm volatile(
- " cpsie i @ arch_local_irq_enable"
- :
- :
- : "memory", "cc");
+ if (vmm_status)
+ {
+ vmm_enable_virq();
+ }
+ else
+ {
+ asm volatile(
+ " cpsie i @ arch_local_irq_enable"
+ :
+ :
+ : "memory", "cc");
+ }
}
static inline void arch_local_irq_disable(void)
{
- asm volatile(
- " cpsid i @ arch_local_irq_disable"
- :
- :
- : "memory", "cc");
+ if (vmm_status)
+ {
+ vmm_disable_virq();
+ }
+ else
+ {
+ asm volatile(
+ " cpsid i @ arch_local_irq_disable"
+ :
+ :
+ : "memory", "cc");
+ }
}
#define local_fiq_enable() __asm__("cpsie f @ __stf" : : : "memory", "cc")
@@ -128,9 +150,17 @@ static inline void arch_local_irq_disable(void)
static inline unsigned long arch_local_save_flags(void)
{
unsigned long flags;
- asm volatile(
- " mrs %0, cpsr @ local_save_flags"
- : "=r" (flags) : : "memory", "cc");
+
+ if (vmm_status)
+ {
+ flags = vmm_return_virq();
+ }
+ else
+ {
+ asm volatile(
+ " mrs %0, cpsr @ local_save_flags"
+ : "=r" (flags) : : "memory", "cc");
+ }
return flags;
}
@@ -139,15 +169,25 @@ static inline unsigned long arch_local_save_flags(void)
*/
static inline void arch_local_irq_restore(unsigned long flags)
{
- asm volatile(
- " msr cpsr_c, %0 @ local_irq_restore"
- :
- : "r" (flags)
- : "memory", "cc");
+ if (vmm_status)
+ {
+ vmm_restore_virq(flags);
+ }
+ else
+ {
+ asm volatile(
+ " msr cpsr_c, %0 @ local_irq_restore"
+ :
+ : "r" (flags)
+ : "memory", "cc");
+ }
}
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
+ if (vmm_status)
+ return (flags == 0x01);
+
return flags & PSR_I_BIT;
}
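
The net effect: once vmm_status is set, the 'flags' cookie is the virtual mask state (0x01 = vIRQs masked) rather than a CPSR image, which is why arch_irqs_disabled_flags() compares against 0x01. Call sites are unchanged; a typical sequence, for illustration:

    unsigned long flags;

    flags = arch_local_irq_save();   /* vmm_save_virq() under VMM      */
    /* critical section: vIRQs accumulate in virq_pending[] meanwhile  */
    arch_local_irq_restore(flags);   /* vmm_restore_virq(); re-raises
                                        the trigger IRQ if any pended  */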
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141f..502b341 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -35,6 +35,11 @@ struct map_desc {
#define MT_MEMORY_SO 14
#define MT_MEMORY_DMA_READY 15
+#ifdef CONFIG_ARM_VMM
+#define MT_RTVMM 16
+#define MT_RTVMM_SHARE 17
+#endif
+
#ifdef CONFIG_MMU
extern void iotable_init(struct map_desc *, int);
extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
diff --git a/arch/arm/include/vmm/vmm.h b/arch/arm/include/vmm/vmm.h
new file mode 100644
index 0000000..3ff3f31
--- /dev/null
+++ b/arch/arm/include/vmm/vmm.h
@@ -0,0 +1,35 @@
+#ifndef __LINUX_VMM_H__
+#define __LINUX_VMM_H__
+
+#include <linux/compiler.h>
+
+#include "vmm_config.h"
+
+struct irq_domain;
+struct pt_regs;
+
+extern int vmm_status;
+extern struct vmm_context *_vmm_context;
+
+/* VMM context routines */
+void vmm_context_init(void* context);
+struct vmm_context* vmm_context_get(void);
+
+void vmm_set_status(int status);
+int vmm_get_status(void);
+
+void vmm_mem_init(void);
+void vmm_raise_softirq(int irq);
+
+/* VMM vIRQ routines */
+unsigned long vmm_save_virq(void);
+unsigned long vmm_return_virq(void);
+
+void vmm_restore_virq(unsigned long flags);
+void vmm_enable_virq(void);
+void vmm_disable_virq(void);
+void vmm_enter_hw_noirq(void);
+
+void vmm_raise_softirq(int irq);
+
+#endif
diff --git a/arch/arm/include/vmm/vmm_config.h b/arch/arm/include/vmm/vmm_config.h
new file mode 100644
index 0000000..cce5e8a
--- /dev/null
+++ b/arch/arm/include/vmm/vmm_config.h
@@ -0,0 +1,7 @@
+#ifndef __LINUX_VMM_CONFIG_H__
+#define __LINUX_VMM_CONFIG_H__
+
+#define HOST_VMM_ADDR_END CONFIG_HOST_VMM_ADDR_END
+#define HOST_VMM_ADDR_BEGIN (CONFIG_HOST_VMM_ADDR_END - CONFIG_HOST_VMM_SIZE)
+
+#endif
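
With the realview-pb-a8 defaults from the Kconfig added later in this patch (HOST_VMM_ADDR_END=0xE0000000, HOST_VMM_SIZE=0x400000, RTVMM_SHARED_SIZE=0x100000), the window works out as below; the numbers are only the defaults, not requirements:

    /* HOST_VMM_ADDR_BEGIN = 0xE0000000 - 0x400000 = 0xDFC00000
     *
     *   0xDFC00000 .. 0xDFD00000   shared region  (MT_RTVMM_SHARE)
     *   0xDFD00000 .. 0xE0000000   VMM private    (MT_RTVMM)
     *
     * (the split is performed by devicemaps_init() in the mmu.c hunk below) */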
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 0f82098..80f1681 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -182,6 +182,15 @@ ENDPROC(__und_invalid)
@
stmia r7, {r2 - r6}
+ stmdb sp!, {r0-r3, ip, lr}
+ mov r0, r5
+ add r1, sp, #4*6
+ bl vmm_save_virq_spsr_asm
+ mov r5, r0
+ bl vmm_switch_nohwirq_to_novirq
+ ldmia sp!, {r0-r3, ip, lr}
+ str r5, [sp, #S_PSR] @ fix the pushed SPSR
+
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
@@ -208,6 +217,23 @@ __dabt_svc:
UNWIND(.fnend )
ENDPROC(__dabt_svc)
+ .macro svc_exit_irq, rpsr
+ cpsid i
+ msr spsr_cxsf, \rpsr
+ mov r0, \rpsr
+ bl vmm_on_svc_exit_irq
+#if defined(CONFIG_CPU_V6)
+ ldr r0, [sp]
+ strex r1, r2, [sp] @ clear the exclusive monitor
+ ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr
+#elif defined(CONFIG_CPU_32v6K)
+ clrex @ clear the exclusive monitor
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+#else
+ ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+#endif
+ .endm
+
.align 5
__irq_svc:
svc_entry
@@ -228,7 +254,7 @@ __irq_svc:
@ the first place, so there's no point checking the PSR I bit.
bl trace_hardirqs_on
#endif
- svc_exit r5 @ return from exception
+ svc_exit_irq r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__irq_svc)
@@ -393,6 +419,8 @@ ENDPROC(__pabt_svc)
@
zero_fp
+ bl vmm_switch_nohwirq_to_novirq
+
#ifdef CONFIG_IRQSOFF_TRACER
bl trace_hardirqs_off
#endif
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index a6c301e..325a26e 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -349,6 +349,9 @@ ENTRY(vector_swi)
str lr, [sp, #S_PC] @ Save calling PC
str r8, [sp, #S_PSR] @ Save CPSR
str r0, [sp, #S_OLD_R0] @ Save OLD_R0
+ stmdb sp!, {r0-r3, ip, lr}
+ bl vmm_switch_nohwirq_to_novirq
+ ldmia sp!, {r0-r3, ip, lr}
zero_fp
/*
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 9a8531e..9e438dc 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -75,7 +75,11 @@
#ifndef CONFIG_THUMB2_KERNEL
.macro svc_exit, rpsr
- msr spsr_cxsf, \rpsr
+ cpsid i
+ mov r0, \rpsr
+ bl vmm_restore_virq_asm @ restore the IRQ to emulate
+ @ the behavior of ldmia {}^
+ msr spsr_cxsf, r0
#if defined(CONFIG_CPU_V6)
ldr r0, [sp]
strex r1, r2, [sp] @ clear the exclusive monitor
@@ -90,6 +94,10 @@
.macro restore_user_regs, fast = 0, offset = 0
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
+ @ Protect the SPSR and the stack: registers are pushed onto this
+ @ stack, so while sp does not yet point to the bottom of the frame,
+ @ IRQs must be disabled.
+ cpsid i
ldr lr, [sp, #\offset + S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
#if defined(CONFIG_CPU_V6)
@@ -105,6 +113,11 @@
mov r0, r0 @ ARMv5T and earlier require a nop
@ after ldm {}^
add sp, sp, #S_FRAME_SIZE - S_PC
+ @ TODO: in some conditions the call to vmm_on_ret_to_usr is useless.
+ stmdb sp!, {r0-r3, ip, lr}
+ mrs r0, spsr @ debug code
+ bl vmm_on_ret_to_usr
+ ldmia sp!, {r0-r3, ip, lr}
movs pc, lr @ return & move spsr_svc into cpsr
.endm
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 3926f37..252577f 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -23,6 +23,10 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#ifdef CONFIG_ARM_VMM
+#include <vmm/vmm.h>
+#endif
+
#include "soc.h"
#include "iomap.h"
#include "common.h"
@@ -223,6 +227,14 @@ static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs
{
u32 irqnr;
+#ifdef CONFIG_ARM_VMM
+ if (vmm_get_status())
+ {
+ vmm_irq_handle(base_addr, domain, regs);
+ return;
+ }
+#endif
+
do {
irqnr = readl_relaxed(base_addr + 0x98);
if (irqnr)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5dbf13f..e76ba74 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -255,6 +255,10 @@ out:
return fault;
}
+#ifdef CONFIG_ARM_VMM
+#include <vmm/vmm.h>
+#endif
+
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
@@ -268,6 +272,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (notify_page_fault(regs, fsr))
return 0;
+#ifdef CONFIG_ARM_VMMX
+ WARN(HOST_VMM_ADDR_BEGIN < regs->ARM_pc &&
+ regs->ARM_pc < HOST_VMM_ADDR_END);
+#endif
+
tsk = current;
mm = tsk->mm;
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ad722f1..ebb4e7f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -34,6 +34,10 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
+#ifdef CONFIG_ARM_VMM
+#include <vmm/vmm.h>
+#endif
+
#include "mm.h"
static unsigned long phys_initrd_start __initdata = 0;
@@ -338,6 +342,10 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
+#ifdef CONFIG_ARM_VMM
+ memblock_reserve(__pa(HOST_VMM_ADDR_BEGIN), HOST_VMM_ADDR_END - HOST_VMM_ADDR_BEGIN);
+#endif
+
/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
memblock_reserve(__pa(_sdata), _end - _sdata);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ce328c7..7e7d0ca 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -294,6 +294,20 @@ static struct mem_type mem_types[] = {
.prot_l1 = PMD_TYPE_TABLE,
.domain = DOMAIN_KERNEL,
},
+#ifdef CONFIG_ARM_VMM
+ [MT_RTVMM] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_RTVMM,
+ },
+ [MT_RTVMM_SHARE] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+ .domain = DOMAIN_RTVMM_SHR,
+ },
+#endif
};
const struct mem_type *get_mem_type(unsigned int type)
@@ -450,6 +464,9 @@ static void __init build_mem_type_table(void)
mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+#ifdef CONFIG_ARM_VMM
+ /* FIXME */
+#endif
mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
@@ -503,6 +520,12 @@ static void __init build_mem_type_table(void)
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+#ifdef CONFIG_ARM_VMM
+ mem_types[MT_RTVMM].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_RTVMM].prot_pte |= kern_pgprot;
+ mem_types[MT_RTVMM_SHARE].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_RTVMM_SHARE].prot_pte |= kern_pgprot;
+#endif
mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
@@ -1152,6 +1175,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
#endif
/*
+ * Create mappings for RT-Thread VMM and its shared memory with Linux
+ */
+#ifdef CONFIG_ARM_VMM
+ /* the TEXCB attribute is not right yet */
+ /* shared memory region comes first */
+ map.pfn = __phys_to_pfn(virt_to_phys((void*)HOST_VMM_ADDR_BEGIN));
+ map.virtual = HOST_VMM_ADDR_BEGIN;
+ map.length = CONFIG_RTVMM_SHARED_SIZE;
+ map.type = MT_RTVMM_SHARE;
+ create_mapping(&map);
+
+ /* vmm private region comes next */
+ map.pfn = __phys_to_pfn(virt_to_phys((void*)HOST_VMM_ADDR_BEGIN
+ + CONFIG_RTVMM_SHARED_SIZE));
+ map.virtual = HOST_VMM_ADDR_BEGIN + CONFIG_RTVMM_SHARED_SIZE;
+ map.length = CONFIG_HOST_VMM_SIZE - CONFIG_RTVMM_SHARED_SIZE;
+ map.type = MT_RTVMM;
+ create_mapping(&map);
+#endif
+
+ /*
* Create a mapping for the machine vectors at the high-vectors
* location (0xffff0000). If we aren't using high-vectors, also
* create a mapping at the low-vectors virtual address.
diff --git a/arch/arm/vmm/Kconfig b/arch/arm/vmm/Kconfig
new file mode 100644
index 0000000..d852056
--- /dev/null
+++ b/arch/arm/vmm/Kconfig
@@ -0,0 +1,49 @@
+menu "RT-Thread VMM Features"
+
+# ARM-VMM
+config ARM_VMM
+ bool "Support RT-Thread VMM on ARM Cortex-A8"
+ depends on MACH_REALVIEW_PBA8
+ help
+ RT-Thread VMM implementation on ARM Cortex-A8
+
+ Say Y if you want support for the RT-Thread VMM.
+ Otherwise, say N.
+
+if SOC_AM33XX
+config HOST_VMM_ADDR_END
+ hex "End address of VMM"
+ depends on ARM_VMM
+ default 0xE0000000
+ help
+ The end address of VMM space. Normally, it's the
+ end address of DDR memory.
+endif
+
+if MACH_REALVIEW_PBA8
+config HOST_VMM_ADDR_END
+ hex "End address of VMM"
+ depends on ARM_VMM
+ default 0xE0000000
+ help
+ The end address of VMM space. Normally, it's the
+ end address of DDR memory.
+endif
+
+config HOST_VMM_SIZE
+ hex "Size of VMM space"
+ depends on ARM_VMM
+ default 0x400000
+ help
+ The size of VMM space.
+
+config RTVMM_SHARED_SIZE
+ hex "Size of shared memory space between rt-vmm and Linux"
+ depends on ARM_VMM
+ default 0x100000
+ help
+ The size of the shared memory space between rt-vmm and Linux. This
+ shared space lies within HOST_VMM_SIZE, so it must be smaller than
+ HOST_VMM_SIZE.
+
+endmenu
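
For reference, a configuration fragment that exercises these options with their defaults might look like this (illustrative only):

    CONFIG_MACH_REALVIEW_PBA8=y
    CONFIG_ARM_VMM=y
    CONFIG_HOST_VMM_ADDR_END=0xE0000000
    CONFIG_HOST_VMM_SIZE=0x400000
    CONFIG_RTVMM_SHARED_SIZE=0x100000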
diff --git a/arch/arm/vmm/Makefile b/arch/arm/vmm/Makefile
new file mode 100644
index 0000000..127e43a
--- /dev/null
+++ b/arch/arm/vmm/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the linux arm-vmm
+#
+
+obj-$(CONFIG_ARM_VMM) += vmm.o vmm_traps.o vmm_virq.o
+
+ifeq ($(CONFIG_ARM_VMM),y)
+obj-$(CONFIG_SOC_AM33XX) += am33xx/softirq.o am33xx/virq.o
+obj-$(CONFIG_MACH_REALVIEW_PBA8) += realview_a8/softirq.o
+endif
diff --git a/arch/arm/vmm/README b/arch/arm/vmm/README
new file mode 100644
index 0000000..24f1b42
--- /dev/null
+++ b/arch/arm/vmm/README
@@ -0,0 +1 @@
+Linux VMM kernel routines
diff --git a/arch/arm/vmm/am33xx/intc.h b/arch/arm/vmm/am33xx/intc.h
new file mode 100644
index 0000000..6c24f8d
--- /dev/null
+++ b/arch/arm/vmm/am33xx/intc.h
@@ -0,0 +1,13 @@
+#ifndef __INTC_H__
+#define __INTC_H__
+
+#define OMAP34XX_IC_BASE 0x48200000
+
+#define INTC_SIR_SET0 0x0090
+#define INTC_MIR_CLEAR0 0x0088
+
+#define OMAP2_L4_IO_OFFSET 0xb2000000
+#define OMAP2_L4_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_L4_IO_OFFSET) /* L4 */
+#define OMAP3_IRQ_BASE OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE)
+
+#endif
diff --git a/arch/arm/vmm/am33xx/softirq.c b/arch/arm/vmm/am33xx/softirq.c
new file mode 100644
index 0000000..5648496
--- /dev/null
+++ b/arch/arm/vmm/am33xx/softirq.c
@@ -0,0 +1,14 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+#include <vmm/vmm.h>
+#include "../vmm_virhw.h"
+#include "intc.h"
+
+void vmm_raise_softirq(int irq)
+{
+ writel_relaxed(1 << (irq % 32),
+ OMAP3_IRQ_BASE + INTC_SIR_SET0 + (irq / 32) * 4);
+}
+EXPORT_SYMBOL(vmm_raise_softirq);
diff --git a/arch/arm/vmm/am33xx/virq.c b/arch/arm/vmm/am33xx/virq.c
new file mode 100644
index 0000000..4ef7671
--- /dev/null
+++ b/arch/arm/vmm/am33xx/virq.c
@@ -0,0 +1,48 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irqdomain.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <vmm/vmm.h>
+#include "../vmm_virhw.h"
+#include "intc.h"
+
+void vmm_irq_handle(void __iomem *base_addr, struct irq_domain *domain,
+ struct pt_regs *regs)
+{
+ unsigned long flags;
+ struct vmm_context* _vmm_context;
+
+ _vmm_context = vmm_context_get();
+
+ while (_vmm_context->virq_pended) {
+ int index;
+
+ flags = vmm_irq_save();
+ _vmm_context->virq_pended = 0;
+ vmm_irq_restore(flags);
+
+ /* get the pending interrupt */
+ for (index = 0; index < IRQS_NR_32; index++) {
+ int pdbit;
+
+ for (pdbit = __builtin_ffs(_vmm_context->virq_pending[index]);
+ pdbit != 0;
+ pdbit = __builtin_ffs(_vmm_context->virq_pending[index])) {
+ unsigned long inner_flag;
+ int irqnr;
+
+ pdbit--;
+
+ inner_flag = vmm_irq_save();
+ _vmm_context->virq_pending[index] &= ~(1 << pdbit);
+ vmm_irq_restore(inner_flag);
+
+ irqnr = irq_find_mapping(domain, pdbit + index * 32);
+ handle_IRQ(irqnr, regs);
+ }
+ }
+ }
+}
diff --git a/arch/arm/vmm/realview_a8/softirq.c b/arch/arm/vmm/realview_a8/softirq.c
new file mode 100644
index 0000000..a52b79c7
--- /dev/null
+++ b/arch/arm/vmm/realview_a8/softirq.c
@@ -0,0 +1,12 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/hardware/gic.h>
+
+#include <vmm/vmm.h>
+
+void vmm_raise_softirq(int irq)
+{
+ gic_raise_softirq(cpumask_of(0), irq);
+}
+EXPORT_SYMBOL(vmm_raise_softirq);
diff --git a/arch/arm/vmm/vmm.c b/arch/arm/vmm/vmm.c
new file mode 100644
index 0000000..3b1d202
--- /dev/null
+++ b/arch/arm/vmm/vmm.c
@@ -0,0 +1,32 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <vmm/vmm.h>
+
+struct vmm_context* _vmm_context = NULL;
+int vmm_status = 0;
+EXPORT_SYMBOL(vmm_status);
+
+void vmm_set_status(int status)
+{
+ vmm_status = status;
+}
+EXPORT_SYMBOL(vmm_set_status);
+
+int vmm_get_status(void)
+{
+ return vmm_status;
+}
+EXPORT_SYMBOL(vmm_get_status);
+
+void vmm_context_init(void* context_addr)
+{
+ _vmm_context = (struct vmm_context*)context_addr;
+}
+EXPORT_SYMBOL(vmm_context_init);
+
+struct vmm_context* vmm_context_get(void)
+{
+ return _vmm_context;
+}
+EXPORT_SYMBOL(vmm_context_get);
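
Nothing in this patch calls vmm_context_init() or vmm_set_status(); that is left to whatever code loads RT-Thread and hands over the shared page. A hedged sketch of that hand-over (the caller and the choice of HOST_VMM_ADDR_BEGIN as the context address are assumptions, not part of this patch):

    #include <vmm/vmm.h>

    /* hypothetical hand-over, e.g. from a loader module that boots RT-Thread */
    static void example_vmm_handover(void)
    {
        /* context lives in the shared window so both kernels can reach it */
        vmm_context_init((void *)HOST_VMM_ADDR_BEGIN);

        /* ... load and start the RT-Thread image ... */

        vmm_set_status(1);  /* from here on irqflags.h routes through vIRQs */
    }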
diff --git a/arch/arm/vmm/vmm_traps.c b/arch/arm/vmm/vmm_traps.c
new file mode 100644
index 0000000..def0d90
--- /dev/null
+++ b/arch/arm/vmm/vmm_traps.c
@@ -0,0 +1,37 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/traps.h>
+#include <asm/cp15.h>
+#include <asm/cacheflush.h>
+
+void trap_set_vector(void *start, unsigned int length)
+{
+ unsigned char *ptr;
+ unsigned char *vector;
+
+ ptr = start;
+ vector = (unsigned char*)vectors_page;
+
+ /* only set IRQ and FIQ */
+#if defined(CONFIG_CPU_USE_DOMAINS)
+ /* IRQ */
+ memcpy((void *)0xffff0018, (void*)(ptr + 0x18), 4);
+ memcpy((void *)(0xffff0018 + 0x20), (void*)(ptr + 0x18 + 0x20), 4);
+
+ /* FIQ */
+ memcpy((void *)0xffff001C, (void*)(ptr + 0x1C), 4);
+ memcpy((void *)(0xffff001C + 0x20), (void*)(ptr + 0x1C + 0x20), 4);
+#else
+ /* IRQ */
+ memcpy(vector + 0x18, (void*)(ptr + 0x18), 4);
+ memcpy(vector + 0x18 + 0x20, (void*)(ptr + 0x18 + 0x20), 4);
+
+ /* FIQ */
+ memcpy(vector + 0x1C, (void*)(ptr + 0x1C), 4);
+ memcpy(vector + 0x1C + 0x20, (void*)(ptr + 0x1C + 0x20), 4);
+#endif
+ flush_icache_range(0xffff0000, 0xffff0000 + length);
+ if (!vectors_high())
+ flush_icache_range(0x00, 0x00 + length);
+}
+EXPORT_SYMBOL(trap_set_vector);
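
trap_set_vector() copies only the IRQ/FIQ slots (offsets 0x18/0x1C) and their literal words at +0x20, so RT-Thread can claim interrupts while the rest of the vector page stays Linux's. Expected usage is something like the sketch below (the rtt_vectors image and its 0x40-byte size are assumptions):

    extern char rtt_vectors[];  /* vector image provided by the RT-Thread side */

    static void example_install_vectors(void)
    {
        /* 0x40 covers the 8 vectors plus the literal pool copied above */
        trap_set_vector(rtt_vectors, 0x40);
    }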
diff --git a/arch/arm/vmm/vmm_virhw.h b/arch/arm/vmm/vmm_virhw.h
new file mode 100644
index 0000000..363cc6e
--- /dev/null
+++ b/arch/arm/vmm/vmm_virhw.h
@@ -0,0 +1,59 @@
+#ifndef __VMM_VIRTHWH__
+#define __VMM_VIRTHWH__
+
+#define REALVIEW_NR_IRQS 96
+#define IRQS_NR_32 ((REALVIEW_NR_IRQS + 31)/32)
+#define RTT_VMM_IRQ_TRIGGER 10
+
+struct vmm_context
+{
+ /* the status of vGuest irq */
+ volatile unsigned long virq_status;
+
+ /* has interrupt pended on vGuest OS IRQ */
+ volatile unsigned long virq_pended;
+
+ /* pending interrupt for vGuest OS */
+ volatile unsigned long virq_pending[IRQS_NR_32];
+};
+
+/* IRQ operation under VMM */
+static inline unsigned long vmm_irq_save(void)
+{
+ unsigned long flags;
+
+ asm volatile(
+ " mrs %0, cpsr @ arch_local_irq_save\n"
+ " cpsid i"
+ : "=r" (flags) : : "memory", "cc");
+ return flags;
+}
+
+static inline void vmm_irq_restore(unsigned long flags)
+{
+ asm volatile(
+ " msr cpsr_c, %0 @ local_irq_restore"
+ :
+ : "r" (flags)
+ : "memory", "cc");
+}
+
+static inline void vmm_irq_enable(void)
+{
+ asm volatile(
+ " cpsie i @ arch_local_irq_enable"
+ :
+ :
+ : "memory", "cc");
+}
+
+static inline void vmm_irq_disable(void)
+{
+ asm volatile(
+ " cpsid i @ arch_local_irq_disable"
+ :
+ :
+ : "memory", "cc");
+}
+
+#endif
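
The posting side of this protocol lives in RT-Thread and is not part of this patch; a minimal sketch of what it has to do, assuming the host touches the context with hardware IRQs already masked:

    /* host (RT-Thread) side: post vIRQ 'irq' to the Linux guest */
    static void host_post_virq(struct vmm_context *ctx, int irq)
    {
        ctx->virq_pending[irq / 32] |= 1UL << (irq % 32);
        ctx->virq_pended = 1;

        if (ctx->virq_status == 0) {
            /* guest has vIRQs enabled: kick it, e.g. by raising the
             * RTT_VMM_IRQ_TRIGGER software interrupt towards core 0 */
        }
    }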
diff --git a/arch/arm/vmm/vmm_virq.c b/arch/arm/vmm/vmm_virq.c
new file mode 100644
index 0000000..85886a2
--- /dev/null
+++ b/arch/arm/vmm/vmm_virq.c
@@ -0,0 +1,183 @@
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/unwind.h>
+
+#include <vmm/vmm.h>
+
+#include "vmm_virhw.h"
+
+/* VMM uses the I bit in the SPSR to save the virq status on ISR entry, so a
+ * warning keyed on the I bit being set can produce spurious results. */
+//#define VMM_WARN_ON_I_BIT
+
+extern struct vmm_context* _vmm_context;
+
+void vmm_disable_virq(void)
+{
+ unsigned long flags = vmm_irq_save();
+ _vmm_context->virq_status = 0x01;
+ vmm_irq_restore(flags);
+}
+EXPORT_SYMBOL(vmm_disable_virq);
+
+static void _vmm_raise_on_pended(void)
+{
+ /* check whether any interrupt is pended in vIRQ */
+ if (_vmm_context->virq_pended) {
+ /* trigger a soft interrupt */
+ vmm_raise_softirq(RTT_VMM_IRQ_TRIGGER);
+ return;
+ }
+
+#if 0
+ int i;
+ for (i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++) {
+ if (_vmm_context->virq_pending[i]) {
+ _vmm_context->virq_pended = 1;
+ pr_info("\n");
+ vmm_raise_softirq(RTT_VMM_IRQ_TRIGGER);
+ return;
+ }
+ }
+#endif
+}
+
+void vmm_enable_virq(void)
+{
+ unsigned long flags = vmm_irq_save();
+ _vmm_context->virq_status = 0x00;
+ _vmm_raise_on_pended();
+ vmm_irq_restore(flags);
+}
+EXPORT_SYMBOL(vmm_enable_virq);
+
+unsigned long vmm_return_virq(void)
+{
+ unsigned long flags;
+ unsigned long level;
+
+ level = vmm_irq_save();
+ flags = _vmm_context->virq_status;
+ vmm_irq_restore(level);
+
+ return flags;
+}
+EXPORT_SYMBOL(vmm_return_virq);
+
+unsigned long vmm_save_virq(void)
+{
+ int status;
+ unsigned long flags = vmm_irq_save();
+
+ status = _vmm_context->virq_status;
+ _vmm_context->virq_status = 0x01;
+ vmm_irq_restore(flags);
+
+ return status;
+}
+EXPORT_SYMBOL(vmm_save_virq);
+
+void vmm_restore_virq(unsigned long flags)
+{
+ unsigned long level;
+
+ level = vmm_irq_save();
+ _vmm_context->virq_status = flags;
+ if (_vmm_context->virq_status == 0)
+ {
+ _vmm_raise_on_pended();
+ }
+ vmm_irq_restore(level);
+}
+EXPORT_SYMBOL(vmm_restore_virq);
+
+unsigned long vmm_save_virq_spsr_asm(unsigned long spsr, struct pt_regs *regs)
+{
+ if (vmm_status) {
+ if (_vmm_context->virq_status)
+ return spsr | PSR_I_BIT;
+ }
+ return spsr;
+}
+
+void irq_enable_asm(void)
+{
+ if (vmm_status) {
+ vmm_enable_virq();
+ } else {
+ asm volatile("cpsie i" : : : "memory", "cc");
+ }
+}
+
+void irq_disable_asm(void)
+{
+ if (vmm_status) {
+ vmm_disable_virq();
+ } else {
+ asm volatile("cpsid i" : : : "memory", "cc");
+ }
+}
+
+/* Should be called when the guest enters a state in which IRQs are disabled
+ * by hardware, for example on entry to SVC, PABT or DABT mode.
+ *
+ * It re-enables the hardware IRQ; the virtual IRQ state remains unchanged.
+ */
+void vmm_switch_nohwirq_to_novirq(void)
+{
+ if (vmm_status) {
+ vmm_disable_virq();
+ asm volatile("cpsie i" : : : "memory", "cc");
+ }
+}
+
+unsigned long vmm_restore_virq_asm(unsigned long spsr)
+{
+ if (vmm_status) {
+#ifdef VMM_WARN_ON_I_BIT
+ WARN(spsr & PSR_I_BIT, "return to svc mode with I in SPSR set\n");
+#endif
+ vmm_restore_virq(!!(spsr & PSR_I_BIT));
+ return spsr & ~PSR_I_BIT;
+ } else {
+ return spsr;
+ }
+}
+
+void vmm_on_ret_to_usr(unsigned long spsr)
+{
+ if (vmm_status) {
+#ifdef VMM_WARN_ON_I_BIT
+ WARN(spsr & PSR_I_BIT, "return to user mode with I in SPSR set\n");
+#endif
+ vmm_enable_virq();
+ }
+}
+
+void vmm_on_svc_exit_irq(unsigned long spsr)
+{
+ if (vmm_status) {
+#ifdef VMM_WARN_ON_I_BIT
+ WARN(spsr & PSR_I_BIT, "exit IRQ with I in SPSR set\n");
+#endif
+ vmm_enable_virq();
+ }
+}
+
+void vmm_dump_irq(void)
+{
+ int i;
+ unsigned long cpsr;
+
+ asm volatile ("mrs %0, cpsr": "=r"(cpsr));
+
+ printk("status: %08lx, pended: %08lx, cpsr: %08lx\n",
+ _vmm_context->virq_status, _vmm_context->virq_pended, cpsr);
+ printk("pending: ");
+ for (i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++) {
+ printk("%08lx, ", _vmm_context->virq_pending[i]);
+ }
+ printk("\n");
+}
+
--
1.8.4