/* fiq_smp_call.c - FIQ-based SMP cross-call for the FIQ watchdog debugger */
  1. #include <linux/kernel.h>
  2. #include <linux/init.h>
  3. #include <linux/cpu.h>
  4. #include <linux/percpu.h>
  5. #include <linux/smp.h>
  6. #include <asm/fiq_smp_call.h>
  7. #include <mach/irqs.h>
  8. #if defined(CONFIG_FIQ_GLUE)
  9. #if defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
  10. #include <mach/mt_secure_api.h>
  11. #endif
/* csd.flags bit: set while a cross-call is in flight, cleared when done */
enum {
	CSD_FLAG_LOCK = 0x01,
};
/*
 * Per-call synchronization state shared between the requesting CPU and
 * the FIQ handlers running on the target CPUs.
 */
struct fiq_call_single_data {
	struct list_head list;		/* not referenced in this file */
	smp_call_func_t func;		/* not referenced in this file; call_function_data.func is used instead */
	void *info;			/* opaque argument forwarded to the called function */
	cpumask_var_t cpumask;		/* CPUs that have not yet acknowledged; handler clears its bit */
	u16 flags;			/* CSD_FLAG_LOCK while a call is in progress */
	u16 priv;			/* not referenced in this file */
};
/* Describes one cross-CPU FIQ call; one instance per CPU (fiq_cfd_data). */
struct call_function_data {
	struct fiq_call_single_data csd;	/* lock/acknowledge state for this call */
	fiq_smp_call_func_t func;		/* function each target CPU executes */
	atomic_t refs;				/* target CPUs that have not finished yet */
	cpumask_var_t cpumask;			/* target set: online CPUs minus the caller */
};
/* One call slot per CPU; current_cfd_data points at the active slot, or NULL. */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, fiq_cfd_data);
static struct call_function_data *current_cfd_data;
  31. static int __csd_lock_wait(struct fiq_call_single_data *data)
  32. {
  33. int cpu, nr_online_cpus = 0;
  34. while (data->flags & CSD_FLAG_LOCK) {
  35. for_each_cpu(cpu, data->cpumask) {
  36. if (cpu_online(cpu))
  37. nr_online_cpus++;
  38. }
  39. if (!nr_online_cpus)
  40. return -ENXIO;
  41. cpu_relax();
  42. }
  43. return 0;
  44. }
/*
 * Acquire @data for a new call: wait for any previous call to complete,
 * then mark this one as in flight.
 *
 * NOTE(review): the return value of __csd_lock_wait() is ignored here;
 * on -ENXIO (no target CPU online) we proceed while the flag may still
 * be set -- confirm this is intended for the FIQ-WDT-only use case.
 */
static void __csd_lock(struct fiq_call_single_data *data)
{
	__csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;
	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified fiq_call_single_data structure:
	 */
	smp_mb();
}
/*
 * Release @data: clear CSD_FLAG_LOCK so a waiter in __csd_lock_wait()
 * (or the next __csd_lock()) can proceed.  Warns if called on a slot
 * that is not actually locked.
 */
static void __csd_unlock(struct fiq_call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));
	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();
	data->flags &= ~CSD_FLAG_LOCK;
}
#if defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
/*
 * Ask the secure world (Trustonic TEE) to deliver the FIQ to the CPUs
 * in @mask.  The mask is flattened into its first bitmap word and passed
 * as the argument of the MC_FC_MTK_AEEDUMP secure monitor fastcall.
 * NOTE(review): assumes the CPU count fits in one unsigned long word of
 * the bitmap -- true for the small core counts this targets.
 */
static void fiq_security_fastcall(const struct cpumask *mask)
{
	unsigned long map = *cpus_addr(*mask);
	mt_secure_call(MC_FC_MTK_AEEDUMP, map, 0, 0);
}
#endif
/*
 * Read the physical CPU number from MPIDR (CP15 c0, c0, 5).
 *   bits[1:0]  -- CPU id within the cluster
 *   bits[11:8] -- cluster id; (id & 0xF00) >> 6 equals cluster * 4
 * Returns cluster * 4 + cpu, i.e. a flat id assuming at most 4 CPUs per
 * cluster (NOTE(review): confirm for this SoC family).
 */
static int get_HW_cpuid(void)
{
	int id;
	asm ("mrc p15, 0, %0, c0, c0, 5 @ Get CPUID\n" : "=r"(id));
	return (id&0x3)+((id&0xF00)>>6);
}
  78. /*
  79. * fiq_smp_call_function: FIQ version of smp_call_function.
  80. * @func:
  81. * @info:
  82. * @wait:
  83. * Return 0 for success and error code for failure.
  84. *
  85. * This function is designed for the debugger only.
  86. * Other kernel code or drivers should NOT use this function.
  87. * This function can only be used in the FIQ-WDT handler.
  88. */
  89. int fiq_smp_call_function(fiq_smp_call_func_t func, void *info, int wait)
  90. {
  91. struct cpumask *mask = (struct cpumask *)cpu_online_mask;
  92. struct call_function_data *data;
  93. int refs, install_csd, this_cpu = 0;
  94. this_cpu = get_HW_cpuid();
  95. data = &__get_cpu_var(fiq_cfd_data);
  96. __csd_lock(&data->csd);
  97. atomic_set(&data->refs, 0);
  98. data->func = func;
  99. data->csd.info = info;
  100. /* make sure data prepared before next step */
  101. smp_wmb();
  102. cpumask_and(data->cpumask, mask, cpu_online_mask);
  103. cpumask_clear_cpu(this_cpu, data->cpumask);
  104. refs = cpumask_weight(data->cpumask);
  105. cpumask_and(data->csd.cpumask, data->cpumask, data->cpumask);
  106. if (unlikely(!refs)) {
  107. __csd_unlock(&data->csd);
  108. goto fiq_smp_call_function_exit;
  109. }
  110. /* poll to install data on current_cfd_data */
  111. install_csd = 0;
  112. do {
  113. #if 0 /* no need to protect due to FIQ-WDT */
  114. spin_lock(&fiq_smp_call_lock);
  115. #endif
  116. if (!current_cfd_data) {
  117. atomic_set(&data->refs, refs);
  118. current_cfd_data = data;
  119. install_csd = 1;
  120. }
  121. #if 0
  122. spin_unlock(&fiq_smp_call_lock);
  123. #endif
  124. } while (!install_csd);
  125. /* make sure data prepared before sending SGI */
  126. smp_mb();
  127. /* send a message to all CPUs in the map */
  128. #if !defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
  129. irq_raise_softirq(data->cpumask, FIQ_SMP_CALL_SGI);
  130. #else
  131. fiq_security_fastcall(data->cpumask);
  132. #endif
  133. if (wait)
  134. __csd_lock_wait(&data->csd);
  135. fiq_smp_call_function_exit:
  136. return 0;
  137. }
/*
 * FIQ handler run on each target CPU when the SGI (or secure fastcall)
 * sent by fiq_smp_call_function() arrives.  Executes the installed
 * function, then acknowledges by clearing this CPU's bit and dropping
 * the refcount; the last CPU to finish releases the csd lock and clears
 * current_cfd_data.
 */
static void fiq_smp_call_handler(void *arg, void *regs, void *svc_sp)
{
	struct call_function_data *data;
	int cpu = 0, refs;
	fiq_smp_call_func_t func;
	/* get the current cpu id */
	/*
	 * NOTE(review): only MPIDR bits[3:0] are read here, while the sender
	 * numbers CPUs as cluster*4 + cpu (get_HW_cpuid).  On multi-cluster
	 * parts the two schemes differ -- confirm which is intended.
	 */
	asm volatile ("MRC p15, 0, %0, c0, c0, 5\n" "AND %0, %0, #0xf\n"
	: "+r"(cpu) : : "cc");
	data = current_cfd_data;
	if (data) {
		func = data->func;
		func(data->csd.info, regs, svc_sp);
		/* acknowledge: clear our bit so a waiter can see who is done */
		cpumask_clear_cpu(cpu, data->csd.cpumask);
		refs = atomic_dec_return(&data->refs);
		if (refs == 0) {
			/* last CPU out releases the call slot */
			__csd_unlock(&data->csd);
			current_cfd_data = NULL;
		}
	}
}
  158. static void __fiq_smp_call_init(void *info)
  159. {
  160. int err;
  161. err = request_fiq(FIQ_SMP_CALL_SGI, fiq_smp_call_handler, 0, NULL);
  162. if (err)
  163. pr_err("fail to request FIQ for FIQ_SMP_CALL_SGI\n");
  164. else
  165. pr_debug("Request FIQ for FIQ_SMP_CALL_SGI\n");
  166. }
/*
 * Boot-time setup: register the FIQ SMP-call handler on the boot CPU
 * first, then on every other online CPU via a regular (IRQ-based)
 * cross call, waiting for all of them to complete.
 */
static int __init fiq_smp_call_init(void)
{
	__fiq_smp_call_init(NULL);
	smp_call_function(__fiq_smp_call_init, NULL, 1);
	return 0;
}
arch_initcall(fiq_smp_call_init);
  174. #endif /* CONFIG_FIQ_GLUE */