/*
 * Copyright (C) 2013 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/trusty/smcall.h>
#include <linux/trusty/sm_err.h>
#include <linux/trusty/trusty.h>
#ifdef CONFIG_TRUSTY_INTERRUPT_MAP
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#endif

struct trusty_irq {
        struct trusty_irq_state *is;
        struct hlist_node node;
        unsigned int irq;
        bool percpu;
        bool enable;
        struct trusty_irq __percpu *percpu_ptr;
};

struct trusty_irq_work {
        struct trusty_irq_state *is;
        struct work_struct work;
};

struct trusty_irq_irqset {
        struct hlist_head pending;
        struct hlist_head inactive;
};

struct trusty_irq_state {
        struct device *dev;
        struct device *trusty_dev;
        struct trusty_irq_work __percpu *irq_work;
        struct trusty_irq_irqset normal_irqs;
        spinlock_t normal_irqs_lock;
        struct trusty_irq_irqset __percpu *percpu_irqs;
        struct notifier_block trusty_call_notifier;
        struct notifier_block cpu_notifier;
};

#ifdef CONFIG_TRUSTY_INTERRUPT_MAP
static struct device_node *spi_node;
static struct device_node *ppi_node;
static struct trusty_irq __percpu *trusty_ipi_data[16];
static int trusty_ipi_init[16];
#endif
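
/*
 * Each IRQ registered on Trusty's behalf sits on one of two lists:
 * "inactive" while it is unmasked on the Linux side, and "pending" once
 * the handler has masked it and queued it for the secure side.  Pending
 * IRQs are unmasked again from the call notifier immediately before the
 * next call into Trusty, so the secure side gets a chance to take and
 * handle them.  normal_irqs is shared across CPUs and protected by
 * normal_irqs_lock; percpu_irqs holds the per-CPU sets.
 */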
static void trusty_irq_enable_pending_irqs(struct trusty_irq_state *is,
                                           struct trusty_irq_irqset *irqset,
                                           bool percpu)
{
        struct hlist_node *n;
        struct trusty_irq *trusty_irq;

        hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) {
                dev_dbg(is->dev,
                        "%s: enable pending irq %d, percpu %d, cpu %d\n",
                        __func__, trusty_irq->irq, percpu, smp_processor_id());
                if (percpu)
                        enable_percpu_irq(trusty_irq->irq, 0);
                else
                        enable_irq(trusty_irq->irq);
                hlist_del(&trusty_irq->node);
                hlist_add_head(&trusty_irq->node, &irqset->inactive);
        }
}

static void trusty_irq_enable_irqset(struct trusty_irq_state *is,
                                     struct trusty_irq_irqset *irqset)
{
        struct trusty_irq *trusty_irq;

        hlist_for_each_entry(trusty_irq, &irqset->inactive, node) {
                if (trusty_irq->enable) {
                        dev_warn(is->dev,
                                 "%s: percpu irq %d already enabled, cpu %d\n",
                                 __func__, trusty_irq->irq, smp_processor_id());
                        continue;
                }
                dev_dbg(is->dev, "%s: enable percpu irq %d, cpu %d\n",
                        __func__, trusty_irq->irq, smp_processor_id());
                enable_percpu_irq(trusty_irq->irq, 0);
                trusty_irq->enable = true;
        }
}

static void trusty_irq_disable_irqset(struct trusty_irq_state *is,
                                      struct trusty_irq_irqset *irqset)
{
        struct hlist_node *n;
        struct trusty_irq *trusty_irq;

        hlist_for_each_entry(trusty_irq, &irqset->inactive, node) {
                if (!trusty_irq->enable) {
                        dev_warn(is->dev,
                                 "irq %d already disabled, percpu %d, cpu %d\n",
                                 trusty_irq->irq, trusty_irq->percpu,
                                 smp_processor_id());
                        continue;
                }
                dev_dbg(is->dev, "%s: disable irq %d, percpu %d, cpu %d\n",
                        __func__, trusty_irq->irq, trusty_irq->percpu,
                        smp_processor_id());
                trusty_irq->enable = false;
                if (trusty_irq->percpu)
                        disable_percpu_irq(trusty_irq->irq);
                else
                        disable_irq_nosync(trusty_irq->irq);
        }
        hlist_for_each_entry_safe(trusty_irq, n, &irqset->pending, node) {
                if (!trusty_irq->enable) {
                        dev_warn(is->dev,
                                 "pending irq %d already disabled, percpu %d, cpu %d\n",
                                 trusty_irq->irq, trusty_irq->percpu,
                                 smp_processor_id());
                }
                dev_dbg(is->dev,
                        "%s: disable pending irq %d, percpu %d, cpu %d\n",
                        __func__, trusty_irq->irq, trusty_irq->percpu,
                        smp_processor_id());
                trusty_irq->enable = false;
                hlist_del(&trusty_irq->node);
                hlist_add_head(&trusty_irq->node, &irqset->inactive);
        }
}
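
/*
 * Runs on TRUSTY_CALL_PREPARE, just before the calling CPU enters
 * Trusty.  Re-enables every IRQ that trusty_irq_handler masked and
 * parked on a pending list, so the secure side can take them while it
 * runs.  Interrupts are already disabled here, hence the plain
 * spin_lock.
 */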
static int trusty_irq_call_notify(struct notifier_block *nb,
                                  unsigned long action, void *data)
{
        struct trusty_irq_state *is;

        BUG_ON(!irqs_disabled());

        if (action != TRUSTY_CALL_PREPARE)
                return NOTIFY_DONE;

        is = container_of(nb, struct trusty_irq_state, trusty_call_notifier);

        spin_lock(&is->normal_irqs_lock);
        trusty_irq_enable_pending_irqs(is, &is->normal_irqs, false);
        spin_unlock(&is->normal_irqs_lock);
        trusty_irq_enable_pending_irqs(is, this_cpu_ptr(is->percpu_irqs), true);

        return NOTIFY_OK;
}
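
/*
 * Workqueue bodies that give Trusty CPU time to service the queued
 * IRQs.  Pre-SMP API versions use SMC_SC_LOCKED_NOP; newer versions
 * loop on SMC_SC_NOP until Trusty reports SM_ERR_NOP_DONE, retrying
 * whenever the NOP call itself is interrupted.
 */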
static void trusty_irq_work_func_locked_nop(struct work_struct *work)
{
        int ret;
        struct trusty_irq_state *is =
                container_of(work, struct trusty_irq_work, work)->is;

        dev_dbg(is->dev, "%s\n", __func__);

        ret = trusty_std_call32(is->trusty_dev, SMC_SC_LOCKED_NOP, 0, 0, 0);
        if (ret != 0)
                dev_err(is->dev, "%s: SMC_SC_LOCKED_NOP failed %d",
                        __func__, ret);

        dev_dbg(is->dev, "%s: done\n", __func__);
}

static void trusty_irq_work_func(struct work_struct *work)
{
        int ret;
        struct trusty_irq_state *is =
                container_of(work, struct trusty_irq_work, work)->is;

        dev_dbg(is->dev, "%s\n", __func__);

        do {
                ret = trusty_std_call32(is->trusty_dev, SMC_SC_NOP, 0, 0, 0);
        } while (ret == SM_ERR_NOP_INTERRUPTED);

        if (ret != SM_ERR_NOP_DONE)
                dev_err(is->dev, "%s: SMC_SC_NOP failed %d", __func__, ret);

        dev_dbg(is->dev, "%s: done\n", __func__);
}
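
/*
 * Fires when an IRQ registered for Trusty arrives on the Linux side.
 * The IRQ is masked, moved from the inactive to the pending list, and
 * a NOP worker is scheduled on this CPU so Trusty runs and handles it;
 * the call notifier unmasks it again on the way into Trusty.
 */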
irqreturn_t trusty_irq_handler(int irq, void *data)
{
        struct trusty_irq *trusty_irq = data;
        struct trusty_irq_state *is = trusty_irq->is;
        struct trusty_irq_work *trusty_irq_work = this_cpu_ptr(is->irq_work);
        struct trusty_irq_irqset *irqset;

        dev_dbg(is->dev, "%s: irq %d, percpu %d, cpu %d, enable %d\n",
                __func__, irq, trusty_irq->irq, smp_processor_id(),
                trusty_irq->enable);

        if (trusty_irq->percpu) {
                disable_percpu_irq(irq);
                irqset = this_cpu_ptr(is->percpu_irqs);
        } else {
                disable_irq_nosync(irq);
                irqset = &is->normal_irqs;
        }

        spin_lock(&is->normal_irqs_lock);
        if (trusty_irq->enable) {
                hlist_del(&trusty_irq->node);
                hlist_add_head(&trusty_irq->node, &irqset->pending);
        }
        spin_unlock(&is->normal_irqs_lock);

        schedule_work_on(raw_smp_processor_id(), &trusty_irq_work->work);

        dev_dbg(is->dev, "%s: irq %d done\n", __func__, irq);

        return IRQ_HANDLED;
}
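
/*
 * Entry point for Trusty SGIs (IPI numbers 0-15).  This is intended to
 * be called from the platform's IPI dispatch path rather than through
 * the normal IRQ core, hence the explicit irq_enter()/irq_exit()
 * bracketing around the shared handler.
 */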
#ifdef CONFIG_TRUSTY_INTERRUPT_MAP
void handle_trusty_ipi(int ipinr)
{
        if (trusty_ipi_init[ipinr] == 0)
                return;

        irq_enter();
        trusty_irq_handler(ipinr, this_cpu_ptr(trusty_ipi_data[ipinr]));
        irq_exit();
}
#endif
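
/*
 * CPU hotplug handling: CPU_STARTING enables the per-cpu IRQ set on the
 * incoming CPU, CPU_DYING disables it on the outgoing one, and CPU_DEAD
 * kicks the NOP worker on the CPU running the notifier so Trusty still
 * gets scheduled after a CPU disappears.
 */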
static void trusty_irq_cpu_up(void *info)
{
        unsigned long irq_flags;
        struct trusty_irq_state *is = info;

        dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id());

        local_irq_save(irq_flags);
        trusty_irq_enable_irqset(is, this_cpu_ptr(is->percpu_irqs));
        local_irq_restore(irq_flags);
}

static void trusty_irq_cpu_down(void *info)
{
        unsigned long irq_flags;
        struct trusty_irq_state *is = info;

        dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id());

        local_irq_save(irq_flags);
        trusty_irq_disable_irqset(is, this_cpu_ptr(is->percpu_irqs));
        local_irq_restore(irq_flags);
}

static void trusty_irq_cpu_dead(void *info)
{
        unsigned long irq_flags;
        struct trusty_irq_state *is = info;

        dev_dbg(is->dev, "%s: cpu %d\n", __func__, smp_processor_id());

        local_irq_save(irq_flags);
        schedule_work_on(smp_processor_id(),
                         &(this_cpu_ptr(is->irq_work)->work));
        local_irq_restore(irq_flags);
}

static int trusty_irq_cpu_notify(struct notifier_block *nb,
                                 unsigned long action, void *hcpu)
{
        struct trusty_irq_state *is;

        is = container_of(nb, struct trusty_irq_state, cpu_notifier);

        dev_dbg(is->dev, "%s: 0x%lx\n", __func__, action);

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_STARTING:
                trusty_irq_cpu_up(is);
                break;
        case CPU_DEAD:
                trusty_irq_cpu_dead(is);
                break;
        case CPU_DYING:
                trusty_irq_cpu_down(is);
                break;
        }

        return NOTIFY_OK;
}
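
/*
 * Registers one ordinary (non-per-cpu) IRQ on Trusty's behalf.  With
 * CONFIG_TRUSTY_INTERRUPT_MAP, the number Trusty reports is a GIC SPI
 * (32 and up) that must first be mapped through the SPI controller node
 * into a Linux virq; GIC bindings count SPIs from 0, hence "irq - 32".
 * The entry goes on the inactive list before request_irq() because the
 * handler may fire immediately.
 */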
static int trusty_irq_init_normal_irq(struct trusty_irq_state *is, int irq)
{
        int ret;
        unsigned long irq_flags;
        struct trusty_irq *trusty_irq;

        dev_dbg(is->dev, "%s: irq %d\n", __func__, irq);

        trusty_irq = kzalloc(sizeof(*trusty_irq), GFP_KERNEL);
        if (!trusty_irq)
                return -ENOMEM;

#ifdef CONFIG_TRUSTY_INTERRUPT_MAP
        if (spi_node) {
                struct of_phandle_args oirq;

                if (irq < 32) {
                        ret = -EINVAL;
                        dev_err(is->dev, "only SPIs supported, not irq %d\n",
                                irq);
                        goto err_map;
                }
                oirq.np = spi_node;
                oirq.args_count = 3;
                oirq.args[0] = GIC_SPI;
                oirq.args[1] = irq - 32;
                oirq.args[2] = 0;
                irq = irq_create_of_mapping(&oirq);
                if (irq == 0) {
                        ret = -EINVAL;
                        goto err_map;
                }
        }
#endif

        trusty_irq->is = is;
        trusty_irq->irq = irq;
        trusty_irq->enable = true;

        spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
        hlist_add_head(&trusty_irq->node, &is->normal_irqs.inactive);
        spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);

        ret = request_irq(irq, trusty_irq_handler, IRQF_NO_THREAD,
                          "trusty", trusty_irq);
        if (ret) {
                dev_err(is->dev, "request_irq failed %d\n", ret);
                goto err_request_irq;
        }
        return 0;

err_request_irq:
        spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
        hlist_del(&trusty_irq->node);
        spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
#ifdef CONFIG_TRUSTY_INTERRUPT_MAP
/* mapping failures jump here: the node was never added to a list */
err_map:
#endif
        kfree(trusty_irq);
        return ret;
}
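
/*
 * Registers one per-cpu IRQ.  The handler data is a percpu allocation
 * so each CPU's handler sees its own struct trusty_irq.  With
 * CONFIG_TRUSTY_INTERRUPT_MAP, numbers below 16 are SGIs delivered via
 * handle_trusty_ipi() instead of request_percpu_irq(), and 16-31 are
 * GIC PPIs that are first mapped through ppi_node (PPIs count from 0 in
 * the bindings, hence "irq - 16").
 */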
static int trusty_irq_init_per_cpu_irq(struct trusty_irq_state *is, int irq)
{
        int ret;
        unsigned int cpu;
        struct trusty_irq __percpu *trusty_irq_handler_data;

        dev_dbg(is->dev, "%s: irq %d\n", __func__, irq);

        trusty_irq_handler_data = alloc_percpu(struct trusty_irq);
        if (!trusty_irq_handler_data)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                struct trusty_irq *trusty_irq;
                struct trusty_irq_irqset *irqset;

                trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
                irqset = per_cpu_ptr(is->percpu_irqs, cpu);

                trusty_irq->is = is;
                hlist_add_head(&trusty_irq->node, &irqset->inactive);
                trusty_irq->irq = irq;
                trusty_irq->percpu = true;
                trusty_irq->percpu_ptr = trusty_irq_handler_data;
        }

#ifdef CONFIG_TRUSTY_INTERRUPT_MAP
        if (irq < 16) { /* IPI (SGI), delivered via handle_trusty_ipi() */
                trusty_ipi_data[irq] = trusty_irq_handler_data;
                trusty_ipi_init[irq] = 1;
                return 0;
        }
        if (ppi_node) {
                struct of_phandle_args oirq;

                if (irq >= 32) {
                        ret = -EINVAL;
                        dev_err(is->dev, "PPIs only, SPI %d not supported\n",
                                irq);
                        goto err_request_percpu_irq;
                }
                oirq.np = ppi_node;
                oirq.args_count = 3;
                oirq.args[0] = GIC_PPI;
                oirq.args[1] = irq - 16;
                oirq.args[2] = 0;
                irq = irq_create_of_mapping(&oirq);
                if (irq == 0) {
                        ret = -EINVAL;
                        goto err_request_percpu_irq;
                }
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(trusty_irq_handler_data, cpu)->irq = irq;
        }
#endif

        ret = request_percpu_irq(irq, trusty_irq_handler, "trusty",
                                 trusty_irq_handler_data);
        if (ret) {
                dev_err(is->dev, "request_percpu_irq failed %d\n", ret);
                goto err_request_percpu_irq;
        }
        return 0;

err_request_percpu_irq:
        for_each_possible_cpu(cpu) {
                struct trusty_irq *trusty_irq;

                trusty_irq = per_cpu_ptr(trusty_irq_handler_data, cpu);
                hlist_del(&trusty_irq->node);
        }
        free_percpu(trusty_irq_handler_data);
        return ret;
}

static int trusty_smc_get_next_irq(struct trusty_irq_state *is,
                                   unsigned long min_irq, bool per_cpu)
{
        return trusty_fast_call32(is->trusty_dev, SMC_FC_GET_NEXT_IRQ,
                                  min_irq, per_cpu, 0);
}
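
/*
 * Asks Trusty for the next IRQ at or above min_irq via
 * SMC_FC_GET_NEXT_IRQ and registers it.  Returning irq + 1 lets the
 * caller iterate; a negative return (no more IRQs) ends the
 * enumeration loops in trusty_irq_probe().
 */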
static int trusty_irq_init_one(struct trusty_irq_state *is,
                               int irq, bool per_cpu)
{
        int ret;

        irq = trusty_smc_get_next_irq(is, irq, per_cpu);
        if (irq < 0)
                return irq;

        if (per_cpu)
                ret = trusty_irq_init_per_cpu_irq(is, irq);
        else
                ret = trusty_irq_init_normal_irq(is, irq);
        if (ret) {
                dev_warn(is->dev,
                         "failed to initialize irq %d, irq will be ignored\n",
                         irq);
        }

        return irq + 1;
}

static void trusty_irq_free_irqs(struct trusty_irq_state *is)
{
        struct trusty_irq *irq;
        struct hlist_node *n;
        unsigned int cpu;

        hlist_for_each_entry_safe(irq, n, &is->normal_irqs.inactive, node) {
                dev_dbg(is->dev, "%s: irq %d\n", __func__, irq->irq);
                free_irq(irq->irq, irq);
                hlist_del(&irq->node);
                kfree(irq);
        }
        hlist_for_each_entry_safe(irq, n,
                                  &this_cpu_ptr(is->percpu_irqs)->inactive,
                                  node) {
                struct trusty_irq __percpu *trusty_irq_handler_data;

                dev_dbg(is->dev, "%s: percpu irq %d\n", __func__, irq->irq);
                trusty_irq_handler_data = irq->percpu_ptr;
                free_percpu_irq(irq->irq, trusty_irq_handler_data);
                for_each_possible_cpu(cpu) {
                        struct trusty_irq *irq_tmp;

                        irq_tmp = per_cpu_ptr(trusty_irq_handler_data, cpu);
                        hlist_del(&irq_tmp->node);
                }
                free_percpu(trusty_irq_handler_data);
        }
}
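
/*
 * Resolves the interrupt-controller nodes used for the SPI and PPI
 * mappings above: the SPI parent comes from the standard
 * interrupt-parent hierarchy, while an optional "ppi-interrupt-parent"
 * phandle may name a different controller for PPIs; otherwise the SPI
 * controller's own parent is used.
 */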
#ifdef CONFIG_TRUSTY_INTERRUPT_MAP
static void init_irq_node(struct device_node *node)
{
        struct device_node *spi;
        struct device_node *ppi;

        if (!node)
                return;

        spi = of_irq_find_parent(node);
        if (!spi)
                return;

        ppi = of_parse_phandle(node, "ppi-interrupt-parent", 0);
        if (!ppi)
                ppi = of_irq_find_parent(spi);
        if (!ppi)
                return;

        spi_node = spi;
        ppi_node = ppi;
}
#endif
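
/*
 * Probe order matters: the call notifier is registered before any IRQs
 * are requested so pending IRQs can always be re-enabled, then per-cpu
 * and normal IRQs are enumerated from Trusty, and finally the hotplug
 * notifier plus an initial trusty_irq_cpu_up() on every CPU arm the
 * per-cpu sets.
 */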
static int trusty_irq_probe(struct platform_device *pdev)
{
        int ret;
        int irq;
        unsigned int cpu;
        unsigned long irq_flags;
        struct trusty_irq_state *is;
        work_func_t work_func;

        dev_dbg(&pdev->dev, "%s\n", __func__);

#ifdef CONFIG_TRUSTY_INTERRUPT_MAP
        init_irq_node(pdev->dev.of_node);
#endif

        is = kzalloc(sizeof(*is), GFP_KERNEL);
        if (!is) {
                ret = -ENOMEM;
                goto err_alloc_is;
        }

        is->dev = &pdev->dev;
        is->trusty_dev = is->dev->parent;
        is->irq_work = alloc_percpu(struct trusty_irq_work);
        if (!is->irq_work) {
                ret = -ENOMEM;
                goto err_alloc_irq_work;
        }
        spin_lock_init(&is->normal_irqs_lock);
        is->percpu_irqs = alloc_percpu(struct trusty_irq_irqset);
        if (!is->percpu_irqs) {
                ret = -ENOMEM;
                goto err_alloc_pending_percpu_irqs;
        }

        platform_set_drvdata(pdev, is);

        is->trusty_call_notifier.notifier_call = trusty_irq_call_notify;
        ret = trusty_call_notifier_register(is->trusty_dev,
                                            &is->trusty_call_notifier);
        if (ret) {
                dev_err(&pdev->dev,
                        "failed to register trusty call notifier\n");
                goto err_trusty_call_notifier_register;
        }

        if (trusty_get_api_version(is->trusty_dev) < TRUSTY_API_VERSION_SMP)
                work_func = trusty_irq_work_func_locked_nop;
        else
                work_func = trusty_irq_work_func;

        for_each_possible_cpu(cpu) {
                struct trusty_irq_work *trusty_irq_work;

                trusty_irq_work = per_cpu_ptr(is->irq_work, cpu);
                trusty_irq_work->is = is;
                INIT_WORK(&trusty_irq_work->work, work_func);
        }

        for (irq = 0; irq >= 0;)
                irq = trusty_irq_init_one(is, irq, true);
        for (irq = 0; irq >= 0;)
                irq = trusty_irq_init_one(is, irq, false);

        is->cpu_notifier.notifier_call = trusty_irq_cpu_notify;
        ret = register_hotcpu_notifier(&is->cpu_notifier);
        if (ret) {
                dev_err(&pdev->dev, "register_hotcpu_notifier failed %d\n",
                        ret);
                goto err_register_hotcpu_notifier;
        }

        ret = on_each_cpu(trusty_irq_cpu_up, is, 0);
        if (ret) {
                dev_err(&pdev->dev, "on_each_cpu failed %d\n", ret);
                goto err_on_each_cpu;
        }
        return 0;

err_on_each_cpu:
        unregister_hotcpu_notifier(&is->cpu_notifier);
        on_each_cpu(trusty_irq_cpu_down, is, 1);
err_register_hotcpu_notifier:
        spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
        trusty_irq_disable_irqset(is, &is->normal_irqs);
        spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);
        trusty_irq_free_irqs(is);
        trusty_call_notifier_unregister(is->trusty_dev,
                                        &is->trusty_call_notifier);
err_trusty_call_notifier_register:
        free_percpu(is->percpu_irqs);
err_alloc_pending_percpu_irqs:
        for_each_possible_cpu(cpu) {
                struct trusty_irq_work *trusty_irq_work;

                trusty_irq_work = per_cpu_ptr(is->irq_work, cpu);
                flush_work(&trusty_irq_work->work);
        }
        free_percpu(is->irq_work);
err_alloc_irq_work:
        kfree(is);
err_alloc_is:
        return ret;
}

static int trusty_irq_remove(struct platform_device *pdev)
{
        int ret;
        unsigned int cpu;
        unsigned long irq_flags;
        struct trusty_irq_state *is = platform_get_drvdata(pdev);

        dev_dbg(&pdev->dev, "%s\n", __func__);

        unregister_hotcpu_notifier(&is->cpu_notifier);
        ret = on_each_cpu(trusty_irq_cpu_down, is, 1);
        if (ret)
                dev_err(&pdev->dev, "on_each_cpu failed %d\n", ret);

        spin_lock_irqsave(&is->normal_irqs_lock, irq_flags);
        trusty_irq_disable_irqset(is, &is->normal_irqs);
        spin_unlock_irqrestore(&is->normal_irqs_lock, irq_flags);

        trusty_irq_free_irqs(is);

        trusty_call_notifier_unregister(is->trusty_dev,
                                        &is->trusty_call_notifier);

        free_percpu(is->percpu_irqs);
        for_each_possible_cpu(cpu) {
                struct trusty_irq_work *trusty_irq_work;

                trusty_irq_work = per_cpu_ptr(is->irq_work, cpu);
                flush_work(&trusty_irq_work->work);
        }
        free_percpu(is->irq_work);
        kfree(is);

        return 0;
}
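
/*
 * Illustrative device-tree placement (a sketch; the parent node's
 * compatible string belongs to the Trusty core driver and is assumed
 * here, not defined in this file):
 *
 *      trusty {
 *              compatible = "android,trusty-smc-v1";
 *              irq {
 *                      compatible = "android,trusty-irq-v1";
 *              };
 *      };
 *
 * The driver relies on its parent platform device being the Trusty
 * core device (is->trusty_dev = is->dev->parent above).
 */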
static const struct of_device_id trusty_irq_of_match[] = {
        { .compatible = "android,trusty-irq-v1", },
        {},
};

static struct platform_driver trusty_irq_driver = {
        .probe = trusty_irq_probe,
        .remove = trusty_irq_remove,
        .driver = {
                .name = "trusty-irq",
                .owner = THIS_MODULE,
                .of_match_table = trusty_irq_of_match,
        },
};

module_platform_driver(trusty_irq_driver);