avc.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208
  1. /*
  2. * Implementation of the kernel access vector cache (AVC).
  3. *
  4. * Authors: Stephen Smalley, <sds@epoch.ncsc.mil>
  5. * James Morris <jmorris@redhat.com>
  6. *
  7. * Update: KaiGai, Kohei <kaigai@ak.jp.nec.com>
  8. * Replaced the avc_lock spinlock by RCU.
  9. *
  10. * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License version 2,
  14. * as published by the Free Software Foundation.
  15. */
  16. #include <linux/types.h>
  17. #include <linux/stddef.h>
  18. #include <linux/kernel.h>
  19. #include <linux/slab.h>
  20. #include <linux/fs.h>
  21. #include <linux/dcache.h>
  22. #include <linux/init.h>
  23. #include <linux/skbuff.h>
  24. #include <linux/percpu.h>
  25. #include <linux/list.h>
  26. #include <net/sock.h>
  27. #include <linux/un.h>
  28. #include <net/af_unix.h>
  29. #include <linux/ip.h>
  30. #include <linux/audit.h>
  31. #include <linux/ipv6.h>
  32. #include <net/ipv6.h>
  33. #include "avc.h"
  34. #include "avc_ss.h"
  35. #include "classmap.h"
/* Number of hash buckets; must remain a power of two (see avc_hash()). */
#define AVC_CACHE_SLOTS 512
/* Default active-node count above which avc_alloc_node() triggers reclaim. */
#define AVC_DEF_CACHE_THRESHOLD 512
/* Maximum number of nodes evicted in one avc_reclaim_node() pass. */
#define AVC_CACHE_RECLAIM 16

#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
#define avc_cache_stats_incr(field) this_cpu_inc(avc_cache_stats.field)
#else
#define avc_cache_stats_incr(field) do {} while (0)
#endif

/* One cached access decision for a (ssid, tsid, tclass) triple. */
struct avc_entry {
	u32 ssid;
	u32 tsid;
	u16 tclass;
	struct av_decision avd;
	/* per-command (operation) decisions; NULL when none are cached */
	struct avc_operation_node *ops_node;
};

struct avc_node {
	struct avc_entry ae;
	struct hlist_node list; /* anchored in avc_cache->slots[i] */
	struct rcu_head rhead;  /* deferred free via avc_node_free() */
};

struct avc_cache {
	struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
	spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
	atomic_t lru_hint; /* LRU hint for reclaim scan */
	atomic_t active_nodes; /* current node count, checked against avc_cache_threshold */
	u32 latest_notif; /* latest revocation notification */
};

struct avc_operation_decision_node {
	struct operation_decision od;
	struct list_head od_list; /* linked on avc_operation_node->od_head */
};

struct avc_operation_node {
	struct operation ops;
	struct list_head od_head; /* list of operation_decision_node */
};

/* Singly linked list of callbacks registered via avc_add_callback(). */
struct avc_callback_node {
	int (*callback) (u32 event);
	u32 events;
	struct avc_callback_node *next;
};

/* Exported via selinuxfs */
unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;

#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
#endif

static struct avc_cache avc_cache;
static struct avc_callback_node *avc_callbacks;
/* slab caches for the node/operation structures allocated by this file */
static struct kmem_cache *avc_node_cachep;
static struct kmem_cache *avc_operation_decision_node_cachep;
static struct kmem_cache *avc_operation_node_cachep;
static struct kmem_cache *avc_operation_perm_cachep;
  87. static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
  88. {
  89. return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
  90. }
  91. /**
  92. * avc_dump_av - Display an access vector in human-readable form.
  93. * @tclass: target security class
  94. * @av: access vector
  95. */
  96. static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
  97. {
  98. const char **perms;
  99. int i, perm;
  100. if (av == 0) {
  101. audit_log_format(ab, " null");
  102. return;
  103. }
  104. perms = secclass_map[tclass-1].perms;
  105. audit_log_format(ab, " {");
  106. i = 0;
  107. perm = 1;
  108. while (i < (sizeof(av) * 8)) {
  109. if ((perm & av) && perms[i]) {
  110. audit_log_format(ab, " %s", perms[i]);
  111. av &= ~perm;
  112. }
  113. i++;
  114. perm <<= 1;
  115. }
  116. if (av)
  117. audit_log_format(ab, " 0x%x", av);
  118. audit_log_format(ab, " }");
  119. }
  120. /**
  121. * avc_dump_query - Display a SID pair and a class in human-readable form.
  122. * @ssid: source security identifier
  123. * @tsid: target security identifier
  124. * @tclass: target security class
  125. */
  126. static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass)
  127. {
  128. int rc;
  129. char *scontext;
  130. u32 scontext_len;
  131. rc = security_sid_to_context(ssid, &scontext, &scontext_len);
  132. if (rc)
  133. audit_log_format(ab, "ssid=%d", ssid);
  134. else {
  135. audit_log_format(ab, "scontext=%s", scontext);
  136. kfree(scontext);
  137. }
  138. rc = security_sid_to_context(tsid, &scontext, &scontext_len);
  139. if (rc)
  140. audit_log_format(ab, " tsid=%d", tsid);
  141. else {
  142. audit_log_format(ab, " tcontext=%s", scontext);
  143. kfree(scontext);
  144. }
  145. BUG_ON(tclass >= ARRAY_SIZE(secclass_map));
  146. audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name);
  147. }
  148. /**
  149. * avc_init - Initialize the AVC.
  150. *
  151. * Initialize the access vector cache.
  152. */
void __init avc_init(void)
{
	int i;

	/* Empty hash buckets; one spinlock per bucket serializes writers. */
	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
		INIT_HLIST_HEAD(&avc_cache.slots[i]);
		spin_lock_init(&avc_cache.slots_lock[i]);
	}
	atomic_set(&avc_cache.active_nodes, 0);
	atomic_set(&avc_cache.lru_hint, 0);

	/* SLAB_PANIC: failure to create a cache at boot is fatal. */
	avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
					0, SLAB_PANIC, NULL);
	avc_operation_node_cachep = kmem_cache_create("avc_operation_node",
					sizeof(struct avc_operation_node),
					0, SLAB_PANIC, NULL);
	avc_operation_decision_node_cachep = kmem_cache_create(
					"avc_operation_decision_node",
					sizeof(struct avc_operation_decision_node),
					0, SLAB_PANIC, NULL);
	avc_operation_perm_cachep = kmem_cache_create("avc_operation_perm",
					sizeof(struct operation_perm),
					0, SLAB_PANIC, NULL);

	audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n");
}
  176. int avc_get_hash_stats(char *page)
  177. {
  178. int i, chain_len, max_chain_len, slots_used;
  179. struct avc_node *node;
  180. struct hlist_head *head;
  181. rcu_read_lock();
  182. slots_used = 0;
  183. max_chain_len = 0;
  184. for (i = 0; i < AVC_CACHE_SLOTS; i++) {
  185. head = &avc_cache.slots[i];
  186. if (!hlist_empty(head)) {
  187. slots_used++;
  188. chain_len = 0;
  189. hlist_for_each_entry_rcu(node, head, list)
  190. chain_len++;
  191. if (chain_len > max_chain_len)
  192. max_chain_len = chain_len;
  193. }
  194. }
  195. rcu_read_unlock();
  196. return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
  197. "longest chain: %d\n",
  198. atomic_read(&avc_cache.active_nodes),
  199. slots_used, AVC_CACHE_SLOTS, max_chain_len);
  200. }
  201. /*
  202. * using a linked list for operation_decision lookup because the list is
  203. * always small. i.e. less than 5, typically 1
  204. */
  205. static struct operation_decision *avc_operation_lookup(u8 type,
  206. struct avc_operation_node *ops_node)
  207. {
  208. struct avc_operation_decision_node *od_node;
  209. struct operation_decision *od = NULL;
  210. list_for_each_entry(od_node, &ops_node->od_head, od_list) {
  211. if (od_node->od.type != type)
  212. continue;
  213. od = &od_node->od;
  214. break;
  215. }
  216. return od;
  217. }
  218. static inline unsigned int avc_operation_has_perm(struct operation_decision *od,
  219. u16 cmd, u8 specified)
  220. {
  221. unsigned int rc = 0;
  222. u8 num = cmd & 0xff;
  223. if ((specified == OPERATION_ALLOWED) &&
  224. (od->specified & OPERATION_ALLOWED))
  225. rc = security_operation_test(od->allowed->perms, num);
  226. else if ((specified == OPERATION_AUDITALLOW) &&
  227. (od->specified & OPERATION_AUDITALLOW))
  228. rc = security_operation_test(od->auditallow->perms, num);
  229. else if ((specified == OPERATION_DONTAUDIT) &&
  230. (od->specified & OPERATION_DONTAUDIT))
  231. rc = security_operation_test(od->dontaudit->perms, num);
  232. return rc;
  233. }
  234. static void avc_operation_allow_perm(struct avc_operation_node *node, u16 cmd)
  235. {
  236. struct operation_decision *od;
  237. u8 type;
  238. u8 num;
  239. type = cmd >> 8;
  240. num = cmd & 0xff;
  241. security_operation_set(node->ops.type, type);
  242. od = avc_operation_lookup(type, node);
  243. if (od && od->allowed)
  244. security_operation_set(od->allowed->perms, num);
  245. }
  246. static void avc_operation_decision_free(
  247. struct avc_operation_decision_node *od_node)
  248. {
  249. struct operation_decision *od;
  250. od = &od_node->od;
  251. if (od->allowed)
  252. kmem_cache_free(avc_operation_perm_cachep, od->allowed);
  253. if (od->auditallow)
  254. kmem_cache_free(avc_operation_perm_cachep, od->auditallow);
  255. if (od->dontaudit)
  256. kmem_cache_free(avc_operation_perm_cachep, od->dontaudit);
  257. kmem_cache_free(avc_operation_decision_node_cachep, od_node);
  258. }
/*
 * Free an avc_operation_node and every operation_decision hanging off it.
 * Safe to call with a NULL @ops_node.
 */
static void avc_operation_free(struct avc_operation_node *ops_node)
{
	struct avc_operation_decision_node *od_node, *tmp;

	if (!ops_node)
		return;

	/* _safe walk: each entry is unlinked and freed as we go */
	list_for_each_entry_safe(od_node, tmp, &ops_node->od_head, od_list) {
		list_del(&od_node->od_list);
		avc_operation_decision_free(od_node);
	}
	kmem_cache_free(avc_operation_node_cachep, ops_node);
}
/*
 * Copy @src into @dest.  Only the bitmaps named in the 'specified' mask
 * are copied; the corresponding dest->allowed / ->auditallow / ->dontaudit
 * buffers must already have been allocated for those bits (see
 * avc_operation_decision_alloc(), which allocates per 'specified').
 */
static void avc_copy_operation_decision(struct operation_decision *dest,
					struct operation_decision *src)
{
	dest->type = src->type;
	dest->specified = src->specified;
	if (dest->specified & OPERATION_ALLOWED)
		memcpy(dest->allowed->perms, src->allowed->perms,
				sizeof(src->allowed->perms));
	if (dest->specified & OPERATION_AUDITALLOW)
		memcpy(dest->auditallow->perms, src->auditallow->perms,
				sizeof(src->auditallow->perms));
	if (dest->specified & OPERATION_DONTAUDIT)
		memcpy(dest->dontaudit->perms, src->dontaudit->perms,
				sizeof(src->dontaudit->perms));
}
  285. /*
  286. * similar to avc_copy_operation_decision, but only copy decision
  287. * information relevant to this command
  288. */
static inline void avc_quick_copy_operation_decision(u16 cmd,
			struct operation_decision *dest,
			struct operation_decision *src)
{
	/*
	 * compute index of the u32 of the 256 bits (8 u32s) that contain this
	 * command permission
	 */
	u8 i = (0xff & cmd) >> 5;

	dest->specified = src->specified;
	/* dest's bitmaps must exist for the bits set in 'specified' */
	if (dest->specified & OPERATION_ALLOWED)
		dest->allowed->perms[i] = src->allowed->perms[i];
	if (dest->specified & OPERATION_AUDITALLOW)
		dest->auditallow->perms[i] = src->auditallow->perms[i];
	if (dest->specified & OPERATION_DONTAUDIT)
		dest->dontaudit->perms[i] = src->dontaudit->perms[i];
}
  306. static struct avc_operation_decision_node
  307. *avc_operation_decision_alloc(u8 specified)
  308. {
  309. struct avc_operation_decision_node *node;
  310. struct operation_decision *od;
  311. node = kmem_cache_zalloc(avc_operation_decision_node_cachep,
  312. GFP_ATOMIC | __GFP_NOMEMALLOC);
  313. if (!node)
  314. return NULL;
  315. od = &node->od;
  316. if (specified & OPERATION_ALLOWED) {
  317. od->allowed = kmem_cache_zalloc(avc_operation_perm_cachep,
  318. GFP_ATOMIC | __GFP_NOMEMALLOC);
  319. if (!od->allowed)
  320. goto error;
  321. }
  322. if (specified & OPERATION_AUDITALLOW) {
  323. od->auditallow = kmem_cache_zalloc(avc_operation_perm_cachep,
  324. GFP_ATOMIC | __GFP_NOMEMALLOC);
  325. if (!od->auditallow)
  326. goto error;
  327. }
  328. if (specified & OPERATION_DONTAUDIT) {
  329. od->dontaudit = kmem_cache_zalloc(avc_operation_perm_cachep,
  330. GFP_ATOMIC | __GFP_NOMEMALLOC);
  331. if (!od->dontaudit)
  332. goto error;
  333. }
  334. return node;
  335. error:
  336. avc_operation_decision_free(node);
  337. return NULL;
  338. }
  339. static int avc_add_operation(struct avc_node *node,
  340. struct operation_decision *od)
  341. {
  342. struct avc_operation_decision_node *dest_od;
  343. node->ae.ops_node->ops.len++;
  344. dest_od = avc_operation_decision_alloc(od->specified);
  345. if (!dest_od)
  346. return -ENOMEM;
  347. avc_copy_operation_decision(&dest_od->od, od);
  348. list_add(&dest_od->od_list, &node->ae.ops_node->od_head);
  349. return 0;
  350. }
  351. static struct avc_operation_node *avc_operation_alloc(void)
  352. {
  353. struct avc_operation_node *ops;
  354. ops = kmem_cache_zalloc(avc_operation_node_cachep,
  355. GFP_ATOMIC|__GFP_NOMEMALLOC);
  356. if (!ops)
  357. return ops;
  358. INIT_LIST_HEAD(&ops->od_head);
  359. return ops;
  360. }
  361. static int avc_operation_populate(struct avc_node *node,
  362. struct avc_operation_node *src)
  363. {
  364. struct avc_operation_node *dest;
  365. struct avc_operation_decision_node *dest_od;
  366. struct avc_operation_decision_node *src_od;
  367. if (src->ops.len == 0)
  368. return 0;
  369. dest = avc_operation_alloc();
  370. if (!dest)
  371. return -ENOMEM;
  372. memcpy(dest->ops.type, &src->ops.type, sizeof(dest->ops.type));
  373. dest->ops.len = src->ops.len;
  374. /* for each source od allocate a destination od and copy */
  375. list_for_each_entry(src_od, &src->od_head, od_list) {
  376. dest_od = avc_operation_decision_alloc(src_od->od.specified);
  377. if (!dest_od)
  378. goto error;
  379. avc_copy_operation_decision(&dest_od->od, &src_od->od);
  380. list_add(&dest_od->od_list, &dest->od_head);
  381. }
  382. node->ae.ops_node = dest;
  383. return 0;
  384. error:
  385. avc_operation_free(dest);
  386. return -ENOMEM;
  387. }
/*
 * Decide which of the @requested permission bits must be audited for this
 * result, consulting the per-command decision @od (when present) so a
 * command-level dontaudit/auditallow can suppress the record.  The denied
 * set is reported back through @deniedp.
 */
static inline u32 avc_operation_audit_required(u32 requested,
					struct av_decision *avd,
					struct operation_decision *od,
					u16 cmd,
					int result,
					u32 *deniedp)
{
	u32 denied, audited;

	denied = requested & ~avd->allowed;
	if (unlikely(denied)) {
		audited = denied & avd->auditdeny;
		/* a command-specific dontaudit suppresses the denial record */
		if (audited && od) {
			if (avc_operation_has_perm(od, cmd,
						OPERATION_DONTAUDIT))
				audited &= ~requested;
		}
	} else if (result) {
		/* allowed by policy but failed elsewhere: audit it all */
		audited = denied = requested;
	} else {
		audited = requested & avd->auditallow;
		/* grants are only audited if the specific command says so */
		if (audited && od) {
			if (!avc_operation_has_perm(od, cmd,
						OPERATION_AUDITALLOW))
				audited &= ~requested;
		}
	}

	*deniedp = denied;
	return audited;
}
  417. static inline int avc_operation_audit(u32 ssid, u32 tsid, u16 tclass,
  418. u32 requested, struct av_decision *avd,
  419. struct operation_decision *od,
  420. u16 cmd, int result,
  421. struct common_audit_data *ad)
  422. {
  423. u32 audited, denied;
  424. audited = avc_operation_audit_required(
  425. requested, avd, od, cmd, result, &denied);
  426. if (likely(!audited))
  427. return 0;
  428. return slow_avc_audit(ssid, tsid, tclass, requested,
  429. audited, denied, result, ad, 0);
  430. }
  431. static void avc_node_free(struct rcu_head *rhead)
  432. {
  433. struct avc_node *node = container_of(rhead, struct avc_node, rhead);
  434. avc_operation_free(node->ae.ops_node);
  435. kmem_cache_free(avc_node_cachep, node);
  436. avc_cache_stats_incr(frees);
  437. }
/*
 * Unlink @node from its hash chain and defer the actual free until after
 * the RCU grace period (concurrent readers may still hold a reference).
 * Callers hold the slot's spinlock (see avc_reclaim_node()/avc_flush()).
 */
static void avc_node_delete(struct avc_node *node)
{
	hlist_del_rcu(&node->list);
	call_rcu(&node->rhead, avc_node_free);
	atomic_dec(&avc_cache.active_nodes);
}
/*
 * Free a node that was never published in the cache: no RCU deferral is
 * needed since no reader can see it.  Undoes the active_nodes charge
 * taken in avc_alloc_node().
 */
static void avc_node_kill(struct avc_node *node)
{
	avc_operation_free(node->ae.ops_node);
	kmem_cache_free(avc_node_cachep, node);
	avc_cache_stats_incr(frees);
	atomic_dec(&avc_cache.active_nodes);
}
/*
 * Substitute @new for @old in the hash chain (safe w.r.t. concurrent RCU
 * readers) and defer freeing @old.  active_nodes is decremented because
 * @new was already counted when it was allocated, so the net population
 * is unchanged.
 */
static void avc_node_replace(struct avc_node *new, struct avc_node *old)
{
	hlist_replace_rcu(&old->list, &new->list);
	call_rcu(&old->rhead, avc_node_free);
	atomic_dec(&avc_cache.active_nodes);
}
/*
 * Evict up to AVC_CACHE_RECLAIM nodes, scanning buckets round-robin from
 * the shared lru_hint.  Buckets whose lock is contended are skipped
 * (spin_trylock) so reclaim never stalls behind another writer.
 * Returns the number of nodes reclaimed.
 */
static inline int avc_reclaim_node(void)
{
	struct avc_node *node;
	int hvalue, try, ecx;
	unsigned long flags;
	struct hlist_head *head;
	spinlock_t *lock;

	for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
		/* atomic increment so concurrent reclaimers spread out */
		hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
		head = &avc_cache.slots[hvalue];
		lock = &avc_cache.slots_lock[hvalue];
		if (!spin_trylock_irqsave(lock, flags))
			continue;
		rcu_read_lock();
		hlist_for_each_entry(node, head, list) {
			/* unlinks the node; free is deferred past the grace period */
			avc_node_delete(node);
			avc_cache_stats_incr(reclaims);
			ecx++;
			if (ecx >= AVC_CACHE_RECLAIM) {
				rcu_read_unlock();
				spin_unlock_irqrestore(lock, flags);
				goto out;
			}
		}
		rcu_read_unlock();
		spin_unlock_irqrestore(lock, flags);
	}
out:
	return ecx;
}
  487. static struct avc_node *avc_alloc_node(void)
  488. {
  489. struct avc_node *node;
  490. node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
  491. if (!node)
  492. goto out;
  493. INIT_HLIST_NODE(&node->list);
  494. avc_cache_stats_incr(allocations);
  495. if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
  496. avc_reclaim_node();
  497. out:
  498. return node;
  499. }
  500. static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd)
  501. {
  502. node->ae.ssid = ssid;
  503. node->ae.tsid = tsid;
  504. node->ae.tclass = tclass;
  505. memcpy(&node->ae.avd, avd, sizeof(node->ae.avd));
  506. }
  507. static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
  508. {
  509. struct avc_node *node, *ret = NULL;
  510. int hvalue;
  511. struct hlist_head *head;
  512. hvalue = avc_hash(ssid, tsid, tclass);
  513. head = &avc_cache.slots[hvalue];
  514. hlist_for_each_entry_rcu(node, head, list) {
  515. if (ssid == node->ae.ssid &&
  516. tclass == node->ae.tclass &&
  517. tsid == node->ae.tsid) {
  518. ret = node;
  519. break;
  520. }
  521. }
  522. return ret;
  523. }
  524. /**
  525. * avc_lookup - Look up an AVC entry.
  526. * @ssid: source security identifier
  527. * @tsid: target security identifier
  528. * @tclass: target security class
  529. *
  530. * Look up an AVC entry that is valid for the
  531. * (@ssid, @tsid), interpreting the permissions
  532. * based on @tclass. If a valid AVC entry exists,
  533. * then this function returns the avc_node.
  534. * Otherwise, this function returns NULL.
  535. */
  536. static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
  537. {
  538. struct avc_node *node;
  539. avc_cache_stats_incr(lookups);
  540. node = avc_search_node(ssid, tsid, tclass);
  541. if (node)
  542. return node;
  543. avc_cache_stats_incr(misses);
  544. return NULL;
  545. }
  546. static int avc_latest_notif_update(int seqno, int is_insert)
  547. {
  548. int ret = 0;
  549. static DEFINE_SPINLOCK(notif_lock);
  550. unsigned long flag;
  551. spin_lock_irqsave(&notif_lock, flag);
  552. if (is_insert) {
  553. if (seqno < avc_cache.latest_notif) {
  554. printk(KERN_WARNING "SELinux: avc: seqno %d < latest_notif %d\n",
  555. seqno, avc_cache.latest_notif);
  556. ret = -EAGAIN;
  557. }
  558. } else {
  559. if (seqno > avc_cache.latest_notif)
  560. avc_cache.latest_notif = seqno;
  561. }
  562. spin_unlock_irqrestore(&notif_lock, flag);
  563. return ret;
  564. }
  565. /**
  566. * avc_insert - Insert an AVC entry.
  567. * @ssid: source security identifier
  568. * @tsid: target security identifier
  569. * @tclass: target security class
  570. * @avd: resulting av decision
  571. * @ops: resulting operation decisions
  572. *
  573. * Insert an AVC entry for the SID pair
  574. * (@ssid, @tsid) and class @tclass.
  575. * The access vectors and the sequence number are
  576. * normally provided by the security server in
  577. * response to a security_compute_av() call. If the
  578. * sequence number @avd->seqno is not less than the latest
  579. * revocation notification, then the function copies
  580. * the access vectors into a cache entry, returns
  581. * avc_node inserted. Otherwise, this function returns NULL.
  582. */
  583. static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass,
  584. struct av_decision *avd,
  585. struct avc_operation_node *ops_node)
  586. {
  587. struct avc_node *pos, *node = NULL;
  588. int hvalue;
  589. unsigned long flag;
  590. if (avc_latest_notif_update(avd->seqno, 1))
  591. goto out;
  592. node = avc_alloc_node();
  593. if (node) {
  594. struct hlist_head *head;
  595. spinlock_t *lock;
  596. int rc = 0;
  597. hvalue = avc_hash(ssid, tsid, tclass);
  598. avc_node_populate(node, ssid, tsid, tclass, avd);
  599. rc = avc_operation_populate(node, ops_node);
  600. if (rc) {
  601. kmem_cache_free(avc_node_cachep, node);
  602. return NULL;
  603. }
  604. head = &avc_cache.slots[hvalue];
  605. lock = &avc_cache.slots_lock[hvalue];
  606. spin_lock_irqsave(lock, flag);
  607. hlist_for_each_entry(pos, head, list) {
  608. if (pos->ae.ssid == ssid &&
  609. pos->ae.tsid == tsid &&
  610. pos->ae.tclass == tclass) {
  611. avc_node_replace(node, pos);
  612. goto found;
  613. }
  614. }
  615. hlist_add_head_rcu(&node->list, head);
  616. found:
  617. spin_unlock_irqrestore(lock, flag);
  618. }
  619. out:
  620. return node;
  621. }
  622. /**
  623. * avc_audit_pre_callback - SELinux specific information
  624. * will be called by generic audit code
  625. * @ab: the audit buffer
  626. * @a: audit_data
  627. */
  628. static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
  629. {
  630. struct common_audit_data *ad = a;
  631. audit_log_format(ab, "avc: %s ",
  632. ad->selinux_audit_data->denied ? "denied" : "granted");
  633. avc_dump_av(ab, ad->selinux_audit_data->tclass,
  634. ad->selinux_audit_data->audited);
  635. audit_log_format(ab, " for ");
  636. }
  637. /**
  638. * avc_audit_post_callback - SELinux specific information
  639. * will be called by generic audit code
  640. * @ab: the audit buffer
  641. * @a: audit_data
  642. */
/* Appends the SID pair, class, and permissive flag after the generic
 * LSM audit fields; on MTK kernels it also forwards denial records to
 * the vendor AEE hook. */
static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
{
	struct common_audit_data *ad = a;

	audit_log_format(ab, " ");
	avc_dump_query(ab, ad->selinux_audit_data->ssid,
			   ad->selinux_audit_data->tsid,
			   ad->selinux_audit_data->tclass);
	if (ad->selinux_audit_data->denied) {
		/* result == 0 means the denial was not enforced (permissive) */
		audit_log_format(ab, " permissive=%u",
				 ad->selinux_audit_data->result ? 0 : 1);
#ifdef CONFIG_MTK_SELINUX_AEE_WARNING
		{
			struct nlmsghdr *nlh;
			char *selinux_data;

			if (ab) {
				/* peek at the netlink record being built */
				nlh = nlmsg_hdr(audit_get_skb(ab));
				selinux_data = nlmsg_data(nlh);
				if (nlh->nlmsg_type != AUDIT_EOE) {
					/* 1400 — presumably AUDIT_AVC; confirm against uapi/linux/audit.h */
					if (nlh->nlmsg_type == 1400)
						mtk_audit_hook(selinux_data);
				}
			}
		}
#endif
	}
}
  669. /* This is the slow part of avc audit with big stack footprint */
  670. noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
  671. u32 requested, u32 audited, u32 denied, int result,
  672. struct common_audit_data *a,
  673. unsigned flags)
  674. {
  675. struct common_audit_data stack_data;
  676. struct selinux_audit_data sad;
  677. if (!a) {
  678. a = &stack_data;
  679. a->type = LSM_AUDIT_DATA_NONE;
  680. }
  681. /*
  682. * When in a RCU walk do the audit on the RCU retry. This is because
  683. * the collection of the dname in an inode audit message is not RCU
  684. * safe. Note this may drop some audits when the situation changes
  685. * during retry. However this is logically just as if the operation
  686. * happened a little later.
  687. */
  688. if ((a->type == LSM_AUDIT_DATA_INODE) &&
  689. (flags & MAY_NOT_BLOCK))
  690. return -ECHILD;
  691. sad.tclass = tclass;
  692. sad.requested = requested;
  693. sad.ssid = ssid;
  694. sad.tsid = tsid;
  695. sad.audited = audited;
  696. sad.denied = denied;
  697. sad.result = result;
  698. a->selinux_audit_data = &sad;
  699. common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
  700. return 0;
  701. }
  702. /**
  703. * avc_add_callback - Register a callback for security events.
  704. * @callback: callback function
  705. * @events: security events
  706. *
  707. * Register a callback function for events in the set @events.
  708. * Returns %0 on success or -%ENOMEM if insufficient memory
  709. * exists to add the callback.
  710. */
  711. int __init avc_add_callback(int (*callback)(u32 event), u32 events)
  712. {
  713. struct avc_callback_node *c;
  714. int rc = 0;
  715. c = kmalloc(sizeof(*c), GFP_KERNEL);
  716. if (!c) {
  717. rc = -ENOMEM;
  718. goto out;
  719. }
  720. c->callback = callback;
  721. c->events = events;
  722. c->next = avc_callbacks;
  723. avc_callbacks = c;
  724. out:
  725. return rc;
  726. }
  727. static inline int avc_sidcmp(u32 x, u32 y)
  728. {
  729. return (x == y || x == SECSID_WILD || y == SECSID_WILD);
  730. }
  731. /**
  732. * avc_update_node Update an AVC entry
  733. * @event : Updating event
  734. * @perms : Permission mask bits
  735. * @ssid,@tsid,@tclass : identifier of an AVC entry
  736. * @seqno : sequence number when decision was made
  737. * @od: operation_decision to be added to the node
  738. *
  739. * if a valid AVC entry doesn't exist,this function returns -ENOENT.
  740. * if kmalloc() called internal returns NULL, this function returns -ENOMEM.
  741. * otherwise, this function updates the AVC entry. The original AVC-entry object
  742. * will release later by RCU.
  743. */
  744. static int avc_update_node(u32 event, u32 perms, u16 cmd, u32 ssid, u32 tsid,
  745. u16 tclass, u32 seqno,
  746. struct operation_decision *od,
  747. u32 flags)
  748. {
  749. int hvalue, rc = 0;
  750. unsigned long flag;
  751. struct avc_node *pos, *node, *orig = NULL;
  752. struct hlist_head *head;
  753. spinlock_t *lock;
  754. node = avc_alloc_node();
  755. if (!node) {
  756. rc = -ENOMEM;
  757. goto out;
  758. }
  759. /* Lock the target slot */
  760. hvalue = avc_hash(ssid, tsid, tclass);
  761. head = &avc_cache.slots[hvalue];
  762. lock = &avc_cache.slots_lock[hvalue];
  763. spin_lock_irqsave(lock, flag);
  764. hlist_for_each_entry(pos, head, list) {
  765. if (ssid == pos->ae.ssid &&
  766. tsid == pos->ae.tsid &&
  767. tclass == pos->ae.tclass &&
  768. seqno == pos->ae.avd.seqno){
  769. orig = pos;
  770. break;
  771. }
  772. }
  773. if (!orig) {
  774. rc = -ENOENT;
  775. avc_node_kill(node);
  776. goto out_unlock;
  777. }
  778. /*
  779. * Copy and replace original node.
  780. */
  781. avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);
  782. if (orig->ae.ops_node) {
  783. rc = avc_operation_populate(node, orig->ae.ops_node);
  784. if (rc) {
  785. kmem_cache_free(avc_node_cachep, node);
  786. goto out_unlock;
  787. }
  788. }
  789. switch (event) {
  790. case AVC_CALLBACK_GRANT:
  791. node->ae.avd.allowed |= perms;
  792. if (node->ae.ops_node && (flags & AVC_OPERATION_CMD))
  793. avc_operation_allow_perm(node->ae.ops_node, cmd);
  794. break;
  795. case AVC_CALLBACK_TRY_REVOKE:
  796. case AVC_CALLBACK_REVOKE:
  797. node->ae.avd.allowed &= ~perms;
  798. break;
  799. case AVC_CALLBACK_AUDITALLOW_ENABLE:
  800. node->ae.avd.auditallow |= perms;
  801. break;
  802. case AVC_CALLBACK_AUDITALLOW_DISABLE:
  803. node->ae.avd.auditallow &= ~perms;
  804. break;
  805. case AVC_CALLBACK_AUDITDENY_ENABLE:
  806. node->ae.avd.auditdeny |= perms;
  807. break;
  808. case AVC_CALLBACK_AUDITDENY_DISABLE:
  809. node->ae.avd.auditdeny &= ~perms;
  810. break;
  811. case AVC_CALLBACK_ADD_OPERATION:
  812. avc_add_operation(node, od);
  813. break;
  814. }
  815. avc_node_replace(node, orig);
  816. out_unlock:
  817. spin_unlock_irqrestore(lock, flag);
  818. out:
  819. return rc;
  820. }
  821. /**
  822. * avc_flush - Flush the cache
  823. */
/* Delete every cached node, one bucket at a time under the bucket lock. */
static void avc_flush(void)
{
	struct hlist_head *head;
	struct avc_node *node;
	spinlock_t *lock;
	unsigned long flag;
	int i;

	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
		head = &avc_cache.slots[i];
		lock = &avc_cache.slots_lock[i];
		spin_lock_irqsave(lock, flag);
		/*
		 * With preemptable RCU, the outer spinlock does not
		 * prevent RCU grace periods from ending.
		 */
		rcu_read_lock();
		hlist_for_each_entry(node, head, list)
			avc_node_delete(node);
		rcu_read_unlock();
		spin_unlock_irqrestore(lock, flag);
	}
}
  846. /**
  847. * avc_ss_reset - Flush the cache and revalidate migrated permissions.
  848. * @seqno: policy sequence number
  849. */
  850. int avc_ss_reset(u32 seqno)
  851. {
  852. struct avc_callback_node *c;
  853. int rc = 0, tmprc;
  854. avc_flush();
  855. for (c = avc_callbacks; c; c = c->next) {
  856. if (c->events & AVC_CALLBACK_RESET) {
  857. tmprc = c->callback(AVC_CALLBACK_RESET);
  858. /* save the first error encountered for the return
  859. value and continue processing the callbacks */
  860. if (!rc)
  861. rc = tmprc;
  862. }
  863. }
  864. avc_latest_notif_update(seqno, 0);
  865. return rc;
  866. }
  867. /*
  868. * Slow-path helper function for avc_has_perm_noaudit,
  869. * when the avc_node lookup fails. We get called with
  870. * the RCU read lock held, and need to return with it
  871. * still held, but drop if for the security compute.
  872. *
  873. * Don't inline this, since it's the slow-path and just
  874. * results in a bigger stack frame.
  875. */
  876. static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
  877. u16 tclass, struct av_decision *avd,
  878. struct avc_operation_node *ops_node)
  879. {
  880. rcu_read_unlock();
  881. INIT_LIST_HEAD(&ops_node->od_head);
  882. security_compute_av(ssid, tsid, tclass, avd, &ops_node->ops);
  883. rcu_read_lock();
  884. return avc_insert(ssid, tsid, tclass, avd, ops_node);
  885. }
  886. static noinline int avc_denied(u32 ssid, u32 tsid,
  887. u16 tclass, u32 requested,
  888. u16 cmd, unsigned flags,
  889. struct av_decision *avd)
  890. {
  891. if (flags & AVC_STRICT)
  892. return -EACCES;
  893. if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
  894. return -EACCES;
  895. avc_update_node(AVC_CALLBACK_GRANT, requested, cmd, ssid,
  896. tsid, tclass, avd->seqno, NULL, flags);
  897. return 0;
  898. }
  899. /*
  900. * ioctl commands are comprised of four fields, direction, size, type, and
  901. * number. The avc operation logic filters based on two of them:
  902. *
  903. * type: or code, typically unique to each driver
  904. * number: or function
  905. *
  906. * For example, 0x89 is a socket type, and number 0x27 is the get hardware
  907. * address function.
  908. */
  909. int avc_has_operation(u32 ssid, u32 tsid, u16 tclass, u32 requested,
  910. u16 cmd, struct common_audit_data *ad)
  911. {
  912. struct avc_node *node;
  913. struct av_decision avd;
  914. u32 denied;
  915. struct operation_decision *od = NULL;
  916. struct operation_decision od_local;
  917. struct operation_perm allowed;
  918. struct operation_perm auditallow;
  919. struct operation_perm dontaudit;
  920. struct avc_operation_node local_ops_node;
  921. struct avc_operation_node *ops_node;
  922. u8 type = cmd >> 8;
  923. int rc = 0, rc2;
  924. ops_node = &local_ops_node;
  925. BUG_ON(!requested);
  926. rcu_read_lock();
  927. node = avc_lookup(ssid, tsid, tclass);
  928. if (unlikely(!node)) {
  929. node = avc_compute_av(ssid, tsid, tclass, &avd, ops_node);
  930. } else {
  931. memcpy(&avd, &node->ae.avd, sizeof(avd));
  932. ops_node = node->ae.ops_node;
  933. }
  934. /* if operations are not defined, only consider av_decision */
  935. if (!ops_node || !ops_node->ops.len)
  936. goto decision;
  937. od_local.allowed = &allowed;
  938. od_local.auditallow = &auditallow;
  939. od_local.dontaudit = &dontaudit;
  940. /* lookup operation decision */
  941. od = avc_operation_lookup(type, ops_node);
  942. if (unlikely(!od)) {
  943. /* Compute operation decision if type is flagged */
  944. if (!security_operation_test(ops_node->ops.type, type)) {
  945. avd.allowed &= ~requested;
  946. goto decision;
  947. }
  948. rcu_read_unlock();
  949. security_compute_operation(ssid, tsid, tclass, type, &od_local);
  950. rcu_read_lock();
  951. avc_update_node(AVC_CALLBACK_ADD_OPERATION, requested, cmd,
  952. ssid, tsid, tclass, avd.seqno, &od_local, 0);
  953. } else {
  954. avc_quick_copy_operation_decision(cmd, &od_local, od);
  955. }
  956. od = &od_local;
  957. if (!avc_operation_has_perm(od, cmd, OPERATION_ALLOWED))
  958. avd.allowed &= ~requested;
  959. decision:
  960. denied = requested & ~(avd.allowed);
  961. if (unlikely(denied))
  962. rc = avc_denied(ssid, tsid, tclass, requested, cmd,
  963. AVC_OPERATION_CMD, &avd);
  964. rcu_read_unlock();
  965. rc2 = avc_operation_audit(ssid, tsid, tclass, requested,
  966. &avd, od, cmd, rc, ad);
  967. if (rc2)
  968. return rc2;
  969. return rc;
  970. }
  971. /**
  972. * avc_has_perm_noaudit - Check permissions but perform no auditing.
  973. * @ssid: source security identifier
  974. * @tsid: target security identifier
  975. * @tclass: target security class
  976. * @requested: requested permissions, interpreted based on @tclass
  977. * @flags: AVC_STRICT or 0
  978. * @avd: access vector decisions
  979. *
  980. * Check the AVC to determine whether the @requested permissions are granted
  981. * for the SID pair (@ssid, @tsid), interpreting the permissions
  982. * based on @tclass, and call the security server on a cache miss to obtain
  983. * a new decision and add it to the cache. Return a copy of the decisions
  984. * in @avd. Return %0 if all @requested permissions are granted,
  985. * -%EACCES if any permissions are denied, or another -errno upon
  986. * other errors. This function is typically called by avc_has_perm(),
  987. * but may also be called directly to separate permission checking from
  988. * auditing, e.g. in cases where a lock must be held for the check but
  989. * should be released for the auditing.
  990. */
  991. inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
  992. u16 tclass, u32 requested,
  993. unsigned flags,
  994. struct av_decision *avd)
  995. {
  996. struct avc_node *node;
  997. struct avc_operation_node ops_node;
  998. int rc = 0;
  999. u32 denied;
  1000. BUG_ON(!requested);
  1001. rcu_read_lock();
  1002. node = avc_lookup(ssid, tsid, tclass);
  1003. if (unlikely(!node))
  1004. node = avc_compute_av(ssid, tsid, tclass, avd, &ops_node);
  1005. else
  1006. memcpy(avd, &node->ae.avd, sizeof(*avd));
  1007. denied = requested & ~(avd->allowed);
  1008. if (unlikely(denied))
  1009. rc = avc_denied(ssid, tsid, tclass, requested, 0, flags, avd);
  1010. rcu_read_unlock();
  1011. return rc;
  1012. }
  1013. /**
  1014. * avc_has_perm - Check permissions and perform any appropriate auditing.
  1015. * @ssid: source security identifier
  1016. * @tsid: target security identifier
  1017. * @tclass: target security class
  1018. * @requested: requested permissions, interpreted based on @tclass
  1019. * @auditdata: auxiliary audit data
  1020. *
  1021. * Check the AVC to determine whether the @requested permissions are granted
  1022. * for the SID pair (@ssid, @tsid), interpreting the permissions
  1023. * based on @tclass, and call the security server on a cache miss to obtain
  1024. * a new decision and add it to the cache. Audit the granting or denial of
  1025. * permissions in accordance with the policy. Return %0 if all @requested
  1026. * permissions are granted, -%EACCES if any permissions are denied, or
  1027. * another -errno upon other errors.
  1028. */
  1029. int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
  1030. u32 requested, struct common_audit_data *auditdata)
  1031. {
  1032. struct av_decision avd;
  1033. int rc, rc2;
  1034. rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
  1035. rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
  1036. if (rc2)
  1037. return rc2;
  1038. return rc;
  1039. }
/* Return the sequence number of the most recently seen policy change. */
u32 avc_policy_seqno(void)
{
	return avc_cache.latest_notif;
}
  1044. void avc_disable(void)
  1045. {
  1046. /*
  1047. * If you are looking at this because you have realized that we are
  1048. * not destroying the avc_node_cachep it might be easy to fix, but
  1049. * I don't know the memory barrier semantics well enough to know. It's
  1050. * possible that some other task dereferenced security_ops when
  1051. * it still pointed to selinux operations. If that is the case it's
  1052. * possible that it is about to use the avc and is about to need the
  1053. * avc_node_cachep. I know I could wrap the security.c security_ops call
  1054. * in an rcu_lock, but seriously, it's not worth it. Instead I just flush
  1055. * the cache and get that memory back.
  1056. */
  1057. if (avc_node_cachep) {
  1058. avc_flush();
  1059. /* kmem_cache_destroy(avc_node_cachep); */
  1060. }
  1061. }