/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */

#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/highuid.h>
#include <linux/cred.h>
#include <linux/securebits.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/projid.h>
#include <linux/fs_struct.h>

static struct kmem_cache *user_ns_cachep __read_mostly;
static DEFINE_MUTEX(userns_state_mutex);

static bool new_idmap_permitted(const struct file *file,
                                struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *map);

static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
        /* Start with the same capabilities as init but useless for doing
         * anything as the capabilities are bound to the new user namespace.
         */
        cred->securebits = SECUREBITS_DEFAULT;
        cred->cap_inheritable = CAP_EMPTY_SET;
        cred->cap_permitted = CAP_FULL_SET;
        cred->cap_effective = CAP_FULL_SET;
        cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
        key_put(cred->request_key_auth);
        cred->request_key_auth = NULL;
#endif
        /* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
        cred->user_ns = user_ns;
}

/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 */
int create_user_ns(struct cred *new)
{
        struct user_namespace *ns, *parent_ns = new->user_ns;
        kuid_t owner = new->euid;
        kgid_t group = new->egid;
        int ret;

        if (parent_ns->level > 32)
                return -EUSERS;

        /*
         * Verify that we cannot violate the policy of which files
         * may be accessed that is specified by the root directory,
         * by verifying that the root directory is at the root of the
         * mount namespace which allows all files to be accessed.
         */
        if (current_chrooted())
                return -EPERM;

        /* The creator needs a mapping in the parent user namespace
         * or else we won't be able to reasonably tell userspace who
         * created a user_namespace.
         */
        if (!kuid_has_mapping(parent_ns, owner) ||
            !kgid_has_mapping(parent_ns, group))
                return -EPERM;

        ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
        if (!ns)
                return -ENOMEM;

        ret = proc_alloc_inum(&ns->proc_inum);
        if (ret) {
                kmem_cache_free(user_ns_cachep, ns);
                return ret;
        }

        atomic_set(&ns->count, 1);
        /* Leave the new->user_ns reference with the new user namespace. */
        ns->parent = parent_ns;
        ns->level = parent_ns->level + 1;
        ns->owner = owner;
        ns->group = group;

        /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
        mutex_lock(&userns_state_mutex);
        ns->flags = parent_ns->flags;
        mutex_unlock(&userns_state_mutex);

        set_cred_user_ns(new, ns);

#ifdef CONFIG_PERSISTENT_KEYRINGS
        init_rwsem(&ns->persistent_keyring_register_sem);
#endif
        return 0;
}

int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
        struct cred *cred;
        int err = -ENOMEM;

        if (!(unshare_flags & CLONE_NEWUSER))
                return 0;

        cred = prepare_creds();
        if (cred) {
                err = create_user_ns(cred);
                if (err)
                        put_cred(cred);
                else
                        *new_cred = cred;
        }

        return err;
}
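
/*
 * Destroy a user namespace and walk up the parent chain: every namespace
 * holds a reference on its parent, so dropping that reference here may
 * bring the parent's count to zero, in which case the parent is freed on
 * the next loop iteration as well.
 */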
void free_user_ns(struct user_namespace *ns)
{
        struct user_namespace *parent;

        do {
                parent = ns->parent;
#ifdef CONFIG_PERSISTENT_KEYRINGS
                key_put(ns->persistent_keyring_register);
#endif
                proc_free_inum(ns->proc_inum);
                kmem_cache_free(user_ns_cachep, ns);
                ns = parent;
        } while (atomic_dec_and_test(&parent->count));
}
EXPORT_SYMBOL(free_user_ns);
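
/*
 * An id map is a small array of extents; each extent maps the range
 * [first, first + count - 1] in this namespace onto
 * [lower_first, lower_first + count - 1] in the parent namespace (or,
 * once the map has been installed, onto the kernel's global id space).
 *
 * Illustrative example (hypothetical extent, not taken from this file):
 * with { .first = 0, .lower_first = 100000, .count = 65536 },
 * map_id_down(map, 1000) returns 101000 and map_id_up(map, 101000)
 * returns 1000.
 */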
static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
        unsigned idx, extents;
        u32 first, last, id2;

        id2 = id + count - 1;

        /* Find the matching extent */
        extents = map->nr_extents;
        smp_rmb();
        for (idx = 0; idx < extents; idx++) {
                first = map->extent[idx].first;
                last = first + map->extent[idx].count - 1;
                if (id >= first && id <= last &&
                    (id2 >= first && id2 <= last))
                        break;
        }
        /* Map the id or note failure */
        if (idx < extents)
                id = (id - first) + map->extent[idx].lower_first;
        else
                id = (u32) -1;

        return id;
}

static u32 map_id_down(struct uid_gid_map *map, u32 id)
{
        unsigned idx, extents;
        u32 first, last;

        /* Find the matching extent */
        extents = map->nr_extents;
        smp_rmb();
        for (idx = 0; idx < extents; idx++) {
                first = map->extent[idx].first;
                last = first + map->extent[idx].count - 1;
                if (id >= first && id <= last)
                        break;
        }
        /* Map the id or note failure */
        if (idx < extents)
                id = (id - first) + map->extent[idx].lower_first;
        else
                id = (u32) -1;

        return id;
}

static u32 map_id_up(struct uid_gid_map *map, u32 id)
{
        unsigned idx, extents;
        u32 first, last;

        /* Find the matching extent */
        extents = map->nr_extents;
        smp_rmb();
        for (idx = 0; idx < extents; idx++) {
                first = map->extent[idx].lower_first;
                last = first + map->extent[idx].count - 1;
                if (id >= first && id <= last)
                        break;
        }
        /* Map the id or note failure */
        if (idx < extents)
                id = (id - first) + map->extent[idx].first;
        else
                id = (u32) -1;

        return id;
}

/**
 * make_kuid - Map a user-namespace uid pair into a kuid.
 * @ns:  User namespace that the uid is in
 * @uid: User identifier
 *
 * Maps a user-namespace uid pair into a kernel internal kuid,
 * and returns that kuid.
 *
 * When there is no mapping defined for the user-namespace uid
 * pair INVALID_UID is returned.  Callers are expected to test
 * for and handle INVALID_UID being returned.  INVALID_UID
 * may be tested for using uid_valid().
 */
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
        /* Map the uid to a global kernel uid */
        return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);

/**
 * from_kuid - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
 */
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
        /* Map the uid from a global kernel uid */
        return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);

/**
 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kuid, from_kuid_munged never fails and always
 * returns a valid uid.  This makes from_kuid_munged appropriate
 * for use in syscalls like stat and getuid where failing the
 * system call and failing to provide a valid uid are not
 * options.
 *
 * If @kuid has no mapping in @targ overflowuid is returned.
 */
uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
{
        uid_t uid;

        uid = from_kuid(targ, kuid);
        if (uid == (uid_t) -1)
                uid = overflowuid;
        return uid;
}
EXPORT_SYMBOL(from_kuid_munged);

/**
 * make_kgid - Map a user-namespace gid pair into a kgid.
 * @ns:  User namespace that the gid is in
 * @gid: Group identifier
 *
 * Maps a user-namespace gid pair into a kernel internal kgid,
 * and returns that kgid.
 *
 * When there is no mapping defined for the user-namespace gid
 * pair INVALID_GID is returned.  Callers are expected to test
 * for and handle INVALID_GID being returned.  INVALID_GID may be
 * tested for using gid_valid().
 */
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
        /* Map the gid to a global kernel gid */
        return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);

/**
 * from_kgid - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
 */
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
        /* Map the gid from a global kernel gid */
        return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);

/**
 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kgid, from_kgid_munged never fails and always
 * returns a valid gid.  This makes from_kgid_munged appropriate
 * for use in syscalls like stat and getgid where failing the
 * system call and failing to provide a valid gid are not options.
 *
 * If @kgid has no mapping in @targ overflowgid is returned.
 */
gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
{
        gid_t gid;

        gid = from_kgid(targ, kgid);
        if (gid == (gid_t) -1)
                gid = overflowgid;
        return gid;
}
EXPORT_SYMBOL(from_kgid_munged);

/**
 * make_kprojid - Map a user-namespace projid pair into a kprojid.
 * @ns:     User namespace that the projid is in
 * @projid: Project identifier
 *
 * Maps a user-namespace projid pair into a kernel internal kprojid,
 * and returns that kprojid.
 *
 * When there is no mapping defined for the user-namespace projid
 * pair INVALID_PROJID is returned.  Callers are expected to test
 * for and handle INVALID_PROJID being returned.  INVALID_PROJID
 * may be tested for using projid_valid().
 */
kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
{
        /* Map the projid to a global kernel projid */
        return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
}
EXPORT_SYMBOL(make_kprojid);

/**
 * from_kprojid - Create a projid from a kprojid user-namespace pair.
 * @targ:    The user namespace we want a projid in.
 * @kprojid: The kernel internal project identifier to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
 */
projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
{
        /* Map the projid from a global kernel projid */
        return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
}
EXPORT_SYMBOL(from_kprojid);

/**
 * from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
 * @targ:    The user namespace we want a projid in.
 * @kprojid: The kernel internal projid to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kprojid, from_kprojid_munged never fails and always
 * returns a valid projid.  This makes from_kprojid_munged
 * appropriate for use in syscalls like stat where failing the
 * system call and failing to provide a valid projid are not
 * options.
 *
 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
 */
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
        projid_t projid;

        projid = from_kprojid(targ, kprojid);
        if (projid == (projid_t) -1)
                projid = OVERFLOW_PROJID;
        return projid;
}
EXPORT_SYMBOL(from_kprojid_munged);

static int uid_m_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        struct uid_gid_extent *extent = v;
        struct user_namespace *lower_ns;
        uid_t lower;

        lower_ns = seq_user_ns(seq);
        if ((lower_ns == ns) && lower_ns->parent)
                lower_ns = lower_ns->parent;

        lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));

        seq_printf(seq, "%10u %10u %10u\n",
                   extent->first,
                   lower,
                   extent->count);

        return 0;
}

static int gid_m_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        struct uid_gid_extent *extent = v;
        struct user_namespace *lower_ns;
        gid_t lower;

        lower_ns = seq_user_ns(seq);
        if ((lower_ns == ns) && lower_ns->parent)
                lower_ns = lower_ns->parent;

        lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));

        seq_printf(seq, "%10u %10u %10u\n",
                   extent->first,
                   lower,
                   extent->count);

        return 0;
}

static int projid_m_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        struct uid_gid_extent *extent = v;
        struct user_namespace *lower_ns;
        projid_t lower;

        lower_ns = seq_user_ns(seq);
        if ((lower_ns == ns) && lower_ns->parent)
                lower_ns = lower_ns->parent;

        lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));

        seq_printf(seq, "%10u %10u %10u\n",
                   extent->first,
                   lower,
                   extent->count);

        return 0;
}

static void *m_start(struct seq_file *seq, loff_t *ppos,
                     struct uid_gid_map *map)
{
        struct uid_gid_extent *extent = NULL;
        loff_t pos = *ppos;

        if (pos < map->nr_extents)
                extent = &map->extent[pos];

        return extent;
}

static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
        struct user_namespace *ns = seq->private;

        return m_start(seq, ppos, &ns->uid_map);
}

static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
        struct user_namespace *ns = seq->private;

        return m_start(seq, ppos, &ns->gid_map);
}

static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
        struct user_namespace *ns = seq->private;

        return m_start(seq, ppos, &ns->projid_map);
}

static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return seq->op->start(seq, pos);
}

static void m_stop(struct seq_file *seq, void *v)
{
        return;
}

const struct seq_operations proc_uid_seq_operations = {
        .start = uid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = uid_m_show,
};

const struct seq_operations proc_gid_seq_operations = {
        .start = gid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = gid_m_show,
};

const struct seq_operations proc_projid_seq_operations = {
        .start = projid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = projid_m_show,
};

static bool mappings_overlap(struct uid_gid_map *new_map,
                             struct uid_gid_extent *extent)
{
        u32 upper_first, lower_first, upper_last, lower_last;
        unsigned idx;

        upper_first = extent->first;
        lower_first = extent->lower_first;
        upper_last = upper_first + extent->count - 1;
        lower_last = lower_first + extent->count - 1;

        for (idx = 0; idx < new_map->nr_extents; idx++) {
                u32 prev_upper_first, prev_lower_first;
                u32 prev_upper_last, prev_lower_last;
                struct uid_gid_extent *prev;

                prev = &new_map->extent[idx];

                prev_upper_first = prev->first;
                prev_lower_first = prev->lower_first;
                prev_upper_last = prev_upper_first + prev->count - 1;
                prev_lower_last = prev_lower_first + prev->count - 1;

                /* Does the upper range intersect a previous extent? */
                if ((prev_upper_first <= upper_last) &&
                    (prev_upper_last >= upper_first))
                        return true;

                /* Does the lower range intersect a previous extent? */
                if ((prev_lower_first <= lower_last) &&
                    (prev_lower_last >= lower_first))
                        return true;
        }
        return false;
}
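
/*
 * map_write() parses writes to /proc/<pid>/uid_map, gid_map and projid_map.
 * Each line has the form "<first> <lower_first> <count>"; for example the
 * single-extent line "0 1000 1" maps id 0 in this namespace onto id 1000
 * in the parent namespace.  The whole map must arrive in a single write()
 * at offset 0, must be smaller than one page, may contain at most
 * UID_GID_MAP_MAX_EXTENTS extents, and can only be written once.
 */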
static ssize_t map_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos,
                         int cap_setid,
                         struct uid_gid_map *map,
                         struct uid_gid_map *parent_map)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct uid_gid_map new_map;
        unsigned idx;
        struct uid_gid_extent *extent = NULL;
        unsigned long page = 0;
        char *kbuf, *pos, *next_line;
        ssize_t ret = -EINVAL;

        /*
         * The userns_state_mutex serializes all writes to any given map.
         *
         * Any map is only ever written once.
         *
         * An id map fits within 1 cache line on most architectures.
         *
         * On read nothing needs to be done unless you are on an
         * architecture with a crazy cache coherency model like alpha.
         *
         * There is a one time data dependency between reading the
         * count of the extents and the values of the extents.  The
         * desired behavior is to see the values of the extents that
         * were written before the count of the extents.
         *
         * To achieve this, smp_wmb() is used to guarantee the write
         * order and smp_rmb() guarantees that we don't see stale data
         * on architectures with weak memory ordering.
         */
        mutex_lock(&userns_state_mutex);

        ret = -EPERM;
        /* Only allow one successful write to the map */
        if (map->nr_extents != 0)
                goto out;

        /*
         * Adjusting namespace settings requires capabilities on the target.
         */
        if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
                goto out;

        /* Get a buffer */
        ret = -ENOMEM;
        page = __get_free_page(GFP_TEMPORARY);
        kbuf = (char *) page;
        if (!page)
                goto out;

        /* Only allow < page size writes at the beginning of the file */
        ret = -EINVAL;
        if ((*ppos != 0) || (count >= PAGE_SIZE))
                goto out;

        /* Slurp in the user data */
        ret = -EFAULT;
        if (copy_from_user(kbuf, buf, count))
                goto out;
        kbuf[count] = '\0';

        /* Parse the user data */
        ret = -EINVAL;
        pos = kbuf;
        new_map.nr_extents = 0;
        for (; pos; pos = next_line) {
                extent = &new_map.extent[new_map.nr_extents];

                /* Find the end of line and ensure I don't look past it */
                next_line = strchr(pos, '\n');
                if (next_line) {
                        *next_line = '\0';
                        next_line++;
                        if (*next_line == '\0')
                                next_line = NULL;
                }

                pos = skip_spaces(pos);
                extent->first = simple_strtoul(pos, &pos, 10);
                if (!isspace(*pos))
                        goto out;

                pos = skip_spaces(pos);
                extent->lower_first = simple_strtoul(pos, &pos, 10);
                if (!isspace(*pos))
                        goto out;

                pos = skip_spaces(pos);
                extent->count = simple_strtoul(pos, &pos, 10);
                if (*pos && !isspace(*pos))
                        goto out;

                /* Verify there is no trailing junk on the line */
                pos = skip_spaces(pos);
                if (*pos != '\0')
                        goto out;

                /* Verify we have been given valid starting values */
                if ((extent->first == (u32) -1) ||
                    (extent->lower_first == (u32) -1))
                        goto out;

                /* Verify count is not zero and does not cause the
                 * extent to wrap
                 */
                if ((extent->first + extent->count) <= extent->first)
                        goto out;
                if ((extent->lower_first + extent->count) <=
                     extent->lower_first)
                        goto out;

                /* Do the ranges in extent overlap any previous extents? */
                if (mappings_overlap(&new_map, extent))
                        goto out;

                new_map.nr_extents++;

                /* Fail if the file contains too many extents */
                if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
                    (next_line != NULL))
                        goto out;
        }
        /* Be very certain the new map actually exists */
        if (new_map.nr_extents == 0)
                goto out;

        ret = -EPERM;
        /* Verify the user is allowed to use the user ids being mapped to. */
        if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
                goto out;

        /* Map the lower ids from the parent user namespace to the
         * kernel global id space.
         */
        for (idx = 0; idx < new_map.nr_extents; idx++) {
                u32 lower_first;

                extent = &new_map.extent[idx];

                lower_first = map_id_range_down(parent_map,
                                                extent->lower_first,
                                                extent->count);

                /* Fail if we can not map the specified extent to
                 * the kernel global id space.
                 */
                if (lower_first == (u32) -1)
                        goto out;

                extent->lower_first = lower_first;
        }

        /* Install the map */
        memcpy(map->extent, new_map.extent,
               new_map.nr_extents * sizeof(new_map.extent[0]));
        smp_wmb();
        map->nr_extents = new_map.nr_extents;

        *ppos = count;
        ret = count;
out:
        mutex_unlock(&userns_state_mutex);
        if (page)
                free_page(page);
        return ret;
}

ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
                           size_t size, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct user_namespace *seq_ns = seq_user_ns(seq);

        if (!ns->parent)
                return -EPERM;

        if ((seq_ns != ns) && (seq_ns != ns->parent))
                return -EPERM;

        return map_write(file, buf, size, ppos, CAP_SETUID,
                         &ns->uid_map, &ns->parent->uid_map);
}

ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
                           size_t size, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct user_namespace *seq_ns = seq_user_ns(seq);

        if (!ns->parent)
                return -EPERM;

        if ((seq_ns != ns) && (seq_ns != ns->parent))
                return -EPERM;

        return map_write(file, buf, size, ppos, CAP_SETGID,
                         &ns->gid_map, &ns->parent->gid_map);
}

ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
                              size_t size, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct user_namespace *seq_ns = seq_user_ns(seq);

        if (!ns->parent)
                return -EPERM;

        if ((seq_ns != ns) && (seq_ns != ns->parent))
                return -EPERM;

        /* Anyone can set any valid project id, no capability needed */
        return map_write(file, buf, size, ppos, -1,
                         &ns->projid_map, &ns->parent->projid_map);
}
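
/*
 * Policy enforced below: without CAP_SETUID/CAP_SETGID in the parent user
 * namespace, a process whose euid owns the namespace may only install a
 * single one-to-one mapping, and only onto its own euid (or, for gids,
 * onto its own egid and only after setgroups has been denied).  projid
 * maps are written with cap_setid == -1 and need no capability at all.
 */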
static bool new_idmap_permitted(const struct file *file,
                                struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *new_map)
{
        const struct cred *cred = file->f_cred;

        /* Don't allow mappings that would allow anything that wouldn't
         * be allowed without the establishment of unprivileged mappings.
         */
        if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
            uid_eq(ns->owner, cred->euid)) {
                u32 id = new_map->extent[0].lower_first;

                if (cap_setid == CAP_SETUID) {
                        kuid_t uid = make_kuid(ns->parent, id);

                        if (uid_eq(uid, cred->euid))
                                return true;
                } else if (cap_setid == CAP_SETGID) {
                        kgid_t gid = make_kgid(ns->parent, id);

                        if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
                            gid_eq(gid, cred->egid))
                                return true;
                }
        }

        /* Allow anyone to set a mapping that doesn't require privilege */
        if (!cap_valid(cap_setid))
                return true;

        /* Allow the specified ids if we have the appropriate capability
         * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
         * And the opener of the id file also had the appropriate capability.
         */
        if (ns_capable(ns->parent, cap_setid) &&
            file_ns_capable(file, ns->parent, cap_setid))
                return true;

        return false;
}

int proc_setgroups_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        unsigned long userns_flags = ACCESS_ONCE(ns->flags);

        seq_printf(seq, "%s\n",
                   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
                   "allow" : "deny");
        return 0;
}
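
/*
 * proc_setgroups_write() accepts exactly "allow" or "deny" (plus optional
 * trailing whitespace) written to /proc/<pid>/setgroups.  "deny" is only
 * accepted before a gid mapping has been installed, and once setgroups
 * has been denied it cannot be re-allowed; that ordering is what permits
 * the unprivileged gid mapping case in new_idmap_permitted() above.
 */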
ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        char kbuf[8], *pos;
        bool setgroups_allowed;
        ssize_t ret;

        /* Only allow a very narrow range of strings to be written */
        ret = -EINVAL;
        if ((*ppos != 0) || (count >= sizeof(kbuf)))
                goto out;

        /* What was written? */
        ret = -EFAULT;
        if (copy_from_user(kbuf, buf, count))
                goto out;
        kbuf[count] = '\0';
        pos = kbuf;

        /* What is being requested? */
        ret = -EINVAL;
        if (strncmp(pos, "allow", 5) == 0) {
                pos += 5;
                setgroups_allowed = true;
        } else if (strncmp(pos, "deny", 4) == 0) {
                pos += 4;
                setgroups_allowed = false;
        } else
                goto out;

        /* Verify there is no trailing junk on the line */
        pos = skip_spaces(pos);
        if (*pos != '\0')
                goto out;

        ret = -EPERM;
        mutex_lock(&userns_state_mutex);
        if (setgroups_allowed) {
                /* Enabling setgroups after setgroups has been disabled
                 * is not allowed.
                 */
                if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
                        goto out_unlock;
        } else {
                /* Permanently disabling setgroups after setgroups has
                 * been enabled by writing the gid_map is not allowed.
                 */
                if (ns->gid_map.nr_extents != 0)
                        goto out_unlock;
                ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
        }
        mutex_unlock(&userns_state_mutex);

        /* Report a successful write */
        *ppos = count;
        ret = count;
out:
        return ret;
out_unlock:
        mutex_unlock(&userns_state_mutex);
        goto out;
}

bool userns_may_setgroups(const struct user_namespace *ns)
{
        bool allowed;

        mutex_lock(&userns_state_mutex);
        /* It is not safe to use setgroups until a gid mapping in
         * the user namespace has been established.
         */
        allowed = ns->gid_map.nr_extents != 0;
        /* Is setgroups allowed? */
        allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
        mutex_unlock(&userns_state_mutex);

        return allowed;
}

static void *userns_get(struct task_struct *task)
{
        struct user_namespace *user_ns;

        rcu_read_lock();
        user_ns = get_user_ns(__task_cred(task)->user_ns);
        rcu_read_unlock();

        return user_ns;
}

static void userns_put(void *ns)
{
        put_user_ns(ns);
}

static int userns_install(struct nsproxy *nsproxy, void *ns)
{
        struct user_namespace *user_ns = ns;
        struct cred *cred;

        /* Don't allow gaining capabilities by reentering
         * the same user namespace.
         */
        if (user_ns == current_user_ns())
                return -EINVAL;

        /* Threaded processes may not enter a different user namespace */
        if (atomic_read(&current->mm->mm_users) > 1)
                return -EINVAL;

        if (current->fs->users != 1)
                return -EINVAL;

        if (!ns_capable(user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        cred = prepare_creds();
        if (!cred)
                return -ENOMEM;

        put_user_ns(cred->user_ns);
        set_cred_user_ns(cred, get_user_ns(user_ns));

        return commit_creds(cred);
}

static unsigned int userns_inum(void *ns)
{
        struct user_namespace *user_ns = ns;

        return user_ns->proc_inum;
}

const struct proc_ns_operations userns_operations = {
        .name = "user",
        .type = CLONE_NEWUSER,
        .get = userns_get,
        .put = userns_put,
        .install = userns_install,
        .inum = userns_inum,
};

static __init int user_namespaces_init(void)
{
        user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
        return 0;
}
subsys_initcall(user_namespaces_init);