x_tables.c
/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

struct compat_delta {
        unsigned int offset; /* offset in kernel */
        int delta; /* delta in 32bit user land */
};

struct xt_af {
        struct mutex mutex;
        struct list_head match;
        struct list_head target;
#ifdef CONFIG_COMPAT
        struct mutex compat_mutex;
        struct compat_delta *compat_tab;
        unsigned int number; /* number of slots in compat_tab[] */
        unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
        [NFPROTO_UNSPEC] = "x",
        [NFPROTO_IPV4]   = "ip",
        [NFPROTO_ARP]    = "arp",
        [NFPROTO_BRIDGE] = "eb",
        [NFPROTO_IPV6]   = "ip6",
};

/* Allow this many total (re)entries. */
static const unsigned int xt_jumpstack_multiplier = 2;

/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
        u_int8_t af = target->family;

        mutex_lock(&xt[af].mutex);
        list_add(&target->list, &xt[af].target);
        mutex_unlock(&xt[af].mutex);
        return 0;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
        u_int8_t af = target->family;

        mutex_lock(&xt[af].mutex);
        list_del(&target->list);
        mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = xt_register_target(&target[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                xt_unregister_targets(target, i);
        return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
        while (n-- > 0)
                xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);
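
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * extension module would typically register an array of targets from its
 * init hook and unwind them on exit; all names below are invented.
 *
 *	static struct xt_target example_tg_reg[] __read_mostly = {
 *		{
 *			.name       = "EXAMPLE",
 *			.family     = NFPROTO_IPV4,
 *			.target     = example_tg,	// hypothetical hook fn
 *			.targetsize = sizeof(struct example_tg_info),
 *			.me         = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init example_tg_init(void)
 *	{
 *		return xt_register_targets(example_tg_reg,
 *					   ARRAY_SIZE(example_tg_reg));
 *	}
 *
 *	static void __exit example_tg_exit(void)
 *	{
 *		xt_unregister_targets(example_tg_reg,
 *				      ARRAY_SIZE(example_tg_reg));
 *	}
 */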
int xt_register_match(struct xt_match *match)
{
        u_int8_t af = match->family;

        mutex_lock(&xt[af].mutex);
        list_add(&match->list, &xt[af].match);
        mutex_unlock(&xt[af].mutex);
        return 0;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
        u_int8_t af = match->family;

        mutex_lock(&xt[af].mutex);
        list_del(&match->list);
        mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
        unsigned int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = xt_register_match(&match[i]);
                if (err)
                        goto err;
        }
        return err;

err:
        if (i > 0)
                xt_unregister_matches(match, i);
        return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
        while (n-- > 0)
                xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);

/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
        struct xt_match *m;
        int err = -ENOENT;

        mutex_lock(&xt[af].mutex);
        list_for_each_entry(m, &xt[af].match, list) {
                if (strcmp(m->name, name) == 0) {
                        if (m->revision == revision) {
                                if (try_module_get(m->me)) {
                                        mutex_unlock(&xt[af].mutex);
                                        return m;
                                }
                        } else
                                err = -EPROTOTYPE; /* Found something. */
                }
        }
        mutex_unlock(&xt[af].mutex);

        if (af != NFPROTO_UNSPEC)
                /* Try searching again in the family-independent list */
                return xt_find_match(NFPROTO_UNSPEC, name, revision);

        return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
        struct xt_match *match;

        match = xt_find_match(nfproto, name, revision);
        if (IS_ERR(match)) {
                request_module("%st_%s", xt_prefix[nfproto], name);
                match = xt_find_match(nfproto, name, revision);
        }

        return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
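
/*
 * Illustrative sketch (assumption, not from the original file): a table
 * parser resolving the "limit" match for IPv4 would do roughly this; the
 * request_module() above turns a miss into a modprobe of "ipt_limit",
 * since xt_prefix[NFPROTO_IPV4] is "ip".
 *
 *	struct xt_match *m = xt_request_find_match(NFPROTO_IPV4, "limit", 0);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);	// -ENOENT or -EPROTOTYPE
 *	...
 *	module_put(m->me);	// drop the ref taken by xt_find_match()
 */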
/* Find target, grabs ref. Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
        struct xt_target *t;
        int err = -ENOENT;

        mutex_lock(&xt[af].mutex);
        list_for_each_entry(t, &xt[af].target, list) {
                if (strcmp(t->name, name) == 0) {
                        if (t->revision == revision) {
                                if (try_module_get(t->me)) {
                                        mutex_unlock(&xt[af].mutex);
                                        return t;
                                }
                        } else
                                err = -EPROTOTYPE; /* Found something. */
                }
        }
        mutex_unlock(&xt[af].mutex);

        if (af != NFPROTO_UNSPEC)
                /* Try searching again in the family-independent list */
                return xt_find_target(NFPROTO_UNSPEC, name, revision);

        return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
        struct xt_target *target;

        target = xt_find_target(af, name, revision);
        if (IS_ERR(target)) {
                request_module("%st_%s", xt_prefix[af], name);
                target = xt_find_target(af, name, revision);
        }

        return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
        const struct xt_match *m;
        int have_rev = 0;

        list_for_each_entry(m, &xt[af].match, list) {
                if (strcmp(m->name, name) == 0) {
                        if (m->revision > *bestp)
                                *bestp = m->revision;
                        if (m->revision == revision)
                                have_rev = 1;
                }
        }

        if (af != NFPROTO_UNSPEC && !have_rev)
                return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

        return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
        const struct xt_target *t;
        int have_rev = 0;

        list_for_each_entry(t, &xt[af].target, list) {
                if (strcmp(t->name, name) == 0) {
                        if (t->revision > *bestp)
                                *bestp = t->revision;
                        if (t->revision == revision)
                                have_rev = 1;
                }
        }

        if (af != NFPROTO_UNSPEC && !have_rev)
                return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

        return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
                     int *err)
{
        int have_rev, best = -1;

        mutex_lock(&xt[af].mutex);
        if (target == 1)
                have_rev = target_revfn(af, name, revision, &best);
        else
                have_rev = match_revfn(af, name, revision, &best);
        mutex_unlock(&xt[af].mutex);

        /* Nothing at all? Return 0 to try loading module. */
        if (best == -1) {
                *err = -ENOENT;
                return 0;
        }

        *err = best;
        if (!have_rev)
                *err = -EPROTONOSUPPORT;
        return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
        static const char *const inetbr_names[] = {
                "PREROUTING", "INPUT", "FORWARD",
                "OUTPUT", "POSTROUTING", "BROUTING",
        };
        static const char *const arp_names[] = {
                "INPUT", "FORWARD", "OUTPUT",
        };
        const char *const *names;
        unsigned int i, max;
        char *p = buf;
        bool np = false;
        int res;

        names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
        max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
                                           ARRAY_SIZE(inetbr_names);
        *p = '\0';
        for (i = 0; i < max; ++i) {
                if (!(mask & (1 << i)))
                        continue;
                res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
                if (res > 0) {
                        size -= res;
                        p += res;
                }
                np = true;
        }

        return buf;
}
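
/*
 * Worked example (illustrative, not from the original file): for
 * nfproto == NFPROTO_IPV4 and mask == (1 << NF_INET_PRE_ROUTING) |
 * (1 << NF_INET_LOCAL_OUT), i.e. bits 0 and 3 set, the loop emits
 * "PREROUTING" first and then appends "/OUTPUT", so the buffer ends
 * up holding "PREROUTING/OUTPUT".
 */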
int xt_check_match(struct xt_mtchk_param *par,
                   unsigned int size, u_int8_t proto, bool inv_proto)
{
        int ret;

        if (XT_ALIGN(par->match->matchsize) != size &&
            par->match->matchsize != -1) {
                /*
                 * ebt_among is exempt from centralized matchsize checking
                 * because it uses a dynamic-size data set.
                 */
                pr_err("%s_tables: %s.%u match: invalid size "
                       "%u (kernel) != (user) %u\n",
                       xt_prefix[par->family], par->match->name,
                       par->match->revision,
                       XT_ALIGN(par->match->matchsize), size);
                return -EINVAL;
        }
        if (par->match->table != NULL &&
            strcmp(par->match->table, par->table) != 0) {
                pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
                       xt_prefix[par->family], par->match->name,
                       par->match->table, par->table);
                return -EINVAL;
        }
        if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
                char used[64], allow[64];

                pr_err("%s_tables: %s match: used from hooks %s, but only "
                       "valid from %s\n",
                       xt_prefix[par->family], par->match->name,
                       textify_hooks(used, sizeof(used), par->hook_mask,
                                     par->family),
                       textify_hooks(allow, sizeof(allow), par->match->hooks,
                                     par->family));
                return -EINVAL;
        }
        if (par->match->proto && (par->match->proto != proto || inv_proto)) {
                pr_err("%s_tables: %s match: only valid for protocol %u\n",
                       xt_prefix[par->family], par->match->name,
                       par->match->proto);
                return -EINVAL;
        }
        if (par->match->checkentry != NULL) {
                ret = par->match->checkentry(par);
                if (ret < 0)
                        return ret;
                else if (ret > 0)
                        /* Flag up potential errors. */
                        return -EIO;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
        struct xt_af *xp = &xt[af];

        if (!xp->compat_tab) {
                if (!xp->number)
                        return -EINVAL;
                xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
                if (!xp->compat_tab)
                        return -ENOMEM;
                xp->cur = 0;
        }

        if (xp->cur >= xp->number)
                return -EINVAL;

        if (xp->cur)
                delta += xp->compat_tab[xp->cur - 1].delta;
        xp->compat_tab[xp->cur].offset = offset;
        xp->compat_tab[xp->cur].delta = delta;
        xp->cur++;
        return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
        if (xt[af].compat_tab) {
                vfree(xt[af].compat_tab);
                xt[af].compat_tab = NULL;
                xt[af].number = 0;
                xt[af].cur = 0;
        }
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
        struct compat_delta *tmp = xt[af].compat_tab;
        int mid, left = 0, right = xt[af].cur - 1;

        while (left <= right) {
                mid = (left + right) >> 1;
                if (offset > tmp[mid].offset)
                        left = mid + 1;
                else if (offset < tmp[mid].offset)
                        right = mid - 1;
                else
                        return mid ? tmp[mid - 1].delta : 0;
        }
        return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
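
/*
 * Worked example (illustrative, not from the original file): deltas are
 * stored cumulatively, so after
 *
 *	xt_compat_add_offset(af, 100, 4);
 *	xt_compat_add_offset(af, 200, 4);
 *
 * compat_tab holds {100, 4} and {200, 8}. xt_compat_calc_jump(af, 150)
 * binary-searches for 150, lands between the two entries and returns 4:
 * a jump target past offset 100 must be shifted by the 4 bytes the first
 * entry grew when translated from the 32-bit to the native layout.
 */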
void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
        xt[af].number = number;
        xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);

int xt_compat_match_offset(const struct xt_match *match)
{
        u_int16_t csize = match->compatsize ? : match->matchsize;
        return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
                              unsigned int *size)
{
        const struct xt_match *match = m->u.kernel.match;
        struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
        int pad, off = xt_compat_match_offset(match);
        u_int16_t msize = cm->u.user.match_size;

        m = *dstptr;
        memcpy(m, cm, sizeof(*cm));
        if (match->compat_from_user)
                match->compat_from_user(m->data, cm->data);
        else
                memcpy(m->data, cm->data, msize - sizeof(*cm));
        pad = XT_ALIGN(match->matchsize) - match->matchsize;
        if (pad > 0)
                memset(m->data + match->matchsize, 0, pad);

        msize += off;
        m->u.user.match_size = msize;

        *size += off;
        *dstptr += msize;
        return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
                            void __user **dstptr, unsigned int *size)
{
        const struct xt_match *match = m->u.kernel.match;
        struct compat_xt_entry_match __user *cm = *dstptr;
        int off = xt_compat_match_offset(match);
        u_int16_t msize = m->u.user.match_size - off;

        if (copy_to_user(cm, m, sizeof(*cm)) ||
            put_user(msize, &cm->u.user.match_size) ||
            copy_to_user(cm->u.user.name, m->u.kernel.match->name,
                         strlen(m->u.kernel.match->name) + 1))
                return -EFAULT;

        if (match->compat_to_user) {
                if (match->compat_to_user((void __user *)cm->data, m->data))
                        return -EFAULT;
        } else {
                if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
                        return -EFAULT;
        }

        *size -= off;
        *dstptr += msize;
        return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */
int xt_check_target(struct xt_tgchk_param *par,
                    unsigned int size, u_int8_t proto, bool inv_proto)
{
        int ret;

        if (XT_ALIGN(par->target->targetsize) != size) {
                pr_err("%s_tables: %s.%u target: invalid size "
                       "%u (kernel) != (user) %u\n",
                       xt_prefix[par->family], par->target->name,
                       par->target->revision,
                       XT_ALIGN(par->target->targetsize), size);
                return -EINVAL;
        }
        if (par->target->table != NULL &&
            strcmp(par->target->table, par->table) != 0) {
                pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
                       xt_prefix[par->family], par->target->name,
                       par->target->table, par->table);
                return -EINVAL;
        }
        if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
                char used[64], allow[64];

                pr_err("%s_tables: %s target: used from hooks %s, but only "
                       "usable from %s\n",
                       xt_prefix[par->family], par->target->name,
                       textify_hooks(used, sizeof(used), par->hook_mask,
                                     par->family),
                       textify_hooks(allow, sizeof(allow), par->target->hooks,
                                     par->family));
                return -EINVAL;
        }
        if (par->target->proto && (par->target->proto != proto || inv_proto)) {
                pr_err("%s_tables: %s target: only valid for protocol %u\n",
                       xt_prefix[par->family], par->target->name,
                       par->target->proto);
                return -EINVAL;
        }
        if (par->target->checkentry != NULL) {
                ret = par->target->checkentry(par);
                if (ret < 0)
                        return ret;
                else if (ret > 0)
                        /* Flag up potential errors. */
                        return -EIO;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
        u_int16_t csize = target->compatsize ? : target->targetsize;
        return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
                                unsigned int *size)
{
        const struct xt_target *target = t->u.kernel.target;
        struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
        int pad, off = xt_compat_target_offset(target);
        u_int16_t tsize = ct->u.user.target_size;

        t = *dstptr;
        memcpy(t, ct, sizeof(*ct));
        if (target->compat_from_user)
                target->compat_from_user(t->data, ct->data);
        else
                memcpy(t->data, ct->data, tsize - sizeof(*ct));
        pad = XT_ALIGN(target->targetsize) - target->targetsize;
        if (pad > 0)
                memset(t->data + target->targetsize, 0, pad);

        tsize += off;
        t->u.user.target_size = tsize;

        *size += off;
        *dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
                             void __user **dstptr, unsigned int *size)
{
        const struct xt_target *target = t->u.kernel.target;
        struct compat_xt_entry_target __user *ct = *dstptr;
        int off = xt_compat_target_offset(target);
        u_int16_t tsize = t->u.user.target_size - off;

        if (copy_to_user(ct, t, sizeof(*ct)) ||
            put_user(tsize, &ct->u.user.target_size) ||
            copy_to_user(ct->u.user.name, t->u.kernel.target->name,
                         strlen(t->u.kernel.target->name) + 1))
                return -EFAULT;

        if (target->compat_to_user) {
                if (target->compat_to_user((void __user *)ct->data, t->data))
                        return -EFAULT;
        } else {
                if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
                        return -EFAULT;
        }

        *size -= off;
        *dstptr += tsize;
        return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
        struct xt_table_info *newinfo;
        int cpu;

        /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
        if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
                return NULL;

        newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
        if (!newinfo)
                return NULL;

        newinfo->size = size;

        for_each_possible_cpu(cpu) {
                if (size <= PAGE_SIZE)
                        newinfo->entries[cpu] = kmalloc_node(size,
                                                             GFP_KERNEL,
                                                             cpu_to_node(cpu));
                else
                        newinfo->entries[cpu] = vmalloc_node(size,
                                                             cpu_to_node(cpu));

                if (newinfo->entries[cpu] == NULL) {
                        xt_free_table_info(newinfo);
                        return NULL;
                }
        }

        return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
        int cpu;

        for_each_possible_cpu(cpu)
                kvfree(info->entries[cpu]);

        if (info->jumpstack != NULL) {
                for_each_possible_cpu(cpu)
                        kvfree(info->jumpstack[cpu]);
                kvfree(info->jumpstack);
        }

        free_percpu(info->stackptr);

        kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);
/* Find table by name, grabs mutex & ref. Returns NULL on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
                                    const char *name)
{
        struct xt_table *t;

        mutex_lock(&xt[af].mutex);
        list_for_each_entry(t, &net->xt.tables[af], list)
                if (strcmp(t->name, name) == 0 && try_module_get(t->me))
                        return t;
        mutex_unlock(&xt[af].mutex);
        return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
        mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
        mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
        mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
        unsigned int size;
        int cpu;

        i->stackptr = alloc_percpu(unsigned int);
        if (i->stackptr == NULL)
                return -ENOMEM;

        size = sizeof(void **) * nr_cpu_ids;
        if (size > PAGE_SIZE)
                i->jumpstack = vzalloc(size);
        else
                i->jumpstack = kzalloc(size, GFP_KERNEL);
        if (i->jumpstack == NULL)
                return -ENOMEM;

        i->stacksize *= xt_jumpstack_multiplier;
        size = sizeof(void *) * i->stacksize;
        for_each_possible_cpu(cpu) {
                if (size > PAGE_SIZE)
                        i->jumpstack[cpu] = vmalloc_node(size,
                                                         cpu_to_node(cpu));
                else
                        i->jumpstack[cpu] = kmalloc_node(size,
                                                         GFP_KERNEL,
                                                         cpu_to_node(cpu));
                if (i->jumpstack[cpu] == NULL)
                        /*
                         * Freeing will be done later on by the callers. The
                         * chain is: xt_replace_table -> __do_replace ->
                         * do_replace -> xt_free_table_info.
                         */
                        return -ENOMEM;
        }

        return 0;
}
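
/*
 * Sizing note with a worked example (illustrative, not from the original
 * file): a table whose caller computed a base stacksize of 8 ends up with
 * 8 * xt_jumpstack_multiplier == 16 slots of sizeof(void *) per possible
 * CPU, i.e. 128 bytes per CPU on a 64-bit machine; that stays well under
 * PAGE_SIZE, so the kmalloc_node() branch is taken rather than
 * vmalloc_node().
 */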
struct xt_table_info *
xt_replace_table(struct xt_table *table,
                 unsigned int num_counters,
                 struct xt_table_info *newinfo,
                 int *error)
{
        struct xt_table_info *private;
        int ret;

        ret = xt_jumpstack_alloc(newinfo);
        if (ret < 0) {
                *error = ret;
                return NULL;
        }

        /* Do the substitution. */
        local_bh_disable();
        private = table->private;

        /* Check inside lock: is the old number correct? */
        if (num_counters != private->number) {
                pr_debug("num_counters != table->private->number (%u/%u)\n",
                         num_counters, private->number);
                local_bh_enable();
                *error = -EAGAIN;
                return NULL;
        }

        newinfo->initial_entries = private->initial_entries;
        /*
         * Ensure contents of newinfo are visible before assigning to
         * private.
         */
        smp_wmb();
        table->private = newinfo;

        /*
         * Even though table entries have now been swapped, other CPUs
         * may still be using the old entries. This is okay, because
         * resynchronization happens because of the locking done
         * during the get_counters() routine.
         */
        local_bh_enable();

#ifdef CONFIG_AUDIT
        if (audit_enabled) {
                struct audit_buffer *ab;

                ab = audit_log_start(current->audit_context, GFP_KERNEL,
                                     AUDIT_NETFILTER_CFG);
                if (ab) {
                        audit_log_format(ab, "table=%s family=%u entries=%u",
                                         table->name, table->af,
                                         private->number);
                        audit_log_end(ab);
                }
        }
#endif

        return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
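
/*
 * Illustrative sketch of the caller side (assumption, not from the
 * original file): __do_replace() in {ip,ip6,arp}_tables uses the returned
 * old table info to harvest counters from the retired ruleset before
 * freeing it, roughly:
 *
 *	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
 *	if (!oldinfo)
 *		goto put_module;	// ret holds -EAGAIN, -ENOMEM, ...
 *	get_counters(oldinfo, counters);
 *	xt_free_table_info(oldinfo);
 */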
struct xt_table *xt_register_table(struct net *net,
                                   const struct xt_table *input_table,
                                   struct xt_table_info *bootstrap,
                                   struct xt_table_info *newinfo)
{
        int ret;
        struct xt_table_info *private;
        struct xt_table *t, *table;

        /* Don't add one object to multiple lists. */
        table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
        if (!table) {
                ret = -ENOMEM;
                goto out;
        }

        mutex_lock(&xt[table->af].mutex);
        /* Don't autoload: we'd eat our tail... */
        list_for_each_entry(t, &net->xt.tables[table->af], list) {
                if (strcmp(t->name, table->name) == 0) {
                        ret = -EEXIST;
                        goto unlock;
                }
        }

        /* Simplifies replace_table code. */
        table->private = bootstrap;

        if (!xt_replace_table(table, 0, newinfo, &ret))
                goto unlock;

        private = table->private;
        pr_debug("table->private->number = %u\n", private->number);

        /* save number of initial entries */
        private->initial_entries = private->number;

        list_add(&table->list, &net->xt.tables[table->af]);
        mutex_unlock(&xt[table->af].mutex);
        return table;

unlock:
        mutex_unlock(&xt[table->af].mutex);
        kfree(table);
out:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
        struct xt_table_info *private;

        mutex_lock(&xt[table->af].mutex);
        private = table->private;
        list_del(&table->list);
        mutex_unlock(&xt[table->af].mutex);
        kfree(table);

        return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
#ifdef CONFIG_PROC_FS
struct xt_names_priv {
        struct seq_net_private p;
        u_int8_t af;
};

static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct xt_names_priv *priv = seq->private;
        struct net *net = seq_file_net(seq);
        u_int8_t af = priv->af;

        mutex_lock(&xt[af].mutex);
        return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct xt_names_priv *priv = seq->private;
        struct net *net = seq_file_net(seq);
        u_int8_t af = priv->af;

        return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
        struct xt_names_priv *priv = seq->private;
        u_int8_t af = priv->af;

        mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
        struct xt_table *table = list_entry(v, struct xt_table, list);

        if (strlen(table->name))
                return seq_printf(seq, "%s\n", table->name);
        else
                return 0;
}

static const struct seq_operations xt_table_seq_ops = {
        .start  = xt_table_seq_start,
        .next   = xt_table_seq_next,
        .stop   = xt_table_seq_stop,
        .show   = xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
        int ret;
        struct xt_names_priv *priv;

        ret = seq_open_net(inode, file, &xt_table_seq_ops,
                           sizeof(struct xt_names_priv));
        if (!ret) {
                priv = ((struct seq_file *)file->private_data)->private;
                priv->af = (unsigned long)PDE_DATA(inode);
        }
        return ret;
}

static const struct file_operations xt_table_ops = {
        .owner   = THIS_MODULE,
        .open    = xt_table_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
/*
 * Traverse state for ip{,6}_{tables,matches}, to help cross
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
        struct list_head *head, *curr;
        uint8_t class, nfproto;
};

enum {
        MTTG_TRAV_INIT,
        MTTG_TRAV_NFP_UNSPEC,
        MTTG_TRAV_NFP_SPEC,
        MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
                              bool is_target)
{
        static const uint8_t next_class[] = {
                [MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
                [MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
        };
        struct nf_mttg_trav *trav = seq->private;

        switch (trav->class) {
        case MTTG_TRAV_INIT:
                trav->class = MTTG_TRAV_NFP_UNSPEC;
                mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
                trav->head = trav->curr = is_target ?
                        &xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
                break;
        case MTTG_TRAV_NFP_UNSPEC:
                trav->curr = trav->curr->next;
                if (trav->curr != trav->head)
                        break;
                mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
                mutex_lock(&xt[trav->nfproto].mutex);
                trav->head = trav->curr = is_target ?
                        &xt[trav->nfproto].target : &xt[trav->nfproto].match;
                trav->class = next_class[trav->class];
                break;
        case MTTG_TRAV_NFP_SPEC:
                trav->curr = trav->curr->next;
                if (trav->curr != trav->head)
                        break;
                /* fall through, _stop will unlock */
        default:
                return NULL;
        }

        if (ppos != NULL)
                ++*ppos;
        return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
                               bool is_target)
{
        struct nf_mttg_trav *trav = seq->private;
        unsigned int j;

        trav->class = MTTG_TRAV_INIT;
        for (j = 0; j < *pos; ++j)
                if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
                        return NULL;
        return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
        struct nf_mttg_trav *trav = seq->private;

        switch (trav->class) {
        case MTTG_TRAV_NFP_UNSPEC:
                mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
                break;
        case MTTG_TRAV_NFP_SPEC:
                mutex_unlock(&xt[trav->nfproto].mutex);
                break;
        }
}
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
        return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
        return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
        const struct nf_mttg_trav *trav = seq->private;
        const struct xt_match *match;

        switch (trav->class) {
        case MTTG_TRAV_NFP_UNSPEC:
        case MTTG_TRAV_NFP_SPEC:
                if (trav->curr == trav->head)
                        return 0;
                match = list_entry(trav->curr, struct xt_match, list);
                return (*match->name == '\0') ? 0 :
                       seq_printf(seq, "%s\n", match->name);
        }
        return 0;
}

static const struct seq_operations xt_match_seq_ops = {
        .start  = xt_match_seq_start,
        .next   = xt_match_seq_next,
        .stop   = xt_mttg_seq_stop,
        .show   = xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
        struct nf_mttg_trav *trav;

        trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
        if (!trav)
                return -ENOMEM;

        trav->nfproto = (unsigned long)PDE_DATA(inode);
        return 0;
}

static const struct file_operations xt_match_ops = {
        .owner   = THIS_MODULE,
        .open    = xt_match_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
        return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
        return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
        const struct nf_mttg_trav *trav = seq->private;
        const struct xt_target *target;

        switch (trav->class) {
        case MTTG_TRAV_NFP_UNSPEC:
        case MTTG_TRAV_NFP_SPEC:
                if (trav->curr == trav->head)
                        return 0;
                target = list_entry(trav->curr, struct xt_target, list);
                return (*target->name == '\0') ? 0 :
                       seq_printf(seq, "%s\n", target->name);
        }
        return 0;
}

static const struct seq_operations xt_target_seq_ops = {
        .start  = xt_target_seq_start,
        .next   = xt_target_seq_next,
        .stop   = xt_mttg_seq_stop,
        .show   = xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
        struct nf_mttg_trav *trav;

        trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
        if (!trav)
                return -ENOMEM;

        trav->nfproto = (unsigned long)PDE_DATA(inode);
        return 0;
}

static const struct file_operations xt_target_ops = {
        .owner   = THIS_MODULE,
        .open    = xt_target_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_private,
};
#define FORMAT_TABLES   "_tables_names"
#define FORMAT_MATCHES  "_tables_matches"
#define FORMAT_TARGETS  "_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_link - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will take care of creating and registering the necessary
 * Netfilter hooks for XT tables.
 */
struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
{
        unsigned int hook_mask = table->valid_hooks;
        uint8_t i, num_hooks = hweight32(hook_mask);
        uint8_t hooknum;
        struct nf_hook_ops *ops;
        int ret;

        ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
        if (ops == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
             hook_mask >>= 1, ++hooknum) {
                if (!(hook_mask & 1))
                        continue;
                ops[i].hook     = fn;
                ops[i].owner    = table->me;
                ops[i].pf       = table->af;
                ops[i].hooknum  = hooknum;
                ops[i].priority = table->priority;
                ++i;
        }

        ret = nf_register_hooks(ops, num_hooks);
        if (ret < 0) {
                kfree(ops);
                return ERR_PTR(ret);
        }

        return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_link);
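
/*
 * Illustrative sketch (assumption, not from the original file): a table
 * module such as iptable_filter wires its hook function up roughly like
 * this during module init, and tears it down with xt_hook_unlink():
 *
 *	ops = xt_hook_link(&packet_filter, iptable_filter_hook);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	...
 *	xt_hook_unlink(&packet_filter, ops);	// on module exit
 */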
/**
 * xt_hook_unlink - remove hooks for a table
 * @table:	table whose valid_hooks mask was used for xt_hook_link
 * @ops:	nf_hook_ops array as returned by xt_hook_link
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
        nf_unregister_hooks(ops, hweight32(table->valid_hooks));
        kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
        char buf[XT_FUNCTION_MAXNAMELEN];
        struct proc_dir_entry *proc;
#endif

        if (af >= ARRAY_SIZE(xt_prefix))
                return -EINVAL;

#ifdef CONFIG_PROC_FS
        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TABLES, sizeof(buf));
        proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
                                (void *)(unsigned long)af);
        if (!proc)
                goto out;

        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
        proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
                                (void *)(unsigned long)af);
        if (!proc)
                goto out_remove_tables;

        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TARGETS, sizeof(buf));
        proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
                                (void *)(unsigned long)af);
        if (!proc)
                goto out_remove_matches;
#endif

        return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
        remove_proc_entry(buf, net->proc_net);

out_remove_tables:
        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TABLES, sizeof(buf));
        remove_proc_entry(buf, net->proc_net);
out:
        return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
        char buf[XT_FUNCTION_MAXNAMELEN];

        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TABLES, sizeof(buf));
        remove_proc_entry(buf, net->proc_net);

        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_TARGETS, sizeof(buf));
        remove_proc_entry(buf, net->proc_net);

        strlcpy(buf, xt_prefix[af], sizeof(buf));
        strlcat(buf, FORMAT_MATCHES, sizeof(buf));
        remove_proc_entry(buf, net->proc_net);
#endif /* CONFIG_PROC_FS */
}
EXPORT_SYMBOL_GPL(xt_proto_fini);
static int __net_init xt_net_init(struct net *net)
{
        int i;

        for (i = 0; i < NFPROTO_NUMPROTO; i++)
                INIT_LIST_HEAD(&net->xt.tables[i]);
        return 0;
}

static struct pernet_operations xt_net_ops = {
        .init = xt_net_init,
};

static int __init xt_init(void)
{
        unsigned int i;
        int rv;

        for_each_possible_cpu(i) {
                seqcount_init(&per_cpu(xt_recseq, i));
        }

        xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
        if (!xt)
                return -ENOMEM;

        for (i = 0; i < NFPROTO_NUMPROTO; i++) {
                mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
                mutex_init(&xt[i].compat_mutex);
                xt[i].compat_tab = NULL;
#endif
                INIT_LIST_HEAD(&xt[i].target);
                INIT_LIST_HEAD(&xt[i].match);
        }
        rv = register_pernet_subsys(&xt_net_ops);
        if (rv < 0)
                kfree(xt);
        return rv;
}

static void __exit xt_fini(void)
{
        unregister_pernet_subsys(&xt_net_ops);
        kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);