nf_queue.c

/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
static const struct nf_queue_handler __rcu *queue_handler __read_mostly;

/* Registers the single queueing backend; only one handler may be
 * registered at a time.
 */
void nf_register_queue_handler(const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(queue_handler));
	rcu_assign_pointer(queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
{
	RCU_INIT_POINTER(queue_handler, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
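
/*
 * Illustrative sketch (not part of this file): a queueing backend such
 * as nfnetlink_queue registers itself roughly like this. The callback
 * names here are placeholders, assuming the backend supplies an outfn
 * to enqueue entries and an nf_hook_drop to flush them:
 *
 *	static const struct nf_queue_handler my_qh = {
 *		.outfn		= my_enqueue_packet,
 *		.nf_hook_drop	= my_hook_drop,
 *	};
 *
 *	nf_register_queue_handler(&my_qh);
 *	...
 *	nf_unregister_queue_handler();	// only after flushing the queue
 */
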
void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	/* Release those devices we held, or Alexey will kill me. */
	if (entry->indev)
		dev_put(entry->indev);
	if (entry->outdev)
		dev_put(entry->outdev);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;

		if (nf_bridge->physindev)
			dev_put(nf_bridge->physindev);
		if (nf_bridge->physoutdev)
			dev_put(nf_bridge->physoutdev);
	}
#endif
	/* Drop reference to owner of hook which queued us. */
	module_put(entry->elem->owner);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	if (!try_module_get(entry->elem->owner))
		return false;

	if (entry->indev)
		dev_hold(entry->indev);
	if (entry->outdev)
		dev_hold(entry->outdev);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
		struct net_device *physdev;

		physdev = nf_bridge->physindev;
		if (physdev)
			dev_hold(physdev);
		physdev = nf_bridge->physoutdev;
		if (physdev)
			dev_hold(physdev);
	}
#endif
	return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
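
/*
 * Illustrative sketch (not part of this file): every successful
 * nf_queue_entry_get_refs() must eventually be balanced by
 * nf_queue_entry_release_refs(), whether the entry is delivered or the
 * backend rejects it. deliver_to_backend() is a placeholder for the
 * handler's outfn:
 *
 *	if (!nf_queue_entry_get_refs(entry))
 *		return -ECANCELED;		// hook owner is going away
 *	err = deliver_to_backend(entry);
 *	if (err < 0)
 *		nf_queue_entry_release_refs(entry);
 */
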
void nf_queue_nf_hook_drop(struct nf_hook_ops *ops)
{
	const struct nf_queue_handler *qh;
	struct net *net;

	rtnl_lock();
	rcu_read_lock();
	qh = rcu_dereference(queue_handler);
	if (qh) {
		for_each_net(net) {
			qh->nf_hook_drop(net, ops);
		}
	}
	rcu_read_unlock();
	rtnl_unlock();
}
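
/*
 * Illustrative sketch (not part of this file): the backend's
 * nf_hook_drop callback is expected to drop any entries still queued
 * from the hook being unregistered. Assuming a hypothetical per-net
 * pending list my_pending(net), that looks roughly like:
 *
 *	static void my_hook_drop(struct net *net, struct nf_hook_ops *ops)
 *	{
 *		struct nf_queue_entry *entry, *next;
 *
 *		list_for_each_entry_safe(entry, next, &my_pending(net), list) {
 *			if (entry->elem != ops)
 *				continue;
 *			list_del(&entry->list);
 *			nf_queue_entry_release_refs(entry);
 *			kfree_skb(entry->skb);
 *			kfree(entry);
 *		}
 *	}
 */
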
/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
int nf_queue(struct sk_buff *skb,
	     struct nf_hook_ops *elem,
	     u_int8_t pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	rcu_read_lock();

	qh = rcu_dereference(queue_handler);
	if (!qh) {
		status = -ESRCH;
		goto err_unlock;
	}

	afinfo = nf_get_afinfo(pf);
	if (!afinfo)
		goto err_unlock;

	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err_unlock;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.elem	= elem,
		.pf	= pf,
		.hook	= hook,
		.indev	= indev,
		.outdev	= outdev,
		.okfn	= okfn,
		.size	= sizeof(*entry) + afinfo->route_key_size,
	};

	if (!nf_queue_entry_get_refs(entry)) {
		status = -ECANCELED;
		goto err_unlock;
	}
	skb_dst_force(skb);
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	rcu_read_unlock();

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err_unlock:
	rcu_read_unlock();
err:
	kfree(entry);
	return status;
}
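
/*
 * Illustrative sketch (not part of this file): a netfilter hook sends a
 * packet down this path by returning an NF_QUEUE verdict with the queue
 * number (and, optionally, the bypass flag) encoded in it. The hook
 * name is a placeholder; the signature assumes a kernel of this era:
 *
 *	static unsigned int my_hook(const struct nf_hook_ops *ops,
 *				    struct sk_buff *skb,
 *				    const struct nf_hook_state *state)
 *	{
 *		// queue to queue 3; continue traversal instead of
 *		// dropping if no userspace listener is attached
 *		return NF_QUEUE_NR(3) | NF_VERDICT_FLAG_QUEUE_BYPASS;
 *	}
 */
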
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct sk_buff *skb = entry->skb;
	struct nf_hook_ops *elem = entry->elem;
	const struct nf_afinfo *afinfo;
	int err;

	rcu_read_lock();

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->pf);
		if (!afinfo || afinfo->reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
				     skb, entry->hook,
				     entry->indev, entry->outdev, &elem,
				     entry->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->okfn(skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = nf_queue(skb, elem, entry->pf, entry->hook,
			       entry->indev, entry->outdev, entry->okfn,
			       verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			if (err == -ECANCELED)
				goto next_hook;
			if (err == -ESRCH &&
			    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}

	rcu_read_unlock();
	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
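
/*
 * Illustrative sketch (not part of this file): the verdicts handled
 * above ultimately come from userspace, e.g. through a
 * libnetfilter_queue callback such as:
 *
 *	static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
 *		      struct nfq_data *nfa, void *data)
 *	{
 *		u_int32_t id = 0;
 *		struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
 *
 *		if (ph)
 *			id = ntohl(ph->packet_id);
 *		// NF_ACCEPT resumes hook traversal, NF_DROP frees the skb,
 *		// NF_REPEAT re-runs the hook that queued the packet.
 *		return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
 *	}
 */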