/*
 * netfilter_bridge.h — bridge netfilter (brnf) helper definitions:
 * hook priorities, per-skb bridge-netfilter flags, and inline helpers
 * used by the bridge forwarding/device paths.
 */
#ifndef __LINUX_BRIDGE_NETFILTER_H
#define __LINUX_BRIDGE_NETFILTER_H

#include <uapi/linux/netfilter_bridge.h>

/*
 * Priorities for hooks registered on the bridge netfilter chains.
 * Lower values run earlier.  DNAT of bridged traffic runs before
 * filtering, which runs before the brnf glue at priority 0; the
 * "OTHER" (non-bridged) variants and SNAT run after it.
 */
enum nf_br_hook_priorities {
	NF_BR_PRI_FIRST = INT_MIN,
	NF_BR_PRI_NAT_DST_BRIDGED = -300,	/* destination NAT, bridged traffic */
	NF_BR_PRI_FILTER_BRIDGED = -200,	/* filtering, bridged traffic */
	NF_BR_PRI_BRNF = 0,			/* bridge netfilter glue */
	NF_BR_PRI_NAT_DST_OTHER = 100,		/* destination NAT, other traffic */
	NF_BR_PRI_FILTER_OTHER = 200,		/* filtering, other traffic */
	NF_BR_PRI_NAT_SRC = 300,		/* source NAT */
	NF_BR_PRI_LAST = INT_MAX,
};
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)

/* Flag bits kept in skb->nf_bridge->mask. */
#define BRNF_PKT_TYPE 0x01
#define BRNF_BRIDGED_DNAT 0x02			/* bridged frame was DNATed */
#define BRNF_BRIDGED 0x04			/* frame is being bridged */
#define BRNF_NF_BRIDGE_PREROUTING 0x08
#define BRNF_8021Q 0x10				/* frame had an 802.1Q (VLAN) header */
#define BRNF_PPPoE 0x20				/* frame had a PPPoE session header */
  21. static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
  22. {
  23. switch (skb->protocol) {
  24. case __cpu_to_be16(ETH_P_8021Q):
  25. return VLAN_HLEN;
  26. case __cpu_to_be16(ETH_P_PPP_SES):
  27. return PPPOE_SES_HLEN;
  28. default:
  29. return 0;
  30. }
  31. }
  32. static inline void nf_bridge_update_protocol(struct sk_buff *skb)
  33. {
  34. if (skb->nf_bridge->mask & BRNF_8021Q)
  35. skb->protocol = htons(ETH_P_8021Q);
  36. else if (skb->nf_bridge->mask & BRNF_PPPoE)
  37. skb->protocol = htons(ETH_P_PPP_SES);
  38. }
  39. /* Fill in the header for fragmented IP packets handled by
  40. * the IPv4 connection tracking code.
  41. *
  42. * Only used in br_forward.c
  43. */
  44. static inline int nf_bridge_copy_header(struct sk_buff *skb)
  45. {
  46. int err;
  47. unsigned int header_size;
  48. nf_bridge_update_protocol(skb);
  49. header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
  50. err = skb_cow_head(skb, header_size);
  51. if (err)
  52. return err;
  53. skb_copy_to_linear_data_offset(skb, -header_size,
  54. skb->nf_bridge->data, header_size);
  55. __skb_push(skb, nf_bridge_encap_header_len(skb));
  56. return 0;
  57. }
  58. static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
  59. {
  60. if (skb->nf_bridge &&
  61. skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
  62. return nf_bridge_copy_header(skb);
  63. return 0;
  64. }
  65. static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
  66. {
  67. if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
  68. return PPPOE_SES_HLEN;
  69. return 0;
  70. }
/* Continues bridge input processing for skb; implemented in the bridge core. */
int br_handle_frame_finish(struct sk_buff *skb);
  72. /* Only used in br_device.c */
  73. static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
  74. {
  75. struct nf_bridge_info *nf_bridge = skb->nf_bridge;
  76. skb_pull(skb, ETH_HLEN);
  77. nf_bridge->mask ^= BRNF_BRIDGED_DNAT;
  78. skb_copy_to_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
  79. skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
  80. skb->dev = nf_bridge->physindev;
  81. return br_handle_frame_finish(skb);
  82. }
  83. /* This is called by the IP fragmenting code and it ensures there is
  84. * enough room for the encapsulating header (if there is one). */
  85. static inline unsigned int nf_bridge_pad(const struct sk_buff *skb)
  86. {
  87. if (skb->nf_bridge)
  88. return nf_bridge_encap_header_len(skb);
  89. return 0;
  90. }
/* Bridge-private per-skb control block. */
struct bridge_skb_cb {
	union {
		__be32 ipv4;	/* destination address, IPv4 form (big-endian) */
	} daddr;
};
  96. static inline void br_drop_fake_rtable(struct sk_buff *skb)
  97. {
  98. struct dst_entry *dst = skb_dst(skb);
  99. if (dst && (dst->flags & DST_FAKE_RTABLE))
  100. skb_dst_drop(skb);
  101. }
#else
/* CONFIG_BRIDGE_NETFILTER disabled: provide no-op fallbacks so callers
 * compile unchanged. */
#define nf_bridge_maybe_copy_header(skb) (0)
#define nf_bridge_pad(skb) (0)
#define br_drop_fake_rtable(skb) do { } while (0)
#endif /* CONFIG_BRIDGE_NETFILTER */

#endif