小公司研发总监,既当司令也当兵!
分类: linux
2015-05-20 11:42:04
本文结合网络上关于linux网桥的说明、 linux平台的代码阅读记录,整理的一篇总结性文档。由于时间仓促,分析可能存在不足之处,望大家见谅和指正。
对于接触过linux 网络的童鞋,对网桥功能应该不陌生。概括来说,网桥实现最重要的两点:
1. mac学习:学习mac地址,起初,网桥是没有任何地址与端口的对应关系的,它转发数据时只能像hub一样向所有端口广播;但是每转发一个数据包,它都会关心数据包的来源mac是从自己的哪个端口来的,通过学习,建立地址-端口的对照表(cam表)。
2. 报文转发:每发送一个数据包,网桥都会提取其目的mac地址,从自己的地址-端口对照表(cam表)中查找由哪个端口把数据包发送出去。
本文目的让读者对linux网桥有个全面的认识。着重讲述linux网桥的定义、网桥管理、数据流程和端口-mac映射管理,以及网桥的netfilter。关于网桥的stp,由于ap121上,网桥并没启用stp,所以这部分不做详细介绍,只在文档后面做一个简单介绍。
在linux里面使用网桥非常简单,仅需要做两件事情就可以配置了。其一是在编译内核里把CONFIG_BRIDGE或CONFIG_BRIDGE_MODULE编译选项打开;其二是安装brctl工具。第一步是使内核协议栈支持网桥,第二步是安装用户空间工具,通过一系列的ioctl调用来配置网桥。在我们开发过程中,常见的几条命令:
brctl addbr br0 (建立一个网桥br0, 同时在linux内核里面创建虚拟网卡br0)
brctl addif br0 eth0
brctl addif br0 ath0
brctl addif br0 ath1 (分别为网桥br0添加接口eth0, ath0和ath1)
本章我们的目的就是弄清楚以上几条命令在内核中是如何实现、生效的。
按照惯例,先熟悉一下网桥相关的重要数据结构体定义,方便后续讲解。和网桥息息相关的几个结构体包括:网桥自身定义(net_bridge)、网桥端口(net_bridge_port)、网桥端口-mac映射表项(net_bridge_fdb_entry)等。另外,网桥本身也是一个虚拟的网卡设备(net_device)。net_device是一个庞大的结构体,我们在这里就不展现了。关于net_device详细介绍请参考《linux设备驱动程序》网络驱动程序章节, net_device的详细介绍。下面我们介绍网桥、端口、端口-mac映射表项的数据结构。
网桥定义:
struct net_bridge
{
//自旋锁
spinlock_t lock;
//网桥所有端口的链表,其中每个元素都是一个net_bridge_port结构。
struct list_head port_list;
//网桥会建立一个虚拟设备来进行管理,这个设备的mac地址是动态指定的,通常就是桥组中一个物理端口的mac地址
struct net_device *dev;
//这个锁是用来保护下面的那个hash链表。
spinlock_t hash_lock;
//保存forwarding database的一个hash链表(这个也就是地址学习的东东,所以通过hash能快速定位),这里每个元素都是一个net_bridge_fsb_entry结构
struct hlist_head hash[br_hash_size];
//这个结构没有被使用
struct list_head age_list;
unsigned long feature_mask;
#ifdef config_bridge_netfilter
struct rtable fake_rtable;
#endif
unsigned long flags;
#define br_set_mac_addr 0x00000001
//stp相关的一些东西
bridge_id designated_root;
bridge_id bridge_id;
u32 root_path_cost;
unsigned long max_age;
unsigned long hello_time;
unsigned long forward_delay;
unsigned long bridge_max_age;
unsigned long ageing_time;
unsigned long bridge_hello_time;
unsigned long bridge_forward_delay;
u8 group_addr[eth_alen];
u16 root_port;
//stp当前使用的协议
enum {
br_no_stp, /* no spanning tree */
br_kernel_stp, /* old stp in kernel */
br_user_stp, /* new rstp in userspace */
} stp_enabled;
unsigned char topology_change;
unsigned char topology_change_detected;
//stp要用的一些定时器列表
struct timer_list hello_timer;
struct timer_list tcn_timer;
struct timer_list topology_change_timer;
struct timer_list gc_timer;
struct kobject *ifobj;
};
网桥端口数据结构体:
struct net_bridge_port
{
/* Bridge this port belongs to. */
struct net_bridge *br;
/* The physical device enslaved to this port. */
struct net_device *dev;
/* Linkage in the owning bridge's port_list. */
struct list_head list;
/* STP parameters for this port. */
u8 priority;
u8 state;
u16 port_no;
unsigned char topology_change_ack;
unsigned char config_pending;
port_id port_id;
port_id designated_port;
bridge_id designated_root;
bridge_id designated_bridge;
u32 path_cost;
u32 designated_cost;
/* Per-port STP timeout timers. */
struct timer_list forward_delay_timer;
struct timer_list hold_timer;
struct timer_list message_age_timer;
struct kobject kobj;
struct rcu_head rcu;
};
网桥端口-mac映射表项:
struct net_bridge_fdb_entry
{
/* Linkage in the FDB (CAM table) hash bucket. */
struct hlist_node hlist;
/* Port this MAC was learned on (the two key fields are dst and addr). */
struct net_bridge_port *dst;
/* Used for RCU-deferred freeing of the entry. */
struct rcu_head rcu;
/* Reference count. */
atomic_t use_count;
unsigned long ageing_timer;
/* The learned MAC address. */
mac_addr addr;
/* Non-zero if this is one of the bridge's own local addresses. */
unsigned char is_local;
/* Non-zero for static (non-aging) entries. */
unsigned char is_static;
};
关于net_bridge、 net_bridge_port、net_bridge_fdb_entry它们之间的关系可以使用如下图的示意图表示:
重要数据结构关系示意图
网桥在内核中,被实现为一个内核模块,源代码在~/1xu/ap121/linux/kernels/mips-linux-2.6.31/net/bridge/br.c中。初始化方法br_init:
static int __init br_init(void)
{
	int err;

	/* Register the STP protocol handler. */
	err = stp_proto_register(&br_stp_proto);
	if (err < 0) {
		printk(KERN_ERR "bridge: can't register sap for STP\n");
		return err;
	}

	/* Initialize the forwarding database (CAM table) slab cache. */
	err = br_fdb_init();
	if (err)
		goto err_out;

	/* Register the bridge netfilter hooks. */
	err = br_netfilter_init();
	if (err)
		goto err_out1;

	/* Subscribe to netdevice events (device up/down/unregister...). */
	err = register_netdevice_notifier(&br_device_notifier);
	if (err)
		goto err_out2;

	err = br_netlink_init();
	if (err)
		goto err_out3;

	/* Install the deviceless ioctl entry point used by brctl. */
	brioctl_set(br_ioctl_deviceless_stub);
	/* Hook bridge frame processing into the generic RX path. */
	br_handle_frame_hook = br_handle_frame;
	/* Export FDB lookup helpers. */
	br_fdb_get_hook = br_fdb_get;
	br_fdb_put_hook = br_fdb_put;
	return 0;

	/* error unwinding (labels err_out3..err_out) omitted in this excerpt */
	return err;
}
网桥内核模块初始化后,并没有真正的一个网桥设备被实例化,它只是搭建好了运行环境。要网桥真正的运作,还需要从创建一个网桥设备开始。
接上文,在网桥初始化的时候,设置了网桥的ioctl接口:br_ioctl_deviceless_stub。下面看看br_ioctl_deviceless_stub的实现:
/* ioctl entry point available before any bridge device exists; handles
 * bridge creation/deletion and the legacy deviceless commands. */
int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *uarg)
{
	switch (cmd) {
	case SIOCGIFBR:
	case SIOCSIFBR:
		return old_deviceless(net, uarg);

	case SIOCBRADDBR:
	case SIOCBRDELBR:
	{
		char buf[IFNAMSIZ];

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(buf, uarg, IFNAMSIZ))
			return -EFAULT;

		buf[IFNAMSIZ-1] = 0;	/* force NUL termination */
		if (cmd == SIOCBRADDBR)
			return br_add_bridge(net, buf);	/* create bridge */
		return br_del_bridge(net, buf);		/* destroy bridge */
	}
	}
	return -EOPNOTSUPP;
}
当我们执行brctl addbr br0时,我们传入的cmd为siocbraddbr,会转入br_add_bridge中进行:
int br_add_bridge(struct net *net, const char *name)
{
struct net_device *dev;
int ret;
// 创建一个网卡设备
dev = new_bridge_dev(net, name);
if (!dev)
return -enomem;
rtnl_lock();
// 内核确认设备名称
if (strchr(dev->name, '%')) {
ret = dev_alloc_name(dev, dev->name);
if (ret < 0)
goto out_free;
}
// 注册网卡设备
ret = register_netdevice(dev);
if (ret)
goto out_free;
//在sysfs中建立相关信息,便于查看和管理
ret = br_sysfs_addbr(dev);
if (ret)
unregister_netdevice(dev);
out:
rtnl_unlock();
return ret;
out_free:
free_netdev(dev);
goto out;
}
网桥是一个虚拟的设备,它的注册跟实际的物理网络设备注册是一样的(可以参看《linux设备驱动程序》网络驱动程序中,net_device创建和注册过程):
static struct net_device *new_bridge_dev(struct net *net, const char *name)
{
struct net_bridge *br;
struct net_device *dev;
// 创建net_device设备,执行网桥设备初始化程序:br_dev_setup
dev = alloc_netdev(sizeof(struct net_bridge), name,
br_dev_setup);
if (!dev)
return null;
// 设定net
dev_net_set(dev, net);
// net_device私有区被指定网桥,然后进行网桥相关初始化填充
br = netdev_priv(dev);
br->dev = dev;
spin_lock_init(&br->lock);
init_list_head(&br->port_list);
spin_lock_init(&br->hash_lock);
br->bridge_id.prio[0] = 0x80;
br->bridge_id.prio[1] = 0x00;
memcpy(br->group_addr, br_group_address, eth_alen);
br->feature_mask = dev->features;
br->stp_enabled = br_no_stp; // 默认不开启stp功能
br->designated_root = br->bridge_id;
br->root_path_cost = 0;
br->root_port = 0;
br->bridge_max_age = br->max_age = 20 * hz;
br->bridge_hello_time = br->hello_time = 2 * hz;
br->bridge_forward_delay = br->forward_delay = 15 * hz;
br->topology_change = 0;
br->topology_change_detected = 0;
br->ageing_time = 300 * hz;
br_netfilter_rtable_init(br);
init_list_head(&br->age_list);
br_stp_timer_init(br);
return dev;
}
更详细的,看看网桥虚拟设备初始化的细节:
void br_dev_setup(struct net_device *dev)
{
//初始化mac
random_ether_addr(dev->dev_addr);
// 网桥设备也是以太网设备,需要进行以太网部分初始化
ether_setup(dev);
dev->netdev_ops = &br_netdev_ops;
dev->destructor = free_netdev;
set_ethtool_ops(dev, &br_ethtool_ops);
dev->tx_queue_len = 0;
dev->priv_flags = iff_ebridge;
dev->features = netif_f_sg | netif_f_fraglist | netif_f_highdma |
netif_f_gso_mask | netif_f_no_csum | netif_f_lltx |
netif_f_netns_local | netif_f_gso;
}
/* ethtool operations of the bridge virtual device: generic ethtool
 * helpers plus bridge-specific offload toggles. */
static const struct ethtool_ops br_ethtool_ops = {
.get_drvinfo = br_getinfo,
.get_link = ethtool_op_get_link,
.get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = br_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = br_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = br_set_tso,
.get_ufo = ethtool_op_get_ufo,
.get_flags = ethtool_op_get_flags,
};
/* net_device operations of the bridge virtual device. */
static const struct net_device_ops br_netdev_ops = {
.ndo_open = br_dev_open, /* bring the bridge up */
.ndo_stop = br_dev_stop, /* take the bridge down */
.ndo_start_xmit = br_dev_xmit, /* transmit a frame */
.ndo_set_mac_address = br_set_mac_address, /* set the bridge MAC */
.ndo_set_multicast_list = br_dev_set_multicast_list, /* multicast list */
.ndo_change_mtu = br_change_mtu, /* change MTU */
.ndo_do_ioctl = br_dev_ioctl, /* per-device ioctl */
};
以上是创建网桥及网桥初始化的全部过程,关于网桥删除主要是上述网桥注册过程的逆过程:解除端口,清除定时器,删除sysfs设备,注销虚拟设备:
/* Tear down a bridge: detach every port, stop the GC timer, remove the
 * sysfs entries and unregister the virtual device. */
static void del_br(struct net_bridge *br)
{
struct net_bridge_port *p, *n;
/* del_nbp() unlinks the port, so iterate with the _safe variant. */
list_for_each_entry_safe(p, n, &br->port_list, list) {
del_nbp(p);
}
del_timer_sync(&br->gc_timer);
br_sysfs_delbr(br->dev);
unregister_netdevice(br->dev);
}
仅仅创建网桥,还是不够的。实际应用中的网桥需要添加实际的端口(即物理接口),比如:
brctl addif br0 eth0
应用程序在使用ioctl来为网桥增加物理接口,对应内核函数br_dev_ioctl(初始化网桥时指定的),代码和分析如下:
int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct net_bridge *br = netdev_priv(dev);
switch(cmd) {
case siocdevprivate:
return old_dev_ioctl(dev, rq, cmd);
case siocbraddif:
case siocbrdelif:
// 根据cmd类型执行添加或删除端口
return add_del_if(br, rq->ifr_ifindex, cmd == siocbraddif);
}
pr_debug("bridge does not support ioctl 0x%x\n", cmd);
return -eopnotsupp;
}
static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
{
struct net_device *dev;
int ret;
if (!capable(cap_net_admin))
return -eperm;
dev = dev_get_by_index(dev_net(br->dev), ifindex);
if (dev == null)
return -einval;
if (isadd)
ret = br_add_if(br, dev); //增加一个端口
else
ret = br_del_if(br, dev); //删除端口
dev_put(dev);
return ret;
}
事实上,增加一个端口就是实例化并填充一个net_bridge_port,并将其加入到网桥的端口记录表中:
int br_add_if(struct net_bridge *br, struct net_device *dev)
{
struct net_bridge_port *p;
int err = 0;
// 环路端口和非以太网设备不添加
if (dev->flags & iff_loopback || dev->type != arphrd_ether)
return -einval;
// 如果加入端口本身也是网桥设备,不添加
if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
return -eloop;
// 如果加入端口设备已经属于其他网桥,不添加
if (dev->br_port != null)
return -ebusy;
// new一个port
p = new_nbp(br, dev);
if (is_err(p))
return ptr_err(p);
// 设置为混杂模式
err = dev_set_promiscuity(dev, 1);
if (err)
goto put_back;
// 一些初始化
err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
sysfs_bridge_port_attr);
if (err)
goto err0;
// 将端口的mac插入到端口-mac映射表中
err = br_fdb_insert(br, p, dev->dev_addr);
if (err)
goto err1;
// 添加到sysfs文件系统中
err = br_sysfs_addif(p);
if (err)
goto err2;
rcu_assign_pointer(dev->br_port, p);
dev_disable_lro(dev);
// 添加到网桥端口记录表中
list_add_rcu(&p->list, &br->port_list);
spin_lock_bh(&br->lock);
br_stp_recalculate_bridge_id(br);
br_features_recompute(br);
if ((dev->flags & iff_up) && netif_carrier_ok(dev) &&
(br->dev->flags & iff_up))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
br_ifinfo_notify(rtm_newlink, p);
dev_set_mtu(br->dev, br_min_mtu(br));
kobject_uevent(&p->kobj, kobj_add);
return 0;
err2:
br_fdb_delete_by_port(br, p, 1);
err1:
kobject_put(&p->kobj);
err0:
dev_set_promiscuity(dev, -1);
put_back:
dev_put(dev);
kfree(p);
return err;
}
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
struct net_bridge_port *p = dev->br_port;
if (!p || p->br != br)
return -einval;
del_nbp(p);
spin_lock_bh(&br->lock);
br_stp_recalculate_bridge_id(br);
br_features_recompute(br);
spin_unlock_bh(&br->lock);
return 0;
}
在我们日常开发中,最常见的一种拓扑如下图所示:
典型拓扑
其中,dut有三个端口ath0(本地无线端口)、eth0(有线lan口)、ath1(连接远程无线端口);该三个端口通过br0网桥桥接在一起。本章着重讲述,在该拓扑下,pc1、pc2、pc3以及root-ap之间,是如何通过dut(br0)进行数据交互的。
现假设pc3向pc1发送一个数据包,数据首先会由eth0网卡接收,此后网卡向cpu发送接收中断。当cpu执行完当前指令后(如果开中断的话),马上跳到网卡的驱动程序去。eth0的网卡驱动首先生成一个skb结构,然后对以太网层进行分析,最后驱动将该skb结构放到当前cpu的输入队列中,唤醒软中断。如果没有其它中断的到来,那么软中断将调用netif_receive_skb函数。关于网卡驱动和中断响应不是本文讨论的重点,所以我们还是从netif_receive_skb说起。
/**
* netif_receive_skb - process receive buffer from network
* @skb: buffer to process
*
* netif_receive_skb() is the main receive data processing function.
* it always succeeds. the buffer may be dropped during processing
* for congestion control or by the protocol layers.
*
* this function may only be called from softirq context and interrupts
* should be enabled.
*
* return values (usually ignored):
* net_rx_success: no congestion
* net_rx_drop: packet was dropped
*/
int netif_receive_skb(struct sk_buff *skb)
{
struct packet_type *ptype, *pt_prev;
struct net_device *orig_dev;
struct net_device *null_or_orig;
int ret = net_rx_drop;
__be16 type;
if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
return net_rx_success;
/* if we've gotten here through napi, check netpoll */
#ifdef config_mapping
if (skb->dev)
#endif
if (netpoll_receive_skb(skb))
return net_rx_drop;
if (!skb->tstamp.tv64)
net_timestamp(skb);
if (!skb->iif)
skb->iif = skb->dev->ifindex;
null_or_orig = null;
orig_dev = skb->dev;
if (orig_dev->master) {
if (skb_bond_should_drop(skb))
null_or_orig = orig_dev; /* deliver only exact match */
else
skb->dev = orig_dev->master;
}
__get_cpu_var(netdev_rx_stat).total ;
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
skb->mac_len = skb->network_header - skb->mac_header;
pt_prev = null;
rcu_read_lock();
#ifdef config_net_cls_act
if (skb->tc_verd & tc_ncls) {
skb->tc_verd = clr_tc_ncls(skb->tc_verd);
goto ncls;
}
#endif
// 检查数据包是否有packet socket来接受该包(比如抓包工具),如果有则往该socket发送一份
list_for_each_entry_rcu(ptype, &ptype_all, list) {
if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
ptype->dev == orig_dev) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
}
}
#ifdef config_net_cls_act
skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
if (!skb)
goto out;
ncls:
#endif
// 尝试交由网桥处理,如果网桥处理了,返回skb=null
skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
if (!skb)
goto out;
skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
if (!skb)
goto out;
// 对数据包转到l3层处理
type = skb->protocol;
list_for_each_entry_rcu(ptype,
&ptype_base[ntohs(type) & ptype_hash_mask], list) {
if (ptype->type == type &&
(ptype->dev == null_or_orig || ptype->dev == skb->dev ||
ptype->dev == orig_dev)) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
}
}
if (pt_prev) {
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
kfree_skb(skb);
/* jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
*/
ret = net_rx_drop;
}
out:
rcu_read_unlock();
return ret;
}
总结而言,netif_receive_skb函数主要做三件事情:
1. 如果有socket需要(如抓包应用)skb,则将skb复制给他们;
2. 处理桥接,即如果开启了网桥,进行网桥处理;
3. 将skb交给网络层。
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
struct packet_type **pt_prev, int *ret,
struct net_device *orig_dev)
{
struct net_bridge_port *port;
// 如果数据包是环回包,或者数据包的产生设备不属于任何网桥,则不进行网桥处理
if (skb->pkt_type == packet_loopback ||
(port = rcu_dereference(skb->dev->br_port)) == null)
return skb;
if (*pt_prev) {
*ret = deliver_skb(skb, *pt_prev, orig_dev);
*pt_prev = null;
}
// 调用网桥处理接口,该接口在网桥初始化时被指定为br_handle_frame
return br_handle_frame_hook(port, skb);
}
/*
* called via br_handle_frame_hook.
* return null if skb is handled
* note: already called with rcu_read_lock (preempt_disabled)
*/
struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
{
// 获取数据包mac
const unsigned char *dest = eth_hdr(skb)->h_dest;
int (*rhook)(struct sk_buff *skb);
if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
goto drop;
skb = skb_share_check(skb, gfp_atomic);
if (!skb)
return null;
#ifdef config_athrs_hw_nat
skb->ath_hw_nat_fw_flags = 1;
#endif
//如果目的地址是01:80:c2:00:00:0x,则是发往stp的多播地址,此时可能需要进行stp处理
if (unlikely(is_link_local(dest))) {
/* pause frames shouldn't be passed up by driver anyway */
if (skb->protocol == htons(eth_p_pause))
goto drop;
/* if stp is turned off, then forward */
if (p->br->stp_enabled == br_no_stp && dest[5] == 0)
goto forward;
// 在老版本的网桥实现中,这里有一个分支进行stp数据包处理。在新内核版本中(2.6,新只是相对的),stp被实现为上层协议,所以会在网桥处理后,传递到上层再进行专门处理。
if (nf_hook(pf_bridge, nf_br_local_in, skb, skb->dev,
null, br_handle_local_finish)) //更新cam表
return null; /* frame consumed by filter */
else
return skb;// 由于br_handle_local_finish返回始终为0,所以return skb将继续上层处理
}
forward:
switch (p->state) {
case br_state_forwarding:
// 判断是否需要走三层进行转发,这个是broute表的执行函数
rhook = rcu_dereference(br_should_route_hook);
if (rhook != null) {
if (rhook(skb))
return skb;
dest = eth_hdr(skb)->h_dest;
}
/* 注意: fall through */
case br_state_learning:
if (!compare_ether_addr(p->br->dev->dev_addr, dest))
skb->pkt_type = packet_host;
// netfilter hook点
nf_hook(pf_bridge, nf_br_pre_routing, skb, skb->dev, null,
br_handle_frame_finish);
break;
default:
drop:
kfree_skb(skb);
}
return null;
}
int br_handle_frame_finish(struct sk_buff *skb)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
struct net_bridge *br;
struct net_bridge_fdb_entry *dst;
struct sk_buff *skb2;
// 如果网桥处于disabled状态,直接drop
if (!p || p->state == br_state_disabled)
goto drop;
/* insert into forwarding database after filtering to avoid spoofing */
// 选择端口所属的网桥(可能有多个网桥的情况)
br = p->br;
// 更新端口-mac映射表
br_fdb_update(br, p, eth_hdr(skb)->h_source);
if (p->state == br_state_learning)
goto drop;
/* the packet skb2 goes to the local host (null to skip). */
skb2 = null;
if (br->dev->flags & iff_promisc)
skb2 = skb;
dst = null;
if (is_multicast_ether_addr(dest)) {
br->dev->stats.multicast ;
skb2 = skb;
} else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
skb2 = skb;
/* do not forward the packet since it's local. */
skb = null;
}
if (skb2 == skb)
skb2 = skb_clone(skb, gfp_atomic);
if (skb2)
br_pass_frame_up(br, skb2); // 如果skb2非空,则向上传递报文
if (skb) {
if (dst)
br_forward(dst->dst, skb);
else
br_flood_forward(br, skb); // 多播或端口-mac表中无记录,需要洪泛发送(每个端口均发送)
}
out:
return 0;
drop:
kfree_skb(skb);
goto out;
}
/* Forward @skb out of port @to if should_deliver() allows it (it filters
 * the ingress port and non-forwarding ports); otherwise drop the frame. */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
if (should_deliver(to, skb)) {
__br_forward(to, skb);
return;
}
kfree_skb(skb);
}
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
struct net_device *indev;
indev = skb->dev;
skb->dev = to->dev; //替换报文中的dev为转发端口对应的dev
skb->ip_summed = checksum_none;
// netfilter
hook处理
nf_hook(pf_bridge, nf_br_forward, skb, indev, skb->dev,
br_forward_finish);
}
/* Flood @skb out of every eligible port using the forwarding hook. */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb)
{
br_flood(br, skb, __br_forward);
}
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
void (*__packet_hook)(const struct net_bridge_port *p,
struct sk_buff *skb))
{
struct net_bridge_port *p;
struct net_bridge_port *prev;
prev = null;
/* backup multicast address. by houxb, 07dec10 */
#ifdef config_tp_multicast
#define is_multicast_addr(ptr) ((ptr[0] == 0x01) && (ptr[1] == 0x00) && (ptr[2] == 0x5e) ? 1 : 0)
mac_addr multi_mac_addr;
unsigned char *pmac = multi_mac_addr.addr;
memset(pmac, 0, 6/*eth_alen*/);
if(is_multicast_addr(skb_mac_header(skb)))
{
//backup multicast address
memcpy(pmac, skb_mac_header(skb), 6/*eth_alen*/);
}
#endif
// 遍历所有端口,从每个端口发送一份出去, should_deliver会排除进来的端口
list_for_each_entry_rcu(p, &br->port_list, list) {
if (should_deliver(p, skb)) {
if (prev != null) {
struct sk_buff *skb2;
if ((skb2 = skb_clone(skb, gfp_atomic)) == null) {
br->dev->stats.tx_dropped ;
kfree_skb(skb);
return;
}
#ifdef config_tp_multicast
if(is_multicast_addr(pmac))
{
//restore multicast address
memcpy(skb_mac_header(skb), pmac, 6/*eth_alen*/);
}
#endif
__packet_hook(prev, skb2);
}
prev = p;
}
}
if (prev != null) {
#ifdef config_tp_multicast
if(is_multicast_addr(pmac))
{
//restore multicast address
memcpy(skb_mac_header(skb), pmac, 6/*eth_alen*/);
}
#endif
__packet_hook(prev, skb);
return;
}
kfree_skb(skb);
}
int br_forward_finish(struct sk_buff *skb)
{
// netfilter hook: nf_br_post_routing
return nf_hook(pf_bridge, nf_br_post_routing, skb, null, skb->dev,
br_dev_queue_push_xmit);
}
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
/* drop mtu oversized packets except gso */
if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
kfree_skb(skb);
else {
/* ip_refrag calls ip_fragment, doesn't copy the mac header. */
if (nf_bridge_maybe_copy_header(skb))
kfree_skb(skb);
else {
skb_push(skb, eth_hlen);
dev_queue_xmit(skb); // 进入驱动
}
}
return 0;
}
static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
{
struct net_device *indev, *brdev = br->dev;
// 数据统计
brdev->stats.rx_packets ;
brdev->stats.rx_bytes = skb->len;
indev = skb->dev;
// 特别注意:此处将skb的dev强制修改为网桥dev
skb->dev = brdev;
// netfilter hook :nf_br_local_in
nf_hook(pf_bridge, nf_br_local_in, skb, indev, null,
netif_receive_skb);
}
这段代码非常简单,对net_bridge的数据统计进行更新以后,强制将skb的dev修改为网桥的dev,最后通过nf_hook在nf_br_local_in挂接点上调用回了netif_receive_skb方法。
在netif_receive_skb函数中,调用了handle_bridge函数,重新触发了网桥处理流程,现在发往网桥虚拟设备的数据包又回到了netif_receive_skb,那么网桥的处理过程会不会又被调用呢?答案是否定的。回顾网桥入口函数handle_bridge方法,判断是否执行网桥处理流程的判断:
// 如果数据包是环回包,或者数据包的产生设备不属于任何网桥,则不进行网桥处理
if (skb->pkt_type == PACKET_LOOPBACK ||
    (port = rcu_dereference(skb->dev->br_port)) == NULL)
        return skb;
见上文程序段,br_pass_frame_up函数将skb->dev赋成了br->dev,实际上skb->dev变成了网桥建立的虚拟设备;这个设备是网桥本身而不是桥组的某一端口(它不属于任何网桥设备,因为前面提到过网桥不能添加一个网桥设备做端口),故而在进行网桥处理判断时,不能进入网桥处理流程 ,从而进入上层协议栈处理。
进入桥的数据报文分为几个类型,桥对应的处理方法也不同:
1、 报文是本机发送给自己的,桥不处理,交给上层协议栈;
2、 接收报文的物理接口不是网桥接口,桥不处理,交给上层协议栈;
3、 进入网桥后,如果网桥的状态为disable,则将包丢弃不处理;
4、 报文源地址无效(广播,多播,以及00:00:00:00:00:00),丢包;
5、 如果是stp的bpdu包,交给上层协议栈;
6、 如果是发给本机的报文,桥直接返回,交给上层协议栈,不转发;
7、 需要转发的报文分三种情况:
1) 广播或多播,则除接收端口外的所有端口都需要转发一份;
2) 单播并且在端口-mac映射表中能找到端口映射的,只需要往映射端口转发一份即可;
3) 单播但找不到端口映射的,则除了接收端口外其余端口都需要转发。
最后,再回顾一下网桥数据处理主要函数关系图:
图网桥处理流程示意图
众所周知,网桥需要维护一个mac地址-端口映射表,端口是指网桥自身提供的端口,而mac地址是指与端口相连的另一端主机的mac地址。当网桥收到一个报文时,先获取它的源mac,更新数据库,然后读取该报文的目标mac地址,查找该数据库,如果找到,根据找到条目的端口进行转发;否则会把数据包向除入口端口以外的所有端口转发。
通常网桥端口-mac映射表又被称为网桥转发数据库或cam,为了简化叙述,统一使用数据库代替。
数据库使用kmem_cache_create函数进行创建,使用kmem_cache_destroy进行销毁。回顾网桥初始化时,会调用br_fdb_init进行数据库初始化:
/* Create the slab cache backing forwarding-database entries. */
int __init br_fdb_init(void)
{
	br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
					 sizeof(struct net_bridge_fdb_entry),
					 0,
					 SLAB_HWCACHE_ALIGN, NULL);
	if (!br_fdb_cache)
		return -ENOMEM;

	/* Random salt for the FDB hash function. */
	get_random_bytes(&fdb_salt, sizeof(fdb_salt));
	return 0;
}
销毁:
/* Destroy the FDB entry slab cache at module unload. */
void br_fdb_fini(void)
{
kmem_cache_destroy(br_fdb_cache);
}
当网桥收到一个数据包时,它会获取该数据的源mac地址,然后对数据库进行更新。如果该mac地址不在数据库中,则创建一个新表项。如果存在,更新它的过期时间。数据库使用hash表的结构方式,便于高效查询。数据库更新函数:
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr)
{
// 使用hash算法,找到skb的mac所属的表
struct hlist_head *head = &br->hash[br_mac_hash(addr)];
struct net_bridge_fdb_entry *fdb;
/* some users want to always flood. */
if (hold_time(br) == 0)
return;
/* ignore packets unless we are using this port */
if (!(source->state == br_state_learning ||
source->state == br_state_forwarding))
return;
fdb = fdb_find(head, addr);
if (likely(fdb)) { // 如果skb的mac已经存在于数据库中,更新过期时间
/* attempt to update an entry for a local interface */
if (unlikely(fdb->is_local)) {
if (net_ratelimit())
printk(kern_warning "%s: received packet with "
"own address as source address\n",
source->dev->name);
} else {
/* fastpath: update of existing entry */
fdb->dst = source;
fdb->ageing_timer = jiffies;
}
} else { // 如果skb的mac不在数据中,则新建一条记录
spin_lock(&br->hash_lock);
if (!fdb_find(head, addr))
fdb_create(head, source, addr, 0); // 创建表项
/* else we lose race and someone else inserts
* it first, don't bother updating
*/
spin_unlock(&br->hash_lock);
}
}
见上文程序段,在更新表项的函数里,已经为mac地址算出其所属的hash链表,因此,创建函数只需要在该链上添加一个数据项即可:
static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
struct net_bridge_port *source,
const unsigned char *addr,
int is_local)
{
struct net_bridge_fdb_entry *fdb;
fdb = kmem_cache_alloc(br_fdb_cache, gfp_atomic);
if (fdb) {
memcpy(fdb->addr.addr, addr, eth_alen);
hlist_add_head_rcu(&fdb->hlist, head);
fdb->dst = source;
fdb->is_local = is_local; // 0
fdb->is_static = is_local; // 0
fdb->ageing_timer = jiffies;
}
return fdb;
}
网桥的数据项查找与一般的查找类似,但略有不同。前面提到,如果要更新一mac地址,不管该地址是否已经过期了,只需遍历该mac地址对应的hash链表,然后更新年龄,此时它肯定不过期了。但网桥要转发数据时,除了要找到该目标mac的出口端口外,还要判断该记录是否过期了。因此,数据项的查找有两种,一种用于更新,另一用于转发:
/* FDB lookup used by the learning path: matches the MAC regardless of
 * the entry's age (a hit will be refreshed by the caller anyway). */
static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
						    const unsigned char *addr)
{
	struct hlist_node *h;
	struct net_bridge_fdb_entry *fdb;

	hlist_for_each_entry_rcu(fdb, h, head, hlist) {
		if (!compare_ether_addr(fdb->addr.addr, addr))
			return fdb;
	}
	return NULL;
}
/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */
/* FDB lookup used by the forwarding path: an expired entry is treated
 * as a miss, so the caller floods the frame instead. */
struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
					  const unsigned char *addr)
{
	struct hlist_node *h;
	struct net_bridge_fdb_entry *fdb;

	hlist_for_each_entry_rcu(fdb, h, &br->hash[br_mac_hash(addr)], hlist) {
		if (!compare_ether_addr(fdb->addr.addr, addr)) {
			if (unlikely(has_expired(br, fdb)))
				break;
			return fdb;
		}
	}
	return NULL;
}
之前我们在专门讲过linux的netfilter框架(虽然当时主要是针对ip层),所以这里就不再详细讲解网桥netfilter的过程。在网桥处理逻辑中,我们已经看到了各个hook点的调用关系,和ip的netfilter是一致的。关于这部分内容,这里就不重复讲述;这里要讲的,是与ip的netfilter不同的一些东西。
直接上代码:
/* Register the bridge netfilter hooks and the brnf sysctl table. */
int __init br_netfilter_init(void)
{
	int ret;

	ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret < 0)
		return ret;
#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
		return -ENOMEM;
	}
#endif
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;
}
/* Bridge netfilter hook registrations; the two PF_INET/PF_INET6 entries
 * (ip_sabotage_in) intercept the IP PRE_ROUTING point for bridged
 * traffic that was diverted through the IP hooks. */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
	{ .hook = br_nf_pre_routing,
	  .owner = THIS_MODULE,
	  .pf = PF_BRIDGE,
	  .hooknum = NF_BR_PRE_ROUTING,
	  .priority = NF_BR_PRI_BRNF, },	/* priority 0 */
	{ .hook = br_nf_local_in,
	  .owner = THIS_MODULE,
	  .pf = PF_BRIDGE,
	  .hooknum = NF_BR_LOCAL_IN,
	  .priority = NF_BR_PRI_BRNF, },
	{ .hook = br_nf_forward_ip,
	  .owner = THIS_MODULE,
	  .pf = PF_BRIDGE,
	  .hooknum = NF_BR_FORWARD,
	  .priority = NF_BR_PRI_BRNF - 1, },	/* -1: IP runs before ARP */
	{ .hook = br_nf_forward_arp,
	  .owner = THIS_MODULE,
	  .pf = PF_BRIDGE,
	  .hooknum = NF_BR_FORWARD,
	  .priority = NF_BR_PRI_BRNF, },
	{ .hook = br_nf_local_out,
	  .owner = THIS_MODULE,
	  .pf = PF_BRIDGE,
	  .hooknum = NF_BR_LOCAL_OUT,
	  .priority = NF_BR_PRI_FIRST, },
	{ .hook = br_nf_post_routing,
	  .owner = THIS_MODULE,
	  .pf = PF_BRIDGE,
	  .hooknum = NF_BR_POST_ROUTING,
	  .priority = NF_BR_PRI_LAST, },
	{ .hook = ip_sabotage_in,
	  .owner = THIS_MODULE,
	  .pf = PF_INET,
	  .hooknum = NF_INET_PRE_ROUTING,
	  .priority = NF_IP_PRI_FIRST, },
	{ .hook = ip_sabotage_in,
	  .owner = THIS_MODULE,
	  .pf = PF_INET6,
	  .hooknum = NF_INET_PRE_ROUTING,
	  .priority = NF_IP6_PRI_FIRST, },
};
回想ip的netfilter,每个nf_hook_ops都属于某个特定的表。但bridge下的netfilter,在netfilter初始化的时候,注册了一系列nf_hook_ops,它们不属于任何表,且它们的优先级为0,默认都会被执行。这些hook函数不执行具体的匹配规则,但是会做一些特殊的处理,如调用ip层的hook。这部分功能将在后文讲述bridge与ip联动的时候讲述。
在网桥的netfilter下,内建了三张表:broute、nat和filter。其中broute主要用于判断某数据包是否应该进入网络层进行处理(跳过网桥处理)。与传统netfilter下的表注册不一样,broute注册没有注册nf_hook_ops,所以不能通过nf_hook()调用;相反其调用方式是直接通过在适当的位置调用其表执行函数。
broute表
static struct ebt_entries initial_chain = {
.name = "brouting",
.policy = ebt_accept,
};
static struct ebt_replace_kernel initial_table =
{
.name = "broute",
.valid_hooks = 1 << nf_br_brouting,
.entries_size = sizeof(struct ebt_entries),
.hook_entry = {
[nf_br_brouting] = &initial_chain,
},
.entries = (char *)&initial_chain,
};
static struct ebt_table broute_table =
{
.name = "broute",
.table = &initial_table,
.valid_hooks = 1 << nf_br_brouting, // 非传统的几处hook点,专门为brout表定义的一个假hook点
.check = check,
.me = this_module,
};
static int __init ebtable_broute_init(void)
{
int ret;
ret = register_pernet_subsys(&broute_net_ops);
if (ret < 0)
return ret;
/* see br_input.c */
/* Publish ebt_broute as br_should_route_hook, which br_handle_frame()
 * consults to decide between bridging and L3 routing. */
rcu_assign_pointer(br_should_route_hook, ebt_broute);
return 0;
}
static int ebt_broute(struct sk_buff *skb)
{
int ret;
ret = ebt_do_table(nf_br_brouting, skb, skb->dev, null,
dev_net(skb->dev)->xt.broute_table);
if (ret == nf_drop)
return 1; /* route it */
return 0; /* bridge it */
}
nat表
/* Hook registrations of the ebtables "nat" table. */
static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
	{
		.hook		= ebt_nat_out,
		.owner		= THIS_MODULE,
		.pf		= PF_BRIDGE,
		.hooknum	= NF_BR_LOCAL_OUT,
		.priority	= NF_BR_PRI_NAT_DST_OTHER,	/* 100 */
	},
	{
		.hook		= ebt_nat_out,
		.owner		= THIS_MODULE,
		.pf		= PF_BRIDGE,
		.hooknum	= NF_BR_POST_ROUTING,
		.priority	= NF_BR_PRI_NAT_SRC,		/* 300 */
	},
	{
		.hook		= ebt_nat_in,
		.owner		= THIS_MODULE,
		.pf		= PF_BRIDGE,
		.hooknum	= NF_BR_PRE_ROUTING,
		.priority	= NF_BR_PRI_NAT_DST_BRIDGED,	/* -300 */
	},
};
static struct ebt_table frame_nat =
{
.name = "nat",
.table = &initial_table,
.valid_hooks = nat_valid_hooks,
.check = check,
.me = this_module,
};
/* Register the per-netns nat table, then its netfilter hooks; unwind
 * the subsystem registration if hook registration fails. */
static int __init ebtable_nat_init(void)
{
	int ret;

	ret = register_pernet_subsys(&frame_nat_net_ops);
	if (ret < 0)
		return ret;
	ret = nf_register_hooks(ebt_ops_nat, ARRAY_SIZE(ebt_ops_nat));
	if (ret < 0)
		unregister_pernet_subsys(&frame_nat_net_ops);
	return ret;
}
nat表的注册和之前讲过的iptables相关表注册是一致的,都是初始化表结构,初始化nf_hook_ops,然后分别注册。值得注意的是,在注册nf_hook_ops的时候,各个nf_hook_ops的优先级是不一样的。优先级定义:
/* Hook priorities on the PF_BRIDGE hook points (lower runs first). */
enum nf_br_hook_priorities {
	NF_BR_PRI_FIRST			= INT_MIN,
	NF_BR_PRI_NAT_DST_BRIDGED	= -300,
	NF_BR_PRI_FILTER_BRIDGED	= -200,
	NF_BR_PRI_BRNF			= 0,
	NF_BR_PRI_NAT_DST_OTHER		= 100,
	NF_BR_PRI_FILTER_OTHER		= 200,
	NF_BR_PRI_NAT_SRC		= 300,
	NF_BR_PRI_LAST			= INT_MAX,
};
filter表
和nat表类似,不赘述。
bridge和ip在透明防火墙中是需要联动的,因为ip层可以做更多的事情。虽然,这些事情也是可以在bridge中实现的,但是模块化及kiss原则将bridge从这些复杂的事情中分割出来,仅做它自己该处理的事情;如果需要ip层帮助,则直接调用ip层的hook即可。
下面,我们通过“read the fucking source code”,了解bridge与ip层到底是如何联动的。
/* NF_HOOK runs every nf_hook_ops at the hook point: the threshold is
 * INT_MIN (the smallest integer), so no ops are filtered by priority. */
#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) \
	NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, INT_MIN)

/* When nf_hook_thresh() returns 1 (NF_ACCEPT == 1), invoke the
 * continuation okfn() on the skb.  NOTE: the article's copy had
 * comments embedded inside the backslash continuations, which would
 * break the macro; they are moved out here. */
#define NF_HOOK_THRESH(pf, hook, skb, indev, outdev, okfn, thresh)	       \
({int __ret;								       \
if ((__ret = nf_hook_thresh(pf, hook, (skb), indev, outdev, okfn, thresh, 1)) == 1)\
	__ret = (okfn)(skb);						       \
__ret;})
/* Walk the nf_hook_ops list at one hook point, skipping ops whose
 * priority is below @hook_thresh, and return the first non-ACCEPT
 * verdict (NF_REPEAT re-runs the same op). */
unsigned int nf_iterate(struct list_head *head,
			struct sk_buff *skb,
			unsigned int hook,
			const struct net_device *indev,
			const struct net_device *outdev,
			struct list_head **i,
			int (*okfn)(struct sk_buff *),
			int hook_thresh)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this
	 * function because of risk of continuing from deleted element.
	 */
	list_for_each_continue_rcu(*i, head) {
		struct nf_hook_ops *elem = (struct nf_hook_ops *)*i;

		/* Skip ops below the requested priority threshold. */
		if (hook_thresh > elem->priority)
			continue;

		/* Optimization: we don't need to hold module
		   reference here, since function can't sleep. --RR */
		verdict = elem->hook(hook, skb, indev, outdev, okfn);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			if (unlikely((verdict & NF_VERDICT_MASK)
							> NF_MAX_VERDICT)) {
				NFDEBUG("Evil return from %p(%u).\n",
					elem->hook, hook);
				continue;
			}
#endif
			if (verdict != NF_REPEAT)
				return verdict;
			*i = (*i)->prev;
		}
	}
	return NF_ACCEPT;
}
通过上述分析,如果通过NF_HOOK()进入,则在该hook点注册的所有nf_hook_ops都会被执行,毕竟NF_HOOK指定了thresh值是最小整数。相反,如果要控制thresh的值,来过滤一部分nf_hook_ops,则需要显式调用NF_HOOK_THRESH(),并指定thresh的值。
进一步分析nf_br_pre_routing这个hook点的数据流情况。回顾5.1节和5.2节,网桥在nf_br_pre_routing点上,注册了两个nf_hook_ops:
一个是默认的hook处理:
{ .hook = br_nf_pre_routing,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_BRNF, }, // 优先级为0
另一个是nat表注册的:
{
.hook = ebt_nat_in,
.owner = THIS_MODULE,
.pf = PF_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_NAT_DST_BRIDGED, //优先级 -300
},
其中,ebt_nat_in是传统netfilter表处理hook回调函数,通过调用do_tables遍历表规则,对数据处理:
/* PRE_ROUTING hook of the ebtables nat table: run the frame through the
 * per-netns "nat" table rules. */
static unsigned int
ebt_nat_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in
, const struct net_device *out, int (*okfn)(struct sk_buff *))
{
return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_nat);
}
但是,br_nf_pre_routing所做的事情却有所不同:
/*
 * br_nf_pre_routing() - bridge-netfilter hook at NF_BR_PRE_ROUTING
 * (priority 0, registered by br_netfilter).
 *
 * For IP traffic arriving on a bridge port it sanity-checks the IPv4
 * header (length, version, checksum), attaches an nf_bridge_info context
 * to the skb, and then re-injects the packet into the *IP* layer's
 * PRE_ROUTING hook chain via nf_hook(pf_inet, ...). The IP-layer chain's
 * okfn is br_nf_pre_routing_finish(), which later resumes bridge
 * processing — this is the core of the bridge/IP "iptables sees bridged
 * traffic" interlock. Returns nf_stolen because the packet's fate is now
 * owned by the IP hook traversal.
 */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
struct iphdr *iph;
__u32 len = nf_bridge_encap_header_len(skb);
if (unlikely(!pskb_may_pull(skb, len)))
goto out;
if (skb->protocol == htons(eth_p_ipv6) || is_vlan_ipv6(skb) ||
is_pppoe_ipv6(skb)) { // IPv6 frame: hand it off to the IPv6 hook path
#ifdef config_sysctl
if (!brnf_call_ip6tables)
return nf_accept;
#endif
nf_bridge_pull_encap_header_rcsum(skb);
return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
}
#ifdef config_sysctl
if (!brnf_call_iptables)
return nf_accept;
#endif
if (skb->protocol != htons(eth_p_ip) && !is_vlan_ip(skb) &&
!is_pppoe_ip(skb)) // not IP traffic: let later rules / upper layers handle it
return nf_accept;
nf_bridge_pull_encap_header_rcsum(skb);
// Validate the IPv4 header before handing the frame to the IP hooks.
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto inhdr_error;
iph = ip_hdr(skb);
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
if (!pskb_may_pull(skb, 4 * iph->ihl))
goto inhdr_error;
iph = ip_hdr(skb);
if (ip_fast_csum((__u8 *) iph, iph->ihl) != 0)
goto inhdr_error;
len = ntohs(iph->tot_len);
if (skb->len < len || len < 4 * iph->ihl)
goto inhdr_error;
pskb_trim_rcsum(skb, len);
// Attach bridge-netfilter state; remember the original destination so
// br_nf_pre_routing_finish() can detect whether DNAT happened.
nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb))
return nf_drop;
if (!setup_pre_routing(skb))
return nf_drop;
store_orig_dstaddr(skb);
// Divert into the IP layer's PRE_ROUTING chain; its okfn resumes us.
nf_hook(pf_inet, nf_inet_pre_routing, skb, skb->dev, null,
br_nf_pre_routing_finish);
return nf_stolen;
inhdr_error:
// ip_inc_stats_bh(ipinhdrerrors);  (stats update elided in the article)
out:
return nf_drop;
}
/*
 * br_nf_pre_routing_finish() - okfn invoked after the IP layer's
 * PRE_ROUTING hooks have run on a bridged frame (see br_nf_pre_routing()).
 *
 * If the IP hooks performed DNAT, the frame may now need real IP routing
 * (it could be destined to another subnet); otherwise it is given the
 * bridge device's fake rtable and bridge forwarding resumes. In both
 * resume paths the frame is re-injected at NF_BR_PRE_ROUTING with
 * thresh = 1, i.e. bridge hooks with priority >= 1 still run, while the
 * already-executed priority <= 0 hooks are skipped.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct iphdr *iph = ip_hdr(skb);
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct rtable *rt;
int err;
// Restore the packet type saved in setup_pre_routing(), if any.
if (nf_bridge->mask & brnf_pkt_type) {
skb->pkt_type = packet_otherhost;
nf_bridge->mask ^= brnf_pkt_type;
}
nf_bridge->mask ^= brnf_nf_bridge_prerouting;
if (dnat_took_place(skb)) { // DNAT happened: ask the IP layer to route it
if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
struct flowi fl = {
.nl_u = {
.ip4_u = {
.daddr = iph->daddr,
.saddr = 0,
.tos = rt_tos(iph->tos) },
},
.proto = 0,
};
struct in_device *in_dev = in_dev_get(dev);
/* if err equals -ehostunreach the error is due to a
* martian destination or due to the fact that
* forwarding is disabled. for most martian packets,
* ip_route_output_key() will fail. it won't fail for 2 types of
* martian destinations: loopback destinations and destination
* 0.0.0.0. in both cases the packet will be dropped because the
* destination is the loopback device and not the bridge. */
if (err != -ehostunreach || !in_dev || in_dev_forward(in_dev))
goto free_skb;
if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
/* - bridged-and-dnat'ed traffic doesn't
* require ip_forwarding. */
if (((struct dst_entry *)rt)->dev == dev) {
skb_dst_set(skb, (struct dst_entry *)rt);
goto bridged_dnat;
}
/* we are sure that forwarding is disabled, so printing
* this message is no problem. note that the packet could
* still have a martian destination address, in which case
* the packet could be dropped even if forwarding were enabled */
__br_dnat_complain();
dst_release((struct dst_entry *)rt);
}
free_skb:
kfree_skb(skb);
return 0;
} else {
// Routed back out of the same bridge device: keep bridging it.
if (skb_dst(skb)->dev == dev) {
bridged_dnat:
/* tell br_nf_local_out this is a
* bridged frame */
nf_bridge->mask |= brnf_bridged_dnat;
skb->dev = nf_bridge->physindev;
nf_bridge_push_encap_header(skb);
nf_hook_thresh(pf_bridge, nf_br_pre_routing,
skb, skb->dev, null,
br_nf_pre_routing_finish_bridge,
1);
return 0;
}
// Destined to another device: deliver locally via the IP stack.
memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, eth_alen);
skb->pkt_type = packet_host;
}
} else {
// No DNAT: attach the bridge's fake rtable so the IP code is happy.
rt = bridge_parent_rtable(nf_bridge->physindev);
if (!rt) {
kfree_skb(skb);
return 0;
}
dst_hold(&rt->u.dst);
skb_dst_set(skb, &rt->u.dst);
}
// Resume normal bridge handling from priority 1 onwards.
skb->dev = nf_bridge->physindev;
nf_bridge_push_encap_header(skb);
nf_hook_thresh(pf_bridge, nf_br_pre_routing, skb, skb->dev, null,
br_handle_frame_finish, 1);
return 0;
}
由5.3.1节的分析知,nf_hook_thresh()中thresh设置为1,将从优先级为1的nf_hook_ops执行;整个过程相当于,在bridge prerouting hook点上,先执行完优先级小于0的hook操作,然后转入ip层执行所有ip prerouting hook点上的hook操作,最后(依据ip层执行)再转回bridge prerouting hook点,从优先级为1处继续执行:
图 bridge 与ip联动示意图
事实上,bridge与ip的联动过程比较复杂,本文只是示例了其核心的机制,很多细节的控制并没有一一说明。关于bridge与ip的联动过程中,更多实现细节留给读者自行分析吧。
关于其它网桥hook点上,bridge与ip的联动本文将略去不再讲述,因为其原理和方法大致是一致的,只是不同的处理细节不一致。关于bridge与ip的联动全貌,可以参考帖子:
针对我司linux平台的实现,bridge与ip的联动关系如下图:
图 linux平台bridge 与ip联动
生成树协议stp(spanning tree protocol)的主要功能有两个:一是在利用生成树算法、在以太网络中,创建一个以某台交换机的某个端口为根的生成树,避免环路。二是在以太网络拓扑发生变化时,通过生成树协议达到收敛保护的目的。
stp:生成树算法。
bpdu:stp的数据单元,在网桥局域网内传递信息。
tcn:拓扑改变通知bpdu。
根网桥:具有最小网桥id的网桥被选作根网桥,网桥id应为唯一的。
根端口:在指定网桥上面,到根网桥路径花费最小的端口为根端口,如果指定网桥上面有几个端口,到根网桥路径花费一样小,那么选择端口id 最小的端口为根端口。
指定网桥:局域网通过所连的网桥,接收和发送数据帧,如果局域网有且只有一个网桥相连,那么这个网桥必定是指定网桥,如果有多个网桥跟这个局域网相连,那么到根网桥路径花费最少的那个网桥为指定网桥,如果有几个网桥到根网桥路径花费一样,那么比较网桥id,id最小的被选作为指定网桥。
指定端口:指定网桥上面和局域网相连的端口叫做指定端口,如果指定网桥上面有几个端口,同时和局域网相连,那么选择端口id 最小的端口为所在局域网的指定端口。
根路径花费:当端口为根端口时候,通过这个端口的路径花费。 对于这个网桥来说,路径费用是到根网桥的费用之和。
指定花费:当端口为所在局域网的指定端口时候,即为根路径费用,当不为指定端口时候,是所在局域网指定端口到根网桥的费用。
br_state_disabled(0):禁用状态,不参与生成树,不转发任何数据帧。
br_state_listening(1): 监听状态,能够决定根,可以选择根端口、指定端口和非指定端口。在监听状态的过程中,端口不能学习任何接收帧的单播地址。
br_state_learning(2): 学习状态,端口能学习流入帧的mac地址,不能转发帧。
br_state_forwarding(3): 转发状态,接口能够转发帧。端口学习到接收帧的源 mac地址,并可根据目标mac地址进行恰当地转发。
br_state_blocking(4):阻塞状态,不参与帧转发、监听流入的bpdu,不能学习接收帧的任何mac地址。
运行生成树算法(sta)的网桥定期发送bpdu;选取唯一一个根网桥;在每个非根网桥选取唯一一个根端口;在每网段选取唯一一个标志端口。
(1) 选取唯一一个根网桥:bpdu中包含bridge id;bridge id(8b)=优先级(2b)+交换机mac地址(6b);一些交换机的优先级默认为32768,可以修改;优先级值最小的成为根网桥;优先级值相同,mac地址最小的成为根网桥;即bridge id值最小的成为根网桥;根网桥缺省每2秒发送一次bpdu。
(2) 在每个非根网桥选取唯一一个根端口:根网桥上没有根端口;端口代价最小的成为根端口;端口代价相同,port id最小的端口成为根端口;port id通常为端口的mac地址;mac地址最小的端口成为根端口。
(3) 在每网段选取唯一一个标志端口:端口代价最小的成为标识端口;根网桥端口到各网段的代价最小;通常只有根网桥端口成为标识端口;被选定为根端口和标识端口的进行转发状态;落选端口进入阻塞状态,只侦听bpdu。
(4) 阻塞端口在指定的时间间隔(缺省20秒)收不到bpdu时,会重新运行生成树算法进行选举;缺点:在运行生成树算法的过程中,网络处于阻断状态,所有端口都不进行转发。计算过程缺省为50秒。
当网桥加电的时,网桥将认为它就是根网桥,并且将过渡到监听状态。一般情况下,当网桥认识到网络拓扑发生变更的时,将出现两种过渡状态:在拓扑变更的过程中,端口需要根据转发延迟计时器的数值而临时性地实施监听和学习状态。
当端口处于监听状态的时,它将利用发送和接收bpdu来确定活跃( active)的拓扑;当网络拓扑处于过渡期的时候,将不传递任何用户数据; 在监听状态的过程中,网桥将处理它所接收的bpdu;对于作为指定端口或根端口的端口,它们将在15秒(转发延迟的默认值)之后过渡到学习状态;对于不是指定端口或根端口的端口,它们将过渡返回到阻塞状态。
当端口处于学习状态的时,将利用从端口所学到的mac地址来组建自己的mac地址表;不能转发用户数据帧;在这个时刻,网桥不能传递任何用户数据。
当端口处于数据转发的时,学习状态能够降低所需扩散的数据帧的数量;如果某个端口在学习状态结束的时候仍然是指定端口或根端口,那么该端口就将过渡到转发状态;对于不是指定端口 或根端口的端口,它们将过渡返回到阻塞状态;在转发状态中,端口能够发送和接收用户数据;端口从阻塞状态过渡到转发状态的正常时间是30~50秒。
注:如果端口所连接的对象是主机,那么因为在这些链路上的转发不会造成stp环路,所以这些端口也就不需要参与stp监听和学习的过程。
在早期的版本中,网桥的stp数据包是在网桥处理过程中,按照特定的组播地址进行识别,然后在网桥处理过程中完成相应的数据处理。后来的实现中,将其实现为一种单独的协议,并最终由ieee802.2协议进行封包传递。
在网桥初始化的时候,为stp注册了协议,并指定其接收数据的函数为br_stp_proto:
/* STP protocol descriptor registered by the bridge module (br_init()). */
static const struct stp_proto br_stp_proto = {
.rcv = br_stp_rcv, // receive callback: entry point for incoming STP BPDUs
};
br_init()方法中,注册stp协议:
err = stp_proto_register(&br_stp_proto);
/*
 * stp_proto_register() - register an STP-family protocol with the generic
 * STP layer. On first use it opens the LLC SAP 0x42 (bridge spanning tree)
 * so that stp_pdu_rcv() receives all BPDU frames; the proto itself is then
 * published either as the plain-STP handler or, for GARP-style protocols,
 * indexed by the last byte of its group MAC address.
 */
int stp_proto_register(const struct stp_proto *proto)
{
int err = 0;
mutex_lock(&stp_proto_mutex);
if (sap_registered == 0) {
sap = llc_sap_open(llc_sap_bspan, stp_pdu_rcv); // register our receive function on LLC; llc_sap_bspan (0x42) = bridge spanning tree proto
if (!sap) {
err = -enomem;
goto out;
}
}
if (is_zero_ether_addr(proto->group_address))
rcu_assign_pointer(stp_proto, proto);
else
rcu_assign_pointer(garp_protos[proto->group_address[5] -
garp_addr_min], proto); // index into garp_protos[] by byte 5 of the group address
out:
mutex_unlock(&stp_proto_mutex);
return err;
}
/*
 * llc_sap_open() - create an llc_sap for @lsap and add it to the global
 * LLC SAP list, so that llc_rcv() can dispatch frames with that DSAP to
 * @func. Returns the new sap, or null if @lsap is already registered or
 * allocation fails.
 *
 * NOTE(review): the function's closing brace was lost when the article
 * was transcribed (the next line of the original text is prose); it is
 * restored here.
 */
struct llc_sap *llc_sap_open(unsigned char lsap,
int (*func)(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *pt,
struct net_device *orig_dev))
{
struct llc_sap *sap = null;
write_lock_bh(&llc_sap_list_lock);
if (__llc_sap_find(lsap)) /* sap already exists */
goto out;
sap = llc_sap_alloc();
if (!sap)
goto out;
sap->laddr.lsap = lsap; // upper-protocol SAP identifier
sap->rcv_func = func; // upper-protocol entry function
llc_add_sap(sap);
out:
write_unlock_bh(&llc_sap_list_lock);
return sap;
}
既然stp协议是附在llc之上的,那么还得从llc的接收说起。llc在初始化的时候,注册了其数据接收函数为llc_rcv():
/*
 * llc_rcv() - packet-type receive handler for 802.2 LLC frames
 * (excerpted; the article elides parts of the function, marked "……").
 *
 * Looks up the SAP addressed by the PDU's DSAP, clones the skb to any
 * registered upper-protocol rcv_func (this is how STP's stp_pdu_rcv()
 * gets called), then dispatches the frame to the full LLC state machine
 * by PDU type.
 */
int llc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct llc_sap *sap;
struct llc_pdu_sn *pdu;
int dest;
int (*rcv)(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
……
// read the LLC PDU header
pdu = llc_pdu_sn_hdr(skb);
if (unlikely(!pdu->dsap)) /* null dsap, refer to station */
goto handle_station;
sap = llc_sap_find(pdu->dsap); // find the upper protocol owning this DSAP
if (unlikely(!sap)) {/* unknown sap */
dprintk("%s: llc_sap_find(x) failed!\n", __func__,
pdu->dsap);
goto drop;
}
/*
* first the upper layer protocols that don't need the full
* llc functionality
*/
rcv = rcu_dereference(sap->rcv_func);
if (rcv) {
struct sk_buff *cskb = skb_clone(skb, gfp_atomic);
if (cskb)
rcv(cskb, dev, pt, orig_dev); // invoke the upper protocol's receive function
}
dest = llc_pdu_type(skb);
if (unlikely(!dest || !llc_type_handlers[dest - 1]))
goto drop_put;
llc_type_handlers[dest - 1](sap, skb);
……
}
在stp协议注册过程中,指定了llc到stp的入口函数是stp_pdu_rcv:
/*
 * stp_pdu_rcv() - rcv_func installed on LLC SAP 0x42 by
 * stp_proto_register() (excerpted; the error path is elided, "……").
 *
 * Validates the LLC header (SSAP/DSAP 0x42, U-type PDU), selects the
 * registered stp_proto — by group-address byte 5 for GARP-range
 * destinations, else the plain STP proto — and calls its ->rcv(),
 * which for the bridge is br_stp_rcv().
 */
static int stp_pdu_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
const struct ethhdr *eh = eth_hdr(skb);
const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
const struct stp_proto *proto;
if (pdu->ssap != llc_sap_bspan ||
pdu->dsap != llc_sap_bspan ||
pdu->ctrl_1 != llc_pdu_type_u)
goto err;
if (eh->h_dest[5] >= garp_addr_min && eh->h_dest[5] <= garp_addr_max) {
// look up the proto registered for this group address
proto = rcu_dereference(garp_protos[eh->h_dest[5] -
garp_addr_min]);
if (proto &&
compare_ether_addr(eh->h_dest, proto->group_address))
goto err;
} else
proto = rcu_dereference(stp_proto);
if (!proto)
goto err;
proto->rcv(proto, skb, dev); // hand off to the real STP receive function
return 0;
……
}
真正stp协议的执行函数br_stp_rcv:
/*
 * br_stp_rcv() - the bridge's STP receive function (stp_proto.rcv).
 *
 * Checks that the frame arrived on a bridge port, that kernel STP is
 * enabled, the bridge is up, the port is not disabled, and the
 * destination is the bridge group address. It then decodes the BPDU:
 * a config BPDU is unpacked field by field into struct br_config_bpdu
 * and passed to br_received_config_bpdu(); a TCN BPDU goes to
 * br_received_tcn_bpdu(). The skb is always freed here.
 *
 * Fix: the four br_get_ticks() calls read the timer fields at byte
 * offsets 24/26/28/30 of the BPDU; the '+' operators were lost in
 * transcription ("br_get_ticks(buf 24)" is a syntax error).
 */
void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
struct net_device *dev)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
struct net_bridge_port *p = rcu_dereference(dev->br_port);
struct net_bridge *br;
const unsigned char *buf;
if (!p)
goto err;
if (!pskb_may_pull(skb, 4))
goto err;
/* compare of protocol id and version */
buf = skb->data;
if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
goto err;
br = p->br;
spin_lock(&br->lock);
if (br->stp_enabled != br_kernel_stp) // kernel STP not enabled on this bridge
goto out;
if (!(br->dev->flags & iff_up))
goto out;
if (p->state == br_state_disabled)
goto out;
if (compare_ether_addr(dest, br->group_addr) != 0)
goto out;
buf = skb_pull(skb, 3);
if (buf[0] == bpdu_type_config) {
struct br_config_bpdu bpdu;
if (!pskb_may_pull(skb, 32))
goto out;
buf = skb->data;
// Unpack the wire-format config BPDU (big-endian fields).
bpdu.topology_change = (buf[1] & 0x01) ? 1 : 0;
bpdu.topology_change_ack = (buf[1] & 0x80) ? 1 : 0;
bpdu.root.prio[0] = buf[2];
bpdu.root.prio[1] = buf[3];
bpdu.root.addr[0] = buf[4];
bpdu.root.addr[1] = buf[5];
bpdu.root.addr[2] = buf[6];
bpdu.root.addr[3] = buf[7];
bpdu.root.addr[4] = buf[8];
bpdu.root.addr[5] = buf[9];
bpdu.root_path_cost =
(buf[10] << 24) |
(buf[11] << 16) |
(buf[12] << 8) |
buf[13];
bpdu.bridge_id.prio[0] = buf[14];
bpdu.bridge_id.prio[1] = buf[15];
bpdu.bridge_id.addr[0] = buf[16];
bpdu.bridge_id.addr[1] = buf[17];
bpdu.bridge_id.addr[2] = buf[18];
bpdu.bridge_id.addr[3] = buf[19];
bpdu.bridge_id.addr[4] = buf[20];
bpdu.bridge_id.addr[5] = buf[21];
bpdu.port_id = (buf[22] << 8) | buf[23];
bpdu.message_age = br_get_ticks(buf + 24);
bpdu.max_age = br_get_ticks(buf + 26);
bpdu.hello_time = br_get_ticks(buf + 28);
bpdu.forward_delay = br_get_ticks(buf + 30);
br_received_config_bpdu(p, &bpdu); // process the configuration BPDU
}
else if (buf[0] == bpdu_type_tcn) {
br_received_tcn_bpdu(p); // process the topology-change notification
}
out:
spin_unlock(&br->lock);
err:
kfree_skb(skb);
}
/*
 * br_received_config_bpdu() - handle a decoded configuration BPDU on
 * port @p (called from br_stp_rcv() with br->lock held).
 *
 * If the BPDU carries better information than the port currently holds,
 * record it, rerun root/designated selection and port-state selection,
 * send a TCN if we just lost root status, and propagate the config BPDU
 * downstream when it arrived on the root port. Otherwise, if we are the
 * designated port for this LAN, reply with our own (better) config BPDU.
 *
 * Fix: the mod_timer() expiry lost its '+' in transcription
 * ("jiffies br->bridge_hello_time"); restored to
 * jiffies + br->bridge_hello_time.
 */
void br_received_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *bpdu)
{
struct net_bridge *br;
int was_root;
br = p->br;
// Were we root before this BPDU? (compares our bridge id to the root id)
was_root = br_is_root_bridge(br);
// Does the BPDU supersede the information recorded for this port?
if (br_supersedes_port_info(p, bpdu)) {
// record the new information
br_record_config_information(p, bpdu);
// re-elect root bridge / root port
br_configuration_update(br);
// recompute port states
br_port_state_selection(br);
// If this BPDU changed the topology — e.g. we used to be root and
// no longer are — announce it with a TCN BPDU.
if (!br_is_root_bridge(br) && was_root) {
del_timer(&br->hello_timer);
if (br->topology_change_detected) {
del_timer(&br->topology_change_timer);
br_transmit_tcn(br);
mod_timer(&br->tcn_timer,
jiffies + br->bridge_hello_time);
}
}
// BPDU arrived on the root port: adopt its timers and relay it.
if (p->port_no == br->root_port) {
br_record_config_timeout_values(br, bpdu);
br_config_bpdu_generation(br);
if (bpdu->topology_change_ack)
br_topology_change_acknowledged(br);
}
} else if (br_is_designated_port(p)) { // inferior BPDU on our designated port: answer with our own config BPDU
br_reply(p);
}
}
void br_received_tcn_bpdu(struct net_bridge_port *p)
{
if (br_is_designated_port(p)) {
pr_info("%s: received tcn bpdu on port %i(%s)\n",
p->br->dev->name, p->port_no, p->dev->name);
br_topology_change_detection(p->br); // 发送变更通知
br_topology_change_acknowledge(p); // 应答变更
}
}