// SPDX-License-Identifier: GPL-2.0+
/*
 * Infrastructure to handle all PHY devices connected to a given netdev,
 * either directly or indirectly attached.
 *
 * Copyright (c) 2023 Maxime Chevallier <maxime.chevallier@bootlin.com>
 */

#include <linux/phy_link_topology.h>
#include <linux/phy.h>
#include <linux/rtnetlink.h>
#include <linux/xarray.h>

static int netdev_alloc_phy_link_topology(struct net_device *dev)
{
	struct phy_link_topology *topo;

	topo = kzalloc(sizeof(*topo), GFP_KERNEL);
	if (!topo)
		return -ENOMEM;

	xa_init_flags(&topo->phys, XA_FLAGS_ALLOC1);
	topo->next_phy_index = 1;

	dev->link_topo = topo;

	return 0;
}

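/**
 * phy_link_topo_add_phy() - Register a PHY in a netdev's link topology
 * @dev: netdev the PHY is (directly or indirectly) attached to
 * @phy: PHY device to add to the topology
 * @upt: type of the PHY's upstream component (MAC or another PHY)
 * @upstream: pointer to that upstream component, matching @upt
 *
 * Allocates @dev's topology on first use, records the PHY's upstream link
 * and, if the PHY already had an index, tries to re-use it.
 *
 * Returns: 0 on success, a negative error code otherwise.
 */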
int phy_link_topo_add_phy(struct net_device *dev,
			  struct phy_device *phy,
			  enum phy_upstream upt, void *upstream)
{
	struct phy_link_topology *topo = dev->link_topo;
	struct phy_device_node *pdn;
	int ret;

	if (!topo) {
		ret = netdev_alloc_phy_link_topology(dev);
		if (ret)
			return ret;

		topo = dev->link_topo;
	}

	pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
	if (!pdn)
		return -ENOMEM;

	pdn->phy = phy;
	switch (upt) {
	case PHY_UPSTREAM_MAC:
		pdn->upstream.netdev = (struct net_device *)upstream;
		if (phy_on_sfp(phy))
			pdn->parent_sfp_bus = pdn->upstream.netdev->sfp_bus;
		break;
	case PHY_UPSTREAM_PHY:
		pdn->upstream.phydev = (struct phy_device *)upstream;
		if (phy_on_sfp(phy))
			pdn->parent_sfp_bus = pdn->upstream.phydev->sfp_bus;
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	pdn->upstream_type = upt;

	/* Attempt to re-use a previously allocated phy_index */
	if (phy->phyindex)
		ret = xa_insert(&topo->phys, phy->phyindex, pdn, GFP_KERNEL);
	else
		ret = xa_alloc_cyclic(&topo->phys, &phy->phyindex, pdn,
				      xa_limit_32b, &topo->next_phy_index,
				      GFP_KERNEL);

	if (ret < 0)
		goto err;

	return 0;

err:
	kfree(pdn);
	return ret;
}
EXPORT_SYMBOL_GPL(phy_link_topo_add_phy);

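/**
 * phy_link_topo_del_phy() - Remove a PHY from a netdev's link topology
 * @dev: netdev the PHY was registered on
 * @phy: PHY device to remove
 *
 * The PHY's phyindex is deliberately left untouched, so the same index can
 * be re-used if the PHY is later added back to the topology.
 */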
void phy_link_topo_del_phy(struct net_device *dev,
			   struct phy_device *phy)
{
	struct phy_link_topology *topo = dev->link_topo;
	struct phy_device_node *pdn;

	if (!topo)
		return;

	pdn = xa_erase(&topo->phys, phy->phyindex);

	/* We delete the PHY from the topology, but we don't re-set the
	 * phy->phyindex field. If the PHY isn't gone, we can re-assign it the
	 * same index next time it's added back to the topology.
	 */

	kfree(pdn);
}
EXPORT_SYMBOL_GPL(phy_link_topo_del_phy);

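/*
 * Illustrative usage only (not part of this file): a hypothetical sketch of
 * how a PHY directly attached to a MAC would be expected to be tracked,
 * assuming "ndev" and "phydev" are already initialised. For PHY_UPSTREAM_MAC
 * the upstream pointer is the netdev itself:
 *
 *	int err = phy_link_topo_add_phy(ndev, phydev, PHY_UPSTREAM_MAC, ndev);
 *
 *	if (err)
 *		return err;
 *	...
 *	phy_link_topo_del_phy(ndev, phydev);	// on detach
 */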