// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *              - Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
#include <net/hotdata.h>
#include <trace/events/qdisc.h>
#include <trace/events/net.h>
#include <net/xfrm.h>

/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);

static void qdisc_maybe_clear_missed(struct Qdisc *q,
				     const struct netdev_queue *txq)
{
	clear_bit(__QDISC_STATE_MISSED, &q->state);

	/* Make sure the below netif_xmit_frozen_or_stopped()
	 * checking happens after clearing STATE_MISSED.
	 */
	smp_mb__after_atomic();

	/* Checking netif_xmit_frozen_or_stopped() again to
	 * make sure STATE_MISSED is set if the STATE_MISSED
	 * set by netif_tx_wake_queue()'s rescheduling of
	 * net_tx_action() is cleared by the above clear_bit().
	 */
	if (!netif_xmit_frozen_or_stopped(txq))
		set_bit(__QDISC_STATE_MISSED, &q->state);
	else
		set_bit(__QDISC_STATE_DRAINING, &q->state);
}

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * qdisc_lock(qdisc) spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via qdisc root lock
 * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

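/* A simplified sketch (not a verbatim copy) of how the core takes the
 * root lock around the enqueue/run pair; see __dev_xmit_skb() in
 * net/core/dev.c for the real version:
 *
 *	spin_lock(qdisc_lock(q));
 *	rc = q->enqueue(skb, q, &to_free);
 *	qdisc_run(q);
 *	spin_unlock(qdisc_lock(q));
 */
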
#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)

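/* Peek at the head of skb_bad_txq and dequeue it only if its tx queue is
 * no longer frozen or stopped.  Returns SKB_XOFF_MAGIC while the queue
 * is still blocked, so callers can tell "blocked" apart from "empty".
 */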
static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
{
	const struct netdev_queue *txq = q->dev_queue;
	spinlock_t *lock = NULL;
	struct sk_buff *skb;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	skb = skb_peek(&q->skb_bad_txq);
	if (skb) {
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->skb_bad_txq);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = SKB_XOFF_MAGIC;
			qdisc_maybe_clear_missed(q, txq);
		}
	}

	if (lock)
		spin_unlock(lock);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
{
	struct sk_buff *skb = skb_peek(&q->skb_bad_txq);

	if (unlikely(skb))
		skb = __skb_dequeue_bad_txq(q);

	return skb;
}

static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
					     struct sk_buff *skb)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	__skb_queue_tail(&q->skb_bad_txq, skb);

	if (qdisc_is_percpu_stats(q)) {
		qdisc_qstats_cpu_backlog_inc(q, skb);
		qdisc_qstats_cpu_qlen_inc(q);
	} else {
		qdisc_qstats_backlog_inc(q, skb);
		q->q.qlen++;
	}

	if (lock)
		spin_unlock(lock);
}

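/* Put an skb (or a list of skbs) back at the head of the qdisc via
 * q->gso_skb and reschedule transmission.  Requeued packets remain
 * accounted in qlen/backlog because they are still part of the queue.
 */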
static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *lock = NULL;

	if (q->flags & TCQ_F_NOLOCK) {
		lock = qdisc_lock(q);
		spin_lock(lock);
	}

	while (skb) {
		struct sk_buff *next = skb->next;

		__skb_queue_tail(&q->gso_skb, skb);

		/* it's still part of the queue */
		if (qdisc_is_percpu_stats(q)) {
			qdisc_qstats_cpu_requeues_inc(q);
			qdisc_qstats_cpu_backlog_inc(q, skb);
			qdisc_qstats_cpu_qlen_inc(q);
		} else {
			q->qstats.requeues++;
			qdisc_qstats_backlog_inc(q, skb);
			q->q.qlen++;
		}

		skb = next;
	}

	if (lock) {
		spin_unlock(lock);
		set_bit(__QDISC_STATE_MISSED, &q->state);
	} else {
		__netif_schedule(q);
	}
}

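/* Dequeue additional packets up to the BQL-derived byte budget returned
 * by qdisc_avail_bulklimit(), chaining them via skb->next so one driver
 * call can transmit the whole burst.
 */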
static void try_bulk_dequeue_skb(struct Qdisc *q,
				 struct sk_buff *skb,
				 const struct netdev_queue *txq,
				 int *packets)
{
	int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

	while (bytelimit > 0) {
		struct sk_buff *nskb = q->dequeue(q);

		if (!nskb)
			break;

		bytelimit -= nskb->len; /* covers GSO len */
		skb->next = nskb;
		skb = nskb;
		(*packets)++; /* GSO counts as one pkt */
	}
	skb_mark_not_on_list(skb);
}

/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
				      struct sk_buff *skb,
				      int *packets)
{
	int mapping = skb_get_queue_mapping(skb);
	struct sk_buff *nskb;
	int cnt = 0;

	do {
		nskb = q->dequeue(q);
		if (!nskb)
			break;
		if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
			qdisc_enqueue_skb_bad_txq(q, nskb);
			break;
		}
		skb->next = nskb;
		skb = nskb;
	} while (++cnt < 8);
	(*packets) += cnt;
	skb_mark_not_on_list(skb);
}

/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
				   int *packets)
{
	const struct netdev_queue *txq = q->dev_queue;
	struct sk_buff *skb = NULL;

	*packets = 1;
	if (unlikely(!skb_queue_empty(&q->gso_skb))) {
		spinlock_t *lock = NULL;

		if (q->flags & TCQ_F_NOLOCK) {
			lock = qdisc_lock(q);
			spin_lock(lock);
		}

		skb = skb_peek(&q->gso_skb);

		/* skb may be null if another cpu pulls gso_skb off in between
		 * empty check and lock.
		 */
		if (!skb) {
			if (lock)
				spin_unlock(lock);
			goto validate;
		}

		/* skb in gso_skb were already validated */
		*validate = false;
		if (xfrm_offload(skb))
			*validate = true;
		/* check the reason of requeuing without tx lock first */
		txq = skb_get_tx_queue(txq->dev, skb);
		if (!netif_xmit_frozen_or_stopped(txq)) {
			skb = __skb_dequeue(&q->gso_skb);
			if (qdisc_is_percpu_stats(q)) {
				qdisc_qstats_cpu_backlog_dec(q, skb);
				qdisc_qstats_cpu_qlen_dec(q);
			} else {
				qdisc_qstats_backlog_dec(q, skb);
				q->q.qlen--;
			}
		} else {
			skb = NULL;
			qdisc_maybe_clear_missed(q, txq);
		}
		if (lock)
			spin_unlock(lock);
		goto trace;
	}
validate:
	*validate = true;

	if ((q->flags & TCQ_F_ONETXQUEUE) &&
	    netif_xmit_frozen_or_stopped(txq)) {
		qdisc_maybe_clear_missed(q, txq);
		return skb;
	}

	skb = qdisc_dequeue_skb_bad_txq(q);
	if (unlikely(skb)) {
		if (skb == SKB_XOFF_MAGIC)
			return NULL;
		goto bulk;
	}
	skb = q->dequeue(q);
	if (skb) {
bulk:
		if (qdisc_may_bulk(q))
			try_bulk_dequeue_skb(q, skb, txq, packets);
		else
			try_bulk_dequeue_skb_slow(q, skb, packets);
	}
trace:
	trace_qdisc_dequeue(q, txq, *packets, skb);
	return skb;
}

/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Owning the qdisc running bit guarantees that only one CPU
 * can execute this function.
 *
 * Returns to the caller:
 *				false  - hardware queue frozen, back off
 *				true   - feel free to send more pkts
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	/* And release qdisc */
	if (root_lock)
		spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
	if (unlikely(again)) {
		if (root_lock)
			spin_lock(root_lock);

		dev_requeue_skb(skb, q);
		return false;
	}
#endif

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
		else
			qdisc_maybe_clear_missed(q, txq);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		if (root_lock)
			spin_lock(root_lock);
		return true;
	}

	if (root_lock)
		spin_lock(root_lock);

	if (!dev_xmit_complete(ret)) {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		dev_requeue_skb(skb, q);
		return false;
	}

	return true;
}

/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * The qdisc running state guarantees only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 *  netif_tx_lock serializes accesses to device driver.
 *
 *  qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 *  if one is grabbed, another must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *				false - queue is empty or throttled.
 *				true  - queue is not empty.
 */
static inline bool qdisc_restart(struct Qdisc *q, int *packets)
{
	spinlock_t *root_lock = NULL;
	struct netdev_queue *txq;
	struct net_device *dev;
	struct sk_buff *skb;
	bool validate;

	/* Dequeue packet */
	skb = dequeue_skb(q, &validate, packets);
	if (unlikely(!skb))
		return false;

	if (!(q->flags & TCQ_F_NOLOCK))
		root_lock = qdisc_lock(q);

	dev = qdisc_dev(q);
	txq = skb_get_tx_queue(dev, skb);

	return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}

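/* Keep restarting the qdisc until it is empty or the dev_tx_weight
 * quota is spent.  When the quota runs out, reschedule instead of
 * hogging the CPU: NOLOCK qdiscs set STATE_MISSED, locked qdiscs go
 * through __netif_schedule().
 */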
void __qdisc_run(struct Qdisc *q)
{
	int quota = READ_ONCE(net_hotdata.dev_tx_weight);
	int packets;

	while (qdisc_restart(q, &packets)) {
		quota -= packets;
		if (quota <= 0) {
			if (q->flags & TCQ_F_NOLOCK)
				set_bit(__QDISC_STATE_MISSED, &q->state);
			else
				__netif_schedule(q);

			break;
		}
	}
}

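/* Return the most recent trans_start across all tx queues, i.e. the
 * last time this device made forward progress on any queue.
 */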
unsigned long dev_trans_start(struct net_device *dev)
{
	unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
	unsigned long val;
	unsigned int i;

	for (i = 1; i < dev->num_tx_queues; i++) {
		val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
		if (val && time_after(val, res))
			res = val;
	}

	return res;
}
EXPORT_SYMBOL(dev_trans_start);

static void netif_freeze_queues(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

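/* Grab tx_global_lock and freeze every tx queue, so no driver transmit
 * can run on any queue of @dev until netif_tx_unlock().
 */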
void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->tx_global_lock);
	netif_freeze_queues(dev);
}
EXPORT_SYMBOL(netif_tx_lock);

static void netif_unfreeze_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
}

void netif_tx_unlock(struct net_device *dev)
{
	netif_unfreeze_queues(dev);
	spin_unlock(&dev->tx_global_lock);
}
EXPORT_SYMBOL(netif_tx_unlock);

static void dev_watchdog(struct timer_list *t)
{
	struct net_device *dev = timer_container_of(dev, t, watchdog_timer);
	bool release = true;

	spin_lock(&dev->tx_global_lock);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			unsigned int timedout_ms = 0;
			unsigned int i;
			unsigned long trans_start;
			unsigned long oldest_start = jiffies;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				if (!netif_xmit_stopped(txq))
					continue;

				/* Paired with WRITE_ONCE() + smp_mb...() in
				 * netdev_tx_sent_queue() and netif_tx_stop_queue().
				 */
				smp_mb();
				trans_start = READ_ONCE(txq->trans_start);

				if (time_after(jiffies, trans_start + dev->watchdog_timeo)) {
					timedout_ms = jiffies_to_msecs(jiffies - trans_start);
					atomic_long_inc(&txq->trans_timeout);
					break;
				}
				if (time_after(oldest_start, trans_start))
					oldest_start = trans_start;
			}

			if (unlikely(timedout_ms)) {
				trace_net_dev_xmit_timeout(dev, i);
				netdev_crit(dev, "NETDEV WATCHDOG: CPU: %d: transmit queue %u timed out %u ms\n",
					    raw_smp_processor_id(),
					    i, timedout_ms);
				netif_freeze_queues(dev);
				dev->netdev_ops->ndo_tx_timeout(dev, i);
				netif_unfreeze_queues(dev);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(oldest_start +
						     dev->watchdog_timeo)))
				release = false;
		}
	}
	spin_unlock(&dev->tx_global_lock);

	if (release)
		netdev_put(dev, &dev->watchdog_dev_tracker);
}

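/* Arm the tx watchdog timer (defaulting watchdog_timeo to 5 seconds)
 * and hold a device reference so @dev cannot go away while the timer
 * is pending.
 */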
void netdev_watchdog_up(struct net_device *dev)
{
	if (!dev->netdev_ops->ndo_tx_timeout)
		return;
	if (dev->watchdog_timeo <= 0)
		dev->watchdog_timeo = 5*HZ;
	if (!mod_timer(&dev->watchdog_timer,
		       round_jiffies(jiffies + dev->watchdog_timeo)))
		netdev_hold(dev, &dev->watchdog_dev_tracker, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(netdev_watchdog_up);

static void netdev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (timer_delete(&dev->watchdog_timer))
		netdev_put(dev, &dev->watchdog_dev_tracker);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 * Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_up_count);
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 * Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		if (dev->reg_state == NETREG_UNINITIALIZED)
			return;
		atomic_inc(&dev->carrier_down_count);
		linkwatch_fire_event(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_off);

/**
 *	netif_carrier_event - report carrier state event
 *	@dev: network device
 *
 * Device has detected a carrier event but the carrier state wasn't changed.
 * Use in drivers when querying carrier state asynchronously, to avoid missing
 * events (link flaps) if link recovers before it's queried.
 */
void netif_carrier_event(struct net_device *dev)
{
	if (dev->reg_state == NETREG_UNINITIALIZED)
		return;
	atomic_inc(&dev->carrier_up_count);
	atomic_inc(&dev->carrier_down_count);
	linkwatch_fire_event(dev);
}
EXPORT_SYMBOL_GPL(netif_carrier_event);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			struct sk_buff **to_free)
{
	dev_core_stats_tx_dropped_inc(skb->dev);
	__qdisc_drop(skb, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
	RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
	.dev_queue	=	&noop_netdev_queue,
	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
	.gso_skb = {
		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
		.prev = (struct sk_buff *)&noop_qdisc.gso_skb,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
	},
	.skb_bad_txq = {
		.next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
		.qlen = 0,
		.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
	},
	.owner = -1,
};
EXPORT_SYMBOL(noop_qdisc);

static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	/* register_qdisc() assigns a default of noop_enqueue if unset,
	 * but __dev_queue_xmit() treats noqueue only as such
	 * if this is NULL - so clear it here. */
	qdisc->enqueue = NULL;
	return 0;
}

struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.init		=	noqueue_init,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.peek		=	noop_dequeue,
	.owner		=	THIS_MODULE,
};

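/* Default mapping from TC_PRIO_* values to the three pfifo_fast bands.
 * Band 0 is dequeued first (TC_PRIO_INTERACTIVE and TC_PRIO_CONTROL map
 * there), band 1 carries best-effort traffic, band 2 bulk/background.
 */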
const u8 sch_default_prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
EXPORT_SYMBOL(sch_default_prio2band);

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

/*
 * Private data for a pfifo_fast scheduler containing:
 *	- rings for priority bands
 */
struct pfifo_fast_priv {
	struct skb_array q[PFIFO_FAST_BANDS];
};

static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
					  int band)
{
	return &priv->q[band];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{
	int band = sch_default_prio2band[skb->priority & TC_PRIO_MAX];
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct skb_array *q = band2list(priv, band);
	unsigned int pkt_len = qdisc_pkt_len(skb);
	int err;

	err = skb_array_produce(q, skb);

	if (unlikely(err)) {
		tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_OVERLIMIT);

		if (qdisc_is_percpu_stats(qdisc))
			return qdisc_drop_cpu(skb, qdisc, to_free);
		else
			return qdisc_drop(skb, qdisc, to_free);
	}

	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
	return NET_XMIT_SUCCESS;
}

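/* Dequeue from the highest-priority non-empty band.  If everything
 * looks empty but the qdisc is flagged non-empty, clear MISSED/DRAINING
 * and rescan once, since an enqueue may have raced with the first pass.
 */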
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	bool need_retry = true;
	int band;

retry:
	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		if (__skb_array_empty(q))
			continue;

		skb = __skb_array_consume(q);
	}
	if (likely(skb)) {
		qdisc_update_stats_at_dequeue(qdisc, skb);
	} else if (need_retry &&
		   READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
		/* Delay clearing the STATE_MISSED here to reduce
		 * the overhead of the second spin_trylock() in
		 * qdisc_run_begin() and __netif_schedule() calling
		 * in qdisc_run_end().
		 */
		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);

		/* Make sure dequeuing happens after clearing
		 * STATE_MISSED.
		 */
		smp_mb__after_atomic();

		need_retry = false;

		goto retry;
	}

	return skb;
}

static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct sk_buff *skb = NULL;
	int band;

	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
		struct skb_array *q = band2list(priv, band);

		skb = __skb_array_peek(q);
	}

	return skb;
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int i, band;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

	for (band = 0; band < PFIFO_FAST_BANDS; band++) {
		struct skb_array *q = band2list(priv, band);
		struct sk_buff *skb;

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;

		while ((skb = __skb_array_consume(q)) != NULL)
			kfree_skb(skb);
	}

	if (qdisc_is_percpu_stats(qdisc)) {
		for_each_possible_cpu(i) {
			struct gnet_stats_queue *q;

			q = per_cpu_ptr(qdisc->cpu_qstats, i);
			q->backlog = 0;
			q->qlen = 0;
		}
	}
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, sch_default_prio2band, TC_PRIO_MAX + 1);
	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	int prio;

	/* guard against zero length rings */
	if (!qlen)
		return -EINVAL;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);
		int err;

		err = skb_array_init(q, qlen, GFP_KERNEL);
		if (err)
			return -ENOMEM;
	}

	/* Can by-pass the queue discipline */
	qdisc->flags |= TCQ_F_CAN_BYPASS;
	return 0;
}

static void pfifo_fast_destroy(struct Qdisc *sch)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		/* NULL ring is possible if destroy path is due to a failed
		 * skb_array_init() in pfifo_fast_init() case.
		 */
		if (!q->ring.queue)
			continue;
		/* Destroy ring but no need to kfree_skb because a call to
		 * pfifo_fast_reset() has already done that work.
		 */
		ptr_ring_cleanup(&q->ring, NULL);
	}
}

static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
					  unsigned int new_len)
{
	struct pfifo_fast_priv *priv = qdisc_priv(sch);
	struct skb_array *bands[PFIFO_FAST_BANDS];
	int prio;

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		struct skb_array *q = band2list(priv, prio);

		bands[prio] = q;
	}

	return skb_array_resize_multiple_bh(bands, PFIFO_FAST_BANDS, new_len,
					    GFP_KERNEL);
}

struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	sizeof(struct pfifo_fast_priv),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.peek		=	pfifo_fast_peek,
	.init		=	pfifo_fast_init,
	.destroy	=	pfifo_fast_destroy,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.change_tx_queue_len =  pfifo_fast_change_tx_queue_len,
	.owner		=	THIS_MODULE,
	.static_flags	=	TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
};
EXPORT_SYMBOL(pfifo_fast_ops);

static struct lock_class_key qdisc_tx_busylock;

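/* Allocate a qdisc plus its private area on the NUMA node of the tx
 * queue it will service.  On success the qdisc holds a device reference
 * and starts with refcnt == 1.
 */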
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  const struct Qdisc_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;
	unsigned int size = sizeof(*sch) + ops->priv_size;
	int err = -ENOBUFS;
	struct net_device *dev;

	if (!dev_queue) {
		NL_SET_ERR_MSG(extack, "No device queue given");
		err = -EINVAL;
		goto errout;
	}

	dev = dev_queue->dev;
	sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));

	if (!sch)
		goto errout;
	__skb_queue_head_init(&sch->gso_skb);
	__skb_queue_head_init(&sch->skb_bad_txq);
	gnet_stats_basic_sync_init(&sch->bstats);
	lockdep_register_key(&sch->root_lock_key);
	spin_lock_init(&sch->q.lock);
	lockdep_set_class(&sch->q.lock, &sch->root_lock_key);

	if (ops->static_flags & TCQ_F_CPUSTATS) {
		sch->cpu_bstats =
			netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!sch->cpu_bstats)
			goto errout1;

		sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!sch->cpu_qstats) {
			free_percpu(sch->cpu_bstats);
			goto errout1;
		}
	}

	spin_lock_init(&sch->busylock);
	lockdep_set_class(&sch->busylock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
	spin_lock_init(&sch->seqlock);
	lockdep_set_class(&sch->seqlock,
			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

	sch->ops = ops;
	sch->flags = ops->static_flags;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	sch->owner = -1;
	netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
	refcount_set(&sch->refcnt, 1);

	return sch;
errout1:
	lockdep_unregister_key(&sch->root_lock_key);
	kfree(sch);
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				const struct Qdisc_ops *ops,
				unsigned int parentid,
				struct netlink_ext_ack *extack)
{
	struct Qdisc *sch;

	if (!bpf_try_module_get(ops, ops->owner)) {
		NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
		return NULL;
	}

	sch = qdisc_alloc(dev_queue, ops, extack);
	if (IS_ERR(sch)) {
		bpf_module_put(ops, ops->owner);
		return NULL;
	}
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
		trace_qdisc_create(ops, dev_queue->dev, parentid);
		return sch;
	}

	qdisc_put(sch);
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under qdisc_lock(qdisc) and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	trace_qdisc_reset(qdisc);

	if (ops->reset)
		ops->reset(qdisc);

	__skb_queue_purge(&qdisc->gso_skb);
	__skb_queue_purge(&qdisc->skb_bad_txq);

	qdisc->q.qlen = 0;
	qdisc->qstats.backlog = 0;
}
EXPORT_SYMBOL(qdisc_reset);

void qdisc_free(struct Qdisc *qdisc)
{
	if (qdisc_is_percpu_stats(qdisc)) {
		free_percpu(qdisc->cpu_bstats);
		free_percpu(qdisc->cpu_qstats);
	}

	kfree(qdisc);
}

static void qdisc_free_cb(struct rcu_head *head)
{
	struct Qdisc *q = container_of(head, struct Qdisc, rcu);

	qdisc_free(q);
}

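/* Final teardown once the last reference is gone: unhash, kill the rate
 * estimator, reset the queues, run ->destroy(), drop the device
 * reference and free the struct after an RCU grace period.
 */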
static void __qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;
	struct net_device *dev = qdisc_dev(qdisc);

#ifdef CONFIG_NET_SCHED
	qdisc_hash_del(qdisc);

	qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
	gen_kill_estimator(&qdisc->rate_est);

	qdisc_reset(qdisc);

	if (ops->destroy)
		ops->destroy(qdisc);

	lockdep_unregister_key(&qdisc->root_lock_key);
	bpf_module_put(ops, ops->owner);
	netdev_put(dev, &qdisc->dev_tracker);

	trace_qdisc_destroy(qdisc);

	call_rcu(&qdisc->rcu, qdisc_free_cb);
}

void qdisc_destroy(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;

	__qdisc_destroy(qdisc);
}

void qdisc_put(struct Qdisc *qdisc)
{
	if (!qdisc)
		return;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_test(&qdisc->refcnt))
		return;

	__qdisc_destroy(qdisc);
}
EXPORT_SYMBOL(qdisc_put);

/* Version of qdisc_put() that is called with rtnl mutex unlocked.
 * Intended to be used as optimization, this function only takes rtnl lock if
 * qdisc reference counter reached zero.
 */

void qdisc_put_unlocked(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
		return;

	__qdisc_destroy(qdisc);
	rtnl_unlock();
}
EXPORT_SYMBOL(qdisc_put_unlocked);

/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
			      struct Qdisc *qdisc)
{
	struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
	spinlock_t *root_lock;

	root_lock = qdisc_lock(oqdisc);
	spin_lock_bh(root_lock);

	/* ... and graft new one */
	if (qdisc == NULL)
		qdisc = &noop_qdisc;
	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);

	spin_unlock_bh(root_lock);

	return oqdisc;
}
EXPORT_SYMBOL(dev_graft_qdisc);

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
		rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);

		qdisc_put(qdisc);
	}
}

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;
	const struct Qdisc_ops *ops = default_qdisc_ops;

	if (dev->priv_flags & IFF_NO_QUEUE)
		ops = &noqueue_qdisc_ops;
	else if (dev->type == ARPHRD_CAN)
		ops = &pfifo_fast_ops;

	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
	if (!qdisc)
		return;

	if (!netif_is_multiqueue(dev))
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
}

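/* Pick and attach default root qdiscs at dev_activate() time: per-queue
 * defaults under an mq root for multiqueue devices, a single shared
 * root otherwise.  Falls back to "noqueue" if the default setup fails.
 */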
static void attach_default_qdiscs(struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	txq = netdev_get_tx_queue(dev, 0);

	if (!netif_is_multiqueue(dev) ||
	    dev->priv_flags & IFF_NO_QUEUE) {
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		qdisc = rtnl_dereference(txq->qdisc_sleeping);
		rcu_assign_pointer(dev->qdisc, qdisc);
		qdisc_refcount_inc(qdisc);
	} else {
		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
		if (qdisc) {
			rcu_assign_pointer(dev->qdisc, qdisc);
			qdisc->ops->attach(qdisc);
		}
	}
	qdisc = rtnl_dereference(dev->qdisc);

	/* Detect default qdisc setup/init failed and fallback to "noqueue" */
	if (qdisc == &noop_qdisc) {
		netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
		dev->priv_flags |= IFF_NO_QUEUE;
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
		qdisc = rtnl_dereference(txq->qdisc_sleeping);
		rcu_assign_pointer(dev->qdisc, qdisc);
		qdisc_refcount_inc(qdisc);
		dev->priv_flags ^= IFF_NO_QUEUE;
	}

#ifdef CONFIG_NET_SCHED
	if (qdisc != &noop_qdisc)
		qdisc_hash_add(qdisc, false);
#endif
}

static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
	int *need_watchdog_p = _need_watchdog;

	if (!(new_qdisc->flags & TCQ_F_BUILTIN))
		clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);

	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
	if (need_watchdog_p) {
		WRITE_ONCE(dev_queue->trans_start, 0);
		*need_watchdog_p = 1;
	}
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	 * create a default one for devices which need queueing,
	 * and noqueue_qdisc for virtual interfaces.
	 */

	if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(dev_activate);

static void qdisc_deactivate(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_BUILTIN)
		return;

	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_sync_needed)
{
	bool *sync_needed = _sync_needed;
	struct Qdisc *qdisc;

	qdisc = rtnl_dereference(dev_queue->qdisc);
	if (qdisc) {
		if (qdisc->enqueue)
			*sync_needed = true;
		qdisc_deactivate(qdisc);
		rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
	}
}

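/* Purge a deactivated queue's qdisc under the appropriate locks; for
 * NOLOCK qdiscs also clear MISSED/DRAINING so a later reactivation
 * starts from a clean state.
 */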
static void dev_reset_queue(struct net_device *dev,
			    struct netdev_queue *dev_queue,
			    void *_unused)
{
	struct Qdisc *qdisc;
	bool nolock;

	qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
	if (!qdisc)
		return;

	nolock = qdisc->flags & TCQ_F_NOLOCK;

	if (nolock)
		spin_lock_bh(&qdisc->seqlock);
	spin_lock_bh(qdisc_lock(qdisc));

	qdisc_reset(qdisc);

	spin_unlock_bh(qdisc_lock(qdisc));
	if (nolock) {
		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
		clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
		spin_unlock_bh(&qdisc->seqlock);
	}
}

static bool some_qdisc_is_busy(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = rtnl_dereference(dev_queue->qdisc_sleeping);

		root_lock = qdisc_lock(q);
		spin_lock_bh(root_lock);

		val = (qdisc_is_running(q) ||
		       test_bit(__QDISC_STATE_SCHED, &q->state));

		spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

/**
 *	dev_deactivate_many - deactivate transmissions on several devices
 *	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in dismantle phase.
 */
void dev_deactivate_many(struct list_head *head)
{
	bool sync_needed = false;
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
					 &sync_needed);
		if (dev_ingress_queue(dev))
			dev_deactivate_queue(dev, dev_ingress_queue(dev),
					     &sync_needed);

		netdev_watchdog_down(dev);
	}

	/* Wait for outstanding qdisc enqueuing calls. */
	if (sync_needed)
		synchronize_net();

	list_for_each_entry(dev, head, close_list) {
		netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);

		if (dev_ingress_queue(dev))
			dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
	}

	/* Wait for outstanding qdisc_run calls. */
	list_for_each_entry(dev, head, close_list) {
		while (some_qdisc_is_busy(dev)) {
			/* wait_event() would avoid this sleep-loop but would
			 * require expensive checks in the fast paths of packet
			 * processing which isn't worth it.
			 */
			schedule_timeout_uninterruptible(1);
		}
	}
}
| 1395 |  | 
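/* Single-device wrapper around dev_deactivate_many(). Must be called
 * under the RTNL lock, which the rtnl_dereference() calls in the
 * deactivation path assume.
 */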
void dev_deactivate(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	dev_deactivate_many(&single);
	list_del(&single);
}
EXPORT_SYMBOL(dev_deactivate);

static int qdisc_change_tx_queue_len(struct net_device *dev,
				     struct netdev_queue *dev_queue)
{
	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->change_tx_queue_len)
		return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
	return 0;
}

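/* Notify the root qdisc that the number of usable TX queues changed,
 * so multi-queue qdiscs such as mq can re-hash their per-queue
 * children (see mq_change_real_num_tx() below).
 */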
void dev_qdisc_change_real_num_tx(struct net_device *dev,
				  unsigned int new_real_tx)
{
	struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);

	if (qdisc->ops->change_real_num_tx)
		qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
}

void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
{
#ifdef CONFIG_NET_SCHED
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int i;

	for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
		/* Only update the default qdiscs we created;
		 * qdiscs with handles are always hashed.
		 */
		if (qdisc != &noop_qdisc && !qdisc->handle)
			qdisc_hash_del(qdisc);
	}
	for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
		qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
		if (qdisc != &noop_qdisc && !qdisc->handle)
			qdisc_hash_add(qdisc, false);
	}
#endif
}
EXPORT_SYMBOL(mq_change_real_num_tx);

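/* Propagate a changed dev->tx_queue_len to the qdisc of every TX queue.
 * The device is briefly deactivated while the limits are updated; a
 * failure part-way through currently leaves the already-updated queues
 * with the new limit (see the TODO below).
 */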
int dev_qdisc_change_tx_queue_len(struct net_device *dev)
{
	bool up = dev->flags & IFF_UP;
	unsigned int i;
	int ret = 0;

	if (up)
		dev_deactivate(dev);

	for (i = 0; i < dev->num_tx_queues; i++) {
		ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);

		/* TODO: revert changes on a partial failure */
		if (ret)
			break;
	}

	if (up)
		dev_activate(dev);
	return ret;
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	rcu_assign_pointer(dev_queue->qdisc, qdisc);
	rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
}

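/* Attach the no-op qdisc to every queue when the device is registered,
 * so anything transmitted before a real qdisc is installed is simply
 * dropped, and initialize (but do not start) the TX watchdog timer;
 * the watchdog is armed later by dev_activate().
 */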
void dev_init_scheduler(struct net_device *dev)
{
	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);

	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}

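/* Tear-down counterpart of dev_init_scheduler(): detach and release all
 * qdiscs and point every queue back at the no-op qdisc. The watchdog
 * must no longer be pending by this point, hence the WARN_ON().
 */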
void dev_shutdown(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	if (dev_ingress_queue(dev))
		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
	qdisc_put(rtnl_dereference(dev->qdisc));
	rcu_assign_pointer(dev->qdisc, &noop_qdisc);

	WARN_ON(timer_pending(&dev->watchdog_timer));
}

/**
 * psched_ratecfg_precompute__() - Pre-compute values for reciprocal division
 * @rate:   Rate to compute reciprocal division values of
 * @mult:   Multiplier for reciprocal division
 * @shift:  Shift for reciprocal division
 *
 * The multiplier and shift for reciprocal division by rate are stored
 * in mult and shift.
 *
 * The idea is to replace a divide in the fast path with a reciprocal
 * divide, which is a multiply and a shift.
 *
 * The normal formula would be:
 *  time_in_ns = (NSEC_PER_SEC * len) / rate_bps
 *
 * We compute mult/shift to use instead:
 *  time_in_ns = (len * mult) >> shift;
 *
 * We try to get the highest possible mult value for accuracy,
 * but have to make sure no overflows will ever happen.
 *
 * reciprocal_value() is not used here because it doesn't handle
 * 64-bit values.
 */
static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
{
	u64 factor = NSEC_PER_SEC;

	*mult = 1;
	*shift = 0;

	if (rate <= 0)
		return;

	for (;;) {
		*mult = div64_u64(factor, rate);
		if (*mult & (1U << 31) || factor & (1ULL << 63))
			break;
		factor <<= 1;
		(*shift)++;
	}
}
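
/* A worked example (illustrative numbers, not from the original source):
 * for rate = 1,000,000 bytes/s (8 Mbit/s), the loop above doubles factor
 * until mult = div64_u64(factor, rate) has bit 31 set:
 *
 *   factor = NSEC_PER_SEC << 22		(= 4194304 * 10^9)
 *   mult   = factor / rate = 4194304000	(bit 31 set -> break)
 *   shift  = 22
 *
 * A 1500-byte packet then costs
 *   (1500 * 4194304000) >> 22 = 1500000 ns = 1.5 ms,
 * matching the exact NSEC_PER_SEC * 1500 / 1000000. The fast path
 * consumes these values via psched_l2t_ns() in include/net/sch_generic.h,
 * which adds r->overhead to the length before the multiply.
 */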

void psched_ratecfg_precompute(struct psched_ratecfg *r,
			       const struct tc_ratespec *conf,
			       u64 rate64)
{
	memset(r, 0, sizeof(*r));
	r->overhead = conf->overhead;
	r->mpu = conf->mpu;
	r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
	r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
	psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
}
EXPORT_SYMBOL(psched_ratecfg_precompute);

void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
{
	r->rate_pkts_ps = pktrate64;
	psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
}
EXPORT_SYMBOL(psched_ppscfg_precompute);

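/* A mini_Qdisc pair double-buffers the active filter chain seen by the
 * clsact/ingress fast path: updates are staged in whichever of miniq1
 * and miniq2 is currently inactive and then published with an RCU
 * pointer swap, so readers never observe a half-updated miniq.
 */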
void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
			  struct tcf_proto *tp_head)
{
	/* Protected with chain0->filter_chain_lock.
	 * Can't access chain directly because tp_head can be NULL.
	 */
	struct mini_Qdisc *miniq_old =
		rcu_dereference_protected(*miniqp->p_miniq, 1);
	struct mini_Qdisc *miniq;

	if (!tp_head) {
		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
	} else {
		miniq = miniq_old != &miniqp->miniq1 ?
			&miniqp->miniq1 : &miniqp->miniq2;

		/* We need to make sure that readers won't see the miniq
		 * we are about to modify. So ensure that at least one RCU
		 * grace period has elapsed since the miniq was made
		 * inactive.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			cond_synchronize_rcu(miniq->rcu_state);
		else if (!poll_state_synchronize_rcu(miniq->rcu_state))
			synchronize_rcu_expedited();

		miniq->filter_list = tp_head;
		rcu_assign_pointer(*miniqp->p_miniq, miniq);
	}

	if (miniq_old)
		/* This is the counterpart of the RCU sync above. We need
		 * to block potential new users of miniq_old until all
		 * readers have stopped seeing it.
		 */
		miniq_old->rcu_state = start_poll_synchronize_rcu();
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);

void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
				struct tcf_block *block)
{
	miniqp->miniq1.block = block;
	miniqp->miniq2.block = block;
}
EXPORT_SYMBOL(mini_qdisc_pair_block_init);

void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
			  struct mini_Qdisc __rcu **p_miniq)
{
	miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
	miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
	miniqp->miniq1.rcu_state = get_state_synchronize_rcu();
	miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state;
	miniqp->p_miniq = p_miniq;
}
EXPORT_SYMBOL(mini_qdisc_pair_init);