drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c
/* Copyright (c) 2014 Broadcom Corporation
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <brcmu_utils.h>

#include "core.h"
#include "debug.h"
#include "bus.h"
#include "proto.h"
#include "flowring.h"
#include "msgbuf.h"
#include "common.h"


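/* Flow-control watermarks for a single flow ring: a ring is blocked for its
 * interface once the queue length exceeds BRCMF_FLOWRING_HIGH and is
 * unblocked again when it drains below BRCMF_FLOWRING_LOW. The hash macros
 * below compute the starting slot in the open-addressed flow ring hash: in
 * AP mode the destination address is part of the key, in STA mode the FIFO
 * and interface index alone identify the ring.
 */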
#define BRCMF_FLOWRING_HIGH             1024
#define BRCMF_FLOWRING_LOW              (BRCMF_FLOWRING_HIGH - 256)
#define BRCMF_FLOWRING_INVALID_IFIDX    0xff

#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)

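/* Mapping from 802.1d priority (skb->priority) to firmware FIFO level:
 * priorities 1 and 2 (background) use fifo 0, 0 and 3 (best effort) fifo 1,
 * 4 and 5 (video) fifo 2, and 6 and 7 (voice) fifo 3.
 */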
static const u8 brcmf_flowring_prio2fifo[] = {
        1,
        0,
        0,
        1,
        2,
        2,
        3,
        3
};


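/* Return true if @mac matches one of the peers on the TDLS entry list. */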
static bool
brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN])
{
        struct brcmf_flowring_tdls_entry *search;

        search = flow->tdls_entry;

        while (search) {
                if (memcmp(search->mac, mac, ETH_ALEN) == 0)
                        return true;
                search = search->next;
        }

        return false;
}


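/* Look up the flow ring used for traffic to @da with priority @prio on
 * interface @ifidx. In AP mode (and for TDLS peers) the destination address
 * is part of the key; in STA mode only the FIFO and interface index are.
 * Multicast frames in AP mode share a single ring keyed on the broadcast
 * address. Returns the flow ring id, or BRCMF_FLOWRING_INVALID_ID if no
 * matching ring exists yet.
 */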
u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
                          u8 prio, u8 ifidx)
{
        struct brcmf_flowring_hash *hash;
        u16 hash_idx;
        u32 i;
        bool found;
        bool sta;
        u8 fifo;
        u8 *mac;

        fifo = brcmf_flowring_prio2fifo[prio];
        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
        mac = da;
        if ((!sta) && (is_multicast_ether_addr(da))) {
                mac = (u8 *)ALLFFMAC;
                fifo = 0;
        }
        if ((sta) && (flow->tdls_active) &&
            (brcmf_flowring_is_tdls_mac(flow, da))) {
                sta = false;
        }
        hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
                          BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
        hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        found = false;
        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) &&
                    (hash[hash_idx].fifo == fifo) &&
                    (hash[hash_idx].ifidx == ifidx)) {
                        found = true;
                        break;
                }
                hash_idx++;
                hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        }
        if (found)
                return hash[hash_idx].flowid;

        return BRCMF_FLOWRING_INVALID_ID;
}


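/* Create a flow ring for traffic to @da with priority @prio on interface
 * @ifidx: claim a free slot in the hash table, record the key there and
 * allocate the ring bookkeeping with an empty skb queue in RING_CLOSED
 * state. Returns the new flow ring id, or BRCMF_FLOWRING_INVALID_ID when no
 * free hash slot is found (allocation failures return -ENOMEM in the u32
 * return value).
 */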
u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
                          u8 prio, u8 ifidx)
{
        struct brcmf_flowring_ring *ring;
        struct brcmf_flowring_hash *hash;
        u16 hash_idx;
        u32 i;
        bool found;
        u8 fifo;
        bool sta;
        u8 *mac;

        fifo = brcmf_flowring_prio2fifo[prio];
        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
        mac = da;
        if ((!sta) && (is_multicast_ether_addr(da))) {
                mac = (u8 *)ALLFFMAC;
                fifo = 0;
        }
        if ((sta) && (flow->tdls_active) &&
            (brcmf_flowring_is_tdls_mac(flow, da))) {
                sta = false;
        }
        hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
                          BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
        hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        found = false;
        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
                    (is_zero_ether_addr(hash[hash_idx].mac))) {
                        found = true;
                        break;
                }
                hash_idx++;
                hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
        }
        if (found) {
                for (i = 0; i < flow->nrofrings; i++) {
                        if (flow->rings[i] == NULL)
                                break;
                }
                if (i == flow->nrofrings)
                        return -ENOMEM;

                ring = kzalloc(sizeof(*ring), GFP_ATOMIC);
                if (!ring)
                        return -ENOMEM;

                memcpy(hash[hash_idx].mac, mac, ETH_ALEN);
                hash[hash_idx].fifo = fifo;
                hash[hash_idx].ifidx = ifidx;
                hash[hash_idx].flowid = i;

                ring->hash_id = hash_idx;
                ring->status = RING_CLOSED;
                skb_queue_head_init(&ring->skblist);
                flow->rings[i] = ring;

                return i;
        }
        return BRCMF_FLOWRING_INVALID_ID;
}


u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        return flow->hash[ring->hash_id].fifo;
}


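/* Update the blocked state of a single flow ring under block_lock. Net
 * device flow control for the interface (brcmf_txflowblock_if) is only
 * toggled when no other open ring on the same interface is still blocked;
 * otherwise only the per-ring state changes.
 */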
static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
                                 bool blocked)
{
        struct brcmf_flowring_ring *ring;
        struct brcmf_bus *bus_if;
        struct brcmf_pub *drvr;
        struct brcmf_if *ifp;
        bool currently_blocked;
        int i;
        u8 ifidx;
        unsigned long flags;

        spin_lock_irqsave(&flow->block_lock, flags);

        ring = flow->rings[flowid];
        if (ring->blocked == blocked) {
                spin_unlock_irqrestore(&flow->block_lock, flags);
                return;
        }
        ifidx = brcmf_flowring_ifidx_get(flow, flowid);

        currently_blocked = false;
        for (i = 0; i < flow->nrofrings; i++) {
                if ((flow->rings[i]) && (i != flowid)) {
                        ring = flow->rings[i];
                        if ((ring->status == RING_OPEN) &&
                            (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
                                if (ring->blocked) {
                                        currently_blocked = true;
                                        break;
                                }
                        }
                }
        }
        flow->rings[flowid]->blocked = blocked;
        if (currently_blocked) {
                spin_unlock_irqrestore(&flow->block_lock, flags);
                return;
        }

        bus_if = dev_get_drvdata(flow->dev);
        drvr = bus_if->drvr;
        ifp = brcmf_get_ifp(drvr, ifidx);
        brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);

        spin_unlock_irqrestore(&flow->block_lock, flags);
}


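/* Tear down a flow ring: clear any flow-control block, release its hash
 * slot, drop all queued packets and free the ring itself.
 */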
void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;
        u16 hash_idx;
        struct sk_buff *skb;

        ring = flow->rings[flowid];
        if (!ring)
                return;
        brcmf_flowring_block(flow, flowid, false);
        hash_idx = ring->hash_id;
        flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
        eth_zero_addr(flow->hash[hash_idx].mac);
        flow->rings[flowid] = NULL;

        skb = skb_dequeue(&ring->skblist);
        while (skb) {
                brcmu_pkt_buf_free_skb(skb);
                skb = skb_dequeue(&ring->skblist);
        }

        kfree(ring);
}


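/* Queue an skb on a flow ring and apply the high watermark: once the queue
 * grows beyond BRCMF_FLOWRING_HIGH the ring is blocked until it drains
 * below BRCMF_FLOWRING_LOW again. Returns the resulting queue length.
 */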
u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
                           struct sk_buff *skb)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        skb_queue_tail(&ring->skblist, skb);

        if (!ring->blocked &&
            (skb_queue_len(&ring->skblist) > BRCMF_FLOWRING_HIGH)) {
                brcmf_flowring_block(flow, flowid, true);
                brcmf_dbg(MSGBUF, "Flowcontrol: BLOCK for ring %d\n", flowid);
                /* Re-check the queue length to work around a possible race
                 * condition. Locking could also be used, but that is
                 * undesirable on every enqueue and dequeue; this simple
                 * re-check resolves the race if it occurs.
                 */
                if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
                        brcmf_flowring_block(flow, flowid, false);
        }
        return skb_queue_len(&ring->skblist);
}


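/* Dequeue the next skb from an open flow ring, unblocking the ring when
 * the queue has drained below BRCMF_FLOWRING_LOW. Returns NULL if the ring
 * is not open or the queue is empty.
 */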
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;
        struct sk_buff *skb;

        ring = flow->rings[flowid];
        if (ring->status != RING_OPEN)
                return NULL;

        skb = skb_dequeue(&ring->skblist);

        if (ring->blocked &&
            (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)) {
                brcmf_flowring_block(flow, flowid, false);
                brcmf_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid);
        }

        return skb;
}


void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
                             struct sk_buff *skb)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];

        skb_queue_head(&ring->skblist, skb);
}


u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];
        if (!ring)
                return 0;

        if (ring->status != RING_OPEN)
                return 0;

        return skb_queue_len(&ring->skblist);
}


void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;

        ring = flow->rings[flowid];
        if (!ring) {
                brcmf_err("Ring NULL, for flowid %d\n", flowid);
                return;
        }

        ring->status = RING_OPEN;
}


u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
{
        struct brcmf_flowring_ring *ring;
        u16 hash_idx;

        ring = flow->rings[flowid];
        hash_idx = ring->hash_id;

        return flow->hash[hash_idx].ifidx;
}


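/* Allocate and initialise the flow ring state for a bus device: all
 * interfaces start in ADDR_INDIRECT mode and every hash slot is marked
 * free. Returns NULL on allocation failure.
 */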
struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings)
{
        struct brcmf_flowring *flow;
        u32 i;

        flow = kzalloc(sizeof(*flow), GFP_KERNEL);
        if (flow) {
                flow->dev = dev;
                flow->nrofrings = nrofrings;
                spin_lock_init(&flow->block_lock);
                for (i = 0; i < ARRAY_SIZE(flow->addr_mode); i++)
                        flow->addr_mode[i] = ADDR_INDIRECT;
                for (i = 0; i < ARRAY_SIZE(flow->hash); i++)
                        flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
                flow->rings = kcalloc(nrofrings, sizeof(*flow->rings),
                                      GFP_KERNEL);
                if (!flow->rings) {
                        kfree(flow);
                        flow = NULL;
                }
        }

        return flow;
}


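/* Release all flow ring state on teardown: ask msgbuf to delete every ring
 * that is still in use, then free the TDLS peer list and the ring array.
 */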
void brcmf_flowring_detach(struct brcmf_flowring *flow)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_flowring_tdls_entry *search;
        struct brcmf_flowring_tdls_entry *remove;
        u16 flowid;

        for (flowid = 0; flowid < flow->nrofrings; flowid++) {
                if (flow->rings[flowid])
                        brcmf_msgbuf_delete_flowring(drvr, flowid);
        }

        search = flow->tdls_entry;
        while (search) {
                remove = search;
                search = search->next;
                kfree(remove);
        }
        kfree(flow->rings);
        kfree(flow);
}


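/* Switch an interface between direct and indirect addressing. Because the
 * addressing mode is part of the ring lookup key, every open ring on the
 * interface is closed and handed to msgbuf for deletion first.
 */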
void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
                                        enum proto_addr_mode addr_mode)
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        u32 i;
        u16 flowid;

        if (flow->addr_mode[ifidx] != addr_mode) {
                for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
                        if (flow->hash[i].ifidx == ifidx) {
                                flowid = flow->hash[i].flowid;
                                if (flow->rings[flowid]->status != RING_OPEN)
                                        continue;
                                flow->rings[flowid]->status = RING_CLOSING;
                                brcmf_msgbuf_delete_flowring(drvr, flowid);
                        }
                }
                flow->addr_mode[ifidx] = addr_mode;
        }
}


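/* Remove a peer from an interface: close and delete every open flow ring
 * associated with @peer (or, in STA mode, all rings on the interface) and
 * drop the peer from the TDLS list if it is present there. TDLS tracking
 * is switched off once that list becomes empty.
 */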
void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
                                u8 peer[ETH_ALEN])
{
        struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
        struct brcmf_pub *drvr = bus_if->drvr;
        struct brcmf_flowring_hash *hash;
        struct brcmf_flowring_tdls_entry *prev;
        struct brcmf_flowring_tdls_entry *search;
        u32 i;
        u16 flowid;
        bool sta;

        sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);

        search = flow->tdls_entry;
        prev = NULL;
        while (search) {
                if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
                        sta = false;
                        break;
                }
                prev = search;
                search = search->next;
        }

        hash = flow->hash;
        for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
                if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
                    (hash[i].ifidx == ifidx)) {
                        flowid = flow->hash[i].flowid;
                        if (flow->rings[flowid]->status == RING_OPEN) {
                                flow->rings[flowid]->status = RING_CLOSING;
                                brcmf_msgbuf_delete_flowring(drvr, flowid);
                        }
                }
        }

        if (search) {
                if (prev)
                        prev->next = search->next;
                else
                        flow->tdls_entry = search->next;
                kfree(search);
                if (flow->tdls_entry == NULL)
                        flow->tdls_active = false;
        }
}


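/* Register a TDLS peer: append it to the TDLS entry list unless it is
 * already present, and mark TDLS handling active so that ring lookups key
 * on the peer address for this peer.
 */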
void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
                                  u8 peer[ETH_ALEN])
{
        struct brcmf_flowring_tdls_entry *tdls_entry;
        struct brcmf_flowring_tdls_entry *search;

        tdls_entry = kzalloc(sizeof(*tdls_entry), GFP_ATOMIC);
        if (tdls_entry == NULL)
                return;

        memcpy(tdls_entry->mac, peer, ETH_ALEN);
        tdls_entry->next = NULL;
        if (flow->tdls_entry == NULL) {
                flow->tdls_entry = tdls_entry;
        } else {
                search = flow->tdls_entry;
                if (memcmp(search->mac, peer, ETH_ALEN) == 0)
                        goto free_entry;
                while (search->next) {
                        search = search->next;
                        if (memcmp(search->mac, peer, ETH_ALEN) == 0)
                                goto free_entry;
                }
                search->next = tdls_entry;
        }

        flow->tdls_active = true;
        return;

free_entry:
        /* Peer is already on the list; drop the unused entry to avoid
         * leaking it.
         */
        kfree(tdls_entry);
}