2 * Copyright Gavin Shan, IBM Corporation 2016.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/init.h>
13 #include <linux/netdevice.h>
14 #include <linux/skbuff.h>
15 #include <linux/netlink.h>
18 #include <net/net_namespace.h>
/* Global list of all registered NCSI devices; additions/removals are
 * serialized by ncsi_dev_lock (see ncsi_register_dev/ncsi_unregister_dev).
 */
23 LIST_HEAD(ncsi_dev_list);
24 DEFINE_SPINLOCK(ncsi_dev_lock);
/* Return the per-entry data size (in bytes) for the given filter table.
 * sizes[] is indexed by table id and must stay in sync with NCSI_FILTER_MAX
 * (enforced at compile time by the BUILD_BUG_ON below).
 * NOTE(review): only part of the body is visible here; the range check
 * presumably returns an error value before sizes[] is indexed -- confirm.
 */
26 static inline int ncsi_filter_size(int table)
28 int sizes[] = { 2, 6, 6, 6 };
30 BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
/* Reject table ids outside [NCSI_FILTER_BASE, NCSI_FILTER_MAX) */
31 if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
/* Search the channel's filter table @table for an entry whose data matches
 * @data (ncsi_filter_size(table) bytes compared with memcmp).  The scan
 * walks only the bits set in the filter's occupancy bitmap, under nc->lock.
 * NOTE(review): lines are missing from this view; on a match the function
 * presumably returns the entry index, and a negative error when the table
 * or size lookup fails -- confirm against the full source.
 */
37 int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
39 struct ncsi_channel_filter *ncf;
44 ncf = nc->filters[table];
48 size = ncsi_filter_size(table);
/* Hold the channel lock while walking the occupancy bitmap */
52 spin_lock_irqsave(&nc->lock, flags);
53 bitmap = (void *)&ncf->bitmap;
/* Visit each occupied slot; index starts before slot 0 (index + 1) */
55 while ((index = find_next_bit(bitmap, ncf->total, index + 1))
/* Entries are packed back-to-back: slot i lives at data + size * i */
57 if (!memcmp(ncf->data + size * index, data, size)) {
58 spin_unlock_irqrestore(&nc->lock, flags);
62 spin_unlock_irqrestore(&nc->lock, flags);
/* Add @data to the channel's filter table @table.  An existing matching
 * entry is reused (ncsi_find_filter); otherwise a free slot is claimed by
 * scanning the occupancy bitmap with find_next_zero_bit and atomically
 * setting the bit (test_and_set_bit retries the scan on a lost race),
 * then the entry data is copied in.  All under nc->lock.
 * NOTE(review): partially visible body; the early-return paths for a
 * found duplicate and for a full table are presumed -- confirm.
 */
67 int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
69 struct ncsi_channel_filter *ncf;
74 size = ncsi_filter_size(table);
/* Reuse an existing identical entry if one is already present */
78 index = ncsi_find_filter(nc, table, data);
82 ncf = nc->filters[table];
86 spin_lock_irqsave(&nc->lock, flags);
87 bitmap = (void *)&ncf->bitmap;
89 index = find_next_zero_bit(bitmap, ncf->total, 0);
/* No free slot left: the table is full */
90 if (index >= ncf->total) {
91 spin_unlock_irqrestore(&nc->lock, flags);
/* Retry the scan if someone claimed the slot between find and set */
94 } while (test_and_set_bit(index, bitmap));
96 memcpy(ncf->data + size * index, data, size);
97 spin_unlock_irqrestore(&nc->lock, flags);
/* Remove the entry at @index from the channel's filter table @table.
 * The occupancy bit is cleared atomically and, if it was set, the entry's
 * data is zeroed.  Out-of-range indexes and absent tables are rejected.
 * NOTE(review): the return statements fall outside this view; presumably
 * 0 on success and a negative error otherwise -- confirm.
 */
102 int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
104 struct ncsi_channel_filter *ncf;
109 size = ncsi_filter_size(table);
113 ncf = nc->filters[table];
/* Guard against a missing table or an index past the slot count */
114 if (!ncf || index >= ncf->total)
117 spin_lock_irqsave(&nc->lock, flags);
118 bitmap = (void *)&ncf->bitmap;
/* Only wipe the data if the slot was actually occupied */
119 if (test_and_clear_bit(index, bitmap))
120 memset(ncf->data + size * index, 0, size);
121 spin_unlock_irqrestore(&nc->lock, flags);
/* Look up a channel inside package @np.  Iterates the package's channel
 * list; the match condition is outside this view, but per the signature it
 * presumably compares the channel id -- confirm.  Returns the channel or
 * NULL when not found (return paths not visible here).
 */
126 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
129 struct ncsi_channel *nc;
131 NCSI_FOR_EACH_CHANNEL(np, nc) {
/* Allocate and register a new channel @id under package @np.
 * The channel starts INACTIVE with its caps/modes index tables initialized.
 * Under np->lock, an existing channel with the same id is detected first;
 * the duplicate path (between the find and the unlock) is not visible here
 * but presumably frees the new allocation and returns the existing channel
 * -- confirm.  GFP_ATOMIC is used, so this is safe in atomic context.
 */
139 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
141 struct ncsi_channel *nc, *tmp;
145 nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
151 nc->state = NCSI_CHANNEL_INACTIVE;
152 spin_lock_init(&nc->lock);
/* Pre-seed each capability/mode slot with its own index */
153 for (index = 0; index < NCSI_CAP_MAX; index++)
154 nc->caps[index].index = index;
155 for (index = 0; index < NCSI_MODE_MAX; index++)
156 nc->modes[index].index = index;
158 spin_lock_irqsave(&np->lock, flags);
/* Detect a concurrent/earlier registration of the same channel id */
159 tmp = ncsi_find_channel(np, id);
161 spin_unlock_irqrestore(&np->lock, flags);
/* RCU insertion so readers may traverse the list locklessly */
166 list_add_tail_rcu(&nc->node, &np->channels);
168 spin_unlock_irqrestore(&np->lock, flags);
/* Tear down channel @nc: release all of its filter tables, mark it
 * INACTIVE, then unlink it from its parent package's channel list.
 * NOTE(review): the per-filter free and the final kfree of the channel
 * are outside this view; list_del_rcu suggests the free is deferred or
 * preceded by an RCU grace period -- confirm.
 */
173 static void ncsi_remove_channel(struct ncsi_channel *nc)
175 struct ncsi_package *np = nc->package;
176 struct ncsi_channel_filter *ncf;
180 /* Release filters */
181 spin_lock_irqsave(&nc->lock, flags);
182 for (i = 0; i < NCSI_FILTER_MAX; i++) {
183 ncf = nc->filters[i];
/* Clear the slot so no one can reach the filter after release */
187 nc->filters[i] = NULL;
191 nc->state = NCSI_CHANNEL_INACTIVE;
192 spin_unlock_irqrestore(&nc->lock, flags);
194 /* Remove and free channel */
195 spin_lock_irqsave(&np->lock, flags);
196 list_del_rcu(&nc->node);
198 spin_unlock_irqrestore(&np->lock, flags);
/* Look up a package on NCSI device @ndp.  Iterates the device's package
 * list; the match condition and return paths are outside this view, but
 * per the signature it presumably compares the package id and returns the
 * package or NULL -- confirm.
 */
203 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
206 struct ncsi_package *np;
208 NCSI_FOR_EACH_PACKAGE(ndp, np) {
/* Allocate and register a new package under NCSI device @ndp, mirroring
 * ncsi_add_channel(): atomic zeroed allocation, duplicate-id detection
 * under ndp->lock, then RCU insertion into the device's package list.
 * NOTE(review): the duplicate path after the find (presumably free the
 * new allocation and return the existing package) is not visible here
 * -- confirm.
 */
216 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
219 struct ncsi_package *np, *tmp;
222 np = kzalloc(sizeof(*np), GFP_ATOMIC);
228 spin_lock_init(&np->lock);
229 INIT_LIST_HEAD(&np->channels);
231 spin_lock_irqsave(&ndp->lock, flags);
/* Detect a concurrent/earlier registration of the same package id */
232 tmp = ncsi_find_package(ndp, id);
234 spin_unlock_irqrestore(&ndp->lock, flags);
239 list_add_tail_rcu(&np->node, &ndp->packages);
241 spin_unlock_irqrestore(&ndp->lock, flags);
/* Tear down package @np: remove every child channel first, then unlink
 * the package from its device's package list.  The safe list iterator is
 * required because ncsi_remove_channel() deletes entries while walking.
 * NOTE(review): the final kfree of the package is outside this view
 * -- confirm how the RCU grace period is handled before the free.
 */
246 void ncsi_remove_package(struct ncsi_package *np)
248 struct ncsi_dev_priv *ndp = np->ndp;
249 struct ncsi_channel *nc, *tmp;
252 /* Release all child channels */
253 list_for_each_entry_safe(nc, tmp, &np->channels, node)
254 ncsi_remove_channel(nc);
256 /* Remove and free package */
257 spin_lock_irqsave(&ndp->lock, flags);
258 list_del_rcu(&np->node);
260 spin_unlock_irqrestore(&ndp->lock, flags);
/* Decode a combined NCSI id into its package and channel objects.
 * NCSI_PACKAGE_INDEX()/NCSI_CHANNEL_INDEX() extract the two sub-ids; the
 * channel lookup only runs when the package exists.  The assignment of
 * the results through the @np/@nc output pointers happens on lines not
 * visible here -- presumably each is stored only when its pointer is
 * non-NULL; confirm.
 */
265 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
267 struct ncsi_package **np,
268 struct ncsi_channel **nc)
270 struct ncsi_package *p;
271 struct ncsi_channel *c;
273 p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
274 c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
282 /* For two consecutive NCSI commands, the packet IDs shouldn't
283  * be the same; otherwise a stale (bogus) response could be matched
284  * to the wrong command. So the available IDs are allocated in a
 * round-robin fashion.
/* Allocate a free request slot from ndp->requests[] in round-robin order
 * (see the comment above): the first scan starts at the request_id cursor
 * and runs to the end of the array, the second wraps around from slot 0 up
 * to the cursor.  After a successful grab the cursor is advanced past the
 * taken slot (wrapping at the array limit).  Runs under ndp->lock.
 * Returns the claimed request, or NULL when every slot is in use.
 * NOTE(review): how the @driven flag is recorded on the request is on
 * lines not visible here -- confirm.
 */
286 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp, bool driven)
288 struct ncsi_request *nr = NULL;
289 int i, limit = ARRAY_SIZE(ndp->requests);
292 /* Check if there is one available request until the ceiling */
293 spin_lock_irqsave(&ndp->lock, flags);
294 for (i = ndp->request_id; !nr && i < limit; i++) {
/* Skip slots that are already claimed */
295 if (ndp->requests[i].used)
298 nr = &ndp->requests[i];
/* Advance the round-robin cursor, wrapping at the array size */
301 if (++ndp->request_id >= limit)
305 /* Fail back to check from the starting cursor */
306 for (i = 0; !nr && i < ndp->request_id; i++) {
307 if (ndp->requests[i].used)
310 nr = &ndp->requests[i];
313 if (++ndp->request_id >= limit)
316 spin_unlock_irqrestore(&ndp->lock, flags);
/* Return request @nr to the free pool.  The (possibly pending) timeout
 * timer is cancelled synchronously first, then the slot is released under
 * ndp->lock.  The command/response sk_buffs are detached and freed after
 * the lock is dropped.
 * NOTE(review): the lines that snapshot nr->cmd/nr->rsp into cmd/rsp and
 * clear the slot are not visible here -- confirm the exact ordering.
 */
321 void ncsi_free_request(struct ncsi_request *nr)
323 struct ncsi_dev_priv *ndp = nr->ndp;
324 struct sk_buff *cmd, *rsp;
/* Make sure the timeout handler cannot fire after this point */
329 del_timer_sync(&nr->timer);
332 spin_lock_irqsave(&ndp->lock, flags);
338 spin_unlock_irqrestore(&ndp->lock, flags);
340 /* Release command and response */
/* Find the NCSI device bound to network device @dev by scanning the
 * global ncsi_dev_list and comparing the embedded ndev.dev pointer.
 * Return paths are outside this view; presumably the matching ncsi_dev
 * or NULL -- confirm.
 */
345 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
347 struct ncsi_dev_priv *ndp;
349 NCSI_FOR_EACH_DEV(ndp) {
350 if (ndp->ndev.dev == dev)
/* Timer callback fired when request @data (cast from unsigned long, the
 * pre-4.15 timer API style) gets no response in time.  If a response has
 * already arrived (nr->rsp) or no command is outstanding (!nr->cmd), the
 * response handler owns the release and the callback bails out; otherwise
 * the request is freed here.
 */
357 static void ncsi_request_timeout(unsigned long data)
359 struct ncsi_request *nr = (struct ncsi_request *)data;
360 struct ncsi_dev_priv *ndp = nr->ndp;
363 /* If the request already had associated response,
364  * let the response handler to release it.
366 spin_lock_irqsave(&ndp->lock, flags);
368 if (nr->rsp || !nr->cmd) {
369 spin_unlock_irqrestore(&ndp->lock, flags);
372 spin_unlock_irqrestore(&ndp->lock, flags);
374 /* Release the request */
375 ncsi_free_request(nr);
/* Register network device @dev as an NCSI device with state-change
 * callback @handler.  If @dev is already registered the existing ncsi_dev
 * is presumably returned (the early-return path is not visible here --
 * confirm).  Otherwise a new ncsi_dev_priv is allocated (GFP_ATOMIC),
 * its request slots and their timeout timers are initialized, and the
 * device is appended to the global ncsi_dev_list under ncsi_dev_lock.
 * Returns the public ncsi_dev embedded in the private structure.
 */
378 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
379 void (*handler)(struct ncsi_dev *ndev))
381 struct ncsi_dev_priv *ndp;
386 /* Check if the device has been registered or not */
387 nd = ncsi_find_dev(dev);
391 /* Create NCSI device */
392 ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
397 nd->state = ncsi_dev_state_registered;
399 nd->handler = handler;
401 /* Initialize private NCSI device */
402 spin_lock_init(&ndp->lock);
403 INIT_LIST_HEAD(&ndp->packages);
/* Each request slot knows its own index, owner, and timeout timer */
405 for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
406 ndp->requests[i].id = i;
407 ndp->requests[i].ndp = ndp;
408 setup_timer(&ndp->requests[i].timer,
409 ncsi_request_timeout,
410 (unsigned long)&ndp->requests[i]);
413 spin_lock_irqsave(&ncsi_dev_lock, flags);
414 list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
415 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
419 EXPORT_SYMBOL_GPL(ncsi_register_dev);
/* Unregister NCSI device @nd: tear down every package (and, through
 * ncsi_remove_package, every channel), then unlink the device from the
 * global ncsi_dev_list under ncsi_dev_lock.  The safe list iterator is
 * needed because packages are deleted during the walk.
 * NOTE(review): the final kfree of ndp is outside this view -- confirm
 * how it is ordered against RCU readers of ncsi_dev_list.
 */
421 void ncsi_unregister_dev(struct ncsi_dev *nd)
423 struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
424 struct ncsi_package *np, *tmp;
427 list_for_each_entry_safe(np, tmp, &ndp->packages, node)
428 ncsi_remove_package(np);
430 spin_lock_irqsave(&ncsi_dev_lock, flags);
431 list_del_rcu(&ndp->node);
432 spin_unlock_irqrestore(&ncsi_dev_lock, flags);
436 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);