/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

static void i40e_resume_aq(struct i40e_hw *hw);

/**
 * i40e_is_nvm_update_op - return true if this is an NVM update operation
 * @desc: API request descriptor
 **/
static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
        /* desc->opcode is little-endian on the wire, so convert the
         * opcode constants before comparing
         */
        return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
               (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
}

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Records the head, tail and length register offsets for both queues in
 *  the local struct; this only stores offsets, so it is called before the
 *  queue memory is allocated.
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (hw->mac.type == I40E_MAC_VF) {
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
        }
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
        i40e_status ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         i40e_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct i40e_asq_cmd_details)));
        if (ret_code) {
                i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
        i40e_status ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         i40e_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
        i40e_status ret_code;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = cpu_to_le16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        cpu_to_le32(upper_32_bits(bi->pa));
                desc->params.external.addr_low =
                        cpu_to_le32(lower_32_bits(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
        i40e_status ret_code;
        struct i40e_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static void i40e_config_asq_regs(struct i40e_hw *hw)
{
        if (hw->mac.type == I40E_MAC_VF) {
                /* configure the transmit queue */
                wr32(hw, I40E_VF_ATQBAH1,
                     upper_32_bits(hw->aq.asq.desc_buf.pa));
                wr32(hw, I40E_VF_ATQBAL1,
                     lower_32_bits(hw->aq.asq.desc_buf.pa));
                wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
                                          I40E_VF_ATQLEN1_ATQENABLE_MASK));
        } else {
                /* configure the transmit queue */
                wr32(hw, I40E_PF_ATQBAH,
                     upper_32_bits(hw->aq.asq.desc_buf.pa));
                wr32(hw, I40E_PF_ATQBAL,
                     lower_32_bits(hw->aq.asq.desc_buf.pa));
                wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
                                          I40E_PF_ATQLEN_ATQENABLE_MASK));
        }
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static void i40e_config_arq_regs(struct i40e_hw *hw)
{
        if (hw->mac.type == I40E_MAC_VF) {
                /* configure the receive queue */
                wr32(hw, I40E_VF_ARQBAH1,
                     upper_32_bits(hw->aq.arq.desc_buf.pa));
                wr32(hw, I40E_VF_ARQBAL1,
                     lower_32_bits(hw->aq.arq.desc_buf.pa));
                wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
                                          I40E_VF_ARQLEN1_ARQENABLE_MASK));
        } else {
                /* configure the receive queue */
                wr32(hw, I40E_PF_ARQBAH,
                     upper_32_bits(hw->aq.arq.desc_buf.pa));
                wr32(hw, I40E_PF_ARQBAL,
                     lower_32_bits(hw->aq.arq.desc_buf.pa));
                wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
                                          I40E_PF_ARQLEN_ARQENABLE_MASK));
        }

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;
        hw->aq.asq.count = hw->aq.num_asq_entries;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
        if (ret_code)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_asq_bufs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* initialize base registers */
        i40e_config_asq_regs(hw);

        /* success! */
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;
        hw->aq.arq.count = hw->aq.num_arq_entries;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
        if (ret_code)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_arq_bufs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* initialize base registers */
        i40e_config_arq_regs(hw);

        /* success! */
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;

        if (hw->aq.asq.count == 0)
                return I40E_ERR_NOT_READY;

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);

        /* make sure lock is available */
        mutex_lock(&hw->aq.asq_mutex);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_asq_bufs(hw);

        mutex_unlock(&hw->aq.asq_mutex);

        return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;

        if (hw->aq.arq.count == 0)
                return I40E_ERR_NOT_READY;

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);

        /* make sure lock is available */
        mutex_lock(&hw->aq.arq_mutex);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_arq_bufs(hw);

        mutex_unlock(&hw->aq.arq_mutex);

        return ret_code;
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
        i40e_status ret_code;
        u16 eetrack_lo, eetrack_hi;
        int retry = 0;

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        /* initialize locks */
        mutex_init(&hw->aq.asq_mutex);
        mutex_init(&hw->aq.arq_mutex);

        /* Set up register offsets */
        i40e_adminq_init_regs(hw);

        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code)
                goto init_adminq_destroy_locks;

        /* allocate the ARQ */
        ret_code = i40e_init_arq(hw);
        if (ret_code)
                goto init_adminq_free_asq;

        /* There are some cases where the firmware may not be quite ready
         * for AdminQ operations, so we retry the AdminQ setup a few times
         * if we see timeouts in this first AQ call.
         */
        do {
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &hw->aq.fw_maj_ver,
                                                        &hw->aq.fw_min_ver,
                                                        &hw->aq.api_maj_ver,
                                                        &hw->aq.api_min_ver,
                                                        NULL);
                if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
                        break;
                retry++;
                msleep(100);
                i40e_resume_aq(hw);
        } while (retry < 10);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_arq;

        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_IMAGE_VERSION, &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;

        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
        }

        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        hw->aq.nvm_busy = false;

        /* setting the HMC resource profile is best-effort,
         * so the return code is deliberately discarded
         */
        ret_code = i40e_aq_set_hmc_resource_profile(hw,
                                                    I40E_HMC_PROFILE_DEFAULT,
                                                    0,
                                                    NULL);
        ret_code = 0;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_arq:
        i40e_shutdown_arq(hw);
init_adminq_free_asq:
        i40e_shutdown_asq(hw);
init_adminq_destroy_locks:
        mutex_destroy(&hw->aq.asq_mutex);
        mutex_destroy(&hw->aq.arq_mutex);

init_adminq_exit:
        return ret_code;
}
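
/* Illustrative usage (a sketch, not part of this file): a PF driver's
 * probe path is expected to fill the sizing fields below before calling
 * i40e_init_adminq(), and to pair it with i40e_shutdown_adminq() on
 * teardown.  The entry counts and buffer sizes shown are hypothetical
 * example values, not requirements.
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = 1024;
 *	hw->aq.arq_buf_size = 1024;
 *
 *	err = i40e_init_adminq(hw);
 *	if (err)
 *		return err;
 *	...
 *	i40e_shutdown_adminq(hw);
 */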

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;

        if (i40e_check_asq_alive(hw))
                i40e_aq_queue_shutdown(hw, true);

        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);

        /* destroy the locks */
        mutex_destroy(&hw->aq.asq_mutex);
        mutex_destroy(&hw->aq.arq_mutex);

        return ret_code;
}

/**
 *  i40e_clean_asq - clean the Admin Send Queue
 *  @hw: pointer to the hardware structure
 *
 *  Returns the number of free descriptors in the send queue
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
        struct i40e_adminq_ring *asq = &(hw->aq.asq);
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
                        desc_cb = *desc;
                        cb_func(hw, &desc_cb);
                }
                memset(desc, 0, sizeof(*desc));
                memset(details, 0, sizeof(*details));
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = I40E_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command routine for the Admin Send Queue: it
 *  posts the descriptor and any indirect buffer, reclaims completed
 *  descriptors, and, unless asynchronous completion was requested,
 *  waits for the firmware write-back.
 **/
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                                struct i40e_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct i40e_asq_cmd_details *cmd_details)
{
        i40e_status status = 0;
        struct i40e_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;

        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_exit;
        }

        if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
                status = I40E_ERR_NVM;
                goto asq_send_command_exit;
        }

        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                *details = *cmd_details;

                /* If the cmd_details are defined copy the cookie.  The
                 * cpu_to_le32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                cpu_to_le32(upper_32_bits(details->cookie));
                        desc->cookie_low =
                                cpu_to_le32(lower_32_bits(details->cookie));
                }
        } else {
                memset(details, 0, sizeof(struct i40e_asq_cmd_details));
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~cpu_to_le16(details->flags_dis);
        desc->flags |= cpu_to_le16(details->flags_ena);

        mutex_lock(&hw->aq.asq_mutex);

        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = I40E_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag.\n");
                status = I40E_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available.  The clean function could be called in
         * a separate thread in case of asynchronous completions.
         */
        if (i40e_clean_asq(hw) == 0) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        *desc_on_ring = *desc;

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                memcpy(dma_buff->va, buff, buff_size);
                desc_on_ring->datalen = cpu_to_le16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                cpu_to_le32(upper_32_bits(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                cpu_to_le32(lower_32_bits(dma_buff->pa));
        }

        /* bump the tail */
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;
                u32 delay_len = 10;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (i40e_asq_done(hw))
                                break;
                        /* ugh! delay while spin_lock */
                        udelay(delay_len);
                        total_delay += delay_len;
                } while (total_delay < I40E_ASQ_CMD_TIMEOUT);
        }

        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
                *desc = *desc_on_ring;
                if (buff != NULL)
                        memcpy(buff, dma_buff->va, buff_size);
                retval = le16_to_cpu(desc->retval);
                if (retval != 0) {
                        i40e_debug(hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);
                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = 0;
                else
                        status = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        if (i40e_is_nvm_update_op(desc))
                hw->aq.nvm_busy = true;

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Writeback timeout.\n");
                status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
        }

asq_send_command_error:
        mutex_unlock(&hw->aq.asq_mutex);
asq_send_command_exit:
        return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
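
/* Illustrative sketch (an assumption, not code from this file): a direct
 * (buffer-less) command is built with i40e_fill_default_direct_cmd_desc()
 * and handed to i40e_asq_send_command() with a NULL buffer.  The opcode
 * shown is the queue-shutdown opcode used by i40e_aq_queue_shutdown();
 * any opcode from i40e_adminq_cmd.h follows the same pattern.
 *
 *	struct i40e_aq_desc desc;
 *	i40e_status status;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *	if (status)
 *		return status;  (FW reply code is in hw->aq.asq_last_status)
 */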

/**
 *  i40e_clean_arq_element - clean one element from the receive queue
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
                                             struct i40e_arq_event_info *e,
                                             u16 *pending)
{
        i40e_status ret_code = 0;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* take the lock before we start messing with the ring */
        mutex_lock(&hw->aq.arq_mutex);

        /* set next_to_use to head */
        ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Queue is empty.\n");
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;
        i40e_debug_aq(hw,
                      I40E_DEBUG_AQ_COMMAND,
                      (void *)desc,
                      hw->aq.arq.r.arq_bi[desc_idx].va);

        flags = le16_to_cpu(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.arq_last_status =
                        (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        } else {
                e->desc = *desc;
                datalen = le16_to_cpu(desc->datalen);
                e->msg_size = min(datalen, e->msg_size);
                if (e->msg_buf != NULL && (e->msg_size != 0))
                        memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
                               e->msg_size);
        }

        if (i40e_is_nvm_update_op(&e->desc))
                hw->aq.nvm_busy = false;

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

        desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
        desc->datalen = cpu_to_le16((u16)bi->size);
        desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
        desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
        mutex_unlock(&hw->aq.arq_mutex);

        return ret_code;
}
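
/* Illustrative sketch (an assumption): a typical AdminQ service loop
 * drains the receive queue by calling i40e_clean_arq_element() until no
 * events remain.  Note that msg_size is both an input (buffer capacity)
 * and an output (event size), so it is reset on every iteration.
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending;
 *
 *	event.msg_buf = kzalloc(I40E_MAX_AQ_BUF_SIZE, GFP_KERNEL);
 *	do {
 *		event.msg_size = I40E_MAX_AQ_BUF_SIZE;
 *		if (i40e_clean_arq_element(hw, &event, &pending))
 *			break;
 *		... dispatch on le16_to_cpu(event.desc.opcode) ...
 *	} while (pending);
 *	kfree(event.msg_buf);
 */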

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
        /* Registers are reset after PF reset */
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        i40e_config_asq_regs(hw);

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        i40e_config_arq_regs(hw);
}