Add support for Cavium LiquidIO ethernet adapters
[cascardo/linux.git] / drivers/net/ethernet/cavium/liquidio/octeon_device.c
1 /**********************************************************************
2 * Author: Cavium, Inc.
3 *
4 * Contact: support@cavium.com
5 *          Please include "LiquidIO" in the subject.
6 *
7 * Copyright (c) 2003-2015 Cavium, Inc.
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT.  See the GNU General Public License for more
17 * details.
18 *
19 * This file may also be available under a different license from Cavium.
20 * Contact Cavium, Inc. for more information
21 **********************************************************************/
22 #include <linux/version.h>
23 #include <linux/types.h>
24 #include <linux/list.h>
25 #include <linux/interrupt.h>
26 #include <linux/pci.h>
27 #include <linux/crc32.h>
28 #include <linux/kthread.h>
29 #include <linux/netdevice.h>
30 #include "octeon_config.h"
31 #include "liquidio_common.h"
32 #include "octeon_droq.h"
33 #include "octeon_iq.h"
34 #include "response_manager.h"
35 #include "octeon_device.h"
36 #include "octeon_nic.h"
37 #include "octeon_main.h"
38 #include "octeon_network.h"
39 #include "cn66xx_regs.h"
40 #include "cn66xx_device.h"
41 #include "cn68xx_regs.h"
42 #include "cn68xx_device.h"
43 #include "liquidio_image.h"
44 #include "octeon_mem_ops.h"
45
46 /** Default configuration
47  *  for CN66XX OCTEON Models.
48  */
49 static struct octeon_config default_cn66xx_conf = {
50         .card_type                              = LIO_210SV,
51         .card_name                              = LIO_210SV_NAME,
52
53         /** IQ attributes */
54         .iq                                     = {
55                 .max_iqs                        = CN6XXX_CFG_IO_QUEUES,
56                 .pending_list_size              =
57                         (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
58                 .instr_type                     = OCTEON_64BYTE_INSTR,
59                 .db_min                         = CN6XXX_DB_MIN,
60                 .db_timeout                     = CN6XXX_DB_TIMEOUT,
61         }
62         ,
63
64         /** OQ attributes */
65         .oq                                     = {
66                 .max_oqs                        = CN6XXX_CFG_IO_QUEUES,
67                 .info_ptr                       = OCTEON_OQ_INFOPTR_MODE,
68                 .refill_threshold               = CN6XXX_OQ_REFIL_THRESHOLD,
69                 .oq_intr_pkt                    = CN6XXX_OQ_INTR_PKT,
70                 .oq_intr_time                   = CN6XXX_OQ_INTR_TIME,
71                 .pkts_per_intr                  = CN6XXX_OQ_PKTSPER_INTR,
72         }
73         ,
74
75         .num_nic_ports                          = DEFAULT_NUM_NIC_PORTS_66XX,
76         .num_def_rx_descs                       = CN6XXX_MAX_OQ_DESCRIPTORS,
77         .num_def_tx_descs                       = CN6XXX_MAX_IQ_DESCRIPTORS,
78         .def_rx_buf_size                        = CN6XXX_OQ_BUF_SIZE,
79
80         /* For ethernet interface 0:  Port cfg Attributes */
81         .nic_if_cfg[0] = {
82                 /* Max Txqs: Half for each of the two ports :max_iq/2 */
83                 .max_txqs                       = MAX_TXQS_PER_INTF,
84
85                 /* Actual configured value. Range could be: 1...max_txqs */
86                 .num_txqs                       = DEF_TXQS_PER_INTF,
87
88                 /* Max Rxqs: Half for each of the two ports :max_oq/2  */
89                 .max_rxqs                       = MAX_RXQS_PER_INTF,
90
91                 /* Actual configured value. Range could be: 1...max_rxqs */
92                 .num_rxqs                       = DEF_RXQS_PER_INTF,
93
94                 /* Num of desc for rx rings */
95                 .num_rx_descs                   = CN6XXX_MAX_OQ_DESCRIPTORS,
96
97                 /* Num of desc for tx rings */
98                 .num_tx_descs                   = CN6XXX_MAX_IQ_DESCRIPTORS,
99
100                 /* SKB size: we need not change buf size even for jumbo frames.
101                  * Octeon can send jumbo frames in 4 consecutive descriptors.
102                  */
103                 .rx_buf_size                    = CN6XXX_OQ_BUF_SIZE,
104
105                 .base_queue                     = BASE_QUEUE_NOT_REQUESTED,
106
107                 .gmx_port_id                    = 0,
108         },
109
110         .nic_if_cfg[1] = {
111                 /* Max Txqs: Half for each of the two ports :max_iq/2 */
112                 .max_txqs                       = MAX_TXQS_PER_INTF,
113
114                 /* Actual configured value. Range could be: 1...max_txqs */
115                 .num_txqs                       = DEF_TXQS_PER_INTF,
116
117                 /* Max Rxqs: Half for each of the two ports :max_oq/2  */
118                 .max_rxqs                       = MAX_RXQS_PER_INTF,
119
120                 /* Actual configured value. Range could be: 1...max_rxqs */
121                 .num_rxqs                       = DEF_RXQS_PER_INTF,
122
123                 /* Num of desc for rx rings */
124                 .num_rx_descs                   = CN6XXX_MAX_OQ_DESCRIPTORS,
125
126                 /* Num of desc for tx rings */
127                 .num_tx_descs                   = CN6XXX_MAX_IQ_DESCRIPTORS,
128
129                 /* SKB size: we need not change buf size even for jumbo frames.
130                  * Octeon can send jumbo frames in 4 consecutive descriptors.
131                  */
132                 .rx_buf_size                    = CN6XXX_OQ_BUF_SIZE,
133
134                 .base_queue                     = BASE_QUEUE_NOT_REQUESTED,
135
136                 .gmx_port_id                    = 1,
137         },
138
139         /** Miscellaneous attributes */
140         .misc                                   = {
141                 /* Host driver link query interval */
142                 .oct_link_query_interval        = 100,
143
144                 /* Octeon link query interval */
145                 .host_link_query_interval       = 500,
146
147                 .enable_sli_oq_bp               = 0,
148
149                 /* Control queue group */
150                 .ctrlq_grp                      = 1,
151         }
152         ,
153 };
154
155 /** Default configuration
156  *  for CN68XX OCTEON Model (LIO_410NV card).
157  */
158
159 static struct octeon_config default_cn68xx_conf = {
160         .card_type                              = LIO_410NV,
161         .card_name                              = LIO_410NV_NAME,
162
163         /** IQ attributes */
164         .iq                                     = {
165                 .max_iqs                        = CN6XXX_CFG_IO_QUEUES,
166                 .pending_list_size              =
167                         (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
168                 .instr_type                     = OCTEON_64BYTE_INSTR,
169                 .db_min                         = CN6XXX_DB_MIN,
170                 .db_timeout                     = CN6XXX_DB_TIMEOUT,
171         }
172         ,
173
174         /** OQ attributes */
175         .oq                                     = {
176                 .max_oqs                        = CN6XXX_CFG_IO_QUEUES,
177                 .info_ptr                       = OCTEON_OQ_INFOPTR_MODE,
178                 .refill_threshold               = CN6XXX_OQ_REFIL_THRESHOLD,
179                 .oq_intr_pkt                    = CN6XXX_OQ_INTR_PKT,
180                 .oq_intr_time                   = CN6XXX_OQ_INTR_TIME,
181                 .pkts_per_intr                  = CN6XXX_OQ_PKTSPER_INTR,
182         }
183         ,
184
185         .num_nic_ports                          = DEFAULT_NUM_NIC_PORTS_68XX,
186         .num_def_rx_descs                       = CN6XXX_MAX_OQ_DESCRIPTORS,
187         .num_def_tx_descs                       = CN6XXX_MAX_IQ_DESCRIPTORS,
188         .def_rx_buf_size                        = CN6XXX_OQ_BUF_SIZE,
189
190         .nic_if_cfg[0] = {
191                 /* Max Txqs: Half for each of the two ports :max_iq/2 */
192                 .max_txqs                       = MAX_TXQS_PER_INTF,
193
194                 /* Actual configured value. Range could be: 1...max_txqs */
195                 .num_txqs                       = DEF_TXQS_PER_INTF,
196
197                 /* Max Rxqs: Half for each of the two ports :max_oq/2  */
198                 .max_rxqs                       = MAX_RXQS_PER_INTF,
199
200                 /* Actual configured value. Range could be: 1...max_rxqs */
201                 .num_rxqs                       = DEF_RXQS_PER_INTF,
202
203                 /* Num of desc for rx rings */
204                 .num_rx_descs                   = CN6XXX_MAX_OQ_DESCRIPTORS,
205
206                 /* Num of desc for tx rings */
207                 .num_tx_descs                   = CN6XXX_MAX_IQ_DESCRIPTORS,
208
209                 /* SKB size: we need not change buf size even for jumbo frames.
210                  * Octeon can send jumbo frames in 4 consecutive descriptors.
211                  */
212                 .rx_buf_size                    = CN6XXX_OQ_BUF_SIZE,
213
214                 .base_queue                     = BASE_QUEUE_NOT_REQUESTED,
215
216                 .gmx_port_id                    = 0,
217         },
218
219         .nic_if_cfg[1] = {
220                 /* Max Txqs: Half for each of the two ports :max_iq/2 */
221                 .max_txqs                       = MAX_TXQS_PER_INTF,
222
223                 /* Actual configured value. Range could be: 1...max_txqs */
224                 .num_txqs                       = DEF_TXQS_PER_INTF,
225
226                 /* Max Rxqs: Half for each of the two ports :max_oq/2  */
227                 .max_rxqs                       = MAX_RXQS_PER_INTF,
228
229                 /* Actual configured value. Range could be: 1...max_rxqs */
230                 .num_rxqs                       = DEF_RXQS_PER_INTF,
231
232                 /* Num of desc for rx rings */
233                 .num_rx_descs                   = CN6XXX_MAX_OQ_DESCRIPTORS,
234
235                 /* Num of desc for tx rings */
236                 .num_tx_descs                   = CN6XXX_MAX_IQ_DESCRIPTORS,
237
238                 /* SKB size: we need not change buf size even for jumbo frames.
239                  * Octeon can send jumbo frames in 4 consecutive descriptors.
240                  */
241                 .rx_buf_size                    = CN6XXX_OQ_BUF_SIZE,
242
243                 .base_queue                     = BASE_QUEUE_NOT_REQUESTED,
244
245                 .gmx_port_id                    = 1,
246         },
247
248         .nic_if_cfg[2] = {
249                 /* Max Txqs: Half for each of the two ports :max_iq/2 */
250                 .max_txqs                       = MAX_TXQS_PER_INTF,
251
252                 /* Actual configured value. Range could be: 1...max_txqs */
253                 .num_txqs                       = DEF_TXQS_PER_INTF,
254
255                 /* Max Rxqs: Half for each of the two ports :max_oq/2  */
256                 .max_rxqs                       = MAX_RXQS_PER_INTF,
257
258                 /* Actual configured value. Range could be: 1...max_rxqs */
259                 .num_rxqs                       = DEF_RXQS_PER_INTF,
260
261                 /* Num of desc for rx rings */
262                 .num_rx_descs                   = CN6XXX_MAX_OQ_DESCRIPTORS,
263
264                 /* Num of desc for tx rings */
265                 .num_tx_descs                   = CN6XXX_MAX_IQ_DESCRIPTORS,
266
267                 /* SKB size: we need not change buf size even for jumbo frames.
268                  * Octeon can send jumbo frames in 4 consecutive descriptors.
269                  */
270                 .rx_buf_size                    = CN6XXX_OQ_BUF_SIZE,
271
272                 .base_queue                     = BASE_QUEUE_NOT_REQUESTED,
273
274                 .gmx_port_id                    = 2,
275         },
276
277         .nic_if_cfg[3] = {
278                 /* Max Txqs: Half for each of the two ports :max_iq/2 */
279                 .max_txqs                       = MAX_TXQS_PER_INTF,
280
281                 /* Actual configured value. Range could be: 1...max_txqs */
282                 .num_txqs                       = DEF_TXQS_PER_INTF,
283
284                 /* Max Rxqs: Half for each of the two ports :max_oq/2  */
285                 .max_rxqs                       = MAX_RXQS_PER_INTF,
286
287                 /* Actual configured value. Range could be: 1...max_rxqs */
288                 .num_rxqs                       = DEF_RXQS_PER_INTF,
289
290                 /* Num of desc for rx rings */
291                 .num_rx_descs                   = CN6XXX_MAX_OQ_DESCRIPTORS,
292
293                 /* Num of desc for tx rings */
294                 .num_tx_descs                   = CN6XXX_MAX_IQ_DESCRIPTORS,
295
296                 /* SKB size: we need not change buf size even for jumbo frames.
297                  * Octeon can send jumbo frames in 4 consecutive descriptors.
298                  */
299                 .rx_buf_size                    = CN6XXX_OQ_BUF_SIZE,
300
301                 .base_queue                     = BASE_QUEUE_NOT_REQUESTED,
302
303                 .gmx_port_id                    = 3,
304         },
305
306         /** Miscellaneous attributes */
307         .misc                                   = {
308                 /* Host driver link query interval */
309                 .oct_link_query_interval        = 100,
310
311                 /* Octeon link query interval */
312                 .host_link_query_interval       = 500,
313
314                 .enable_sli_oq_bp               = 0,
315
316                 /* Control queue group */
317                 .ctrlq_grp                      = 1,
318         }
319         ,
320 };
321
322 /** Default configuration
323  *  for CN68XX OCTEON Model (LIO_210NV card).
324  */
325 static struct octeon_config default_cn68xx_210nv_conf = {
326         .card_type                              = LIO_210NV,
327         .card_name                              = LIO_210NV_NAME,
328
329         /** IQ attributes */
330
331         .iq                                     = {
332                 .max_iqs                        = CN6XXX_CFG_IO_QUEUES,
333                 .pending_list_size              =
334                         (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
335                 .instr_type                     = OCTEON_64BYTE_INSTR,
336                 .db_min                         = CN6XXX_DB_MIN,
337                 .db_timeout                     = CN6XXX_DB_TIMEOUT,
338         }
339         ,
340
341         /** OQ attributes */
342         .oq                                     = {
343                 .max_oqs                        = CN6XXX_CFG_IO_QUEUES,
344                 .info_ptr                       = OCTEON_OQ_INFOPTR_MODE,
345                 .refill_threshold               = CN6XXX_OQ_REFIL_THRESHOLD,
346                 .oq_intr_pkt                    = CN6XXX_OQ_INTR_PKT,
347                 .oq_intr_time                   = CN6XXX_OQ_INTR_TIME,
348                 .pkts_per_intr                  = CN6XXX_OQ_PKTSPER_INTR,
349         }
350         ,
351
352         .num_nic_ports                  = DEFAULT_NUM_NIC_PORTS_68XX_210NV,
353         .num_def_rx_descs               = CN6XXX_MAX_OQ_DESCRIPTORS,
354         .num_def_tx_descs               = CN6XXX_MAX_IQ_DESCRIPTORS,
355         .def_rx_buf_size                = CN6XXX_OQ_BUF_SIZE,
356
357         .nic_if_cfg[0] = {
358                 /* Max Txqs: Half for each of the two ports :max_iq/2 */
359                 .max_txqs                       = MAX_TXQS_PER_INTF,
360
361                 /* Actual configured value. Range could be: 1...max_txqs */
362                 .num_txqs                       = DEF_TXQS_PER_INTF,
363
364                 /* Max Rxqs: Half for each of the two ports :max_oq/2  */
365                 .max_rxqs                       = MAX_RXQS_PER_INTF,
366
367                 /* Actual configured value. Range could be: 1...max_rxqs */
368                 .num_rxqs                       = DEF_RXQS_PER_INTF,
369
370                 /* Num of desc for rx rings */
371                 .num_rx_descs                   = CN6XXX_MAX_OQ_DESCRIPTORS,
372
373                 /* Num of desc for tx rings */
374                 .num_tx_descs                   = CN6XXX_MAX_IQ_DESCRIPTORS,
375
376                 /* SKB size: we need not change buf size even for jumbo frames.
377                  * Octeon can send jumbo frames in 4 consecutive descriptors.
378                  */
379                 .rx_buf_size                    = CN6XXX_OQ_BUF_SIZE,
380
381                 .base_queue                     = BASE_QUEUE_NOT_REQUESTED,
382
383                 .gmx_port_id                    = 0,
384         },
385
386         .nic_if_cfg[1] = {
387                 /* Max Txqs: Half for each of the two ports :max_iq/2 */
388                 .max_txqs                       = MAX_TXQS_PER_INTF,
389
390                 /* Actual configured value. Range could be: 1...max_txqs */
391                 .num_txqs                       = DEF_TXQS_PER_INTF,
392
393                 /* Max Rxqs: Half for each of the two ports :max_oq/2  */
394                 .max_rxqs                       = MAX_RXQS_PER_INTF,
395
396                 /* Actual configured value. Range could be: 1...max_rxqs */
397                 .num_rxqs                       = DEF_RXQS_PER_INTF,
398
399                 /* Num of desc for rx rings */
400                 .num_rx_descs                   = CN6XXX_MAX_OQ_DESCRIPTORS,
401
402                 /* Num of desc for tx rings */
403                 .num_tx_descs                   = CN6XXX_MAX_IQ_DESCRIPTORS,
404
405                 /* SKB size: we need not change buf size even for jumbo frames.
406                  * Octeon can send jumbo frames in 4 consecutive descriptors.
407                  */
408                 .rx_buf_size                    = CN6XXX_OQ_BUF_SIZE,
409
410                 .base_queue                     = BASE_QUEUE_NOT_REQUESTED,
411
412                 .gmx_port_id                    = 1,
413         },
414
415         /** Miscellaneous attributes */
416         .misc                                   = {
417                 /* Host driver link query interval */
418                 .oct_link_query_interval        = 100,
419
420                 /* Octeon link query interval */
421                 .host_link_query_interval       = 500,
422
423                 .enable_sli_oq_bp               = 0,
424
425                 /* Control queue group */
426                 .ctrlq_grp                      = 1,
427         }
428         ,
429 };
430
431 enum {
432         OCTEON_CONFIG_TYPE_DEFAULT = 0,
433         NUM_OCTEON_CONFS,
434 };
435
436 static struct octeon_config_ptr {
437         u32 conf_type;
438 } oct_conf_info[MAX_OCTEON_DEVICES] = {
439         {
440                 OCTEON_CONFIG_TYPE_DEFAULT,
441         }, {
442                 OCTEON_CONFIG_TYPE_DEFAULT,
443         }, {
444                 OCTEON_CONFIG_TYPE_DEFAULT,
445         }, {
446                 OCTEON_CONFIG_TYPE_DEFAULT,
447         },
448 };
449
450 static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
451         "BEGIN",        "PCI-MAP-DONE",       "DISPATCH-INIT-DONE",
452         "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
453         "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
454         "HOST-READY",   "CORE-READY",         "RUNNING",           "IN-RESET",
455         "INVALID"
456 };
457
458 static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
459         "BASE", "NIC", "UNKNOWN"};
460
461 static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
462 static u32 octeon_device_count;
463
464 static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
465
466 void oct_set_config_info(int oct_id, int conf_type)
467 {
468         if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
469                 conf_type = OCTEON_CONFIG_TYPE_DEFAULT;
470         oct_conf_info[oct_id].conf_type = conf_type;
471 }
472
473 void octeon_init_device_list(int conf_type)
474 {
475         int i;
476
477         memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
478         for (i = 0; i <  MAX_OCTEON_DEVICES; i++)
479                 oct_set_config_info(i, conf_type);
480 }
481
482 static void *__retrieve_octeon_config_info(struct octeon_device *oct,
483                                            u16 card_type)
484 {
485         u32 oct_id = oct->octeon_id;
486         void *ret = NULL;
487
488         switch (oct_conf_info[oct_id].conf_type) {
489         case OCTEON_CONFIG_TYPE_DEFAULT:
490                 if (oct->chip_id == OCTEON_CN66XX) {
491                         ret = (void *)&default_cn66xx_conf;
492                 } else if ((oct->chip_id == OCTEON_CN68XX) &&
493                            (card_type == LIO_210NV)) {
494                         ret =  (void *)&default_cn68xx_210nv_conf;
495                 } else if ((oct->chip_id == OCTEON_CN68XX) &&
496                            (card_type == LIO_410NV)) {
497                         ret =  (void *)&default_cn68xx_conf;
498                 }
499                 break;
500         default:
501                 break;
502         }
503         return ret;
504 }
505
506 static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
507 {
508         switch (oct->chip_id) {
509         case OCTEON_CN66XX:
510         case OCTEON_CN68XX:
511                 return lio_validate_cn6xxx_config_info(oct, conf);
512
513         default:
514                 break;
515         }
516
517         return 1;
518 }
519
520 void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
521 {
522         void *conf = NULL;
523
524         conf = __retrieve_octeon_config_info(oct, card_type);
525         if (!conf)
526                 return NULL;
527
528         if (__verify_octeon_config_info(oct, conf)) {
529                 dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
530                 return NULL;
531         }
532
533         return conf;
534 }
535
536 char *lio_get_state_string(atomic_t *state_ptr)
537 {
538         s32 istate = (s32)atomic_read(state_ptr);
539
540         if (istate > OCT_DEV_STATES || istate < 0)
541                 return oct_dev_state_str[OCT_DEV_STATE_INVALID];
542         return oct_dev_state_str[istate];
543 }
544
545 static char *get_oct_app_string(u32 app_mode)
546 {
547         if (app_mode <= CVM_DRV_APP_END)
548                 return oct_dev_app_str[app_mode - CVM_DRV_APP_START];
549         return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
550 }
551
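/* Validate a firmware image that has already been read into host memory and
 * load it onto the Octeon: check the file header magic, CRC and version
 * string, CRC-check each image described in the header, write each image to
 * its load address in Octeon core memory, and finally invoke the boot
 * command from the header via the console.
 */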
552 int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
553                              size_t size)
554 {
555         int ret = 0;
556         u8 *p;
557         u8 *buffer;
558         u32 crc32_result;
559         u64 load_addr;
560         u32 image_len;
561         struct octeon_firmware_file_header *h;
562         u32 i;
563
564         if (size < sizeof(struct octeon_firmware_file_header)) {
565                 dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
566                         (u32)size,
567                         (u32)sizeof(struct octeon_firmware_file_header));
568                 return -EINVAL;
569         }
570
571         h = (struct octeon_firmware_file_header *)data;
572
573         if (h->magic != be32_to_cpu(LIO_NIC_MAGIC)) {
574                 dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
575                 return -EINVAL;
576         }
577
578         crc32_result =
579                 crc32(~0, data,
580                       sizeof(struct octeon_firmware_file_header) -
581                       sizeof(u32)) ^ ~0U;
582         if (crc32_result != be32_to_cpu(h->crc32)) {
583                 dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
584                         crc32_result, be32_to_cpu(h->crc32));
585                 return -EINVAL;
586         }
587
588         if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
589                 dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
590                         LIQUIDIO_VERSION, h->version);
591                 return -EINVAL;
592         }
593
594         if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
595                 dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
596                         be32_to_cpu(h->num_images));
597                 return -EINVAL;
598         }
599
600         dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
601         snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
602                  h->version);
603
604         buffer = kmalloc(size, GFP_KERNEL);
605         if (!buffer)
606                 return -ENOMEM;
607
608         memcpy(buffer, data, size);
609
610         p = buffer + sizeof(struct octeon_firmware_file_header);
611
612         /* load all images */
613         for (i = 0; i < be32_to_cpu(h->num_images); i++) {
614                 load_addr = be64_to_cpu(h->desc[i].addr);
615                 image_len = be32_to_cpu(h->desc[i].len);
616
617                 /* validate the image */
618                 crc32_result = crc32(~0, p, image_len) ^ ~0U;
619                 if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
620                         dev_err(&oct->pci_dev->dev,
621                                 "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
622                                 i, crc32_result,
623                                 be32_to_cpu(h->desc[i].crc32));
624                         ret = -EINVAL;
625                         goto done_downloading;
626                 }
627
628                 /* download the image */
629                 octeon_pci_write_core_mem(oct, load_addr, p, image_len);
630
631                 p += image_len;
632                 dev_dbg(&oct->pci_dev->dev,
633                         "Downloaded image %d (%d bytes) to address 0x%016llx\n",
634                         i, image_len, load_addr);
635         }
636
637         /* Invoke the bootcmd */
638         ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
639
640 done_downloading:
641         kfree(buffer);
642
643         return ret;
644 }
645
646 void octeon_free_device_mem(struct octeon_device *oct)
647 {
648         u32 i;
649
650         for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
651                 /* could check  mask as well */
652                 if (oct->droq[i])
653                         vfree(oct->droq[i]);
654         }
655
656         for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
657                 /* could check mask as well */
658                 if (oct->instr_queue[i])
659                         vfree(oct->instr_queue[i]);
660         }
661
662         i = oct->octeon_id;
663         vfree(oct);
664
665         octeon_device[i] = NULL;
666         octeon_device_count--;
667 }
668
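/* Allocate a single vmalloc'd block that holds the octeon_device structure,
 * the caller's private data, the chip-specific structure and the dispatch
 * list, with each region padded to an 8-byte boundary.
 */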
669 static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
670                                                         u32 priv_size)
671 {
672         struct octeon_device *oct;
673         u8 *buf = NULL;
674         u32 octdevsize = 0, configsize = 0, size;
675
676         switch (pci_id) {
677         case OCTEON_CN68XX:
678         case OCTEON_CN66XX:
679                 configsize = sizeof(struct octeon_cn6xxx);
680                 break;
681
682         default:
683                 pr_err("%s: Unknown PCI Device: 0x%x\n",
684                        __func__,
685                        pci_id);
686                 return NULL;
687         }
688
689         if (configsize & 0x7)
690                 configsize += (8 - (configsize & 0x7));
691
692         octdevsize = sizeof(struct octeon_device);
693         if (octdevsize & 0x7)
694                 octdevsize += (8 - (octdevsize & 0x7));
695
696         if (priv_size & 0x7)
697                 priv_size += (8 - (priv_size & 0x7));
698
699         size = octdevsize + priv_size + configsize +
700                 (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
701
702         buf = vmalloc(size);
703         if (!buf)
704                 return NULL;
705
706         memset(buf, 0, size);
707
708         oct = (struct octeon_device *)buf;
709         oct->priv = (void *)(buf + octdevsize);
710         oct->chip = (void *)(buf + octdevsize + priv_size);
711         oct->dispatch.dlist = (struct octeon_dispatch *)
712                 (buf + octdevsize + priv_size + configsize);
713
714         return oct;
715 }
716
717 struct octeon_device *octeon_allocate_device(u32 pci_id,
718                                              u32 priv_size)
719 {
720         u32 oct_idx = 0;
721         struct octeon_device *oct = NULL;
722
723         for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
724                 if (!octeon_device[oct_idx])
725                         break;
726
727         if (oct_idx == MAX_OCTEON_DEVICES)
728                 return NULL;
729
730         oct = octeon_allocate_device_mem(pci_id, priv_size);
731         if (!oct)
732                 return NULL;
733
734         spin_lock_init(&oct->pci_win_lock);
735         spin_lock_init(&oct->mem_access_lock);
736
737         octeon_device_count++;
738         octeon_device[oct_idx] = oct;
739
740         oct->octeon_id = oct_idx;
741         snprintf((oct->device_name), sizeof(oct->device_name),
742                  "LiquidIO%d", (oct->octeon_id));
743
744         return oct;
745 }
746
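/* Allocate and initialize the instruction (input) queues. For CN6XXX only
 * queue 0 is created here, and it becomes the default queue.
 */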
747 int octeon_setup_instr_queues(struct octeon_device *oct)
748 {
749         u32 i, num_iqs = 0;
750         u32 num_descs = 0;
751
752         /* this causes queue 0 to be default queue */
753         if (OCTEON_CN6XXX(oct)) {
754                 num_iqs = 1;
755                 num_descs =
756                         CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
757         }
758
759         oct->num_iqs = 0;
760
761         for (i = 0; i < num_iqs; i++) {
762                 oct->instr_queue[i] =
763                         vmalloc(sizeof(struct octeon_instr_queue));
764                 if (!oct->instr_queue[i])
765                         return 1;
766
767                 memset(oct->instr_queue[i], 0,
768                        sizeof(struct octeon_instr_queue));
769
770                 oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
771                 if (octeon_init_instr_queue(oct, i, num_descs))
772                         return 1;
773
774                 oct->num_iqs++;
775         }
776
777         return 0;
778 }
779
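/* Allocate and initialize the output queues (DROQs). As with the instruction
 * queues, only queue 0 is created here for CN6XXX and becomes the default
 * queue.
 */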
780 int octeon_setup_output_queues(struct octeon_device *oct)
781 {
782         u32 i, num_oqs = 0;
783         u32 num_descs = 0;
784         u32 desc_size = 0;
785
786         /* this causes queue 0 to be default queue */
787         if (OCTEON_CN6XXX(oct)) {
788                 /* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
789                 num_oqs = 1;
790                 num_descs =
791                         CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
792                 desc_size =
793                         CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf));
794         }
795
796         oct->num_oqs = 0;
797
798         for (i = 0; i < num_oqs; i++) {
799                 oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
800                 if (!oct->droq[i])
801                         return 1;
802
803                 memset(oct->droq[i], 0, sizeof(struct octeon_droq));
804
805                 if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
806                         return 1;
807
808                 oct->num_oqs++;
809         }
810
811         return 0;
812 }
813
814 void octeon_set_io_queues_off(struct octeon_device *oct)
815 {
816         /* Disable the i/p and o/p queues for this Octeon. */
817
818         octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
819         octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
820 }
821
822 void octeon_set_droq_pkt_op(struct octeon_device *oct,
823                             u32 q_no,
824                             u32 enable)
825 {
826         u32 reg_val = 0;
827
826         /* Enable or disable the output queue 'q_no' for this Octeon. */
829         reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
830
831         if (enable)
832                 reg_val = reg_val | (1 << q_no);
833         else
834                 reg_val = reg_val & (~(1 << q_no));
835
836         octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
837 }
838
839 int octeon_init_dispatch_list(struct octeon_device *oct)
840 {
841         u32 i;
842
843         oct->dispatch.count = 0;
844
845         for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
846                 oct->dispatch.dlist[i].opcode = 0;
847                 INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
848         }
849
850         for (i = 0; i <= REQTYPE_LAST; i++)
851                 octeon_register_reqtype_free_fn(oct, i, NULL);
852
853         spin_lock_init(&oct->dispatch.lock);
854
855         return 0;
856 }
857
858 void octeon_delete_dispatch_list(struct octeon_device *oct)
859 {
860         u32 i;
861         struct list_head freelist, *temp, *tmp2;
862
863         INIT_LIST_HEAD(&freelist);
864
865         spin_lock_bh(&oct->dispatch.lock);
866
867         for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
868                 struct list_head *dispatch;
869
870                 dispatch = &oct->dispatch.dlist[i].list;
871                 while (dispatch->next != dispatch) {
872                         temp = dispatch->next;
873                         list_del(temp);
874                         list_add_tail(temp, &freelist);
875                 }
876
877                 oct->dispatch.dlist[i].opcode = 0;
878         }
879
880         oct->dispatch.count = 0;
881
882         spin_unlock_bh(&oct->dispatch.lock);
883
884         list_for_each_safe(temp, tmp2, &freelist) {
885                 list_del(temp);
886                 vfree(temp);
887         }
888 }
889
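/* Find the dispatch function registered for an opcode/subcode pair. The
 * combined opcode is hashed into the dispatch table; the table entry itself
 * is checked first, then the linked list chained at that index. The dispatch
 * lock is taken internally.
 */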
890 octeon_dispatch_fn_t
891 octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
892                     u16 subcode)
893 {
894         u32 idx;
895         struct list_head *dispatch;
896         octeon_dispatch_fn_t fn = NULL;
897         u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
898
899         idx = combined_opcode & OCTEON_OPCODE_MASK;
900
901         spin_lock_bh(&octeon_dev->dispatch.lock);
902
903         if (octeon_dev->dispatch.count == 0) {
904                 spin_unlock_bh(&octeon_dev->dispatch.lock);
905                 return NULL;
906         }
907
908         if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
909                 spin_unlock_bh(&octeon_dev->dispatch.lock);
910                 return NULL;
911         }
912
913         if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
914                 fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
915         } else {
916                 list_for_each(dispatch,
917                               &octeon_dev->dispatch.dlist[idx].list) {
918                         if (((struct octeon_dispatch *)dispatch)->opcode ==
919                             combined_opcode) {
920                                 fn = ((struct octeon_dispatch *)
921                                       dispatch)->dispatch_fn;
922                                 break;
923                         }
924                 }
925         }
926
927         spin_unlock_bh(&octeon_dev->dispatch.lock);
928         return fn;
929 }
930
931 /* octeon_register_dispatch_fn
932  * Parameters:
933  *   octeon_id - id of the octeon device.
934  *   opcode    - opcode for which driver should call the registered function
935  *   subcode   - subcode for which driver should call the registered function
936  *   fn        - The function to call when a packet with "opcode" arrives in
937  *                octeon output queues.
938  *   fn_arg    - The argument to be passed when calling function "fn".
939  * Description:
940  *   Registers a function and its argument to be called when a packet
941  *   arrives in Octeon output queues with "opcode".
942  * Returns:
943  *   Success: 0
944  *   Failure: 1
945  * Locks:
946  *   No locks are held.
947  */
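/* Usage sketch (illustrative only; the opcode/subcode macros shown are the
 * ones the companion NIC code is expected to use when it registers
 * octeon_core_drv_init, but callers substitute their own opcode/subcode and
 * handler):
 *
 *	if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
 *					OPCODE_NIC_CORE_DRV_ACTIVE,
 *					octeon_core_drv_init, oct))
 *		dev_err(&oct->pci_dev->dev,
 *			"dispatch registration failed\n");
 */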
948 int
949 octeon_register_dispatch_fn(struct octeon_device *oct,
950                             u16 opcode,
951                             u16 subcode,
952                             octeon_dispatch_fn_t fn, void *fn_arg)
953 {
954         u32 idx;
955         octeon_dispatch_fn_t pfn;
956         u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
957
958         idx = combined_opcode & OCTEON_OPCODE_MASK;
959
960         spin_lock_bh(&oct->dispatch.lock);
961         /* Add dispatch function to first level of lookup table */
962         if (oct->dispatch.dlist[idx].opcode == 0) {
963                 oct->dispatch.dlist[idx].opcode = combined_opcode;
964                 oct->dispatch.dlist[idx].dispatch_fn = fn;
965                 oct->dispatch.dlist[idx].arg = fn_arg;
966                 oct->dispatch.count++;
967                 spin_unlock_bh(&oct->dispatch.lock);
968                 return 0;
969         }
970
971         spin_unlock_bh(&oct->dispatch.lock);
972
973         /* Check if there was a function already registered for this
974          * opcode/subcode.
975          */
976         pfn = octeon_get_dispatch(oct, opcode, subcode);
977         if (!pfn) {
978                 struct octeon_dispatch *dispatch;
979
980                 dev_dbg(&oct->pci_dev->dev,
981                         "Adding opcode to dispatch list linked list\n");
982                 dispatch = (struct octeon_dispatch *)
983                            vmalloc(sizeof(struct octeon_dispatch));
984                 if (!dispatch) {
985                         dev_err(&oct->pci_dev->dev,
986                                 "No memory to add dispatch function\n");
987                         return 1;
988                 }
989                 dispatch->opcode = combined_opcode;
990                 dispatch->dispatch_fn = fn;
991                 dispatch->arg = fn_arg;
992
993                 /* Add dispatch function to linked list of fn ptrs
994                  * at the hashed index.
995                  */
996                 spin_lock_bh(&oct->dispatch.lock);
997                 list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
998                 oct->dispatch.count++;
999                 spin_unlock_bh(&oct->dispatch.lock);
1000
1001         } else {
1002                 dev_err(&oct->pci_dev->dev,
1003                         "Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
1004                         opcode, subcode);
1005                 return 1;
1006         }
1007
1008         return 0;
1009 }
1010
1011 /* octeon_unregister_dispatch_fn
1012  * Parameters:
1013  *   oct       - octeon device
1014  *   opcode    - driver should unregister the function for this opcode
1015  *   subcode   - driver should unregister the function for this subcode
1016  * Description:
1017  *   Unregister the function set for this opcode+subcode.
1018  * Returns:
1019  *   Success: 0
1020  *   Failure: 1
1021  * Locks:
1022  *   No locks are held.
1023  */
1024 int
1025 octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
1026                               u16 subcode)
1027 {
1028         int retval = 0;
1029         u32 idx;
1030         struct list_head *dispatch, *dfree = NULL, *tmp2;
1031         u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
1032
1033         idx = combined_opcode & OCTEON_OPCODE_MASK;
1034
1035         spin_lock_bh(&oct->dispatch.lock);
1036
1037         if (oct->dispatch.count == 0) {
1038                 spin_unlock_bh(&oct->dispatch.lock);
1039                 dev_err(&oct->pci_dev->dev,
1040                         "No dispatch functions registered for this device\n");
1041                 return 1;
1042         }
1043
1044         if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
1045                 dispatch = &oct->dispatch.dlist[idx].list;
1046                 if (dispatch->next != dispatch) {
1047                         dispatch = dispatch->next;
1048                         oct->dispatch.dlist[idx].opcode =
1049                                 ((struct octeon_dispatch *)dispatch)->opcode;
1050                         oct->dispatch.dlist[idx].dispatch_fn =
1051                                 ((struct octeon_dispatch *)
1052                                  dispatch)->dispatch_fn;
1053                         oct->dispatch.dlist[idx].arg =
1054                                 ((struct octeon_dispatch *)dispatch)->arg;
1055                         list_del(dispatch);
1056                         dfree = dispatch;
1057                 } else {
1058                         oct->dispatch.dlist[idx].opcode = 0;
1059                         oct->dispatch.dlist[idx].dispatch_fn = NULL;
1060                         oct->dispatch.dlist[idx].arg = NULL;
1061                 }
1062         } else {
1063                 retval = 1;
1064                 list_for_each_safe(dispatch, tmp2,
1065                                    &(oct->dispatch.dlist[idx].
1066                                      list)) {
1067                         if (((struct octeon_dispatch *)dispatch)->opcode ==
1068                             combined_opcode) {
1069                                 list_del(dispatch);
1070                                 dfree = dispatch;
1071                                 retval = 0;
1072                         }
1073                 }
1074         }
1075
1076         if (!retval)
1077                 oct->dispatch.count--;
1078
1079         spin_unlock_bh(&oct->dispatch.lock);
1080
1081         if (dfree)
1082                 vfree(dfree);
1083
1084         return retval;
1085 }
1086
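/* Dispatch handler for the "core driver active" message sent by the Octeon
 * firmware once its application starts: record the application mode and
 * firmware limits, copy the board information from the received buffer, and
 * move the device state to OCT_DEV_CORE_OK.
 */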
1087 int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
1088 {
1089         u32 i;
1090         char app_name[16];
1091         struct octeon_device *oct = (struct octeon_device *)buf;
1092         struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
1093         struct octeon_core_setup *cs = NULL;
1094         u32 num_nic_ports = 0;
1095
1096         if (OCTEON_CN6XXX(oct))
1097                 num_nic_ports =
1098                         CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf));
1099
1100         if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
1101                 dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
1102                         atomic_read(&oct->status));
1103                 goto core_drv_init_err;
1104         }
1105
1106         strncpy(app_name,
1107                 get_oct_app_string(
1108                 (u32)recv_pkt->rh.r_core_drv_init.app_mode),
1109                 sizeof(app_name) - 1);
1110         oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
1111         if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
1112                 oct->fw_info.max_nic_ports =
1113                         (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
1114                 oct->fw_info.num_gmx_ports =
1115                         (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
1116         }
1117         if (oct->fw_info.max_nic_ports < num_nic_ports) {
1118                 dev_err(&oct->pci_dev->dev,
1119                         "Config has more ports than firmware allows (%d > %d).\n",
1120                         num_nic_ports, oct->fw_info.max_nic_ports);
1121                 goto core_drv_init_err;
1122         }
1123         oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
1124         oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
1125
1126         atomic_set(&oct->status, OCT_DEV_CORE_OK);
1127
1128         cs = &core_setup[oct->octeon_id];
1129
1130         if (recv_pkt->buffer_size[0] != sizeof(*cs)) {
1131                 dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
1132                         (u32)sizeof(*cs),
1133                         recv_pkt->buffer_size[0]);
1134         }
1135
1136         memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs));
1137         strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
1138         strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
1139                 OCT_SERIAL_LEN);
1140
1141         octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));
1142
1143         oct->boardinfo.major = cs->board_rev_major;
1144         oct->boardinfo.minor = cs->board_rev_minor;
1145
1146         dev_info(&oct->pci_dev->dev,
1147                  "Running %s (%llu Hz)\n",
1148                  app_name, CVM_CAST64(cs->corefreq));
1149
1150 core_drv_init_err:
1151         for (i = 0; i < recv_pkt->buffer_count; i++)
1152                 recv_buffer_free(recv_pkt->buffer_ptr[i]);
1153         octeon_free_recv_info(recv_info);
1154         return 0;
1155 }
1156
1157 int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
1158
1159 {
1160         if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
1161             (oct->io_qmask.iq & (1UL << q_no)))
1162                 return oct->instr_queue[q_no]->max_count;
1163
1164         return -1;
1165 }
1166
1167 int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
1168 {
1169         if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
1170             (oct->io_qmask.oq & (1UL << q_no)))
1171                 return oct->droq[q_no]->max_count;
1172         return -1;
1173 }
1174
1175 /* Returns the OCTEON-specific configuration used in the host-firmware handshake */
1176 struct octeon_config *octeon_get_conf(struct octeon_device *oct)
1177 {
1178         struct octeon_config *default_oct_conf = NULL;
1179
1180         /* check the OCTEON Device model & return the corresponding octeon
1181          * configuration
1182          */
1183
1184         if (OCTEON_CN6XXX(oct)) {
1185                 default_oct_conf =
1186                         (struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf));
1187         }
1188
1189         return default_oct_conf;
1190 }
1191
1192 /* scratch register address is the same in all the OCT-II and CN70XX models */
1193 #define CNXX_SLI_SCRATCH1   0x3C0
1194
1195 /** Get the octeon device pointer.
1196  *  @param octeon_id  - The id for which the octeon device pointer is required.
1197  *  @return Success: Octeon device pointer.
1198  *  @return Failure: NULL.
1199  */
1200 struct octeon_device *lio_get_device(u32 octeon_id)
1201 {
1202         if (octeon_id >= MAX_OCTEON_DEVICES)
1203                 return NULL;
1204         else
1205                 return octeon_device[octeon_id];
1206 }
1207
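/* Windowed register access: lio_pci_readq() and lio_pci_writeq() read and
 * write a 64-bit value at an arbitrary Octeon address through the PCI window
 * registers, serialized by pci_win_lock.
 */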
1208 u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
1209 {
1210         u64 val64;
1211         unsigned long flags;
1212         u32 val32, addrhi;
1213
1214         spin_lock_irqsave(&oct->pci_win_lock, flags);
1215
1216         /* The windowed read happens when the LSB of the addr is written.
1217          * So write MSB first
1218          */
1219         addrhi = (addr >> 32);
1220         if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX))
1221                 addrhi |= 0x00060000;
1222         writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
1223
1224         /* Read back to preserve ordering of writes */
1225         val32 = readl(oct->reg_list.pci_win_rd_addr_hi);
1226
1227         writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
1228         val32 = readl(oct->reg_list.pci_win_rd_addr_lo);
1229
1230         val64 = readq(oct->reg_list.pci_win_rd_data);
1231
1232         spin_unlock_irqrestore(&oct->pci_win_lock, flags);
1233
1234         return val64;
1235 }
1236
1237 void lio_pci_writeq(struct octeon_device *oct,
1238                     u64 val,
1239                     u64 addr)
1240 {
1241         u32 val32;
1242         unsigned long flags;
1243
1244         spin_lock_irqsave(&oct->pci_win_lock, flags);
1245
1246         writeq(addr, oct->reg_list.pci_win_wr_addr);
1247
1248         /* The write happens when the LSB is written. So write MSB first. */
1249         writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
1250         /* Read the MSB to ensure ordering of writes. */
1251         val32 = readl(oct->reg_list.pci_win_wr_data_hi);
1252
1253         writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);
1254
1255         spin_unlock_irqrestore(&oct->pci_win_lock, flags);
1256 }
1257
1258 int octeon_mem_access_ok(struct octeon_device *oct)
1259 {
1260         u64 access_okay = 0;
1261
1262         /* Check to make sure a DDR interface is enabled */
1263         u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
1264
1265         access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
1266
1267         return access_okay ? 0 : 1;
1268 }
1269
1270 int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
1271 {
1272         int ret = 1;
1273         u32 ms;
1274
1275         if (!timeout)
1276                 return ret;
1277
1278         while (*timeout == 0)
1279                 schedule_timeout_uninterruptible(HZ / 10);
1280
1281         for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
1282              ms += HZ / 10) {
1283                 ret = octeon_mem_access_ok(oct);
1284
1285                 /* wait 100 ms */
1286                 if (ret)
1287                         schedule_timeout_uninterruptible(HZ / 10);
1288         }
1289
1290         return ret;
1291 }
1292
1293 /** Get the octeon id assigned to the octeon device passed as argument.
1294  *  This function is exported to other modules.
1295  *  @param dev - octeon device pointer passed as a void *.
1296  *  @return octeon device id
1297  */
1298 int lio_get_device_id(void *dev)
1299 {
1300         struct octeon_device *octeon_dev = (struct octeon_device *)dev;
1301         u32 i;
1302
1303         for (i = 0; i < MAX_OCTEON_DEVICES; i++)
1304                 if (octeon_device[i] == octeon_dev)
1305                         return octeon_dev->octeon_id;
1306         return -1;
1307 }