/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"

#define UWORD_CPYBUF_SIZE 1024
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)

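/*
 * Bind the encapsulated image 'image_num' to the next free slice of AE 'ae'
 * and allocate the region/page bookkeeping for that slice.
 */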
static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
                                 unsigned int ae, unsigned int image_num)
{
        struct icp_qat_uclo_aedata *ae_data;
        struct icp_qat_uclo_encapme *encap_image;
        struct icp_qat_uclo_page *page = NULL;
        struct icp_qat_uclo_aeslice *ae_slice = NULL;

        ae_data = &obj_handle->ae_data[ae];
        encap_image = &obj_handle->ae_uimage[image_num];
        ae_slice = &ae_data->ae_slices[ae_data->slice_num];
        ae_slice->encap_image = encap_image;

        if (encap_image->img_ptr) {
                ae_slice->ctx_mask_assigned =
                                        encap_image->img_ptr->ctx_assigned;
                ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
        } else {
                ae_slice->ctx_mask_assigned = 0;
        }
        ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
        if (!ae_slice->region)
                return -ENOMEM;
        ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
        if (!ae_slice->page)
                goto out_err;
        page = ae_slice->page;
        page->encap_page = encap_image->page;
        ae_slice->page->region = ae_slice->region;
        ae_data->slice_num++;
        return 0;
out_err:
        kfree(ae_slice->region);
        ae_slice->region = NULL;
        return -ENOMEM;
}

static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
{
        unsigned int i;

        if (!ae_data) {
                pr_err("QAT: bad argument, ae_data is NULL\n");
                return -EINVAL;
        }

        for (i = 0; i < ae_data->slice_num; i++) {
                kfree(ae_data->ae_slices[i].region);
                ae_data->ae_slices[i].region = NULL;
                kfree(ae_data->ae_slices[i].page);
                ae_data->ae_slices[i].page = NULL;
        }
        return 0;
}

static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
                                 unsigned int str_offset)
{
        if ((!str_table->table_len) || (str_offset > str_table->table_len))
                return NULL;
        return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
}

static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
{
        int maj = hdr->maj_ver & 0xff;
        int min = hdr->min_ver & 0xff;

        if (hdr->file_id != ICP_QAT_UOF_FID) {
                pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
                return -EINVAL;
        }
        if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
                pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
                       maj, min);
                return -EINVAL;
        }
        return 0;
}

static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
{
        int maj = suof_hdr->maj_ver & 0xff;
        int min = suof_hdr->min_ver & 0xff;

        if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
                pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
                return -EINVAL;
        }
        if (suof_hdr->fw_type != 0) {
                pr_err("QAT: unsupported firmware type\n");
                return -EINVAL;
        }
        if (suof_hdr->num_chunks <= 0x1) {
                pr_err("QAT: SUOF chunk amount is incorrect\n");
                return -EINVAL;
        }
        if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
                pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
                       maj, min);
                return -EINVAL;
        }
        return 0;
}

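/*
 * The two helpers below copy a buffer into SRAM/micro-store one 32-bit word
 * at a time; num_in_bytes is expected to be a multiple of 4.
 */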
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
                                      unsigned int addr, unsigned int *val,
                                      unsigned int num_in_bytes)
{
        unsigned int outval;
        unsigned char *ptr = (unsigned char *)val;

        while (num_in_bytes) {
                memcpy(&outval, ptr, 4);
                SRAM_WRITE(handle, addr, outval);
                num_in_bytes -= 4;
                ptr += 4;
                addr += 4;
        }
}

static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
                                      unsigned char ae, unsigned int addr,
                                      unsigned int *val,
                                      unsigned int num_in_bytes)
{
        unsigned int outval;
        unsigned char *ptr = (unsigned char *)val;

        addr >>= 0x2; /* convert to uword address */

        while (num_in_bytes) {
                memcpy(&outval, ptr, 4);
                qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
                num_in_bytes -= 4;
                ptr += 4;
        }
}

static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
                                   unsigned char ae,
                                   struct icp_qat_uof_batch_init
                                   *umem_init_header)
{
        struct icp_qat_uof_batch_init *umem_init;

        if (!umem_init_header)
                return;
        umem_init = umem_init_header->next;
        while (umem_init) {
                unsigned int addr, *value, size;

                ae = umem_init->ae;
                addr = umem_init->addr;
                value = umem_init->value;
                size = umem_init->size;
                qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
                umem_init = umem_init->next;
        }
}

static void
qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
                                 struct icp_qat_uof_batch_init **base)
{
        struct icp_qat_uof_batch_init *umem_init;

        umem_init = *base;
        while (umem_init) {
                struct icp_qat_uof_batch_init *pre;

                pre = umem_init;
                umem_init = umem_init->next;
                kfree(pre);
        }
        *base = NULL;
}

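/*
 * Parse the leading decimal digits of 'str' into *num, ignoring anything
 * after the first non-digit character (e.g. "12abc" yields 12).
 */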
static int qat_uclo_parse_num(char *str, unsigned int *num)
{
        char buf[16] = {0};
        unsigned long ae = 0;
        int i;

        strncpy(buf, str, 15);
        for (i = 0; i < 16; i++) {
                if (!isdigit(buf[i])) {
                        buf[i] = '\0';
                        break;
                }
        }
        if ((kstrtoul(buf, 10, &ae)))
                return -EFAULT;

        *num = (unsigned int)ae;
        return 0;
}

static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
                                     struct icp_qat_uof_initmem *init_mem,
                                     unsigned int size_range, unsigned int *ae)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        char *str;

        if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
                pr_err("QAT: initmem is out of range\n");
                return -EINVAL;
        }
        if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
                pr_err("QAT: Memory scope for init_mem error\n");
                return -EINVAL;
        }
        str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
        if (!str) {
                pr_err("QAT: AE name assigned in UOF init table is NULL\n");
                return -EINVAL;
        }
        if (qat_uclo_parse_num(str, ae)) {
                pr_err("QAT: Parse num for AE number failed\n");
                return -EINVAL;
        }
        if (*ae >= ICP_QAT_UCLO_MAX_AE) {
                pr_err("QAT: ae %d out of range\n", *ae);
                return -EINVAL;
        }
        return 0;
}

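/*
 * Append one 4-byte batch-init entry per memvar attribute of 'init_mem' to
 * the per-AE list at *init_tab_base, allocating the list head on first use.
 */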
static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
                                           *handle, struct icp_qat_uof_initmem
                                           *init_mem, unsigned int ae,
                                           struct icp_qat_uof_batch_init
                                           **init_tab_base)
{
        struct icp_qat_uof_batch_init *init_header, *tail;
        struct icp_qat_uof_batch_init *mem_init, *tail_old;
        struct icp_qat_uof_memvar_attr *mem_val_attr;
        unsigned int i, flag = 0;

        mem_val_attr =
                (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
                sizeof(struct icp_qat_uof_initmem));

        init_header = *init_tab_base;
        if (!init_header) {
                init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
                if (!init_header)
                        return -ENOMEM;
                init_header->size = 1;
                *init_tab_base = init_header;
                flag = 1;
        }
        tail_old = init_header;
        while (tail_old->next)
                tail_old = tail_old->next;
        tail = tail_old;
        for (i = 0; i < init_mem->val_attr_num; i++) {
                mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
                if (!mem_init)
                        goto out_err;
                mem_init->ae = ae;
                mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
                mem_init->value = &mem_val_attr->value;
                mem_init->size = 4;
                mem_init->next = NULL;
                tail->next = mem_init;
                tail = mem_init;
                init_header->size += qat_hal_get_ins_num();
                mem_val_attr++;
        }
        return 0;
out_err:
        while (tail_old) {
                mem_init = tail_old->next;
                kfree(tail_old);
                tail_old = mem_init;
        }
        if (flag)
                kfree(*init_tab_base);
        return -ENOMEM;
}

static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
                                  struct icp_qat_uof_initmem *init_mem)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        unsigned int ae;

        if (qat_uclo_fetch_initmem_ae(handle, init_mem,
                                      ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
                return -EINVAL;
        if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
                                            &obj_handle->lm_init_tab[ae]))
                return -EINVAL;
        return 0;
}

static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
                                  struct icp_qat_uof_initmem *init_mem)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        unsigned int ae, ustore_size, uaddr, i;

        ustore_size = obj_handle->ustore_phy_size;
        if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
                return -EINVAL;
        if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
                                            &obj_handle->umem_init_tab[ae]))
                return -EINVAL;
        /* set the highest ustore address referenced */
        uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
        for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
                if (obj_handle->ae_data[ae].ae_slices[i].
                    encap_image->uwords_num < uaddr)
                        obj_handle->ae_data[ae].ae_slices[i].
                        encap_image->uwords_num = uaddr;
        }
        return 0;
}

#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
                                   struct icp_qat_uof_initmem *init_mem)
{
        switch (init_mem->region) {
        case ICP_QAT_UOF_LMEM_REGION:
                if (qat_uclo_init_lmem_seg(handle, init_mem))
                        return -EINVAL;
                break;
        case ICP_QAT_UOF_UMEM_REGION:
                if (qat_uclo_init_umem_seg(handle, init_mem))
                        return -EINVAL;
                break;
        default:
                pr_err("QAT: initmem region error. region type=0x%x\n",
                       init_mem->region);
                return -EINVAL;
        }
        return 0;
}

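/*
 * Fill the part of each assigned AE's ustore that is not covered by the
 * image page with the image's fill pattern.
 */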
static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
                                struct icp_qat_uclo_encapme *image)
{
        unsigned int i;
        struct icp_qat_uclo_encap_page *page;
        struct icp_qat_uof_image *uof_image;
        unsigned char ae;
        unsigned int ustore_size;
        unsigned int patt_pos;
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        uint64_t *fill_data;

        uof_image = image->img_ptr;
        fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
                            GFP_KERNEL);
        if (!fill_data)
                return -ENOMEM;
        for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
                memcpy(&fill_data[i], &uof_image->fill_pattern,
                       sizeof(uint64_t));
        page = image->page;

        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
                        continue;
                ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
                patt_pos = page->beg_addr_p + page->micro_words_num;

                qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
                                  page->beg_addr_p, &fill_data[0]);
                qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
                                  ustore_size - patt_pos + 1,
                                  &fill_data[page->beg_addr_p]);
        }
        kfree(fill_data);
        return 0;
}

static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
        int i, ae;
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;

        for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
                if (initmem->num_in_bytes) {
                        if (qat_uclo_init_ae_memory(handle, initmem))
                                return -EINVAL;
                }
                initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
                        (uintptr_t)initmem +
                        sizeof(struct icp_qat_uof_initmem)) +
                        (sizeof(struct icp_qat_uof_memvar_attr) *
                        initmem->val_attr_num));
        }
        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                if (qat_hal_batch_wr_lm(handle, ae,
                                        obj_handle->lm_init_tab[ae])) {
                        pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
                        return -EINVAL;
                }
                qat_uclo_cleanup_batch_init_list(handle,
                                                 &obj_handle->lm_init_tab[ae]);
                qat_uclo_batch_wr_umem(handle, ae,
                                       obj_handle->umem_init_tab[ae]);
                qat_uclo_cleanup_batch_init_list(handle,
                                                 &obj_handle->
                                                 umem_init_tab[ae]);
        }
        return 0;
}

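/*
 * Return the first chunk header located after 'cur' whose chunk_id matches,
 * or NULL if the object header contains no further matching chunk.
 */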
static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
                                 char *chunk_id, void *cur)
{
        int i;
        struct icp_qat_uof_chunkhdr *chunk_hdr =
            (struct icp_qat_uof_chunkhdr *)
            ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));

        for (i = 0; i < obj_hdr->num_chunks; i++) {
                if ((cur < (void *)&chunk_hdr[i]) &&
                    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
                             ICP_QAT_UOF_OBJID_LEN)) {
                        return &chunk_hdr[i];
                }
        }
        return NULL;
}

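/*
 * One step of a CRC-16/CCITT style checksum (polynomial 0x1021): fold the
 * byte 'ch' into the running 16-bit value kept in 'reg'.
 */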
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
        int i;
        unsigned int topbit = 1 << 0xF;
        unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

        reg ^= inbyte << 0x8;
        for (i = 0; i < 0x8; i++) {
                if (reg & topbit)
                        reg = (reg << 1) ^ 0x1021;
                else
                        reg <<= 1;
        }
        return reg & 0xFFFF;
}

static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
        unsigned int chksum = 0;

        if (ptr)
                while (num--)
                        chksum = qat_uclo_calc_checksum(chksum, *ptr++);
        return chksum;
}

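/*
 * Locate the file chunk named 'chunk_id', verify its checksum and wrap it
 * in a freshly allocated icp_qat_uclo_objhdr; returns NULL on any failure.
 */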
static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
                   char *chunk_id)
{
        struct icp_qat_uof_filechunkhdr *file_chunk;
        struct icp_qat_uclo_objhdr *obj_hdr;
        char *chunk;
        int i;

        file_chunk = (struct icp_qat_uof_filechunkhdr *)
                (buf + sizeof(struct icp_qat_uof_filehdr));
        for (i = 0; i < file_hdr->num_chunks; i++) {
                if (!strncmp(file_chunk->chunk_id, chunk_id,
                             ICP_QAT_UOF_OBJID_LEN)) {
                        chunk = buf + file_chunk->offset;
                        if (file_chunk->checksum != qat_uclo_calc_str_checksum(
                                chunk, file_chunk->size))
                                break;
                        obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
                        if (!obj_hdr)
                                break;
                        obj_hdr->file_buff = chunk;
                        obj_hdr->checksum = file_chunk->checksum;
                        obj_hdr->size = file_chunk->size;
                        return obj_hdr;
                }
                file_chunk++;
        }
        return NULL;
}

static unsigned int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
                            struct icp_qat_uof_image *image)
{
        struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
        struct icp_qat_uof_objtable *neigh_reg_tab;
        struct icp_qat_uof_code_page *code_page;

        code_page = (struct icp_qat_uof_code_page *)
                        ((char *)image + sizeof(struct icp_qat_uof_image));
        uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
                     code_page->uc_var_tab_offset);
        imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
                      code_page->imp_var_tab_offset);
        imp_expr_tab = (struct icp_qat_uof_objtable *)
                       (encap_uof_obj->beg_uof +
                       code_page->imp_expr_tab_offset);
        if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
            imp_expr_tab->entry_num) {
                pr_err("QAT: UOF can't contain imported variables to be parsed\n");
                return -EINVAL;
        }
        neigh_reg_tab = (struct icp_qat_uof_objtable *)
                        (encap_uof_obj->beg_uof +
                        code_page->neigh_reg_tab_offset);
        if (neigh_reg_tab->entry_num) {
                pr_err("QAT: UOF can't contain shared control store feature\n");
                return -EINVAL;
        }
        if (image->numpages > 1) {
                pr_err("QAT: UOF can't contain multiple pages\n");
                return -EINVAL;
        }
        if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
                pr_err("QAT: UOF can't use shared control store feature\n");
                return -EFAULT;
        }
        if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
                pr_err("QAT: UOF can't use reloadable feature\n");
                return -EFAULT;
        }
        return 0;
}

static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
                                     *encap_uof_obj,
                                     struct icp_qat_uof_image *img,
                                     struct icp_qat_uclo_encap_page *page)
{
        struct icp_qat_uof_code_page *code_page;
        struct icp_qat_uof_code_area *code_area;
        struct icp_qat_uof_objtable *uword_block_tab;
        struct icp_qat_uof_uword_block *uwblock;
        int i;

        code_page = (struct icp_qat_uof_code_page *)
                        ((char *)img + sizeof(struct icp_qat_uof_image));
        page->def_page = code_page->def_page;
        page->page_region = code_page->page_region;
        page->beg_addr_v = code_page->beg_addr_v;
        page->beg_addr_p = code_page->beg_addr_p;
        code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
                                                code_page->code_area_offset);
        page->micro_words_num = code_area->micro_words_num;
        uword_block_tab = (struct icp_qat_uof_objtable *)
                          (encap_uof_obj->beg_uof +
                          code_area->uword_block_tab);
        page->uwblock_num = uword_block_tab->entry_num;
        uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
                        sizeof(struct icp_qat_uof_objtable));
        page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
        for (i = 0; i < uword_block_tab->entry_num; i++)
                page->uwblock[i].micro_words =
                (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}

static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
                               struct icp_qat_uclo_encapme *ae_uimage,
                               int max_image)
{
        int i, j;
        struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
        struct icp_qat_uof_image *image;
        struct icp_qat_uof_objtable *ae_regtab;
        struct icp_qat_uof_objtable *init_reg_sym_tab;
        struct icp_qat_uof_objtable *sbreak_tab;
        struct icp_qat_uof_encap_obj *encap_uof_obj =
                                        &obj_handle->encap_uof_obj;

        for (j = 0; j < max_image; j++) {
                chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
                                                ICP_QAT_UOF_IMAG, chunk_hdr);
                if (!chunk_hdr)
                        break;
                image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
                                                     chunk_hdr->offset);
                ae_regtab = (struct icp_qat_uof_objtable *)
                           (image->reg_tab_offset +
                           obj_handle->obj_hdr->file_buff);
                ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
                ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
                        (((char *)ae_regtab) +
                        sizeof(struct icp_qat_uof_objtable));
                init_reg_sym_tab = (struct icp_qat_uof_objtable *)
                                   (image->init_reg_sym_tab +
                                   obj_handle->obj_hdr->file_buff);
                ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
                ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
                        (((char *)init_reg_sym_tab) +
                        sizeof(struct icp_qat_uof_objtable));
                sbreak_tab = (struct icp_qat_uof_objtable *)
                        (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
                ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
                ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
                                      (((char *)sbreak_tab) +
                                      sizeof(struct icp_qat_uof_objtable));
                ae_uimage[j].img_ptr = image;
                if (qat_uclo_check_image_compat(encap_uof_obj, image))
                        goto out_err;
                ae_uimage[j].page =
                        kzalloc(sizeof(struct icp_qat_uclo_encap_page),
                                GFP_KERNEL);
                if (!ae_uimage[j].page)
                        goto out_err;
                qat_uclo_map_image_page(encap_uof_obj, image,
                                        ae_uimage[j].page);
        }
        return j;
out_err:
        for (i = 0; i < j; i++)
                kfree(ae_uimage[i].page);
        return 0;
}

static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
        int i, ae;
        int mflag = 0;
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

        for (ae = 0; ae < max_ae; ae++) {
                if (!test_bit(ae,
                              (unsigned long *)&handle->hal_handle->ae_mask))
                        continue;
                for (i = 0; i < obj_handle->uimage_num; i++) {
                        if (!test_bit(ae, (unsigned long *)
                        &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
                                continue;
                        mflag = 1;
                        if (qat_uclo_init_ae_data(obj_handle, ae, i))
                                return -EINVAL;
                }
        }
        if (!mflag) {
                pr_err("QAT: uimage uses AE not set\n");
                return -EINVAL;
        }
        return 0;
}

static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
                       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
        struct icp_qat_uof_chunkhdr *chunk_hdr;

        chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
                                        obj_hdr->file_buff, tab_name, NULL);
        if (chunk_hdr) {
                int hdr_size;

                memcpy(&str_table->table_len, obj_hdr->file_buff +
                       chunk_hdr->offset, sizeof(str_table->table_len));
                hdr_size = (char *)&str_table->strings - (char *)str_table;
                str_table->strings = (uintptr_t)obj_hdr->file_buff +
                                        chunk_hdr->offset + hdr_size;
                return str_table;
        }
        return NULL;
}

static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
                           struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
        struct icp_qat_uof_chunkhdr *chunk_hdr;

        chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
                                        ICP_QAT_UOF_IMEM, NULL);
        if (chunk_hdr) {
                memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
                        chunk_hdr->offset, sizeof(unsigned int));
                init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
                (encap_uof_obj->beg_uof + chunk_hdr->offset +
                sizeof(unsigned int));
        }
}

static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
{
        switch (handle->pci_dev->device) {
        case ADF_DH895XCC_PCI_DEVICE_ID:
                return ICP_QAT_AC_895XCC_DEV_TYPE;
        case ADF_C62X_PCI_DEVICE_ID:
                return ICP_QAT_AC_C62X_DEV_TYPE;
        case ADF_C3XXX_PCI_DEVICE_ID:
                return ICP_QAT_AC_C3XXX_DEV_TYPE;
        default:
                pr_err("QAT: unsupported device 0x%x\n",
                       handle->pci_dev->device);
                return 0;
        }
}

static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
        unsigned int maj_ver, prod_type = obj_handle->prod_type;

        if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
                pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n",
                       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
                       prod_type);
                return -EINVAL;
        }
        maj_ver = obj_handle->prod_rev & 0xff;
        if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
            (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
                pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
                return -EINVAL;
        }
        return 0;
}

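/*
 * Dispatch a register init to the proper HAL helper. Absolute register
 * types clear ctx_mask and deliberately fall through to the relative case.
 */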
static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
                             unsigned char ae, unsigned char ctx_mask,
                             enum icp_qat_uof_regtype reg_type,
                             unsigned short reg_addr, unsigned int value)
{
        switch (reg_type) {
        case ICP_GPA_ABS:
        case ICP_GPB_ABS:
                ctx_mask = 0;
                /* fall through */
        case ICP_GPA_REL:
        case ICP_GPB_REL:
                return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
                                        reg_addr, value);
        case ICP_SR_ABS:
        case ICP_DR_ABS:
        case ICP_SR_RD_ABS:
        case ICP_DR_RD_ABS:
                ctx_mask = 0;
                /* fall through */
        case ICP_SR_REL:
        case ICP_DR_REL:
        case ICP_SR_RD_REL:
        case ICP_DR_RD_REL:
                return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
                                            reg_addr, value);
        case ICP_SR_WR_ABS:
        case ICP_DR_WR_ABS:
                ctx_mask = 0;
                /* fall through */
        case ICP_SR_WR_REL:
        case ICP_DR_WR_REL:
                return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
                                            reg_addr, value);
        case ICP_NEIGH_REL:
                return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
        default:
                pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
                return -EFAULT;
        }
        return 0;
}

static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
                                 unsigned int ae,
                                 struct icp_qat_uclo_encapme *encap_ae)
{
        unsigned int i;
        unsigned char ctx_mask;
        struct icp_qat_uof_init_regsym *init_regsym;

        if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
            ICP_QAT_UCLO_MAX_CTX)
                ctx_mask = 0xff;
        else
                ctx_mask = 0x55;

        for (i = 0; i < encap_ae->init_regsym_num; i++) {
                unsigned int exp_res;

                init_regsym = &encap_ae->init_regsym[i];
                exp_res = init_regsym->value;
                switch (init_regsym->init_type) {
                case ICP_QAT_UOF_INIT_REG:
                        qat_uclo_init_reg(handle, ae, ctx_mask,
                                          (enum icp_qat_uof_regtype)
                                          init_regsym->reg_type,
                                          (unsigned short)init_regsym->reg_addr,
                                          exp_res);
                        break;
                case ICP_QAT_UOF_INIT_REG_CTX:
                        /* check if ctx is appropriate for the ctxMode */
                        if (!((1 << init_regsym->ctx) & ctx_mask)) {
                                pr_err("QAT: invalid ctx num = 0x%x\n",
                                       init_regsym->ctx);
                                return -EINVAL;
                        }
                        qat_uclo_init_reg(handle, ae,
                                          (unsigned char)
                                          (1 << init_regsym->ctx),
                                          (enum icp_qat_uof_regtype)
                                          init_regsym->reg_type,
                                          (unsigned short)init_regsym->reg_addr,
                                          exp_res);
                        break;
                case ICP_QAT_UOF_INIT_EXPR:
                        pr_err("QAT: INIT_EXPR feature not supported\n");
                        return -EINVAL;
                case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
                        pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
                        return -EINVAL;
                default:
                        break;
                }
        }
        return 0;
}

static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        unsigned int s, ae;

        if (obj_handle->global_inited)
                return 0;
        if (obj_handle->init_mem_tab.entry_num) {
                if (qat_uclo_init_memory(handle)) {
                        pr_err("QAT: initialize memory failed\n");
                        return -EINVAL;
                }
        }
        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
                        if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
                                continue;
                        if (qat_uclo_init_reg_sym(handle, ae,
                                                  obj_handle->ae_data[ae].
                                                  ae_slices[s].encap_image))
                                return -EINVAL;
                }
        }
        obj_handle->global_inited = 1;
        return 0;
}

static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
        unsigned char ae, nn_mode, s;
        struct icp_qat_uof_image *uof_image;
        struct icp_qat_uclo_aedata *ae_data;
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

        for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
                if (!test_bit(ae,
                              (unsigned long *)&handle->hal_handle->ae_mask))
                        continue;
                ae_data = &obj_handle->ae_data[ae];
                for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
                                      ICP_QAT_UCLO_MAX_CTX); s++) {
                        if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
                                continue;
                        uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
                        if (qat_hal_set_ae_ctx_mode(handle, ae,
                                                    (char)ICP_QAT_CTX_MODE
                                                    (uof_image->ae_mode))) {
                                pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
                                return -EFAULT;
                        }
                        nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
                        if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
                                pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
                                return -EFAULT;
                        }
                        if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
                                                   (char)ICP_QAT_LOC_MEM0_MODE
                                                   (uof_image->ae_mode))) {
                                pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
                                return -EFAULT;
                        }
                        if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
                                                   (char)ICP_QAT_LOC_MEM1_MODE
                                                   (uof_image->ae_mode))) {
                                pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
                                return -EFAULT;
                        }
                }
        }
        return 0;
}

static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        struct icp_qat_uclo_encapme *image;
        int a;

        for (a = 0; a < obj_handle->uimage_num; a++) {
                image = &obj_handle->ae_uimage[a];
                image->uwords_num = image->page->beg_addr_p +
                                        image->page->micro_words_num;
        }
}

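/*
 * Top-level UOF parse: check device/revision compatibility, map the string
 * table, the per-AE images and the init-mem table, then set the AE modes.
 */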
static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
        struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
        unsigned int ae;

        obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
        obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
                                             obj_handle->obj_hdr->file_buff;
        obj_handle->uword_in_bytes = 6;
        obj_handle->prod_type = qat_uclo_get_dev_type(handle);
        obj_handle->prod_rev = PID_MAJOR_REV |
                        (PID_MINOR_REV & handle->hal_handle->revision_id);
        if (qat_uclo_check_uof_compat(obj_handle)) {
                pr_err("QAT: UOF incompatible\n");
                return -EINVAL;
        }
        obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
                                        GFP_KERNEL);
        if (!obj_handle->uword_buf)
                return -ENOMEM;
        obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
        if (!obj_handle->obj_hdr->file_buff ||
            !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
                                    &obj_handle->str_table)) {
                pr_err("QAT: UOF doesn't have effective images\n");
                goto out_err;
        }
        obj_handle->uimage_num =
                qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
                                    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
        if (!obj_handle->uimage_num)
                goto out_err;
        if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
                pr_err("QAT: Bad object\n");
                goto out_check_uof_aemask_err;
        }
        qat_uclo_init_uword_num(handle);
        qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
                                   &obj_handle->init_mem_tab);
        if (qat_uclo_set_ae_mode(handle))
                goto out_check_uof_aemask_err;
        return 0;
out_check_uof_aemask_err:
        for (ae = 0; ae < obj_handle->uimage_num; ae++)
                kfree(obj_handle->ae_uimage[ae].page);
out_err:
        kfree(obj_handle->uword_buf);
        return -EFAULT;
}

static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
                                      struct icp_qat_suof_filehdr *suof_ptr,
                                      int suof_size)
{
        unsigned int check_sum = 0;
        unsigned int min_ver_offset = 0;
        struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;

        suof_handle->file_id = ICP_QAT_SUOF_FID;
        suof_handle->suof_buf = (char *)suof_ptr;
        suof_handle->suof_size = suof_size;
        min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
                                              min_ver);
        check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
                                               min_ver_offset);
        if (check_sum != suof_ptr->check_sum) {
                pr_err("QAT: incorrect SUOF checksum\n");
                return -EINVAL;
        }
        suof_handle->check_sum = suof_ptr->check_sum;
        suof_handle->min_ver = suof_ptr->min_ver;
        suof_handle->maj_ver = suof_ptr->maj_ver;
        suof_handle->fw_type = suof_ptr->fw_type;
        return 0;
}

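/*
 * Fill in one SUOF image header. The signed image is laid out as
 * CSS header | FWSK modulus + exponent | signature | AE mode data / image.
 */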
static void qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
                              struct icp_qat_suof_img_hdr *suof_img_hdr,
                              struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
        struct icp_qat_simg_ae_mode *ae_mode;
        struct icp_qat_suof_objhdr *suof_objhdr;

        suof_img_hdr->simg_buf  = (suof_handle->suof_buf +
                                   suof_chunk_hdr->offset +
                                   sizeof(*suof_objhdr));
        suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
                                  (suof_handle->suof_buf +
                                   suof_chunk_hdr->offset))->img_length;

        suof_img_hdr->css_header = suof_img_hdr->simg_buf;
        suof_img_hdr->css_key = (suof_img_hdr->css_header +
                                 sizeof(struct icp_qat_css_hdr));
        suof_img_hdr->css_signature = suof_img_hdr->css_key +
                                      ICP_QAT_CSS_FWSK_MODULUS_LEN +
                                      ICP_QAT_CSS_FWSK_EXPONENT_LEN;
        suof_img_hdr->css_simg = suof_img_hdr->css_signature +
                                 ICP_QAT_CSS_SIGNATURE_LEN;

        ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
        suof_img_hdr->ae_mask = ae_mode->ae_mask;
        suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
        suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
        suof_img_hdr->fw_type = ae_mode->fw_type;
}

static void
qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
                          struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
        char **sym_str = (char **)&suof_handle->sym_str;
        unsigned int *sym_size = &suof_handle->sym_size;
        struct icp_qat_suof_strtable *str_table_obj;

        *sym_size = *(unsigned int *)(uintptr_t)
                   (suof_chunk_hdr->offset + suof_handle->suof_buf);
        *sym_str = (char *)(uintptr_t)
                   (suof_handle->suof_buf + suof_chunk_hdr->offset +
                   sizeof(str_table_obj->tab_length));
}

static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
                                      struct icp_qat_suof_img_hdr *img_hdr)
{
        struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
        unsigned int prod_rev, maj_ver, prod_type;

        prod_type = qat_uclo_get_dev_type(handle);
        img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
        prod_rev = PID_MAJOR_REV |
                         (PID_MINOR_REV & handle->hal_handle->revision_id);
        if (img_ae_mode->dev_type != prod_type) {
                pr_err("QAT: incompatible product type %x\n",
                       img_ae_mode->dev_type);
                return -EINVAL;
        }
        maj_ver = prod_rev & 0xff;
        if ((maj_ver > img_ae_mode->devmax_ver) ||
            (maj_ver < img_ae_mode->devmin_ver)) {
                pr_err("QAT: incompatible device majver 0x%x\n", maj_ver);
                return -EINVAL;
        }
        return 0;
}

static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
{
        struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;

        kfree(sobj_handle->img_table.simg_hdr);
        sobj_handle->img_table.simg_hdr = NULL;
        kfree(handle->sobj_handle);
        handle->sobj_handle = NULL;
}

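/*
 * Move the image that maps AE0 to the end of the image table so that it is
 * the last one written to the device.
 */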
static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
                              unsigned int img_id, unsigned int num_simgs)
{
        struct icp_qat_suof_img_hdr img_header;

        if (img_id != num_simgs - 1) {
                memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
                       sizeof(*suof_img_hdr));
                memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
                       sizeof(*suof_img_hdr));
                memcpy(&suof_img_hdr[img_id], &img_header,
                       sizeof(*suof_img_hdr));
        }
}

static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
                             struct icp_qat_suof_filehdr *suof_ptr,
                             int suof_size)
{
        struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
        struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
        struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
        int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
        unsigned int i = 0;
        struct icp_qat_suof_img_hdr img_header;

        if (!suof_ptr || (suof_size == 0)) {
                pr_err("QAT: input SUOF pointer is NULL or size is 0\n");
                return -EINVAL;
        }
        if (qat_uclo_check_suof_format(suof_ptr))
                return -EINVAL;
        ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
        if (ret)
                return ret;
        suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
                         ((uintptr_t)suof_ptr + sizeof(*suof_ptr));

        qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
        suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;

        if (suof_handle->img_table.num_simgs != 0) {
                suof_img_hdr = kzalloc(suof_handle->img_table.num_simgs *
                                       sizeof(img_header), GFP_KERNEL);
                if (!suof_img_hdr)
                        return -ENOMEM;
                suof_handle->img_table.simg_hdr = suof_img_hdr;
        }

        for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
                qat_uclo_map_simg(handle->sobj_handle, &suof_img_hdr[i],
                                  &suof_chunk_hdr[1 + i]);
                ret = qat_uclo_check_simg_compat(handle,
                                                 &suof_img_hdr[i]);
                if (ret)
                        return ret;
                if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
                        ae0_img = i;
        }
        qat_uclo_tail_img(suof_img_hdr, ae0_img,
                          suof_handle->img_table.num_simgs);
        return 0;
}

#define ADD_ADDR(high, low)  ((((uint64_t)high) << 32) + low)
#define BITS_IN_DWORD 32

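/*
 * Kick off firmware authentication: program the FCU with the bus address of
 * the auth descriptor's chunk, issue the AUTH command and poll FCU_STATUS
 * until the image is verified or FW_AUTH_MAX_RETRY is exceeded.
 */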
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
                            struct icp_qat_fw_auth_desc *desc)
{
        unsigned int fcu_sts, retry = 0;
        u64 bus_addr;

        bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
                           - sizeof(struct icp_qat_auth_chunk);
        SET_CAP_CSR(handle, FCU_DRAM_ADDR_HI, (bus_addr >> BITS_IN_DWORD));
        SET_CAP_CSR(handle, FCU_DRAM_ADDR_LO, bus_addr);
        SET_CAP_CSR(handle, FCU_CONTROL, FCU_CTRL_CMD_AUTH);

        do {
                msleep(FW_AUTH_WAIT_PERIOD);
                fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
                if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
                        goto auth_fail;
                if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
                        if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
                                return 0;
        } while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
        pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
               fcu_sts & FCU_AUTH_STS_MASK, retry);
        return -EINVAL;
}

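/*
 * qat_uclo_simg_alloc()/qat_uclo_simg_free() manage the DMA-coherent buffer
 * that holds the authentication descriptor and the signed image handed to
 * the FCU.
 */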
static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
                               struct icp_firml_dram_desc *dram_desc,
                               unsigned int size)
{
        void *vptr;
        dma_addr_t ptr;

        vptr = dma_alloc_coherent(&handle->pci_dev->dev,
                                  size, &ptr, GFP_KERNEL);
        if (!vptr)
                return -ENOMEM;
        dram_desc->dram_base_addr_v = vptr;
        dram_desc->dram_bus_addr = ptr;
        dram_desc->dram_size = size;
        return 0;
}

static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
                               struct icp_firml_dram_desc *dram_desc)
{
        dma_free_coherent(&handle->pci_dev->dev,
                          (size_t)(dram_desc->dram_size),
                          (dram_desc->dram_base_addr_v),
                          dram_desc->dram_bus_addr);
        memset(dram_desc, 0, sizeof(*dram_desc));
}

static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
                                   struct icp_qat_fw_auth_desc **desc)
{
        struct icp_firml_dram_desc dram_desc;

        dram_desc.dram_base_addr_v = *desc;
        dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
                                   (*desc))->chunk_bus_addr;
        dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
                               (*desc))->chunk_size;
        qat_uclo_simg_free(handle, &dram_desc);
}

1254 static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
1255                                 char *image, unsigned int size,
1256                                 struct icp_qat_fw_auth_desc **desc)
1257 {
1258         struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
1259         struct icp_qat_fw_auth_desc *auth_desc;
1260         struct icp_qat_auth_chunk *auth_chunk;
1261         u64 virt_addr,  bus_addr, virt_base;
1262         unsigned int length, simg_offset = sizeof(*auth_chunk);
1263         struct icp_firml_dram_desc img_desc;
1264
1265         if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
1266                 pr_err("QAT: error, input image size overflow %d\n", size);
1267                 return -EINVAL;
1268         }
1269         length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
1270                  ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
1271                  size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
1272         if (qat_uclo_simg_alloc(handle, &img_desc, length)) {
1273                 pr_err("QAT: error, failed to allocate contiguous DRAM\n");
1274                 return -ENOMEM;
1275         }
1276
1277         auth_chunk = img_desc.dram_base_addr_v;
1278         auth_chunk->chunk_size = img_desc.dram_size;
1279         auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;
1280         virt_base = (uintptr_t)img_desc.dram_base_addr_v + simg_offset;
1281         bus_addr  = img_desc.dram_bus_addr + simg_offset;
1282         auth_desc = img_desc.dram_base_addr_v;
1283         auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1284         auth_desc->css_hdr_low = (unsigned int)bus_addr;
1285         virt_addr = virt_base;
1286
1287         memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
1288         /* pub key */
1289         bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
1290                            sizeof(*css_hdr);
1291         virt_addr = virt_addr + sizeof(*css_hdr);
1292
1293         auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1294         auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
1295
1296         memcpy((void *)(uintptr_t)virt_addr,
1297                (void *)(image + sizeof(*css_hdr)),
1298                ICP_QAT_CSS_FWSK_MODULUS_LEN);
1299         /* padding */
1300         memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN),
1301                0, ICP_QAT_CSS_FWSK_PAD_LEN);
1302
1303         /* exponent */
1304         memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
1305                ICP_QAT_CSS_FWSK_PAD_LEN),
1306                (void *)(image + sizeof(*css_hdr) +
1307                         ICP_QAT_CSS_FWSK_MODULUS_LEN),
1308                sizeof(unsigned int));
1309
1310         /* signature */
1311         bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
1312                             auth_desc->fwsk_pub_low) +
1313                    ICP_QAT_CSS_FWSK_PUB_LEN;
1314         virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
1315         auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1316         auth_desc->signature_low = (unsigned int)bus_addr;
1317
1318         memcpy((void *)(uintptr_t)virt_addr,
1319                (void *)(image + sizeof(*css_hdr) +
1320                ICP_QAT_CSS_FWSK_MODULUS_LEN +
1321                ICP_QAT_CSS_FWSK_EXPONENT_LEN),
1322                ICP_QAT_CSS_SIGNATURE_LEN);
1323
1324         bus_addr = ADD_ADDR(auth_desc->signature_high,
1325                             auth_desc->signature_low) +
1326                    ICP_QAT_CSS_SIGNATURE_LEN;
1327         virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;
1328
1329         auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1330         auth_desc->img_low = (unsigned int)bus_addr;
1331         auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
1332         memcpy((void *)(uintptr_t)virt_addr,
1333                (void *)(image + ICP_QAT_AE_IMG_OFFSET),
1334                auth_desc->img_len);
1335         virt_addr = virt_base;
1336         /* AE firmware */
1337         if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
1338             CSS_AE_FIRMWARE) {
1339                 auth_desc->img_ae_mode_data_high = auth_desc->img_high;
1340                 auth_desc->img_ae_mode_data_low = auth_desc->img_low;
1341                 bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
1342                                     auth_desc->img_ae_mode_data_low) +
1343                            sizeof(struct icp_qat_simg_ae_mode);
1344
1345                 auth_desc->img_ae_init_data_high = (unsigned int)
1346                                                  (bus_addr >> BITS_IN_DWORD);
1347                 auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
1348                 bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
1349                 auth_desc->img_ae_insts_high = (unsigned int)
1350                                              (bus_addr >> BITS_IN_DWORD);
1351                 auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
1352         } else {
1353                 auth_desc->img_ae_insts_high = auth_desc->img_high;
1354                 auth_desc->img_ae_insts_low = auth_desc->img_low;
1355         }
1356         *desc = auth_desc;
1357         return 0;
1358 }
1359
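/*
 * Load an authenticated image into the acceleration engines.  The AE mask is
 * read from the SIMG AE mode data that sits at the start of the image region
 * of the mapped buffer.  For each AE in the mask an FCU load command is
 * issued and FCU_STATUS is polled until both the load-done status and the
 * per-AE loaded bit are set, giving up after FW_AUTH_MAX_RETRY polls.
 */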
1360 static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
1361                             struct icp_qat_fw_auth_desc *desc)
1362 {
1363         unsigned int i;
1364         unsigned int fcu_sts;
1365         struct icp_qat_simg_ae_mode *virt_addr;
1366         unsigned int fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
1367
1368         virt_addr = (void *)((uintptr_t)desc +
1369                      sizeof(struct icp_qat_auth_chunk) +
1370                      sizeof(struct icp_qat_css_hdr) +
1371                      ICP_QAT_CSS_FWSK_PUB_LEN +
1372                      ICP_QAT_CSS_SIGNATURE_LEN);
1373         for (i = 0; i < handle->hal_handle->ae_max_num; i++) {
1374                 int retry = 0;
1375
1376                 if (!((virt_addr->ae_mask >> i) & 0x1))
1377                         continue;
1378                 if (qat_hal_check_ae_active(handle, i)) {
1379                         pr_err("QAT: AE %d is active\n", i);
1380                         return -EINVAL;
1381                 }
1382                 SET_CAP_CSR(handle, FCU_CONTROL,
1383                             (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));
1384
1385                 do {
1386                         msleep(FW_AUTH_WAIT_PERIOD);
1387                         fcu_sts = GET_CAP_CSR(handle, FCU_STATUS);
1388                         if (((fcu_sts & FCU_AUTH_STS_MASK) ==
1389                             FCU_STS_LOAD_DONE) &&
1390                             ((fcu_sts >> fcu_loaded_ae_pos) & (1 << i)))
1391                                 break;
1392                 } while (retry++ < FW_AUTH_MAX_RETRY);
1393                 if (retry > FW_AUTH_MAX_RETRY) {
1394                         pr_err("QAT: firmware load timed out, retry = %d\n", retry);
1395                         return -EINVAL;
1396                 }
1397         }
1398         return 0;
1399 }
1400
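/*
 * Allocate a SUOF handle, attach it to the loader handle and map the SUOF
 * object at @addr_ptr; the handle is torn down again if the mapping fails.
 */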
1401 static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
1402                                  void *addr_ptr, int mem_size)
1403 {
1404         struct icp_qat_suof_handle *suof_handle;
1405
1406         suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
1407         if (!suof_handle)
1408                 return -ENOMEM;
1409         handle->sobj_handle = suof_handle;
1410         if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
1411                 qat_uclo_del_suof(handle);
1412                 pr_err("QAT: map SUOF failed\n");
1413                 return -EINVAL;
1414         }
1415         return 0;
1416 }
1417
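/*
 * Write the MMP image.  With firmware authentication enabled the image is
 * mapped, authenticated through the FCU and unmapped again; otherwise it is
 * written directly to SRAM.  C3XXX devices only accept signed MMP images.
 */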
1418 int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
1419                        void *addr_ptr, int mem_size)
1420 {
1421         struct icp_qat_fw_auth_desc *desc = NULL;
1422         int status = 0;
1423
1424         if (handle->fw_auth) {
1425                 if (!qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc))
1426                         status = qat_uclo_auth_fw(handle, desc);
1427                 qat_uclo_ummap_auth_fw(handle, &desc);
1428         } else {
1429                 if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID) {
1430                         pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
1431                         return -EINVAL;
1432                 }
1433                 qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
1434         }
1435         return status;
1436 }
1437
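/*
 * Duplicate the UOF object into a freshly allocated object handle, locate
 * the UOF chunk behind the file header and parse it into the handle.
 */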
1438 static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
1439                                 void *addr_ptr, int mem_size)
1440 {
1441         struct icp_qat_uof_filehdr *filehdr;
1442         struct icp_qat_uclo_objhandle *objhdl;
1443
1444         objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
1445         if (!objhdl)
1446                 return -ENOMEM;
1447         objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
1448         if (!objhdl->obj_buf)
1449                 goto out_objbuf_err;
1450         filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
1451         if (qat_uclo_check_uof_format(filehdr))
1452                 goto out_objhdr_err;
1453         objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
1454                                              ICP_QAT_UOF_OBJS);
1455         if (!objhdl->obj_hdr) {
1456                 pr_err("QAT: object file chunk is null\n");
1457                 goto out_objhdr_err;
1458         }
1459         handle->obj_handle = objhdl;
1460         if (qat_uclo_parse_uof_obj(handle))
1461                 goto out_overlay_obj_err;
1462         return 0;
1463
1464 out_overlay_obj_err:
1465         handle->obj_handle = NULL;
1466         kfree(objhdl->obj_hdr);
1467 out_objhdr_err:
1468         kfree(objhdl->obj_buf);
1469 out_objbuf_err:
1470         kfree(objhdl);
1471         return -ENOMEM;
1472 }
1473
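/*
 * Map a firmware object: devices that require signed firmware get a SUOF
 * mapping, all others a UOF mapping.  The 24-byte minimum presumably
 * corresponds to the smallest buffer that can hold a valid file header.
 */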
1474 int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
1475                      void *addr_ptr, int mem_size)
1476 {
1477         BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
1478                      (sizeof(handle->hal_handle->ae_mask) * 8));
1479
1480         if (!handle || !addr_ptr || mem_size < 24)
1481                 return -EINVAL;
1482
1483         return (handle->fw_auth) ?
1484                         qat_uclo_map_suof_obj(handle, addr_ptr, mem_size) :
1485                         qat_uclo_map_uof_obj(handle, addr_ptr, mem_size);
1486 }
1487
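/*
 * Release everything attached to the UOF object handle: the uword staging
 * buffer, the per-image page arrays, the per-AE slice data and the
 * duplicated object buffer.  A SUOF handle, if present, is released first.
 */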
1488 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
1489 {
1490         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1491         unsigned int a;
1492
1493         if (handle->sobj_handle)
1494                 qat_uclo_del_suof(handle);
1495         if (!obj_handle)
1496                 return;
1497
1498         kfree(obj_handle->uword_buf);
1499         for (a = 0; a < obj_handle->uimage_num; a++)
1500                 kfree(obj_handle->ae_uimage[a].page);
1501
1502         for (a = 0; a < handle->hal_handle->ae_max_num; a++)
1503                 qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1504
1505         kfree(obj_handle->obj_hdr);
1506         kfree(obj_handle->obj_buf);
1507         kfree(obj_handle);
1508         handle->obj_handle = NULL;
1509 }
1510
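/*
 * Fetch the microword for an AE ustore write.  If @encap_page is NULL the
 * fill pattern is returned.  Otherwise the microword at relative address
 * @raddr is looked up in the page's uwblocks, masked to 44 bits with bit 42
 * cleared (0xbffffffffffULL) and substituted with @fill when it equals the
 * INVLD_UWORD marker.
 */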
1511 static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
1512                                  struct icp_qat_uclo_encap_page *encap_page,
1513                                  uint64_t *uword, unsigned int addr_p,
1514                                  unsigned int raddr, uint64_t fill)
1515 {
1516         uint64_t uwrd = 0;
1517         unsigned int i;
1518
1519         if (!encap_page) {
1520                 *uword = fill;
1521                 return;
1522         }
1523         for (i = 0; i < encap_page->uwblock_num; i++) {
1524                 if (raddr >= encap_page->uwblock[i].start_addr &&
1525                     raddr <= encap_page->uwblock[i].start_addr +
1526                     encap_page->uwblock[i].words_num - 1) {
1527                         raddr -= encap_page->uwblock[i].start_addr;
1528                         raddr *= obj_handle->uword_in_bytes;
1529                         memcpy(&uwrd, (void *)(((uintptr_t)
1530                                encap_page->uwblock[i].micro_words) + raddr),
1531                                obj_handle->uword_in_bytes);
1532                         uwrd = uwrd & 0xbffffffffffull;
1533                 }
1534         }
1535         *uword = uwrd;
1536         if (*uword == INVLD_UWORD)
1537                 *uword = fill;
1538 }
1539
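/*
 * Write one encapsulated page into an AE's ustore.  Microwords are staged
 * through obj_handle->uword_buf in chunks of at most UWORD_CPYBUF_SIZE
 * words; invalid microwords are replaced with the image's fill pattern.
 */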
1540 static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
1541                                         struct icp_qat_uclo_encap_page
1542                                         *encap_page, unsigned int ae)
1543 {
1544         unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
1545         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1546         uint64_t fill_pat;
1547
1548         /* load the page starting at appropriate ustore address */
1549         /* get fill-pattern from an image -- they are all the same */
1550         memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
1551                sizeof(uint64_t));
1552         uw_physical_addr = encap_page->beg_addr_p;
1553         uw_relative_addr = 0;
1554         words_num = encap_page->micro_words_num;
1555         while (words_num) {
1556                 if (words_num < UWORD_CPYBUF_SIZE)
1557                         cpylen = words_num;
1558                 else
1559                         cpylen = UWORD_CPYBUF_SIZE;
1560
1561                 /* load the buffer */
1562                 for (i = 0; i < cpylen; i++)
1563                         qat_uclo_fill_uwords(obj_handle, encap_page,
1564                                              &obj_handle->uword_buf[i],
1565                                              uw_physical_addr + i,
1566                                              uw_relative_addr + i, fill_pat);
1567
1568                 /* copy the buffer to ustore */
1569                 qat_hal_wr_uwords(handle, (unsigned char)ae,
1570                                   uw_physical_addr, cpylen,
1571                                   obj_handle->uword_buf);
1572
1573                 uw_physical_addr += cpylen;
1574                 uw_relative_addr += cpylen;
1575                 words_num -= cpylen;
1576         }
1577 }
1578
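/*
 * Write a UOF image to every AE it is assigned to: find the slice that owns
 * the image's contexts, write the slice's default page to ustore, point the
 * current-page entries of the assigned contexts at that page and set the
 * live contexts and their PCs to the image entry address.
 */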
1579 static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
1580                                     struct icp_qat_uof_image *image)
1581 {
1582         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1583         unsigned int ctx_mask, s;
1584         struct icp_qat_uclo_page *page;
1585         unsigned char ae;
1586         int ctx;
1587
1588         if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
1589                 ctx_mask = 0xff;
1590         else
1591                 ctx_mask = 0x55;
1592         /* load the default page and set assigned CTX PC
1593          * to the entrypoint address */
1594         for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
1595                 if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
1596                         continue;
1597                 /* find the slice to which this image is assigned */
1598                 for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
1599                         if (image->ctx_assigned & obj_handle->ae_data[ae].
1600                             ae_slices[s].ctx_mask_assigned)
1601                                 break;
1602                 }
1603                 if (s >= obj_handle->ae_data[ae].slice_num)
1604                         continue;
1605                 page = obj_handle->ae_data[ae].ae_slices[s].page;
1606                 if (!page->encap_page->def_page)
1607                         continue;
1608                 qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
1609
1610                 page = obj_handle->ae_data[ae].ae_slices[s].page;
1611                 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
1612                         obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
1613                                         (ctx_mask & (1 << ctx)) ? page : NULL;
1614                 qat_hal_set_live_ctx(handle, (unsigned char)ae,
1615                                      image->ctx_assigned);
1616                 qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
1617                                image->entry_address);
1618         }
1619 }
1620
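/*
 * Map, authenticate and load every signed image in the SUOF image table,
 * unmapping the auth descriptor after each image and on any failure.
 */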
1621 static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
1622 {
1623         unsigned int i;
1624         struct icp_qat_fw_auth_desc *desc = NULL;
1625         struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1626         struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
1627
1628         for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
1629                 if (qat_uclo_map_auth_fw(handle,
1630                                          (char *)simg_hdr[i].simg_buf,
1631                                          (unsigned int)
1632                                          (simg_hdr[i].simg_len),
1633                                          &desc))
1634                         goto wr_err;
1635                 if (qat_uclo_auth_fw(handle, desc))
1636                         goto wr_err;
1637                 if (qat_uclo_load_fw(handle, desc))
1638                         goto wr_err;
1639                 qat_uclo_ummap_auth_fw(handle, &desc);
1640         }
1641         return 0;
1642 wr_err:
1643         qat_uclo_ummap_auth_fw(handle, &desc);
1644         return -EINVAL;
1645 }
1646
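/*
 * Write all UOF images: initialize the global variables, then for each
 * mapped image initialize its ustore contents and write its pages to the
 * assigned AEs.
 */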
1647 static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
1648 {
1649         struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1650         unsigned int i;
1651
1652         if (qat_uclo_init_globals(handle))
1653                 return -EINVAL;
1654         for (i = 0; i < obj_handle->uimage_num; i++) {
1655                 if (!obj_handle->ae_uimage[i].img_ptr)
1656                         return -EINVAL;
1657                 if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
1658                         return -EINVAL;
1659                 qat_uclo_wr_uimage_page(handle,
1660                                         obj_handle->ae_uimage[i].img_ptr);
1661         }
1662         return 0;
1663 }
1664
1665 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
1666 {
1667         return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
1668                                    qat_uclo_wr_uof_img(handle);
1669 }