3 * sep_main_mod.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 * Mark Allyn mark.a.allyn@intel.com
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <asm/ioctl.h>
42 #include <linux/ioport.h>
44 #include <linux/interrupt.h>
45 #include <linux/pagemap.h>
46 #include <asm/cacheflush.h>
47 #include "sep_driver_hw_defs.h"
48 #include "sep_driver_config.h"
49 #include "sep_driver_api.h"
50 #include "sep_driver_ext_api.h"
/*----------------------------------------
  DEFINES
-----------------------------------------*/

/* declares an int module parameter with a default value.
   NOTE(review): expands to two statements with no do { } while (0)
   wrapper - only safe at file scope, which is how it is used here */
#define INT_MODULE_PARM(n, v) int n = v; module_param(n, int, 0)

/*--------------------------------------
  GLOBAL variables
-----------------------------------------*/

/*--------------------------------------------
--------------------------------------------*/

/* debug messages level */
INT_MODULE_PARM(sepDebug, 0x0);
MODULE_PARM_DESC(sepDebug, "Flag to enable SEP debug messages");

/*
  mutex for the access to the internals of the sep driver
*/
static DEFINE_MUTEX(sep_mutex);

/* wait queue head (event) of the driver; woken by sep_inthandler when
   a reply arrives from SEP, waited on by poll/wait paths */
static DECLARE_WAIT_QUEUE_HEAD(g_sep_event);
86 /*------------------------------------------------
88 ---------------------------------------------------*/
91 interrupt handler function
93 irqreturn_t sep_inthandler(int irq, void *dev_id);
96 this function registers the driver to the file system
98 static int sep_register_driver_to_fs(void);
101 this function unregisters driver from fs
103 static void sep_unregister_driver_from_fs(void);
106 this function calculates the size of data that can be inserted into the lli
107 table from this array the condition is that either the table is full
(all entries are entered), or there are no more entries in the lli array
110 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries);
this function builds one lli table from the lli_array according to the
115 static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size);
118 this function goes over the list of the print created tables and prints
121 static void sep_debug_print_lli_tables(struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size);
This function raises an interrupt to SEP that signals that it has a new
129 static void sep_send_command_handler(void);
This function raises an interrupt to SEP that signals that it has a
136 static void sep_send_reply_command_handler(void);
139 This function handles the allocate data pool memory request
140 This function returns calculates the physical address of the allocated memory
141 and the offset of this area from the mapped address. Therefore, the FVOs in
142 user space can calculate the exact virtual address of this allocated memory
144 static int sep_allocate_data_pool_memory_handler(unsigned long arg);
148 This function handles write into allocated data pool command
150 static int sep_write_into_data_pool_handler(unsigned long arg);
153 this function handles the read from data pool command
155 static int sep_read_from_data_pool_handler(unsigned long arg);
this function handles the request for creation of the DMA table
159 for the synchronic symmetric operations (AES,DES)
161 static int sep_create_sync_dma_tables_handler(unsigned long arg);
164 this function handles the request to create the DMA tables for flow
166 static int sep_create_flow_dma_tables_handler(unsigned long arg);
169 This API handles the end transaction request
171 static int sep_end_transaction_handler(unsigned long arg);
175 this function handles add tables to flow
177 static int sep_add_flow_tables_handler(unsigned long arg);
180 this function add the flow add message to the specific flow
182 static int sep_add_flow_tables_message_handler(unsigned long arg);
185 this function handles the request for SEP start
187 static int sep_start_handler(void);
190 this function handles the request for SEP initialization
192 static int sep_init_handler(unsigned long arg);
195 this function handles the request cache and resident reallocation
197 static int sep_realloc_cache_resident_handler(unsigned long arg);
201 This api handles the setting of API mode to blocking or non-blocking
203 static int sep_set_api_mode_handler(unsigned long arg);
205 /* handler for flow done interrupt */
206 static void sep_flow_done_handler(struct work_struct *work);
209 This function locks all the physical pages of the kernel virtual buffer
210 and construct a basic lli array, where each entry holds the physical
211 page address and the size that application data holds in this physical pages
213 static int sep_lock_kernel_pages(unsigned long kernel_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr);
216 This function creates one DMA table for flow and returns its data,
217 and pointer to its info entry
219 static int sep_prepare_one_flow_dma_table(unsigned long virt_buff_addr, unsigned long virt_buff_size, struct sep_lli_entry_t *table_data, struct sep_lli_entry_t **info_entry_ptr, struct sep_flow_context_t *flow_data_ptr, bool isKernelVirtualAddress);
222 This function creates a list of tables for flow and returns the data for the
223 first and last tables of the list
225 static int sep_prepare_flow_dma_tables(unsigned long num_virtual_buffers,
226 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress);
this function finds a space for the new flow dma table
231 static int sep_find_free_flow_dma_table_space(unsigned long **table_address_ptr);
234 this function goes over all the flow tables connected to the given table and
237 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr);
This function handles the set flow id command
242 static int sep_set_flow_id_handler(unsigned long arg);
245 This function returns pointer to the flow data structure
that contains the given id
248 static int sep_find_flow_context(unsigned long flow_id, struct sep_flow_context_t **flow_data_ptr);
252 this function returns the physical and virtual addresses of the static pool
254 static int sep_get_static_pool_addr_handler(unsigned long arg);
this function gets the offset of the physical address from the start of
260 static int sep_get_physical_mapped_offset_handler(unsigned long arg);
264 this function handles the request for get time
266 static int sep_get_time_handler(unsigned long arg);
269 calculates time and sets it at the predefined address
271 static int sep_set_time(unsigned long *address_ptr, unsigned long *time_in_sec_ptr);
274 PATCH for configuring the DMA to single burst instead of multi-burst
276 static void sep_configure_dma_burst(void);
279 This function locks all the physical pages of the
280 application virtual buffer and construct a basic lli
281 array, where each entry holds the physical page address
282 and the size that application data holds in this physical pages
284 static int sep_lock_user_pages(unsigned long app_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr);
286 /*---------------------------------------------
288 -----------------------------------------------*/
/*
  this function locks SEP by taking the driver mutex
  (the function opening lines are not visible in this view)
*/
	mutex_lock(&sep_mutex);

/*
  this function unlocks SEP by releasing the driver mutex
*/
	mutex_unlock(&sep_mutex);
309 this function returns the address of the message shared area
311 void sep_map_shared_area(unsigned long *mappedAddr_ptr)
313 *mappedAddr_ptr = sep_dev->shared_area_addr;
/*
  this function signals SEP that a new command message is ready in the
  shared area, by raising the send-command interrupt
*/
void sep_send_msg_rdy_cmd(void)
{
	sep_send_command_handler();
}
/* this function frees all the resources that were allocated for the
   building of the LLI DMA tables */
void sep_free_dma_resources(void)
{
	sep_free_dma_table_data_handler();
}
/* poll(suspend), until reply from sep */
void sep_driver_poll()
	/* last value read from SEP's GPR2 (reply counter register) */
	unsigned long retVal = 0;

#ifdef SEP_DRIVER_POLLING_MODE
	/* busy-poll GPR2 until the low 31 bits (SEP's reply count) catch up
	   with the number of commands the host has sent */
	while (sep_dev->host_to_sep_send_counter != (retVal & 0x7FFFFFFF))
		retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);

	sep_dev->sep_to_host_reply_counter++;

	/* poll, until reply from sep */
	/* NOTE(review): this sleeps until sep_inthandler increments
	   sep_to_host_reply_counter and wakes g_sep_event; the #else and
	   #endif lines are not visible in this view - confirm placement */
	wait_event(g_sep_event, (sep_dev->host_to_sep_send_counter == sep_dev->sep_to_host_reply_counter));
/*----------------------------------------------------------------------
  open function of the character driver - must only lock the mutex
  must also release the memory data pool allocations
------------------------------------------------------------------------*/
static int sep_open(struct inode *inode_ptr, struct file *file_ptr)
	dbg("SEP Driver:--------> open start\n");

	/* check the blocking mode */
	if (sep_dev->block_mode_flag)
		/* blocking mode: sleep until the device mutex is free */
		mutex_lock(&sep_mutex);
	/* non-blocking mode: fail immediately if the device is busy
	   (the else guarding this line is not visible in this view) */
		error = mutex_trylock(&sep_mutex);

	/* check the error */
	/* NOTE(review): the message mentions down_interruptible but the
	   code above uses mutex_trylock - stale text from an earlier
	   semaphore-based implementation */
		edbg("SEP Driver: down_interruptible failed\n");

	/* release data pool allocations */
	sep_dev->data_pool_bytes_allocated = 0;

	dbg("SEP Driver:<-------- open end\n");
/*------------------------------------------------------------
  release function of the character driver
-------------------------------------------------------------*/
static int sep_release(struct inode *inode_ptr, struct file *file_ptr)
	dbg("----------->SEP Driver: sep_release start\n");

#if 0				/*!SEP_DRIVER_POLLING_MODE */
	/* disabled block: mask all SEP interrupts via the IMR register */
	sep_write_reg(sep_dev, HW_HOST_IMR_REG_ADDR, 0x7FFF);

	/* release IRQ line */
	/* NOTE(review): "DIRVER" typo is part of the macro name; it must
	   match the header definition, so it is left untouched here */
	free_irq(SEP_DIRVER_IRQ_NUM, &sep_dev->reg_base_address);

	/* unlock the sep mutex */
	mutex_unlock(&sep_mutex);

	dbg("SEP Driver:<-------- sep_release end\n");
413 /*---------------------------------------------------------------
414 map function - this functions maps the message shared area
415 -----------------------------------------------------------------*/
416 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
418 unsigned long phys_addr;
420 dbg("-------->SEP Driver: mmap start\n");
422 /* check that the size of the mapped range is as the size of the message
424 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
425 edbg("SEP Driver mmap requested size is more than allowed\n");
426 printk(KERN_WARNING "SEP Driver mmap requested size is more \
428 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
429 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
433 edbg("SEP Driver:g_message_shared_area_addr is %08lx\n", sep_dev->message_shared_area_addr);
435 /* get physical address */
436 phys_addr = sep_dev->phys_shared_area_addr;
438 edbg("SEP Driver: phys_addr is %08lx\n", phys_addr);
440 if (remap_pfn_range(vma, vma->vm_start, phys_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
441 edbg("SEP Driver remap_page_range failed\n");
442 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
446 dbg("SEP Driver:<-------- mmap end\n");
/*-----------------------------------------------
  poll function
 *----------------------------------------------*/
static unsigned int sep_poll(struct file *filp, poll_table * wait)
	/* poll mask returned to the VFS */
	unsigned int mask = 0;
	unsigned long retVal = 0;	/* flow id */

	dbg("---------->SEP Driver poll: start\n");

#if SEP_DRIVER_POLLING_MODE
	/* polling mode: busy-wait on GPR2 until the low 31 bits (SEP's
	   reply count) match the number of commands the host has sent.
	   NOTE(review): the declaration of 'count' is not visible in this
	   view of the file */
	while (sep_dev->host_to_sep_send_counter != (retVal & 0x7FFFFFFF)) {
		retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);

		/* dump the debug words that follow the message area */
		for (count = 0; count < 10 * 4; count += 4)
			edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));

	sep_dev->sep_to_host_reply_counter++;

	/* add the event to the polling wait table */
	poll_wait(filp, &g_sep_event, wait);

	edbg("sep_dev->host_to_sep_send_counter is %lu\n", sep_dev->host_to_sep_send_counter);
	edbg("sep_dev->sep_to_host_reply_counter is %lu\n", sep_dev->sep_to_host_reply_counter);

	/* check if the data is ready */
	if (sep_dev->host_to_sep_send_counter == sep_dev->sep_to_host_reply_counter) {
		/* dump the first words of the message area */
		for (count = 0; count < 12 * 4; count += 4)
			edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + count)));

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + 0x1800 + count)));

		/* re-read GPR2 to distinguish a SEP request from a reply */
		retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		edbg("retVal is %lu\n", retVal);
		/* check if the this is sep reply or request */
		/* NOTE(review): the conditional selecting between the two
		   branches below is not visible in this view */
			edbg("SEP Driver: sep request in\n");
			/* SEP issued a request: report the device writable */
			mask |= POLLOUT | POLLWRNORM;
			edbg("SEP Driver: sep reply in\n");
			/* SEP sent a reply: report the device readable */
			mask |= POLLIN | POLLRDNORM;

	dbg("SEP Driver:<-------- poll exit\n");
/*
  ioctl entry point: validates the magic number and dispatches each SEP
  ioctl command to its handler function.
  NOTE(review): the switch statement, the per-case break statements and
  the return path are not visible in this view of the file.
*/
static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
	dbg("------------>SEP Driver: ioctl start\n");

	edbg("SEP Driver: cmd is %x\n", cmd);

	/* check that the command is for sep device */
	if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)

	case SEP_IOCSENDSEPCOMMAND:
		/* send command to SEP */
		sep_send_command_handler();
		edbg("SEP Driver: after sep_send_command_handler\n");
	case SEP_IOCSENDSEPRPLYCOMMAND:
		/* send reply command to SEP */
		sep_send_reply_command_handler();
	case SEP_IOCALLOCDATAPOLL:
		/* allocate data pool */
		error = sep_allocate_data_pool_memory_handler(arg);
	case SEP_IOCWRITEDATAPOLL:
		/* write data into memory pool */
		error = sep_write_into_data_pool_handler(arg);
	case SEP_IOCREADDATAPOLL:
		/* read data from data pool into application memory */
		error = sep_read_from_data_pool_handler(arg);
	case SEP_IOCCREATESYMDMATABLE:
		/* create dma table for synchronous operation */
		error = sep_create_sync_dma_tables_handler(arg);
	case SEP_IOCCREATEFLOWDMATABLE:
		/* create flow dma tables */
		error = sep_create_flow_dma_tables_handler(arg);
	case SEP_IOCFREEDMATABLEDATA:
		/* free dma table data */
		error = sep_free_dma_table_data_handler();
	case SEP_IOCSETFLOWID:
		/* set flow id */
		error = sep_set_flow_id_handler(arg);
	case SEP_IOCADDFLOWTABLE:
		/* add tables to the dynamic flow */
		error = sep_add_flow_tables_handler(arg);
	case SEP_IOCADDFLOWMESSAGE:
		/* add message of add tables to flow */
		error = sep_add_flow_tables_message_handler(arg);
	case SEP_IOCSEPSTART:
		/* start command to sep */
		error = sep_start_handler();
		/* init command to sep
		   NOTE(review): the SEP_IOCSEPINIT case label line is not
		   visible in this view */
		error = sep_init_handler(arg);
	case SEP_IOCSETAPIMODE:
		/* set non- blocking mode */
		error = sep_set_api_mode_handler(arg);
	case SEP_IOCGETSTATICPOOLADDR:
		/* get the physical and virtual addresses of the static pool */
		error = sep_get_static_pool_addr_handler(arg);
	case SEP_IOCENDTRANSACTION:
		/* end the current transaction */
		error = sep_end_transaction_handler(arg);
	case SEP_IOCREALLOCCACHERES:
		/* reallocate cache and resident */
		error = sep_realloc_cache_resident_handler(arg);
	case SEP_IOCGETMAPPEDADDROFFSET:
		/* get offset of physical address from mapped area start */
		error = sep_get_physical_mapped_offset_handler(arg);
		/* get time
		   NOTE(review): the SEP_IOCGETIME case label line is not
		   visible in this view */
		error = sep_get_time_handler(arg);

	dbg("SEP Driver:<-------- ioctl end\n");
/*
  interrupt handler function: acknowledges SEP interrupts, bumps the
  reply counter and wakes any waiter on g_sep_event
*/
irqreturn_t sep_inthandler(int irq, void *dev_id)
	irqreturn_t int_error;
	unsigned long reg_val;
	unsigned long flow_id;
	struct sep_flow_context_t *flow_context_ptr;

	/* assume the interrupt is ours until proven otherwise */
	int_error = IRQ_HANDLED;

	/* read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep_dev, HW_HOST_IRR_REG_ADDR);
	edbg("SEP Interrupt - reg is %08lx\n", reg_val);

	/* check if this is the flow interrupt */
	/* NOTE(review): this branch is compiled out (constant 0). If it is
	   ever re-enabled, note that flow_id is read from the IRR register
	   although the comment says GPR0 - confirm the intended register */
	if (0 /*reg_val & (0x1 << 11) */ ) {
		/* read GPRO to find out the which flow is done */
		flow_id = sep_read_reg(sep_dev, HW_HOST_IRR_REG_ADDR);

		/* find the context of the flow */
		error = sep_find_flow_context(flow_id >> 28, &flow_context_ptr);
			goto end_function_with_error;

		/* queue the flow-done work item for deferred processing */
		INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);

		queue_work(sep_dev->flow_wq_ptr, &flow_context_ptr->flow_wq);

	/* check if this is reply interrupt from SEP */
	if (reg_val & (0x1 << 13)) {
		/* update the counter of reply messages */
		sep_dev->sep_to_host_reply_counter++;

		/* wake up the waiting process */
		wake_up(&g_sep_event);

		/* not a SEP interrupt: tell the kernel to try other handlers.
		   NOTE(review): the else that guards this assignment is not
		   visible in this view */
		int_error = IRQ_NONE;

end_function_with_error:
	/* clear the interrupt by writing the pending bits back to the ICR */
	sep_write_reg(sep_dev, HW_HOST_ICR_REG_ADDR, reg_val);
/*
  This function prepares only input DMA table for synchronous symmetric
  operations (AES, DES): it pins the source buffer pages, then chains
  LLI tables in the shared area, each ending with an info entry that
  points at the next table.
*/
int sep_prepare_input_dma_table(unsigned long app_virt_addr, unsigned long data_size, unsigned long block_size, unsigned long *lli_table_ptr, unsigned long *num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_entry_ptr;
	/* array of pointers of page */
	struct sep_lli_entry_t *lli_array_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_entry;
	/* num entries in the virtual buffer */
	unsigned long sep_lli_entries;
	/* lli table pointer */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* the total data in one table */
	unsigned long table_data_size;
	/* number of entries in lli table */
	unsigned long num_entries_in_table;
	/* next table address */
	unsigned long lli_table_alloc_addr;
	unsigned long result;

	dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");

	edbg("SEP Driver:data_size is %lu\n", data_size);
	edbg("SEP Driver:block_size is %lu\n", block_size);

	/* initialize the pages pointers */
	sep_dev->in_page_array = 0;
	sep_dev->in_num_pages = 0;

	if (data_size == 0) {
		/* special case - created 2 entries table with zero data */
		in_lli_table_ptr = (struct sep_lli_entry_t *) (sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
		in_lli_table_ptr->physical_address = sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		in_lli_table_ptr->block_size = 0;

		/* second (info) entry: terminator with no next table */
		in_lli_table_ptr->physical_address = 0xFFFFFFFF;
		in_lli_table_ptr->block_size = 0;

		*lli_table_ptr = sep_dev->phys_shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		*num_entries_ptr = 2;
		*table_data_size_ptr = 0;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(app_virt_addr, data_size, &sep_dev->in_num_pages, &lli_array_ptr, &sep_dev->in_page_array);
		/* lock the pages of the user buffer and translate them to pages
		   (the else guarding this line is not visible in this view) */
		result = sep_lock_user_pages(app_virt_addr, data_size, &sep_dev->in_num_pages, &lli_array_ptr, &sep_dev->in_page_array);

	edbg("SEP Driver:output sep_dev->in_num_pages is %lu\n", sep_dev->in_num_pages);

	sep_lli_entries = sep_dev->in_num_pages;

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	/* loop till all the entries in in array are not processed */
	/* NOTE(review): the initializations of current_entry and
	   info_entry_ptr are not visible in this view - confirm both are
	   zeroed before this loop */
	while (current_entry < sep_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));

		/* now calculate the table size so that it will be module block size */
		table_data_size = (table_data_size / block_size) * block_size;

		edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);

		/* first table built: report it to the caller */
		if (info_entry_ptr == 0) {
			/* set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);

			/* update the info entry of the previous in table */
			info_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
			info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);

		/* save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;

	/* print input tables */
	sep_debug_print_lli_tables((struct sep_lli_entry_t *)
				   sep_shared_area_phys_to_virt(*lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);

	/* the array of the pages is no longer needed once tables are built */
	kfree(lli_array_ptr);

	dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
/*
  This function builds input and output DMA tables for synchronous
  symmetric operations (AES, DES). It also checks that each table
  is of the modular block size
*/
int sep_prepare_input_output_dma_table(unsigned long app_virt_in_addr,
				       unsigned long app_virt_out_addr,
				       unsigned long data_size,
				       unsigned long block_size,
				       unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
	/* array of pointers of page */
	struct sep_lli_entry_t *lli_in_array;
	/* array of pointers of page */
	struct sep_lli_entry_t *lli_out_array;

	dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");

	/* initialize the pages pointers */
	sep_dev->in_page_array = 0;
	sep_dev->out_page_array = 0;

	/* check if the pages are in Kernel Virtual Address layout */
	/* NOTE(review): the error-check ifs after each lock call are not
	   visible in this view - only their bodies are shown */
	if (isKernelVirtualAddress == true) {
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(app_virt_in_addr, data_size, &sep_dev->in_num_pages, &lli_in_array, &sep_dev->in_page_array);
			edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");

		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(app_virt_in_addr, data_size, &sep_dev->in_num_pages, &lli_in_array, &sep_dev->in_page_array);
			edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");

	/* now lock the output buffer the same way */
	if (isKernelVirtualAddress == true) {
		result = sep_lock_kernel_pages(app_virt_out_addr, data_size, &sep_dev->out_num_pages, &lli_out_array, &sep_dev->out_page_array);
			edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
			goto end_function_with_error1;

		result = sep_lock_user_pages(app_virt_out_addr, data_size, &sep_dev->out_num_pages, &lli_out_array, &sep_dev->out_page_array);
			edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
			goto end_function_with_error1;

	edbg("sep_dev->in_num_pages is %lu\n", sep_dev->in_num_pages);
	edbg("sep_dev->out_num_pages is %lu\n", sep_dev->out_num_pages);
	edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	/* call the function that creates table from the lli arrays */
	result = sep_construct_dma_tables_from_lli(lli_in_array, sep_dev->in_num_pages, lli_out_array, sep_dev->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
		edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
		goto end_function_with_error2;

	/* fall through - free the lli entry arrays */
	dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
	dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
	dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
end_function_with_error2:
	kfree(lli_out_array);
end_function_with_error1:
	/* NOTE(review): the kfree of lli_in_array expected after this
	   label is not visible in this view - confirm it exists */

	dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
/*
  This function creates the input and output dma tables for
  symmetric operations (AES/DES) according to the block size from LLI arrays
*/
int sep_construct_dma_tables_from_lli(struct sep_lli_entry_t *lli_in_array,
				      unsigned long sep_in_lli_entries,
				      struct sep_lli_entry_t *lli_out_array,
				      unsigned long sep_out_lli_entries,
				      unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
	/* points to the area where next lli table can be allocated */
	unsigned long lli_table_alloc_addr;
	/* input lli table */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* output lli table */
	struct sep_lli_entry_t *out_lli_table_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_in_entry_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_out_entry_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_in_entry;
	/* points to the first entry to be processed in the lli_out_array */
	unsigned long current_out_entry;
	/* max size of the input table */
	unsigned long in_table_data_size;
	/* max size of the output table */
	unsigned long out_table_data_size;
	/* flag that signifies if this is the first tables build from the arrays */
	unsigned long first_table_flag;
	/* the data size that should be in table */
	unsigned long table_data_size;
	/* number of entries in the input table */
	unsigned long num_entries_in_table;
	/* number of entries in the output table */
	unsigned long num_entries_out_table;

	dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep_dev->shared_area_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	current_in_entry = 0;
	current_out_entry = 0;
	/* NOTE(review): first_table_flag is set here but never read in the
	   visible code - the null check on info_in_entry_ptr serves the
	   same purpose */
	first_table_flag = 1;
	info_in_entry_ptr = 0;
	info_out_entry_ptr = 0;

	/* loop till all the entries in in array are not processed */
	while (current_in_entry < sep_in_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* set the first output tables */
		out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));

		/* calculate the maximum size of data for output table */
		out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));

		edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
		edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);

		/* check where the data is smallest */
		table_data_size = in_table_data_size;
		if (table_data_size > out_table_data_size)
			table_data_size = out_table_data_size;

		/* now calculate the table size so that it will be module block size */
		table_data_size = (table_data_size / block_size) * block_size;

		dbg("SEP Driver:table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);

		/* construct output lli table */
		sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);

		/* if info entry is null - this is the first table built */
		if (info_in_entry_ptr == 0) {
			/* set the output parameters to physical addresses */
			*lli_table_in_ptr = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
			*in_num_entries_ptr = num_entries_in_table;
			*lli_table_out_ptr = sep_shared_area_virt_to_phys((unsigned long) out_lli_table_ptr);
			*out_num_entries_ptr = num_entries_out_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
			edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);

			/* update the info entry of the previous in table */
			info_in_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
			info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);

			/* update the info entry of the previous out table */
			info_out_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) out_lli_table_ptr);
			info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);

		/* save the pointer to the info entry of the current tables */
		info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
		info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;

		edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
		edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
		edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);

	/* print input tables */
	sep_debug_print_lli_tables((struct sep_lli_entry_t *)
				   sep_shared_area_phys_to_virt(*lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
	/* print output tables */
	sep_debug_print_lli_tables((struct sep_lli_entry_t *)
				   sep_shared_area_phys_to_virt(*lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);

	dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
980 this function calculates the size of data that can be inserted into the lli
981 table from this array the condition is that either the table is full
982 (all etnries are entered), or there are no more entries in the lli array
984 unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
986 unsigned long table_data_size = 0;
987 unsigned long counter;
989 /* calculate the data in the out lli table if till we fill the whole
990 table or till the data has ended */
991 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
992 table_data_size += lli_in_array_ptr[counter].block_size;
993 return table_data_size;
997 this functions builds ont lli table from the lli_array according to
998 the given size of data
/*
 * NOTE(review): elided listing - the gaps in the embedded line numbers show
 * that blank lines, lone braces, and at least the init/increment of
 * array_counter (and the lli_table_ptr++ advance near line 1048) are missing
 * from this extract; restore them from the original file before building.
 *
 * Copies entries from lli_array_ptr into lli_table_ptr until table_data_size
 * bytes are covered, splitting the last array entry if it overshoots, then
 * writes a terminating "info" entry (0xffffffff / 0). Outputs: number of
 * array entries consumed (*num_processed_entries_ptr, accumulated) and the
 * entry count of the built table (*num_table_entries_ptr).
 */
1000 static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
1002 unsigned long curr_table_data_size;
1003 /* counter of lli array entry */
1004 unsigned long array_counter;
1006 dbg("SEP Driver:--------> sep_build_lli_table start\n");
1008 /* init currrent table data size and lli array entry counter */
1009 curr_table_data_size = 0;
/* NOTE(review): array_counter initialization (line 1010 region) is elided here */
1011 *num_table_entries_ptr = 1;
1013 edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1015 /* fill the table till table size reaches the needed amount */
1016 while (curr_table_data_size < table_data_size) {
1017 /* update the number of entries in table */
1018 (*num_table_entries_ptr)++;
1020 lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
1021 lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
1022 curr_table_data_size += lli_table_ptr->block_size;
1024 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1025 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1026 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1028 /* check for overflow of the table data */
1029 if (curr_table_data_size > table_data_size) {
1030 edbg("SEP Driver:curr_table_data_size > table_data_size\n");
1032 /* update the size of block in the table */
1033 lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
1035 /* update the physical address in the lli array */
1036 lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
1038 /* update the block size left in the lli array */
1039 lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
1041 /* advance to the next entry in the lli_array */
/* NOTE(review): the array_counter++ advance and closing brace are elided here */
1044 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1045 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1047 /* move to the next entry in table */
/* NOTE(review): lli_table_ptr++ and the loop's closing brace are elided here */
1051 /* set the info entry to default */
1052 lli_table_ptr->physical_address = 0xffffffff;
1053 lli_table_ptr->block_size = 0;
1055 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1056 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1057 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1059 /* set the output parameter */
1060 *num_processed_entries_ptr += array_counter;
1062 edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
1063 dbg("SEP Driver:<-------- sep_build_lli_table end\n");
1068 this function goes over the list of the print created tables and
/*
 * Debug helper: walks the chained LLI tables (each table's trailing info
 * entry encodes next-table size in bits 0-23 and entry count in bits 24-31,
 * plus the physical address of the next table; 0xffffffff terminates the
 * chain) and prints every entry.
 * NOTE(review): elided listing - table_count init/increment and several
 * braces are missing from this extract (gaps at lines 1077, 1087-1091,
 * 1105-1107).
 */
1071 static void sep_debug_print_lli_tables(struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
1073 unsigned long table_count;
1074 unsigned long entries_count;
1076 dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
1079 while ((unsigned long) lli_table_ptr != 0xffffffff) {
1080 edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
1081 edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
1083 /* print entries of the table (without info entry) */
1084 for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
1085 edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
1086 edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
1089 /* point to the info entry */
/* NOTE(review): the lli_table_ptr-- step back to the info entry is elided here */
1092 edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1093 edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
/* decode next table's size/entry-count from the info entry's block_size field */
1096 table_data_size = lli_table_ptr->block_size & 0xffffff;
1097 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1098 lli_table_ptr = (struct sep_lli_entry_t *)
1099 (lli_table_ptr->physical_address);
1101 edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
1103 if ((unsigned long) lli_table_ptr != 0xffffffff)
1104 lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_area_phys_to_virt((unsigned long) lli_table_ptr);
1108 dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1113 This function locks all the physical pages of the application virtual buffer
1114 and construct a basic lli array, where each entry holds the physical page
1115 address and the size that application data holds in this physical pages
/*
 * NOTE(review): elided listing - the `int error`/`result` declarations,
 * -ENOMEM assignments, `goto end_function`, `else` lines, returns, and
 * several closing braces are missing from this extract (see the gaps in the
 * embedded line numbers). Also "¤t" at lines 1162/1164 is mojibake
 * for "&current" - fix when restoring.
 *
 * On success, outputs a kmalloc'd lli_array (caller owns/frees), the pinned
 * page array (released via sep_free_dma_pages) and the page count.
 */
1117 int sep_lock_user_pages(unsigned long app_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr)
1120 /* the the page of the end address of the user space buffer */
1121 unsigned long end_page;
1122 /* the page of the start address of the user space buffer */
1123 unsigned long start_page;
1124 /* the range in pages */
1125 unsigned long num_pages;
1126 struct page **page_array;
1127 struct sep_lli_entry_t *lli_array;
1128 unsigned long count;
1131 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
1133 /* set start and end pages and num pages */
1134 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
1135 start_page = app_virt_addr >> PAGE_SHIFT;
1136 num_pages = end_page - start_page + 1;
1138 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
1139 edbg("SEP Driver: data_size is %lu\n", data_size);
1140 edbg("SEP Driver: start_page is %lu\n", start_page);
1141 edbg("SEP Driver: end_page is %lu\n", end_page);
1142 edbg("SEP Driver: num_pages is %lu\n", num_pages);
1144 /* allocate array of pages structure pointers */
1145 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
/* NOTE(review): the NULL check `if (!page_array) {` (line 1146) is elided here */
1147 edbg("SEP Driver: kmalloc for page_array failed\n");
1153 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
/* NOTE(review): the NULL check `if (!lli_array) {` (line 1154) is elided here */
1155 edbg("SEP Driver: kmalloc for lli_array failed\n");
1158 goto end_function_with_error1;
1161 /* convert the application virtual address into a set of physical */
1162 down_read(¤t->mm->mmap_sem);
1163 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
1164 up_read(¤t->mm->mmap_sem);
1166 /* check the number of pages locked - if not all then exit with error */
1167 if (result != num_pages) {
1168 dbg("SEP Driver: not all pages locked by get_user_pages\n");
1171 goto end_function_with_error2;
1174 /* flush the cache */
1175 for (count = 0; count < num_pages; count++)
1176 flush_dcache_page(page_array[count]);
1178 /* set the start address of the first page - app data may start not at
1179 the beginning of the page */
1180 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
1182 /* check that not all the data is in the first page only */
1183 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
1184 lli_array[0].block_size = data_size;
/* NOTE(review): the `else` keyword (line 1185) is elided here */
1186 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
1189 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
1191 /* go from the second page to the prev before last */
1192 for (count = 1; count < (num_pages - 1); count++) {
1193 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
1194 lli_array[count].block_size = PAGE_SIZE;
1196 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
1199 /* if more then 1 pages locked - then update for the last page size needed */
1200 if (num_pages > 1) {
1201 /* update the address of the last page */
1202 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
1204 /* set the size of the last page */
1205 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
/* a zero remainder means the buffer ends exactly on a page boundary */
1207 if (lli_array[count].block_size == 0) {
1208 dbg("app_virt_addr is %08lx\n", app_virt_addr);
1209 dbg("data_size is %lu\n", data_size);
1212 edbg("lli_array[%lu].physical_address is %08lx, \
1213 lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
1216 /* set output params */
1217 *lli_array_ptr = lli_array;
1218 *num_pages_ptr = num_pages;
1219 *page_array_ptr = page_array;
/* NOTE(review): the success `goto end_function;` (line 1220 region) is elided here */
1222 end_function_with_error2:
1223 /* release the cache */
1224 for (count = 0; count < num_pages; count++)
1225 page_cache_release(page_array[count]);
1227 end_function_with_error1:
/* NOTE(review): kfree(page_array), end_function label and return are elided here */
1230 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
1235 This function locks all the physical pages of the kernel virtual buffer
1236 and construct a basic lli array, where each entry holds the physical
1237 page address and the size that application data holds in this physical pages
/*
 * Kernel-buffer variant of sep_lock_user_pages: no pinning is needed, the
 * physical addresses come from virt_to_phys() page by page. *page_array_ptr
 * is set to 0 so the caller's free path skips page release.
 * NOTE(review): elided listing - `int error` declaration, NULL-check of the
 * kmalloc result, `else` lines, closing braces and the return statement are
 * missing from this extract (gaps in the embedded line numbers).
 */
1239 int sep_lock_kernel_pages(unsigned long kernel_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr)
1242 /* the the page of the end address of the user space buffer */
1243 unsigned long end_page;
1244 /* the page of the start address of the user space buffer */
1245 unsigned long start_page;
1246 /* the range in pages */
1247 unsigned long num_pages;
1248 struct sep_lli_entry_t *lli_array;
1249 /* next kernel address to map */
1250 unsigned long next_kernel_address;
1251 unsigned long count;
1253 dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
1255 /* set start and end pages and num pages */
1256 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
1257 start_page = kernel_virt_addr >> PAGE_SHIFT;
1258 num_pages = end_page - start_page + 1;
1260 edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
1261 edbg("SEP Driver: data_size is %lu\n", data_size);
1262 edbg("SEP Driver: start_page is %lx\n", start_page);
1263 edbg("SEP Driver: end_page is %lx\n", end_page);
1264 edbg("SEP Driver: num_pages is %lu\n", num_pages);
1266 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
/* NOTE(review): the NULL check `if (!lli_array) {` (line 1267) is elided here */
1268 edbg("SEP Driver: kmalloc for lli_array failed\n");
1273 /* set the start address of the first page - app data may start not at
1274 the beginning of the page */
1275 lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
1277 /* check that not all the data is in the first page only */
1278 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
1279 lli_array[0].block_size = data_size;
/* NOTE(review): the `else` keyword (line 1280) is elided here */
1281 lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
1284 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
1286 /* advance the address to the start of the next page */
1287 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
1289 /* go from the second page to the prev before last */
1290 for (count = 1; count < (num_pages - 1); count++) {
1291 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
1292 lli_array[count].block_size = PAGE_SIZE;
1294 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
1295 next_kernel_address += PAGE_SIZE;
1298 /* if more then 1 pages locked - then update for the last page size needed */
1299 if (num_pages > 1) {
1300 /* update the address of the last page */
1301 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
1303 /* set the size of the last page */
1304 lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
/* a zero remainder means the buffer ends exactly on a page boundary */
1306 if (lli_array[count].block_size == 0) {
1307 dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
1308 dbg("data_size is %lu\n", data_size);
1312 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
1314 /* set output params */
1315 *lli_array_ptr = lli_array;
1316 *num_pages_ptr = num_pages;
/* no struct page array for kernel buffers - caller must not release pages */
1317 *page_array_ptr = 0;
1319 dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
1324 This function releases all the application virtual buffer physical pages,
1325 that were previously locked
/*
 * NOTE(review): elided listing - the `if (dirtyFlag) {` / `} else {`
 * branch structure (lines 1330-1331, 1337-1340) and the return are missing
 * from this extract. As written upstream: dirty pages (output buffers) are
 * marked dirty before release; read-only input pages are just released.
 */
1327 int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
1329 unsigned long count;
/* NOTE(review): `if (dirtyFlag) {` (line 1331 region) is elided here */
1332 for (count = 0; count < num_pages; count++) {
1333 /* the out array was written, therefore the data was changed */
1334 if (!PageReserved(page_array_ptr[count]))
1335 SetPageDirty(page_array_ptr[count]);
1336 page_cache_release(page_array_ptr[count]);
/* NOTE(review): `}` and `} else {` (lines 1337-1338) are elided here */
1339 /* free in pages - the data was only read, therefore no update was done
1341 for (count = 0; count < num_pages; count++)
1342 page_cache_release(page_array_ptr[count]);
1346 /* free the array */
1347 kfree(page_array_ptr);
1353 This function raises interrupt to SEP that signals that is has a new
/*
 * Signals the SEP that a new command message is waiting in the shared area:
 * dumps the first 12 words of the message, bumps the host->SEP counter and
 * writes GPR0 to raise the interrupt.
 * NOTE(review): lines 1361-1365 are elided from this extract (presumably a
 * cache flush / timestamp step - TODO confirm against the original file).
 */
1356 static void sep_send_command_handler()
1358 unsigned long count;
1360 dbg("SEP Driver:--------> sep_send_command_handler start\n");
/* dump the message words for debugging (12 32-bit words, byte offsets) */
1366 for (count = 0; count < 12 * 4; count += 4)
1367 edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + count)));
1369 /* update counter */
1370 sep_dev->host_to_sep_send_counter++;
1371 /* send interrupt to SEP */
1372 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
1373 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
1378 This function raises interrupt to SEPm that signals that is has a
1379 new command from HOST
/*
 * Reply path: dumps the outgoing message, then writes the send counter to
 * GPR2 to interrupt the SEP, and advances both directions' counters.
 * NOTE(review): host_to_sep_send_counter is incremented twice (lines 1392
 * and 1396) - looks intentional per the "update both counters" comment,
 * but verify against the protocol before changing.
 * NOTE(review): lines 1386-1388 are elided from this extract.
 */
1381 static void sep_send_reply_command_handler()
1383 unsigned long count;
1385 dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");
/* dump the message words for debugging (12 32-bit words, byte offsets) */
1389 for (count = 0; count < 12 * 4; count += 4)
1390 edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area_addr + count)));
1391 /* update counter */
1392 sep_dev->host_to_sep_send_counter++;
1393 /* send the interrupt to SEP */
1394 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep_dev->host_to_sep_send_counter);
1395 /* update both counters */
1396 sep_dev->host_to_sep_send_counter++;
1397 sep_dev->sep_to_host_reply_counter++;
1398 dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
1404 This function handles the allocate data pool memory request
1405 This function returns calculates the physical address of the
1406 allocated memory, and the offset of this area from the mapped address.
1407 Therefore, the FVOs in user space can calculate the exact virtual
1408 address of this allocated memory
/*
 * Bump-allocates command_args.num_bytes from the shared-area data pool and
 * returns the pool offset plus physical address to user space via `arg`.
 * NOTE(review): elided listing - the `int error` declaration, the
 * error checks/gotos after each copy, the -ENOMEM path for pool exhaustion
 * and the return are missing from this extract.
 */
1410 static int sep_allocate_data_pool_memory_handler(unsigned long arg)
1413 struct sep_driver_alloc_t command_args;
1415 dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
1417 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
1421 /* allocate memory */
/* reject the request if it would overflow the fixed-size pool */
1422 if ((sep_dev->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
1427 /* set the virtual and physical address */
1428 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep_dev->data_pool_bytes_allocated;
1429 command_args.phys_address = sep_dev->phys_shared_area_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep_dev->data_pool_bytes_allocated;
1431 /* write the memory back to the user space */
1432 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
1436 /* set the allocation */
1437 sep_dev->data_pool_bytes_allocated += command_args.num_bytes;
1440 dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
1445 This function handles write into allocated data pool command
/*
 * Copies num_bytes from the user buffer (app_address) into the shared-area
 * data pool at the kernel-virtual datapool_address, after range-checking the
 * destination against the pool bounds.
 * NOTE(review): elided listing - `int error` declaration, error checks/gotos
 * after each get_user, the error body of the range check and the return are
 * missing from this extract. The range check only validates the start
 * address, not virt_address + num_bytes - TODO confirm upstream behavior.
 */
1447 static int sep_write_into_data_pool_handler(unsigned long arg)
1450 unsigned long virt_address;
1451 unsigned long app_in_address;
1452 unsigned long num_bytes;
1453 unsigned long data_pool_area_addr;
1455 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
1457 /* get the application address */
1458 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
1462 /* get the virtual kernel address address */
1463 error = get_user(virt_address, &(((struct sep_driver_write_t *) arg)->datapool_address));
1467 /* get the number of bytes */
1468 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
1472 /* calculate the start of the data pool */
1473 data_pool_area_addr = sep_dev->shared_area_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
1476 /* check that the range of the virtual kernel address is correct */
1477 if ((virt_address < data_pool_area_addr) || (virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES))) {
1481 /* copy the application data */
1482 error = copy_from_user((void *) virt_address, (void *) app_in_address, num_bytes);
1484 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
1489 this function handles the read from data pool command
/*
 * Mirror of the write handler: copies num_bytes from the shared-area data
 * pool (kernel-virtual datapool_address) back to the user buffer
 * (app_address) after bounds-checking the pool address.
 * NOTE(review): elided listing - `int error` declaration, error checks/gotos,
 * the error body of the range check and the return are missing from this
 * extract. As with the write path, only the start address is range-checked.
 */
1491 static int sep_read_from_data_pool_handler(unsigned long arg)
1494 /* virtual address of dest application buffer */
1495 unsigned long app_out_address;
1496 /* virtual address of the data pool */
1497 unsigned long virt_address;
1498 unsigned long num_bytes;
1499 unsigned long data_pool_area_addr;
1501 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
1503 /* get the application address */
1504 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
1508 /* get the virtual kernel address address */
1509 error = get_user(virt_address, &(((struct sep_driver_write_t *) arg)->datapool_address));
1513 /* get the number of bytes */
1514 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
1518 /* calculate the start of the data pool */
1519 data_pool_area_addr = sep_dev->shared_area_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
1521 /* check that the range of the virtual kernel address is correct */
1522 if ((virt_address < data_pool_area_addr) || (virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES))) {
1527 /* copy the application data */
1528 error = copy_to_user((void *) app_out_address, (void *) virt_address, num_bytes);
1530 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
1536 this function handles tha request for creation of the DMA table
1537 for the synchronic symmetric operations (AES,DES)
/*
 * Builds the synchronous-operation DMA tables: input-only when no output
 * address is supplied, otherwise paired input/output tables, then copies the
 * resulting table addresses/sizes back to user space.
 * NOTE(review): elided listing - `int error` declaration, the error checks
 * after copy_from_user and table preparation, the `else` line before the
 * input-only branch (line 1565-1566 region) and the return are missing.
 */
1539 static int sep_create_sync_dma_tables_handler(unsigned long arg)
1542 /* command arguments */
1543 struct sep_driver_build_sync_table_t command_args;
1545 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1547 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1551 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1552 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1553 edbg("data_size is %lu\n", command_args.data_in_size);
1554 edbg("block_size is %lu\n", command_args.block_size);
1556 /* check if we need to build only input table or input/output */
1557 if (command_args.app_out_address)
1558 /* prepare input and output tables */
1559 error = sep_prepare_input_output_dma_table(command_args.app_in_address,
1560 command_args.app_out_address,
1561 command_args.data_in_size,
1562 command_args.block_size,
1563 &command_args.in_table_address,
1564 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
/* NOTE(review): the `else` keyword (line 1565) is elided here */
1566 /* prepare input tables */
1567 error = sep_prepare_input_dma_table(command_args.app_in_address,
1568 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1573 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t));
1575 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1580 this function handles the request for freeing dma table for synhronic actions
/*
 * Releases the pinned page arrays recorded for the last synchronous DMA
 * operation: input pages are released clean (dirtyFlag 0), output pages are
 * marked dirty (dirtyFlag 1), then the bookkeeping fields are reset.
 * NOTE(review): the return statement (lines 1599-1600 region) is elided
 * from this extract.
 */
1582 int sep_free_dma_table_data_handler()
1584 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1586 /* free input pages array */
1587 sep_free_dma_pages(sep_dev->in_page_array, sep_dev->in_num_pages, 0);
1589 /* free output pages array if needed */
1590 if (sep_dev->out_page_array)
1591 sep_free_dma_pages(sep_dev->out_page_array, sep_dev->out_num_pages, 1);
1593 /* reset all the values */
1594 sep_dev->in_page_array = 0;
1595 sep_dev->out_page_array = 0;
1596 sep_dev->in_num_pages = 0;
1597 sep_dev->out_num_pages = 0;
1598 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1603 this function handles the request to create the DMA tables for flow
/*
 * Allocates a free flow context, builds the flow DMA table chain from the
 * user-supplied virtual buffers, reports the first table's address/size back
 * to user space, and records the tables in the context under SEP_TEMP_FLOW_ID.
 * On any failure after table creation the tables are torn down via
 * sep_deallocated_flow_tables().
 * NOTE(review): elided listing - `int error` declaration, error checks/gotos
 * after the copies and the find-context call, the `else` line before the
 * output-flag assignment (line 1659) and the final return are missing.
 */
1605 static int sep_create_flow_dma_tables_handler(unsigned long arg)
1608 struct sep_driver_build_flow_table_t command_args;
1609 /* first table - output */
1610 struct sep_lli_entry_t first_table_data;
1611 /* dma table data */
1612 struct sep_lli_entry_t last_table_data;
1613 /* pointer to the info entry of the previuos DMA table */
1614 struct sep_lli_entry_t *prev_info_entry_ptr;
1615 /* pointer to the flow data strucutre */
1616 struct sep_flow_context_t *flow_context_ptr;
1618 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1620 /* init variables */
1621 prev_info_entry_ptr = 0;
1622 first_table_data.physical_address = 0xffffffff;
1624 /* find the free structure for flow data */
1625 error = sep_find_flow_context(SEP_FREE_FLOW_ID, &flow_context_ptr);
1629 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1633 /* create flow tables */
1634 error = sep_prepare_flow_dma_tables(command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1636 goto end_function_with_error;
1638 /* check if flow is static */
1639 if (!command_args.flow_type)
1640 /* point the info entry of the last to the info entry of the first */
1641 last_table_data = first_table_data;
1643 /* set output params */
1644 command_args.first_table_addr = first_table_data.physical_address;
/* entry count and data size are packed into block_size - unpack for user space */
1645 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1646 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1648 /* send the parameters to user application */
1649 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1651 goto end_function_with_error;
1653 /* all the flow created - update the flow entry with temp id */
1654 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1656 /* set the processing tables data in the context */
1657 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1658 flow_context_ptr->input_tables_in_process = first_table_data;
/* NOTE(review): the `else` keyword (line 1659) is elided here */
1660 flow_context_ptr->output_tables_in_process = first_table_data;
/* NOTE(review): the success `goto end_function;` (line 1662 region) is elided here */
1664 end_function_with_error:
1665 /* free the allocated tables */
1666 sep_deallocated_flow_tables(&first_table_data);
1668 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1673 this functio n handles add tables to flow
/*
 * Appends freshly prepared DMA tables to an existing flow. If tables already
 * exist for the requested direction, the new chain is linked in by writing
 * the new first-table descriptor into the info entry of the previous chain's
 * last table; otherwise the new chain becomes the first/last of the flow.
 * First-table address/entry-count/data-size are reported back to user space.
 * NOTE(review): elided listing - `int error` declaration, error checks/gotos,
 * `} else {` lines inside each direction branch and the return are missing
 * from this extract (gaps in the embedded line numbers).
 */
1675 static int sep_add_flow_tables_handler(unsigned long arg)
1678 unsigned long num_entries;
1679 struct sep_driver_add_flow_table_t command_args;
1680 struct sep_flow_context_t *flow_context_ptr;
1681 /* first dma table data */
1682 struct sep_lli_entry_t first_table_data;
1683 /* last dma table data */
1684 struct sep_lli_entry_t last_table_data;
1685 /* pointer to the info entry of the current DMA table */
1686 struct sep_lli_entry_t *info_entry_ptr;
1688 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1690 /* get input parameters */
1691 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1695 /* find the flow structure for the flow id */
1696 error = sep_find_flow_context(command_args.flow_id, &flow_context_ptr);
1700 /* prepare the flow dma tables */
1701 error = sep_prepare_flow_dma_tables(command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1703 goto end_function_with_error;
1705 /* now check if there is already an existing add table for this flow */
1706 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1707 /* this buffer was for input buffers */
1708 if (flow_context_ptr->input_tables_flag) {
1709 /* add table already exists - add the new tables to the end
1711 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
/* info entry is the last entry of the previous chain's last table */
1713 info_entry_ptr = (struct sep_lli_entry_t *)
1714 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1716 /* connect to list of tables */
1717 *info_entry_ptr = first_table_data;
1719 /* set the first table data */
1720 first_table_data = flow_context_ptr->first_input_table;
/* NOTE(review): `} else {` (line 1721 region) is elided here */
1722 /* set the input flag */
1723 flow_context_ptr->input_tables_flag = 1;
1725 /* set the first table data */
1726 flow_context_ptr->first_input_table = first_table_data;
1728 /* set the last table data */
1729 flow_context_ptr->last_input_table = last_table_data;
1730 } else { /* this is output tables */
1732 /* this buffer was for input buffers */
1733 if (flow_context_ptr->output_tables_flag) {
1734 /* add table already exists - add the new tables to
1735 the end of the previous */
1736 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1738 info_entry_ptr = (struct sep_lli_entry_t *)
1739 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1741 /* connect to list of tables */
1742 *info_entry_ptr = first_table_data;
1744 /* set the first table data */
1745 first_table_data = flow_context_ptr->first_output_table;
/* NOTE(review): `} else {` (line 1746 region) is elided here */
1747 /* set the input flag */
1748 flow_context_ptr->output_tables_flag = 1;
1750 /* set the first table data */
1751 flow_context_ptr->first_output_table = first_table_data;
1753 /* set the last table data */
1754 flow_context_ptr->last_output_table = last_table_data;
1757 /* set output params */
1758 command_args.first_table_addr = first_table_data.physical_address;
1759 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1760 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1762 /* send the parameters to user application */
1763 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1764 end_function_with_error:
1765 /* free the allocated tables */
1766 sep_deallocated_flow_tables(&first_table_data);
1768 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1773 this function add the flow add message to the specific flow
/*
 * Copies a user-supplied message (bounded by
 * SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) into the flow context identified by
 * command_args.flow_id.
 * NOTE(review): elided listing - `int error` declaration, error checks/gotos
 * after each step, the error body of the length check and the return are
 * missing from this extract.
 */
1775 static int sep_add_flow_tables_message_handler(unsigned long arg)
1778 struct sep_driver_add_message_t command_args;
1779 struct sep_flow_context_t *flow_context_ptr;
1781 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1783 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
/* reject oversized messages before touching the context buffer */
1788 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1793 /* find the flow context */
1794 error = sep_find_flow_context(command_args.flow_id, &flow_context_ptr);
1798 /* copy the message into context */
1799 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1800 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1802 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1808 this function returns the physical and virtual addresses of the static pool
/*
 * Reports the physical and kernel-virtual base addresses of the static area
 * inside the shared region back to user space via `arg`.
 * NOTE(review): the `int error` declaration (line 1812 region) and the final
 * `return error;` are elided from this extract.
 */
1810 static int sep_get_static_pool_addr_handler(unsigned long arg)
1813 struct sep_driver_static_pool_addr_t command_args;
1815 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1817 /*prepare the output parameters in the struct */
1818 command_args.physical_static_address = sep_dev->phys_shared_area_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1819 command_args.virtual_static_address = sep_dev->shared_area_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1821 edbg("SEP Driver:physical_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1823 /* send the parameters to user application */
1824 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
1825 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
1830 this address gets the offset of the physical address from the start
/*
 * Converts a physical address inside the shared area into its byte offset
 * from the area base and returns the offset to user space. Addresses below
 * the base are rejected.
 * NOTE(review): elided listing - `int error` declaration, error checks, the
 * error body of the bounds check and the return are missing from this
 * extract. Only the lower bound is checked - the upper bound check appears
 * to have been elided too; confirm against the original file.
 */
1833 static int sep_get_physical_mapped_offset_handler(unsigned long arg)
1836 struct sep_driver_get_mapped_offset_t command_args;
1838 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
1840 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
1844 if (command_args.physical_address < sep_dev->phys_shared_area_addr) {
1849 /*prepare the output parameters in the struct */
1850 command_args.offset = command_args.physical_address - sep_dev->phys_shared_area_addr;
1852 edbg("SEP Driver:physical_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
1854 /* send the parameters to user application */
1855 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
1857 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
/*
 * Polls GPR3 for a start acknowledgement from the SEP; on failure reads the
 * error status from GPR0 into `error` and returns it.
 * NOTE(review): elided listing - the do/while polling loop wrapper around
 * the GPR3 read (lines 1873/1875 region), the condition of the value check
 * (line 1878) and the return are missing from this extract; as shown, the
 * register would be read only once.
 */
1865 static int sep_start_handler(void)
1867 unsigned long reg_val;
1868 unsigned long error = 0;
1870 dbg("SEP Driver:--------> sep_start_handler start\n");
1872 /* wait in polling for message from SEP */
1874 reg_val = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
1877 /* check the value */
1879 /* fatal error - read erro status from GPRO */
1880 error = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
1882 dbg("SEP Driver:<-------- sep_start_handler end\n");
1887 this function handles the request for SEP initialization
/*
 * Downloads the user-supplied init message word-by-word into the SEP's SRAM
 * (via the SRAM address/data registers), kicks the SEP through GPR0, then
 * polls GPR3 for completion; reg_val == 0x1 indicates init failure, in which
 * case the SW monitor (reg 0x8060) and GPR0 error status are dumped.
 * NOTE(review): elided listing - `{` braces, error checks after
 * copy_from_user, and crucially the do/while wrapper around the GPR3 poll
 * (lines 1929-1932 region) are missing: as literally shown, line 1932 would
 * spin forever on a stale reg_val. Restore from the original file.
 * NOTE(review): the get_user() return value at line 1919 is ignored -
 * flagged upstream as well.
 */
1889 static int sep_init_handler(unsigned long arg)
1891 unsigned long message_word;
1892 unsigned long *message_ptr;
1893 struct sep_driver_init_t command_args;
1894 unsigned long counter;
1895 unsigned long error;
1896 unsigned long reg_val;
1898 dbg("SEP Driver:--------> sep_init_handler start\n");
1901 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
1903 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
1908 /* PATCH - configure the DMA to single -burst instead of multi-burst */
1909 /*sep_configure_dma_burst(); */
1911 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
1913 message_ptr = (unsigned long *) command_args.message_addr;
1915 /* set the base address of the SRAM */
1916 sep_write_reg(sep_dev, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
1918 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
1919 get_user(message_word, message_ptr);
1920 /* write data to SRAM */
1921 sep_write_reg(sep_dev, HW_SRAM_DATA_REG_ADDR, message_word);
1922 edbg("SEP Driver:message_word is %lu\n", message_word);
1923 /* wait for write complete */
1924 sep_wait_sram_write(sep_dev);
1926 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
/* signal the SEP to start processing the downloaded init message */
1928 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
1931 reg_val = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
1932 while (!(reg_val & 0xFFFFFFFD));
1934 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
1936 /* check the value */
1937 if (reg_val == 0x1) {
1938 edbg("SEP Driver:init failed\n");
1940 error = sep_read_reg(sep_dev, 0x8060);
1941 edbg("SEP Driver:sw monitor is %lu\n", error);
1943 /* fatal error - read erro status from GPRO */
1944 error = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
1945 edbg("SEP Driver:error is %lu\n", error);
1948 dbg("SEP Driver:<-------- sep_init_handler end\n");
/*
 * sep_realloc_cache_resident_handler - relocate the SEP cache and resident
 * images and report the new addresses back to user space.
 * Copies the request from @arg, copies the cache/resident blobs to their
 * target areas, then computes the new base address as the lowest of the
 * resident, cache and shared-area physical addresses.
 */
1954 this function handles the request cache and resident reallocation
1956 static int sep_realloc_cache_resident_handler(unsigned long arg)
1959 unsigned long phys_cache_address;
1960 unsigned long phys_resident_address;
1961 struct sep_driver_realloc_cache_resident_t command_args;
1964 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_realloc_cache_resident_t));
1968 /* copy cache and resident to the their intended locations */
1969 error = sep_copy_cache_resident_to_area(command_args.cache_addr, command_args.cache_size_in_bytes, command_args.resident_addr, command_args.resident_size_in_bytes, &phys_cache_address, &phys_resident_address);
1973 /* lock the area (if needed) */
1974 sep_lock_cache_resident_area();
1976 command_args.new_base_addr = sep_dev->phys_shared_area_addr;
1978 /* find the new base address according to the lowest address between
1979 cache, resident and shared area */
1980 if (phys_resident_address < command_args.new_base_addr)
1981 command_args.new_base_addr = phys_resident_address;
1982 if (phys_cache_address < command_args.new_base_addr)
1983 command_args.new_base_addr = phys_cache_address;
1985 /* set the return parameters */
1986 command_args.new_cache_addr = phys_cache_address;
1987 command_args.new_resident_addr = phys_resident_address;
1989 /* set the new shared area */
1990 command_args.new_shared_area_addr = sep_dev->phys_shared_area_addr;
1992 edbg("SEP Driver:command_args.new_shared_area_addr is %08lx\n", command_args.new_shared_area_addr);
1993 edbg("SEP Driver:command_args.new_base_addr is %08lx\n", command_args.new_base_addr);
1994 edbg("SEP Driver:command_args.new_resident_addr is %08lx\n", command_args.new_resident_addr);
1995 edbg("SEP Driver:command_args.new_cache_addr is %08lx\n", command_args.new_cache_addr);
1997 /* return to user */
1998 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_realloc_cache_resident_t));
/*
 * sep_get_time_handler - stamp the current time into the shared area via
 * sep_set_time() and copy the resulting physical address / seconds value
 * back to the user buffer at @arg.
 * NOTE(review): the first assignment to `error` is immediately overwritten
 * by the copy_to_user() result, so a sep_set_time() failure is lost.
 */
2004 this function handles the request for get time
2006 static int sep_get_time_handler(unsigned long arg)
2009 struct sep_driver_get_time_t command_args;
2011 error = sep_set_time(&command_args.time_physical_address, &command_args.time_value);
2012 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_get_time_t));
/*
 * sep_set_api_mode_handler - switch the driver between blocking and
 * non-blocking API mode by reading the mode flag from the user struct at
 * @arg and storing it in sep_dev->block_mode_flag.
 */
2018 This api handles the setting of API mode to blocking or non-blocking
2020 static int sep_set_api_mode_handler(unsigned long arg)
2023 unsigned long mode_flag;
2025 dbg("SEP Driver:--------> sep_set_api_mode_handler start\n");
2027 error = get_user(mode_flag, &(((struct sep_driver_set_api_mode_t *) arg)->mode));
2031 /* set the global flag */
2032 sep_dev->block_mode_flag = mode_flag;
2034 dbg("SEP Driver:<-------- sep_set_api_mode_handler end\n");
/*
 * sep_end_transaction_handler - finish the current SEP transaction and
 * release the driver mutex so another caller may start one.
 * The interrupt-mode teardown (mask IMR, free the IRQ line) is compiled out
 * by the "#if 0" below.
 */
2039 This API handles the end transaction request
2041 static int sep_end_transaction_handler(unsigned long arg)
2043 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2045 #if 0 /*!SEP_DRIVER_POLLING_MODE */
/* mask all SEP interrupts */
2047 sep_write_reg(sep_dev, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2049 /* release IRQ line */
2050 free_irq(SEP_DIRVER_IRQ_NUM, &sep_dev->reg_base_address);
2052 /* release the sep mutex */
2053 mutex_unlock(&sep_mutex);
2056 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2061 /* handler for flow done interrupt */
/*
 * Workqueue callback run when a flow completes: frees the flow's processed
 * input (and, when present, output) DMA tables, and if more input tables
 * are queued, re-signals the SEP via GPR2. The work_struct is assumed to be
 * the first member of struct sep_flow_context_t, hence the direct cast —
 * TODO confirm; container_of() would be the safer idiom.
 */
2062 static void sep_flow_done_handler(struct work_struct *work)
2064 struct sep_flow_context_t *flow_data_ptr;
2066 /* obtain the mutex */
2067 mutex_lock(&sep_mutex);
2069 /* get the pointer to context */
2070 flow_data_ptr = (struct sep_flow_context_t *) work;
2072 /* free all the current input tables in sep */
2073 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2075 /* free all the current tables output tables in SEP (if needed) */
2076 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2077 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2079 /* check if we have additional tables to be sent to SEP only input
2080 flag may be checked */
2081 if (flow_data_ptr->input_tables_flag) {
2082 /* copy the message to the shared RAM and signal SEP */
/* NOTE(review): the destination here is flow_data_ptr->message and the
 * source is the shared area — the opposite of what the comment above says.
 * Verify the intended copy direction against the full source. */
2083 memcpy((void *) flow_data_ptr->message, (void *) sep_dev->shared_area_addr, flow_data_ptr->message_size_in_bytes);
2085 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2087 mutex_unlock(&sep_mutex);
/*
 * sep_prepare_flow_dma_tables - build the chained LLI tables for a flow.
 * Reads @num_virtual_buffers (address, size) pairs starting at
 * @first_buff_addr, builds one flow DMA table per buffer via
 * sep_prepare_one_flow_dma_table(), links each table to the previous one
 * through its info entry, and returns the first and last tables' data in
 * @first_table_data_ptr / @last_table_data_ptr.
 */
2092 This function creates a list of tables for flow and returns the data for
2093 the first and last tables of the list
2095 static int sep_prepare_flow_dma_tables(unsigned long num_virtual_buffers,
2096 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
2099 unsigned long virt_buff_addr;
2100 unsigned long virt_buff_size;
2101 struct sep_lli_entry_t table_data;
2102 struct sep_lli_entry_t *info_entry_ptr;
2103 struct sep_lli_entry_t *prev_info_entry_ptr;
2108 prev_info_entry_ptr = 0;
2110 /* init the first table to default */
2111 table_data.physical_address = 0xffffffff;
2112 first_table_data_ptr->physical_address = 0xffffffff;
2113 table_data.block_size = 0;
2115 for (i = 0; i < num_virtual_buffers; i++) {
2116 /* get the virtual buffer address */
/* NOTE(review): get_user() is given &first_buff_addr — the address of the
 * local parameter, not the user pointer it holds. The elided lines that
 * "advance the address" may account for this; verify against full source. */
2117 error = get_user(virt_buff_addr, &first_buff_addr);
2121 /* get the virtual buffer size */
2123 error = get_user(virt_buff_size, &first_buff_addr);
2127 /* advance the address to point to the next pair of address|size */
2130 /* now prepare the one flow LLI table from the data */
2131 error = sep_prepare_one_flow_dma_table(virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
2136 /* if this is the first table - save it to return to the user
2138 *first_table_data_ptr = table_data;
2140 /* set the pointer to info entry */
2141 prev_info_entry_ptr = info_entry_ptr;
2143 /* not first table - the previous table info entry should
2145 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
2147 /* set the pointer to info entry */
2148 prev_info_entry_ptr = info_entry_ptr;
2152 /* set the last table data */
2153 *last_table_data_ptr = table_data;
/*
 * sep_prepare_one_flow_dma_table - build a single flow DMA table for one
 * virtual buffer and return its descriptor and a pointer to its info entry.
 * Finds free space in the flow table area, pins the buffer's pages (kernel
 * or user path depending on @isKernelVirtualAddress), records the page
 * count and page-array pointer in the two words preceding the entries, then
 * fills one LLI entry per page plus a terminating info entry
 * (physical_address == 0xffffffff, block_size == 0).
 */
2160 This function creates one DMA table for flow and returns its data,
2161 and pointer to its info entry
2163 static int sep_prepare_one_flow_dma_table(unsigned long virt_buff_addr, unsigned long virt_buff_size, struct sep_lli_entry_t *table_data, struct sep_lli_entry_t **info_entry_ptr, struct sep_flow_context_t *flow_data_ptr, bool isKernelVirtualAddress)
2166 /* the range in pages */
2167 unsigned long lli_array_size;
2168 struct sep_lli_entry_t *lli_array;
2169 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
2170 unsigned long *start_dma_table_ptr;
2171 /* total table data counter */
2172 unsigned long dma_table_data_count;
2173 /* pointer that will keep the pointer to the pages of the virtual buffer */
2174 struct page **page_array_ptr;
2175 unsigned long entry_count;
2177 /* find the space for the new table */
2178 error = sep_find_free_flow_dma_table_space(&start_dma_table_ptr);
2182 /* check if the pages are in Kernel Virtual Address layout */
2183 if (isKernelVirtualAddress == true)
2184 /* lock kernel buffer in the memory */
2185 error = sep_lock_kernel_pages(virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
2187 /* lock user buffer in the memory */
2188 error = sep_lock_user_pages(virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
2193 /* set the pointer to page array at the beginning of table - this table is
2194 now considered taken */
2195 *start_dma_table_ptr = lli_array_size;
2197 /* point to the place of the pages pointers of the table */
2198 start_dma_table_ptr++;
2200 /* set the pages pointer */
2201 *start_dma_table_ptr = (unsigned long) page_array_ptr;
2203 /* set the pointer to the first entry */
2204 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
2206 /* now create the entries for table */
2207 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
2208 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
2210 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
2212 /* set the total data of a table */
2213 dma_table_data_count += lli_array[entry_count].block_size;
2215 flow_dma_table_entry_ptr++;
2218 /* set the physical address */
2219 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
2221 /* set the num_entries and total data size */
2222 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
2224 /* set the info entry */
2225 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
2226 flow_dma_table_entry_ptr->block_size = 0;
2228 /* set the pointer to info entry */
2229 *info_entry_ptr = flow_dma_table_entry_ptr;
2231 /* the array of the lli entries */
/*
 * sep_find_flow_context - look up a flow context by @flow_id.
 * Linearly scans sep_dev->flows_data_array; on a match stores the entry's
 * address in @flow_data_ptr. Reaching SEP_DRIVER_NUM_FLOWS means no match
 * (the not-found return value is elided in this view).
 */
2239 This function returns pointer to the flow data structure
2240 that contains the given id
2242 static int sep_find_flow_context(unsigned long flow_id, struct sep_flow_context_t **flow_data_ptr)
2244 unsigned long count;
2248 always search for flow with id default first - in case we
2249 already started working on the flow there can be no situation
2250 when 2 flows are with default flag
2252 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
2253 if (sep_dev->flows_data_array[count].flow_id == flow_id) {
2254 *flow_data_ptr = &sep_dev->flows_data_array[count];
2259 if (count == SEP_DRIVER_NUM_FLOWS)
/*
 * sep_find_free_flow_dma_table_space - find a free slot in the flow DMA
 * table area of the shared region.
 * Walks the area in fixed-size strides (table_size_in_words: max entries
 * plus the two header words holding the page count and page-array pointer);
 * a slot whose first word, masked to 31 bits, is zero is free. Returns the
 * slot via @table_address_ptr, or fails when the end of the area is reached.
 */
2267 this function find a space for the new flow dma table
2269 static int sep_find_free_flow_dma_table_space(unsigned long **table_address_ptr)
2272 /* pointer to the id field of the flow dma table */
2273 unsigned long *start_table_ptr;
2274 unsigned long flow_dma_area_start_addr;
2275 unsigned long flow_dma_area_end_addr;
2276 /* maximum table size in words */
2277 unsigned long table_size_in_words;
2279 /* find the start address of the flow DMA table area */
2280 flow_dma_area_start_addr = sep_dev->shared_area_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
2282 /* set end address of the flow table area */
2283 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
2285 /* set table size in words */
2286 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
2288 /* set the pointer to the start address of DMA area */
2289 start_table_ptr = (unsigned long *) flow_dma_area_start_addr;
2291 /* find the space for the next table */
2292 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && ((unsigned long) start_table_ptr < flow_dma_area_end_addr))
2293 start_table_ptr += table_size_in_words;
2295 /* check if we reached the end of flow tables area */
2296 if ((unsigned long) start_table_ptr >= flow_dma_area_end_addr)
2299 *table_address_ptr = start_table_ptr;
/*
 * sep_deallocated_flow_tables - walk the chain of flow tables starting at
 * @first_table_ptr and free each one's pinned pages.
 * Each table stores its page count and page-array pointer in the two words
 * before the entries; the info entry (last entry) links to the next table,
 * with 0xffffffff terminating the chain.
 * NOTE(review): physical_address is cast straight to a dereferenceable
 * pointer — this only works if the field actually holds a kernel virtual
 * address here; confirm against the full source.
 */
2305 this function goes over all the flow tables connected to the given
2306 table and deallocate them
2308 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
2311 unsigned long *table_ptr;
2312 /* end address of the flow dma area */
2313 unsigned long num_entries;
2314 unsigned long num_pages;
2315 struct page **pages_ptr;
2316 /* maximum table size in words */
2317 struct sep_lli_entry_t *info_entry_ptr;
2319 /* set the pointer to the first table */
2320 table_ptr = (unsigned long *) first_table_ptr->physical_address;
2322 /* set the num of entries */
2323 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
2324 & SEP_NUM_ENTRIES_MASK;
2326 /* go over all the connected tables */
2327 while (*table_ptr != 0xffffffff) {
2328 /* get number of pages */
2329 num_pages = *(table_ptr - 2);
2331 /* get the pointer to the pages */
2332 pages_ptr = (struct page **) (*(table_ptr - 1));
2334 /* free the pages */
2335 sep_free_dma_pages(pages_ptr, num_pages, 1);
2337 /* goto to the info entry */
2338 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
/* follow the link in the info entry to the next table in the chain */
2340 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
2341 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
/*
 * sep_set_flow_id_handler - assign a real flow id to the flow that was just
 * created: reads the id from the user struct at @arg, finds the context
 * still carrying SEP_TEMP_FLOW_ID and stamps the new id on it.
 */
2348 This function handler the set flow id command
2350 static int sep_set_flow_id_handler(unsigned long arg)
2353 unsigned long flow_id;
2354 struct sep_flow_context_t *flow_data_ptr;
2356 dbg("------------>SEP Driver: sep_set_flow_id_handler start\n");
2358 error = get_user(flow_id, &(((struct sep_driver_set_flow_id_t *) arg)->flow_id));
2362 /* find the flow data structure that was just used for creating new flow
2363 - its id should be default */
2364 error = sep_find_flow_context(SEP_TEMP_FLOW_ID, &flow_data_ptr);
2369 flow_data_ptr->flow_id = flow_id;
2372 dbg("SEP Driver:<-------- sep_set_flow_id_handler end\n");
/*
 * sep_set_time - write the current wall-clock time into the shared message
 * area: a SEP_TIME_VAL_TOKEN marker word followed by tv_sec. Optionally
 * returns the physical address of the stamp (@address_ptr) and the seconds
 * value (@time_in_sec_ptr); both output pointers appear to be optional
 * (the NULL check for address_ptr is elided in this view).
 */
2378 calculates time and sets it at the predefined address
2380 static int sep_set_time(unsigned long *address_ptr, unsigned long *time_in_sec_ptr)
2382 struct timeval time;
2383 /* address of time in the kernel */
2384 unsigned long time_addr;
2387 dbg("SEP Driver:--------> sep_set_time start\n");
2389 do_gettimeofday(&time);
2391 /* set value in the SYSTEM MEMORY offset */
2392 time_addr = sep_dev->message_shared_area_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
2394 *(unsigned long *) time_addr = SEP_TIME_VAL_TOKEN;
2395 *(unsigned long *) (time_addr + 4) = time.tv_sec;
2397 edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
2398 edbg("SEP Driver:time_addr is %lu\n", time_addr);
2399 edbg("SEP Driver:g_message_shared_area_addr is %lu\n", sep_dev->message_shared_area_addr);
2401 /* set the output parameters if needed */
2403 *address_ptr = sep_shared_area_virt_to_phys(time_addr);
2405 if (time_in_sec_ptr)
2406 *time_in_sec_ptr = time.tv_sec;
2408 dbg("SEP Driver:<-------- sep_set_time end\n");
/*
 * sep_wait_busy - poll the SEP busy register until the device is idle
 * (the polling loop itself is elided in this view; only the register read
 * is visible).
 */
2413 static void sep_wait_busy(struct sep_device *dev)
2418 reg = sep_read_reg(sep_dev, HW_HOST_SEP_BUSY_REG_ADDR);
/*
 * sep_configure_dma_burst - workaround ("PATCH") that switches the SEP DMA
 * engine from multi-burst to single-burst: requests register access from
 * the SEP via GPR0, waits for the device to go idle, writes 0 to the AHB
 * read/write bursts register, then releases access and waits again.
 */
2423 PATCH for configuring the DMA to single burst instead of multi-burst
2425 static void sep_configure_dma_burst(void)
2427 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2429 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2431 /* request access to registers from SEP */
2432 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2434 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2436 sep_wait_busy(sep_dev);
2438 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2440 /* set the DMA burst register to single burst */
2441 sep_write_reg(sep_dev, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2443 /* release the sep busy */
2444 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2445 sep_wait_busy(sep_dev);
2447 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2451 /* major and minor device numbers */
2452 static dev_t sep_devno;
2454 /* the files operations structure of the driver */
/* only .owner and .release are visible here; the open/ioctl/poll entries
 * are elided in this view */
2455 static struct file_operations sep_file_operations = {
2456 .owner = THIS_MODULE,
2460 .release = sep_release,
2465 /* cdev struct of the driver */
2466 static struct cdev sep_cdev;
/*
 * sep_register_driver_to_fs - register the driver with the VFS:
 * dynamically allocate a char-device region ("sep_sec_driver"), initialize
 * the cdev with sep_file_operations and add it to the system. On cdev_add
 * failure the device-number region is unregistered again.
 */
2469 this function registers the driver to the file system
2471 static int sep_register_driver_to_fs(void)
2473 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2475 edbg("sep_driver:major number allocation failed, retval is %d\n", ret_val);
2480 cdev_init(&sep_cdev, &sep_file_operations);
2481 sep_cdev.owner = THIS_MODULE;
2483 /* register the driver with the kernel */
2484 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2487 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2488 goto end_function_unregister_devnum;
2493 end_function_unregister_devnum:
2495 /* unregister dev numbers */
2496 unregister_chrdev_region(sep_devno, 1);
/*
 * sep_unregister_driver_from_fs - undo sep_register_driver_to_fs(): remove
 * the cdev and release the allocated device-number region.
 */
2503 this function unregisters driver from fs
2505 static void sep_unregister_driver_from_fs(void)
2507 cdev_del(&sep_cdev);
2508 /* unregister dev numbers */
2509 unregister_chrdev_region(sep_devno, 1);
2513 /*--------------------------------------------------------------
 * sep_init - module entry point.
 * Resets the transaction counters and pool accounting, binds to the device,
 * allocates and partitions the shared memory area, optionally (compile-time)
 * reconfigures the SEP message area via GPR1 handshake, initializes the flow
 * contexts and workqueue, registers with the VFS and loads the ROM code.
 * NOTE(review): as visible here, the success path after sep_load_rom_code()
 * falls straight into the error-cleanup labels; a "goto end_function" (or
 * similar) is presumably among the elided lines — confirm.
 ----------------------------------------------------------------*/
2516 static int __init sep_init(void)
2520 int size; /* size of memory for allocation */
2522 dbg("SEP Driver:-------->Init start\n");
2523 edbg("sep->shared_area_addr = %lx\n", (unsigned long) &sep_dev->shared_area_addr);
2525 /* transaction counter that coordinates the transactions between SEP
2527 sep_dev->host_to_sep_send_counter = 0;
2529 /* counter for the messages from sep */
2530 sep_dev->sep_to_host_reply_counter = 0;
2532 /* counter for the number of bytes allocated in the pool
2533 for the current transaction */
2534 sep_dev->data_pool_bytes_allocated = 0;
2536 /* set the starting mode to blocking */
2537 sep_dev->block_mode_flag = 1;
2539 ret_val = sep_register_driver_to_device();
2541 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2542 goto end_function_unregister_from_fs;
2544 /* calculate the total size for allocation */
2545 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2546 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2548 /* allocate the shared area */
2549 if (sep_map_and_alloc_shared_area(size, &sep_dev->shared_area_addr, &sep_dev->phys_shared_area_addr)) {
2551 /* allocation failed */
2552 goto end_function_unmap_io_memory;
2554 /* now set the memory regions */
2555 sep_dev->message_shared_area_addr = sep_dev->shared_area_addr;
2557 edbg("SEP Driver: g_message_shared_area_addr is %08lx\n", sep_dev->message_shared_area_addr);
2559 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2560 /* send the new SHARED MESSAGE AREA to the SEP */
2561 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep_dev->phys_shared_area_addr);
2563 /* poll for SEP response */
2564 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
/* SEP echoes the new address back in GPR1 (0xffffffff means not ready) */
2565 while (retVal != 0xffffffff && retVal != sep_dev->phys_shared_area_addr)
2566 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2568 /* check the return value (register) */
2569 if (retVal != sep_dev->phys_shared_area_addr) {
2571 goto end_function_deallocate_message_area;
2574 /* init the flow contextes */
2575 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2576 sep_dev->flows_data_array[counter].flow_id = SEP_FREE_FLOW_ID;
2578 sep_dev->flow_wq_ptr = create_singlethread_workqueue("sepflowwq");
2579 if (sep_dev->flow_wq_ptr == 0) {
2581 edbg("sep_driver:flow queue creation failed\n");
2582 goto end_function_deallocate_sep_shared_area;
2584 edbg("SEP Driver: create flow workqueue \n");
2586 /* register driver to fs */
2587 ret_val = sep_register_driver_to_fs();
2589 goto end_function_deallocate_sep_shared_area;
2590 /* load the rom code */
2591 sep_load_rom_code();
2593 end_function_unregister_from_fs:
2594 /* unregister from fs */
2595 sep_unregister_driver_from_fs();
2596 end_function_deallocate_sep_shared_area:
2597 /* de-allocate shared area */
2598 sep_unmap_and_free_shared_area(size, sep_dev->shared_area_addr, sep_dev->phys_shared_area_addr);
2599 end_function_unmap_io_memory:
2600 iounmap((void *) sep_dev->reg_base_address);
2601 /* release io memory region */
2602 release_mem_region(SEP_IO_MEM_REGION_START_ADDRESS, SEP_IO_MEM_REGION_SIZE);
2604 dbg("SEP Driver:<-------- Init end\n");
2609 /*-------------------------------------------------------------
 * sep_exit - module exit point: unregister from the VFS, free the shared
 * area (size recomputed from the same constants sep_init() used), unmap the
 * register window and release the I/O memory region.
 --------------------------------------------------------------*/
2612 static void __exit sep_exit(void)
2616 dbg("SEP Driver:--------> Exit start\n");
2618 /* unregister from fs */
2619 sep_unregister_driver_from_fs();
2620 /* calculate the total size for de-allocation */
2621 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2622 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2623 /* free shared area */
2624 sep_unmap_and_free_shared_area(size, sep_dev->shared_area_addr, sep_dev->phys_shared_area_addr);
2625 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2626 iounmap((void *) sep_dev->reg_base_address);
2627 edbg("SEP Driver: iounmap \n");
2628 /* release io memory region */
2629 release_mem_region(SEP_IO_MEM_REGION_START_ADDRESS, SEP_IO_MEM_REGION_SIZE);
2630 edbg("SEP Driver: release_mem_region \n");
2631 dbg("SEP Driver:<-------- Exit end\n");
/* module entry/exit registration and license declaration */
2635 module_init(sep_init);
2636 module_exit(sep_exit);
2638 MODULE_LICENSE("GPL");