atomic_set(&kctx->setup_in_progress, 0);
kctx->keep_gpu_powered = MALI_FALSE;
- /* keep a reference to the process' user side memory manager */
- kbase_os_store_process_mm(kctx);
-
if (kbase_mem_usage_init(&kctx->usage, kctx->kbdev->memdev.per_process_memory_limit >> PAGE_SHIFT))
{
goto free_kctx;
* Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex
*
* All other flags must be added there */
-
- /* Pointer to hold a reference to process memory manager of the context owner,
- * must be set using kbase_os_store_process_mm().
- */
- void * process_mm;
};
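A note on the field removed above: process_mm was a bare void * set to current->mm by kbase_os_store_process_mm() (removed later in this patch), with no reference taken, so nothing stopped the mm_struct from being freed while the driver still held the pointer. If the accounting were kept rather than removed, a pinned variant might look like the following sketch; it assumes a kernel that provides mmgrab()/mmdrop() in <linux/sched/mm.h>, and both function names are invented:

#include <linux/sched.h>
#include <linux/sched/mm.h>

/* Sketch only, not part of this patch: store current->mm with a
 * reference held so the cached pointer cannot dangle. */
static void kbase_store_process_mm_pinned(struct kbase_context *kctx)
{
        struct mm_struct *mm = current->mm;

        if (mm)
                mmgrab(mm);     /* pin the mm_struct (mm_count reference) */
        kctx->process_mm = mm;
}

/* Counterpart for context teardown: drop the pin taken above. */
static void kbase_release_process_mm_pinned(struct kbase_context *kctx)
{
        struct mm_struct *mm = kctx->process_mm;

        kctx->process_mm = NULL;
        if (mm)
                mmdrop(mm);
}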
typedef enum kbase_reg_access_type
goto out;
}
- /* As the tmem is being unmapped we need to update the pages used by the process */
- if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM )
- {
- kbase_process_page_usage_inc(reg->kctx, map->nr_pages);
- }
-
OSK_DLIST_REMOVE(&reg->map_list, map, link);
kfree(map);
nr_pages -= commit->nr_pages;
reg->nr_alloc_pages -= commit->nr_pages;
- if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM )
- {
- kbase_process_page_usage_dec(reg->kctx, commit->nr_pages);
- }
-
/* free the node (unless it's the root node) */
if (commit != &reg->root_commit)
{
page_array + reg->nr_alloc_pages - nr_pages);
commit->nr_pages -= nr_pages;
reg->nr_alloc_pages -= nr_pages;
- if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM )
- {
- kbase_process_page_usage_dec(reg->kctx, nr_pages);
- }
break; /* end the loop */
}
}
if (!nr_pages_left)
{
- if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM)
- {
- kbase_process_page_usage_inc(reg->kctx, nr_pages_requested);
- }
return MALI_ERROR_NONE;
}
}
if (nr_pages_left == 0)
{
- if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM)
- {
- kbase_process_page_usage_inc(reg->kctx, nr_pages_requested);
- }
return MALI_ERROR_NONE;
}
}
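The same zone test, (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM, guards every accounting call this patch removes: the two allocation success paths above and the unmap/shrink paths elsewhere in the diff. Had the counters stayed, a small helper would have kept the test in one place. A sketch follows; the helper name is invented, while the flag names, INLINE macro, and mali_bool convention (MALI_TRUE/MALI_FALSE) are the ones already used in this file:

/* Hypothetical helper (not in the driver): the TMEM-zone test that each
 * removed hunk repeated inline. */
static INLINE mali_bool kbase_reg_is_tmem(const struct kbase_va_region *reg)
{
        return ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM) ?
               MALI_TRUE : MALI_FALSE;
}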
void kbase_os_mem_map_lock(kbase_context * kctx);
void kbase_os_mem_map_unlock(kbase_context * kctx);
-/**
- * @brief Update the memory allocation counters for the current process
- *
- * OS specific call to updates the current memory allocation counters for the current process with
- * the supplied delta.
- *
- * @param[in] pages The desired delta to apply to the memory usage counters.
- */
-
-void kbasep_os_process_page_usage_update( struct kbase_context * kctx, long pages );
-
-/**
- * @brief Add to the memory allocation counters for the current process
- *
- * OS specific call to add to the current memory allocation counters for the current process by
- * the supplied amount.
- *
- * @param[in] kctx The kernel base context used for the allocation.
- * @param[in] pages The desired delta to apply to the memory usage counters.
- */
-
-static INLINE void kbase_process_page_usage_inc( struct kbase_context *kctx, unsigned long pages )
-{
- kbasep_os_process_page_usage_update( kctx, pages );
-}
-
-/**
- * @brief Subtract from the memory allocation counters for the current process
- *
- * OS specific call to subtract from the current memory allocation counters for the current process by
- * the supplied amount.
- *
- * @param[in] kctx The kernel base context used for the allocation.
- * @param[in] pages The desired delta to apply to the memory usage counters.
- */
-
-static INLINE void kbase_process_page_usage_dec( struct kbase_context *kctx, unsigned long pages )
-{
- kbasep_os_process_page_usage_update( kctx, 0 - pages );
-}
-
-/**
- * @brief Store the memory manager for the process associated with this context
- *
- * OS specific call to store a pointer to the memory manager for this process.
- *
- * @param[in,out] kctx The kernel base context used for the allocation.
- */
-
-void kbase_os_store_process_mm(kbase_context *kctx);
-
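One detail of the removed decrement wrapper above: pages is an unsigned long while kbasep_os_process_page_usage_update() takes a signed long, so "0 - pages" first wraps around in unsigned arithmetic and only becomes the intended negative delta through implementation-defined conversion. An equivalent, more explicit form (sketch only, reusing the removed names):

static INLINE void kbase_process_page_usage_dec(struct kbase_context *kctx, unsigned long pages)
{
        /* Cast before negating so the sign change is explicit rather than
         * relying on unsigned wraparound plus implicit conversion. */
        kbasep_os_process_page_usage_update(kctx, -(long)pages);
}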
/**
* @brief Find a CPU mapping of a memory allocation containing a given address range
*
kbase_mem_usage_release_pages(&kctx->usage, 1);
return 0;
}
- kbase_process_page_usage_inc(kctx, 1);
+
for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++)
page[i] = ENTRY_IS_INVAL;
if (zap)
{
kbase_phy_pages_free(kctx->kbdev, &kctx->pgd_allocator, 1, &target_pgd);
- kbase_process_page_usage_dec(kctx, 1 );
kbase_mem_usage_release_pages(&kctx->usage, 1);
}
}
beenthere("pgd %lx", (unsigned long)kctx->pgd);
kbase_phy_pages_free(kctx->kbdev, &kctx->pgd_allocator, 1, &kctx->pgd);
- kbase_process_page_usage_dec(kctx, 1 );
kbase_mem_usage_release_pages(&kctx->usage, 1);
}
KBASE_EXPORT_TEST_API(kbase_mmu_free_pgd)
map->page_off = start_off;
map->private = vma;
- if ( (reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_TMEM)
- {
- kbase_process_page_usage_dec(reg->kctx, nr_pages);
- }
-
OSK_DLIST_PUSH_FRONT(&reg->map_list, map,
struct kbase_cpu_mapping, link);
}
KBASE_EXPORT_SYMBOL(kbase_va_alloc)
-void kbasep_os_process_page_usage_update( kbase_context *kctx, long pages )
-{
- struct mm_struct *mm = ( struct mm_struct *)kctx->process_mm;
- if ( NULL != mm )
- {
-#ifdef SPLIT_RSS_COUNTING
- add_mm_counter(mm, MM_FILEPAGES, pages);
-#else
- spin_lock(&mm->page_table_lock);
- add_mm_counter(mm, MM_FILEPAGES, pages);
- spin_unlock(&mm->page_table_lock);
-#endif
- }
-}
-
-void kbase_os_store_process_mm(kbase_context *kctx)
-{
- struct mm_struct *mm = current->mm;
-
- kctx->process_mm = mm;
-}
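Two things stand out in the removed implementations above. First, kbase_os_store_process_mm() cached current->mm as a bare pointer, which is what the pinned sketch near the top of this section addresses. Second, the SPLIT_RSS_COUNTING conditional is deliberate: on the kernel versions this driver targeted, add_mm_counter() without SPLIT_RSS_COUNTING expands to a plain, non-atomic update, so the fallback path serialized it under mm->page_table_lock. A re-commented sketch of the update follows, assuming the pinned process_mm field from the earlier sketch; the function name is invented:

static void kbasep_process_page_usage_update_pinned(struct kbase_context *kctx, long pages)
{
        struct mm_struct *mm = kctx->process_mm;       /* pinned via mmgrab() */

        if (NULL == mm)
                return;

#ifdef SPLIT_RSS_COUNTING
        /* add_mm_counter() is an atomic update on these kernels. */
        add_mm_counter(mm, MM_FILEPAGES, pages);
#else
        /* Without SPLIT_RSS_COUNTING the counter is a plain long, so the
         * update must be serialized; the mm's page_table_lock is used. */
        spin_lock(&mm->page_table_lock);
        add_mm_counter(mm, MM_FILEPAGES, pages);
        spin_unlock(&mm->page_table_lock);
#endif
}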
-
void kbase_va_free(kbase_context *kctx, void *va)
{
struct kbase_va_region *reg;