author     Yifan Lu    2016-10-18 22:03:52 -0700
committer  Yifan Lu    2016-10-18 22:03:52 -0700
commit     ee786dee1c25cf11233f3318bec929d1dc05ae42 (patch)
tree       cc216a2f2303136709905882802fbbde4802047c
parent     Moved slab allocator out of substitute (diff)
download   substitute-ee786dee1c25cf11233f3318bec929d1dc05ae42.tar.gz
Added execmem for Vita
Removed unneeded field for hooks
-rw-r--r--   lib/hook-functions.c    3
-rw-r--r--   lib/substitute.h        2
-rw-r--r--   lib/vita/execmem.c     37
3 files changed, 36 insertions, 6 deletions
diff --git a/lib/hook-functions.c b/lib/hook-functions.c
index 3f332f0..f59126a 100644
--- a/lib/hook-functions.c
+++ b/lib/hook-functions.c
@@ -184,8 +184,7 @@ int substitute_hook_functions(const struct substitute_function_hook *hooks,
#endif
hi->code = code;
hi->arch_dis_ctx = arch;
- uintptr_t pc_patch_start = hook->func_addr ?
- hook->func_addr : (uintptr_t) code;
+ uintptr_t pc_patch_start = (uintptr_t) code;
int patch_size;
bool need_intro_trampoline;
if ((ret = check_intro_trampoline(&trampoline_ptr, &trampoline_addr,
diff --git a/lib/substitute.h b/lib/substitute.h
index 224fd04..becf4b0 100644
--- a/lib/substitute.h
+++ b/lib/substitute.h
@@ -98,8 +98,6 @@ struct substitute_function_hook {
/* Currently unused; pass 0. (Protip: When using C {} struct initializer
* syntax, you can just omit this.) */
int options;
- /* If not zero, then assume the actual address of function is func_addr */
- uintptr_t func_addr;
/* Any platform specific auxiliary data */
void *opt;
};
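
With func_addr removed, substitute always patches at the address given in the hook struct itself, so a caller simply fills in the target directly. Below is a minimal sketch of what a hook declaration could look like after this change; it assumes the struct's remaining fields are function, replacement, and old_ptr as in upstream substitute, and that substitute_hook_functions takes the hook array, a count, an optional record pointer, and an options flag — none of which is shown in this diff.

#include "substitute.h"

static int (*orig_open)(const char *path, int flags);

static int my_open(const char *path, int flags) {
    /* forward to the original code through the trampoline substitute generated */
    return orig_open(path, flags);
}

static int install_hook(void *target_open, void *platform_opt) {
    struct substitute_function_hook hook = {
        .function = target_open,       /* patch address; no func_addr override anymore */
        .replacement = (void *) my_open,
        .old_ptr = &orig_open,         /* receives a callable pointer to the original */
        /* .options omitted: the header comment says to pass 0 */
        .opt = platform_opt,           /* platform-specific auxiliary data, left to the Vita layer */
    };
    return substitute_hook_functions(&hook, 1, NULL, 0);
}
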
diff --git a/lib/vita/execmem.c b/lib/vita/execmem.c
index 4b65a05..1b37269 100644
--- a/lib/vita/execmem.c
+++ b/lib/vita/execmem.c
@@ -19,6 +19,17 @@
const int g_exe_slab_item_size = PATCH_ITEM_SIZE > sizeof(tai_hook_t) ? PATCH_ITEM_SIZE : sizeof(tai_hook_t);
/**
+ * The reason we use the same slab allocator for patches (sized 216 max) and
+ * tai_hook_t (size 16) is because in both cases, we need to allocate memory in
+ * the user's memory space. One option is to use two different slabs and that
+ * would make more sense. However my prediction is that there is not a large
+ * number of hooks per process, so the minimum size for the slab (0x1000 bytes)
+ * is already too much. Better to waste 200 bytes per allocation than 2000 bytes
+ * per process. If usage dictates a need for change, it is easy enough to put
+ * them in different slabs.
+ */
+
+/**
* @file execmem.c
*
* @brief Functions for allocating executable memory and writing to RO
@@ -46,7 +57,15 @@ const int g_exe_slab_item_size = PATCH_ITEM_SIZE > sizeof(tai_hook_t) ? PATCH_IT
*/
int execmem_alloc_unsealed(UNUSED uintptr_t hint, void **ptr_p, uintptr_t *vma_p,
size_t *size_p, void *opt) {
- return SUBSTITUTE_OK;
+ struct slab_chain *slab = (struct slab_chain *)opt;
+
+ *ptr_p = slab_alloc(slab, vma_p);
+ *size_p = PATCH_ITEM_SIZE;
+ if (*ptr_p == NULL) {
+ return SUBSTITUTE_ERR_VM;
+ } else {
+ return SUBSTITUTE_OK;
+ }
}
/**
@@ -57,7 +76,19 @@ int execmem_alloc_unsealed(UNUSED uintptr_t hint, void **ptr_p, uintptr_t *vma_p
*
* @return `SUBSTITUTE_OK`
*/
-int execmem_seal(UNUSED void *ptr, void *opt) {
+int execmem_seal(void *ptr, void *opt) {
+ uintptr_t vma, ptr_align;
+ size_t len_align;
+ struct slab_chain *slab = (struct slab_chain *)opt;
+
+ vma = slab_getmirror(slab, ptr);
+ vma = vma & ~0x1F;
+ ptr_align = (uintptr_t)ptr & ~0x1F;
+ len_align = (((uintptr_t)ptr + PATCH_ITEM_SIZE + 0x1F) & ~0x1F) - ptr_align;
+
+ sceKernelCpuDcacheAndL2Flush((void *)ptr_align, len_align);
+ sceKernelCpuIcacheAndL2Flush((void *)vma, len_align);
+
return SUBSTITUTE_OK;
}
@@ -68,6 +99,8 @@ int execmem_seal(UNUSED void *ptr, void *opt) {
* @param opt A `tai_substitute_args_t` structure
*/
void execmem_free(void *ptr, void *opt) {
+ struct slab_chain *slab = (struct slab_chain *)opt;
+ slab_free(slab, ptr);
}
/**
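
For anyone checking the masks in the new execmem_seal: the flush window is rounded out to 32-byte cache lines, aligning the start down with & ~0x1F and measuring the length from that aligned start to the rounded-up end of the patch; the same length is then flushed at the writable address (D-cache) and at the executable mirror returned by slab_getmirror (I-cache). A standalone sketch of just that arithmetic, using a hypothetical PATCH_ITEM_SIZE of 216 and an example address instead of the Vita cache calls:

#include <stdint.h>
#include <stdio.h>

#define PATCH_ITEM_SIZE 216   /* hypothetical value, for illustration only */

int main(void) {
    uintptr_t ptr = 0x8100000c;                      /* example unaligned patch address */
    uintptr_t ptr_align = ptr & ~(uintptr_t)0x1f;    /* align start down to a 32-byte line */
    /* round the end of the patch up to a line boundary, then measure from the aligned start */
    size_t len_align = ((ptr + PATCH_ITEM_SIZE + 0x1f) & ~(uintptr_t)0x1f) - ptr_align;

    /* here 0x8100000c..0x810000e4 widens to 0x81000000..0x81000100, i.e. 0x100 bytes */
    printf("flush %#zx bytes starting at %#lx\n", len_align, (unsigned long)ptr_align);
    return 0;
}
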