detour_attach, except for internal_detour_copy_instruction

This commit is contained in:
Lysann Tranvouez 2025-09-29 23:06:15 +02:00
parent f5a3491203
commit 4963fbc33c
4 changed files with 597 additions and 28 deletions

View file

@ -12,6 +12,8 @@ typedef void* detour_func_t;
#define detour_err_in_progress (err_local | 1) #define detour_err_in_progress (err_local | 1)
#define detour_err_wrong_thread (err_local | 2) #define detour_err_wrong_thread (err_local | 2)
#define detour_err_too_small (err_local | 3)
#define detour_err_too_large (err_local | 4)
mach_error_t detour_transaction_begin(); mach_error_t detour_transaction_begin();
mach_error_t detour_transaction_abort(); mach_error_t detour_transaction_abort();

View file

@ -7,12 +7,6 @@
#include <stdint.h> #include <stdint.h>
typedef struct detour_align
{
uint8_t offset_target;
uint8_t offset_trampoline;
} detour_align;
typedef struct detour_trampoline typedef struct detour_trampoline
{ {
// An ARM64 instruction is 4 bytes long. // An ARM64 instruction is 4 bytes long.
@ -54,21 +48,25 @@ typedef struct detour_trampoline
static_assert(sizeof(detour_trampoline) == 192); static_assert(sizeof(detour_trampoline) == 192);
typedef uint32_t detours_arm64_opcode_t; enum {
DETOUR_SIZE_OF_JMP = 12
};
static inline detours_arm64_opcode_t fetch_opcode(const uint8_t* code) typedef uint32_t detour_arm64_opcode_t;
static inline detour_arm64_opcode_t fetch_opcode(const uint8_t* code)
{ {
return *(detours_arm64_opcode_t*)code; return *(detour_arm64_opcode_t*)code;
} }
static inline void write_opcode(uint8_t** int_out_code, const detours_arm64_opcode_t opcode) static inline void write_opcode(uint8_t** int_out_code, const detour_arm64_opcode_t opcode)
{ {
uint8_t* code = *int_out_code; uint8_t* code = *int_out_code;
*(detours_arm64_opcode_t*)code = opcode; *(detour_arm64_opcode_t*)code = opcode;
*int_out_code += sizeof(detours_arm64_opcode_t); *int_out_code += sizeof(detour_arm64_opcode_t);
} }
struct detours_arm64_indirect_jmp { struct detour_arm64_indirect_jmp {
struct { struct {
uint32_t Rd : 5; uint32_t Rd : 5;
uint32_t immhi : 19; uint32_t immhi : 19;
@ -91,7 +89,7 @@ struct detours_arm64_indirect_jmp {
uint32_t br; uint32_t br;
}; };
union detours_arm64_indirect_imm { union detour_arm64_indirect_imm {
struct { struct {
uint64_t pad : 12; uint64_t pad : 12;
uint64_t adrp_immlo : 2; uint64_t adrp_immlo : 2;
@ -107,12 +105,12 @@ static inline uint8_t* internal_detour_gen_jmp_indirect(uint8_t* code, const uin
// ldr x17, [x17, jmpval] // ldr x17, [x17, jmpval]
// br x17 // br x17
union detours_arm64_indirect_imm jmp_ind_addr; union detour_arm64_indirect_imm jmp_ind_addr;
jmp_ind_addr.value = (((uint64_t)jump_val) & 0xFFFFFFFFFFFFF000) - jmp_ind_addr.value = (((uint64_t)jump_val) & 0xFFFFFFFFFFFFF000) -
(((uint64_t)code) & 0xFFFFFFFFFFFFF000); (((uint64_t)code) & 0xFFFFFFFFFFFFF000);
struct detours_arm64_indirect_jmp* ind_jmp = (struct detours_arm64_indirect_jmp*)code; struct detour_arm64_indirect_jmp* ind_jmp = (struct detour_arm64_indirect_jmp*)code;
code = (uint8_t*)(ind_jmp + 1); code = (uint8_t*)(ind_jmp + 1);
ind_jmp->ardp.Rd = 17; ind_jmp->ardp.Rd = 17;
@ -135,6 +133,20 @@ static inline uint8_t* internal_detour_gen_jmp_indirect(uint8_t* code, const uin
return code; return code;
} }
// Emit an absolute jump at `code`:
//     ldr x17, <literal>   ; load the 64-bit destination from a nearby literal
//     br  x17
// The 8-byte literal holding `jmp_val` is carved off the END of the available
// code area (*inout_code_limit moves down by 8), so instructions grow upward
// while literals grow downward. Returns the address just past the emitted
// instructions.
// NOTE(review): assumes *inout_code_limit is 8-byte aligned so the literal
// store is aligned — TODO confirm for the trampoline layout.
static inline uint8_t* internal_detour_gen_jmp_immediate(uint8_t* code, uint8_t** inout_code_limit, uint8_t* jmp_val)
{
    *inout_code_limit = *inout_code_limit - 8;
    uint8_t* literal = *inout_code_limit;
    *(uint8_t**)literal = jmp_val;
    const int32_t delta = (int32_t)(literal - code);
    // LDR (literal) encodes a signed 19-bit word offset in bits [23:5].
    // Mask to the field width so an out-of-range delta cannot corrupt the
    // neighboring opcode bits (previously the shifted value was unmasked).
    // Within a trampoline the literal always sits above `code`, so the delta
    // is positive and far inside the +/-1MB reach.
    write_opcode(&code, 0x58000011 | (((delta / 4) & 0x7FFFF) << 5)); // LDR X17,[PC+n]
    write_opcode(&code, 0xd61f0000 | (17 << 5));                      // BR X17
    return code;
}
static inline uint8_t* internal_detour_gen_brk(uint8_t* code, const uint8_t* limit) static inline uint8_t* internal_detour_gen_brk(uint8_t* code, const uint8_t* limit)
{ {
while (code < limit) { while (code < limit) {
@ -143,4 +155,81 @@ static inline uint8_t* internal_detour_gen_brk(uint8_t* code, const uint8_t* lim
return code; return code;
} }
// Compute the lowest and highest addresses at which a trampoline reachable
// from `code` may be placed. The indirect-jump encoding used by
// internal_detour_gen_jmp_indirect could actually reach +/-4GiB; for now we
// conservatively reuse the +/-2GB x86 bounds, which is plenty.
static inline void internal_detour_find_jmp_bounds(uint8_t* code, detour_trampoline** out_lower, detour_trampoline** out_upper)
{
    const uintptr_t lower_addr = internal_detour_2gb_below((uintptr_t)code);
    const uintptr_t upper_addr = internal_detour_2gb_above((uintptr_t)code);
    DETOUR_TRACE(("[%p..%p..%p]\n", (void*)lower_addr, (void*)code, (void*)upper_addr));
    *out_lower = (detour_trampoline*)lower_addr;
    *out_upper = (detour_trampoline*)upper_addr;
}
// Detect whether `code` begins with an OS-applied patch jump.
// Such a patch shows up as a forward unconditional branch (b <imm26>) into an
// HPAT page whose first two instructions are `ldr <reg>, [PC+PAGE_SIZE-4]`
// followed by `br <reg>`.
static inline bool internal_detour_is_code_os_patched(const uint8_t* code)
{
    const uint32_t opcode = fetch_opcode(code);
    // Must be an unconditional immediate branch.
    if ((opcode & 0xfc000000) != 0x14000000) {
        return false;
    }
    // A branch into the HPAT page always jumps forward; a set sign bit means
    // a backward branch, which cannot be an OS patch.
    if ((opcode & 0x2000000) != 0) {
        return false;
    }
    // Decode the (positive) displacement and inspect the destination opcodes.
    const uint32_t delta = (uint32_t)((opcode & 0x1FFFFFF) * 4);
    const uint8_t* branch_target = code + delta;
    const uint32_t hpat_opcode1 = fetch_opcode(branch_target);
    const uint32_t hpat_opcode2 = fetch_opcode(branch_target + 4);
    const bool looks_like_hpat =
        hpat_opcode1 == 0x58008010 && // ldr <reg> [PC+PAGE_SIZE]
        hpat_opcode2 == 0xd61f0200;   // br <reg>
    return looks_like_hpat;
}
// Return true when the instruction at `code` terminates the function
// (ret, br <reg>, or an unconditional b <imm26>).
static inline bool internal_detour_does_code_end_function(const uint8_t* code)
{
    // A function entry the OS has already patched looks like a lone branch;
    // treat it as NOT ending the function so it can still be detoured.
    if (internal_detour_is_code_os_patched(code)) {
        return false;
    }
    const uint32_t opcode = fetch_opcode(code);
    const bool is_ret_or_br = (opcode & 0xffbffc1f) == 0xd61f0000;  // ret/br <reg>
    const bool is_branch_imm = (opcode & 0xfc000000) == 0x14000000; // b <imm26>
    return is_ret_or_br || is_branch_imm;
}
// Return the byte length of a padding instruction at `code` — 4 for an ARM64
// NOP (0xd503201f) or a zero-filled word — or 0 if `code` is not filler.
static inline uint32_t internal_detour_is_code_filler(const uint8_t* code)
{
    // Read the opcode once through a const-correct pointer; the previous
    // version dereferenced twice via a cast that silently discarded const.
    const uint32_t opcode = *(const uint32_t *)code;
    if (opcode == 0xd503201f) { // nop.
        return 4;
    }
    if (opcode == 0x00000000) { // zero-filled padding.
        return 4;
    }
    return 0;
}
// Resolve `code` through any relay/import jump to the real entry point.
// No such resolution is implemented for this platform yet, so the pointer is
// returned unchanged.
static inline uint8_t* internal_detour_skip_jmp(uint8_t* code)
{
    return code;
}
#endif //MACH_DETOURS_ARM64_H #endif //MACH_DETOURS_ARM64_H

View file

@ -5,8 +5,11 @@
#ifndef MACH_DETOURS_INTERNAL_H #ifndef MACH_DETOURS_INTERNAL_H
#define MACH_DETOURS_INTERNAL_H #define MACH_DETOURS_INTERNAL_H
#ifndef DETOUR_TRACE
#if DETOUR_DEBUG #if DETOUR_DEBUG
#include <signal.h>
#include <stdio.h>
#define DETOUR_TRACE(x) printf x #define DETOUR_TRACE(x) printf x
#define DETOUR_BREAK() raise(SIGTRAP) #define DETOUR_BREAK() raise(SIGTRAP)
#define DETOUR_CHECK(x) \ #define DETOUR_CHECK(x) \
@ -16,14 +19,15 @@
DETOUR_BREAK(); \ DETOUR_BREAK(); \
} \ } \
} }
#else #else
#include <signal.h>
#include <stdio.h>
#define DETOUR_TRACE(x) #define DETOUR_TRACE(x)
#define DETOUR_BREAK() #define DETOUR_BREAK()
#define DETOUR_CHECK(x) (x) #define DETOUR_CHECK(x) (x)
#endif #endif
#endif
#ifndef ARRAYSIZE #ifndef ARRAYSIZE
#define ARRAYSIZE(x) (sizeof(x)/sizeof(x[0])) #define ARRAYSIZE(x) (sizeof(x)/sizeof(x[0]))
@ -33,4 +37,29 @@
#define UNUSED_VARIABLE(x) (void)(x) #define UNUSED_VARIABLE(x) (void)(x)
#endif #endif
// Maps a byte offset inside the displaced target prologue to the byte offset
// of the corresponding copied instruction inside the trampoline code buffer.
typedef struct detour_align
{
    uint8_t offset_target;     // offset from the target function's entry point
    uint8_t offset_trampoline; // offset from the start of the trampoline code
} detour_align;

// Region reserved for system DLLs, which cannot be used for trampolines.
// NOTE(review): `static` data defined in a header gives every including
// translation unit its own copy of these bounds — confirm that is intentional.
static void* s_system_region_lower_bound = (void*)(uintptr_t)0x70000000;
static void* s_system_region_upper_bound = (void*)(uintptr_t)0x80000000;
// Lowest address reachable by a ~2GB-displacement jump from `address`,
// clamped so the result never drops below 0x80000.
static inline uintptr_t internal_detour_2gb_below(const uintptr_t address)
{
    if (address <= 0x7ff80000) {
        return 0x80000;
    }
    return address - 0x7ff80000;
}
// Highest address reachable by a ~2GB-displacement jump from `address`,
// clamped so the addition cannot wrap past the top of the address space.
static inline uintptr_t internal_detour_2gb_above(uintptr_t address)
{
#if defined(DETOURS_64BIT)
    if (address < 0xffffffff80000000) {
        return address + 0x7ff80000;
    }
    return 0xfffffffffff80000;
#else
    if (address < 0x80000000) {
        return address + 0x7ff80000;
    }
    return 0xfff80000;
#endif
}
#endif //MACH_DETOURS_INTERNAL_H #endif //MACH_DETOURS_INTERNAL_H

View file

@ -3,7 +3,17 @@
#include "mach_detours.h" #include "mach_detours.h"
#define DETOUR_DEBUG 1
#ifdef __arm64__
#define DETOURS_ARM64
#define DETOURS_64BIT
#else
#error Unsupported architecture (arm64)
#endif
#include "detours_internal.h" #include "detours_internal.h"
#include "arm64/detours_arm64.h" #include "arm64/detours_arm64.h"
#include <inttypes.h> #include <inttypes.h>
@ -14,12 +24,6 @@
#include <mach/mach.h> #include <mach/mach.h>
#include <mach/mach_vm.h> #include <mach/mach_vm.h>
#ifdef __arm64__
#define DETOURS_ARM64
#else
#error Unsupported architecture (arm64)
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Trampoline Memory Management // Trampoline Memory Management
@ -64,6 +68,190 @@ static void internal_detour_runnable_trampoline_regions()
} }
} }
// Round `address` down to the start of its page by clearing the low
// page-offset bits.
static uintptr_t internal_detour_round_down_to_page(const uintptr_t address)
{
    const uintptr_t page_mask = (uintptr_t)(PAGE_SIZE - 1);
    return address & ~page_mask;
}
// Round `address` up to the next DETOUR_REGION_SIZE boundary (no-op when it
// is already aligned), so region allocations always start region-aligned.
static const uint8_t* internal_detour_alloc_round_up_to_region(const uint8_t* address)
{
    const uintptr_t misalignment = ((uintptr_t)address) & (DETOUR_REGION_SIZE - 1);
    if (misalignment == 0) {
        return address;
    }
    return address + (DETOUR_REGION_SIZE - misalignment);
}
// Starting at lo, try to allocate a memory region, continue until hi.
// Scans upward one page at a time from the first DETOUR_REGION_SIZE-aligned
// address at or above `lo`, attempting a fixed-address vm_allocate of
// DETOUR_REGION_SIZE bytes at each step. Returns the allocated base address,
// or nullptr when the range is exhausted or the kernel reports an unexpected
// error.
// NOTE(review): unlike internal_detour_alloc_region_from_hi, this upward scan
// does not skip the reserved system region — confirm whether that is intended.
static void* internal_detour_alloc_region_from_lo(const uint8_t* lo, const uint8_t* hi)
{
    const uint8_t* try_addr = internal_detour_alloc_round_up_to_region(lo);
    DETOUR_TRACE((" Looking for free region in %p..%p from %p:\n", lo, hi, try_addr));
    const vm_map_t task_self = mach_task_self();
    for (vm_address_t page = (vm_address_t)try_addr; page < (vm_address_t)hi; page += PAGE_SIZE) {
        DETOUR_TRACE((" Try %p\n", (void*)page));
        // flags == 0 requests a fixed-address allocation, so failure normally
        // just means this address range is already occupied.
        const mach_error_t err = vm_allocate(task_self, &page, DETOUR_REGION_SIZE, 0);
        if (err == err_none) {
            return (void*)page;
        }
        if (err != KERN_NO_SPACE && err != KERN_INVALID_ADDRESS) {
            // Any error other than "address in use" is unexpected: trap in
            // debug builds and abandon the scan.
            DETOUR_BREAK();
            return nullptr;
        }
    }
    return nullptr;
}
// Starting at hi, try to allocate a memory region, continue until lo.
// Scans downward one page at a time, attempting a fixed-address vm_allocate
// of DETOUR_REGION_SIZE bytes at each step, skipping the address range
// reserved for system DLLs. Returns the allocated base address, or nullptr
// when the range is exhausted or the kernel reports an unexpected error.
static void* internal_detour_alloc_region_from_hi(const uint8_t* lo, const uint8_t* hi)
{
    uintptr_t try_addr = internal_detour_round_down_to_page((uintptr_t)(hi - DETOUR_REGION_SIZE));
    DETOUR_TRACE((" Looking for free region in %p..%p from %p:\n", lo, hi, (void*)try_addr));
    const vm_map_t task_self = mach_task_self();
    for (vm_address_t page = try_addr; page > (vm_address_t)lo; page -= PAGE_SIZE) {
        DETOUR_TRACE((" Try %p\n", (void*)page));
        if ((void*)page >= s_system_region_lower_bound && (void*)page <= s_system_region_upper_bound) {
            // Skip region reserved for system DLLs, but preserve address space entropy.
            // BUG FIX: the previous code decremented the dead variable
            // `try_addr` here, so the reserved region was never skipped and
            // was instead walked one page at a time. Decrement the loop
            // variable itself; `continue` runs the loop step, which subtracts
            // the remaining PAGE_SIZE for a net skip of 0x08000000.
            page -= 0x08000000 - PAGE_SIZE;
            continue;
        }
        // flags == 0 requests a fixed-address allocation, so failure normally
        // just means this address range is already occupied.
        const mach_error_t err = vm_allocate(task_self, &page, DETOUR_REGION_SIZE, 0);
        if (err == err_none) {
            return (void*)page;
        }
        if (err != KERN_NO_SPACE && err != KERN_INVALID_ADDRESS) {
            // Any error other than "address in use" is unexpected: trap in
            // debug builds and abandon the scan.
            DETOUR_BREAK();
            return nullptr;
        }
    }
    return nullptr;
}
// Choose and allocate a base address for a brand-new trampoline region near
// `target`, staying within the [lo, hi] jump bounds. Every probe is offset
// from `target` so the chosen address retains some ASLR entropy.
static void* internal_detour_alloc_trampoline_allocate_new(const uint8_t* target,
                                                           detour_trampoline* lo,
                                                           detour_trampoline* hi)
{
    void* region = nullptr;
#if defined(DETOURS_64BIT)
    // Probe order: >=1GB below, >=1GB above, <1GB below, <1GB above.
    if (target > (uint8_t*)0x40000000) {
        region = internal_detour_alloc_region_from_hi((uint8_t*)lo, target - 0x40000000);
    }
    if (!region && target < (uint8_t*)0xffffffff40000000) {
        region = internal_detour_alloc_region_from_lo(target + 0x40000000, (uint8_t*)hi);
    }
    if (!region && target > (uint8_t*)0x40000000) {
        region = internal_detour_alloc_region_from_lo(target - 0x40000000, target);
    }
    if (!region && target < (uint8_t*)0xffffffff40000000) {
        region = internal_detour_alloc_region_from_hi(target, target + 0x40000000);
    }
#endif
    // Fall back to anywhere below the target, then anywhere above it.
    if (!region) {
        region = internal_detour_alloc_region_from_hi((uint8_t*)lo, target);
    }
    if (!region) {
        region = internal_detour_alloc_region_from_lo(target, (uint8_t*)hi);
    }
    return region;
}
// Pop a free trampoline slot that lies within the +/-2GB jump bounds of
// `target`, allocating a brand-new region when no existing region has a
// suitable free slot. Returns nullptr on failure.
static detour_trampoline* internal_detour_alloc_trampoline(uint8_t* target)
{
    // We have to place trampolines within +/- 2GB of target.
    detour_trampoline* lo;
    detour_trampoline* hi;
    internal_detour_find_jmp_bounds(target, &lo, &hi);
    // ReSharper disable once CppDFAUnusedValue
    detour_trampoline* trampoline = nullptr;
    // Ensure that there is a default region.
    if (!s_default_region && s_regions_head) {
        s_default_region = s_regions_head;
    }
    // First check the default region for a valid free block.
    if (s_default_region && s_default_region->free_list_head &&
        s_default_region->free_list_head >= lo && s_default_region->free_list_head <= hi) {
        // NOTE: this label sits inside the if-block and is entered via goto
        // from the region-search loop and the new-region path below; it pops
        // the head of s_default_region's free list.
found_region:
        trampoline = s_default_region->free_list_head;
        // do a last sanity check on region.
        if (trampoline < lo || trampoline > hi) {
            // NOTE(review): unconditional trap, unlike DETOUR_BREAK() used
            // elsewhere (a no-op in release builds) — confirm this is intended.
            raise(SIGTRAP);
            return nullptr;
        }
        // Free slots reuse ptr_remain as the free-list "next" link.
        s_default_region->free_list_head = (detour_trampoline*)trampoline->ptr_remain;
        // Poison the slot with 0xcc so stale contents are conspicuous.
        memset(trampoline, 0xcc, sizeof(*trampoline));
        return trampoline;
    }
    // Then check the existing regions for a valid free block.
    for (s_default_region = s_regions_head; s_default_region != nullptr; s_default_region = s_default_region->next) {
        // ReSharper disable once CppDFANullDereference // false positive
        if (s_default_region->free_list_head &&
            s_default_region->free_list_head >= lo && s_default_region->free_list_head <= hi) {
            goto found_region;
        }
    }
    // We need to allocate a new region.
    // Round pbTarget down to 64KB block.
    // /RTCc RuntimeChecks breaks PtrToUlong.
    target = target - (uint32_t)((uintptr_t)target & 0xffff);
    void* newly_allocated = internal_detour_alloc_trampoline_allocate_new(target, lo, hi);
    if (newly_allocated) {
        // Link the fresh region at the head of the region list and make it
        // the default for subsequent allocations.
        s_default_region = (detour_region*)newly_allocated;
        s_default_region->signature = DETOUR_REGION_SIGNATURE;
        s_default_region->free_list_head = nullptr;
        s_default_region->next = s_regions_head;
        s_regions_head = s_default_region;
        DETOUR_TRACE((" Allocated region %p..%p\n\n",
                      s_default_region, ((uint8_t*)s_default_region) + DETOUR_REGION_SIZE - 1));
        // Put everything but the first trampoline on the free list.
        uint8_t* free = nullptr;
        trampoline = ((detour_trampoline*)s_default_region) + 1;
        // NOTE(review): the loop stops at i > 1, so trampoline[0] and
        // trampoline[1] are never linked into the free list and the popped
        // head is trampoline[2] — verify against upstream Detours whether a
        // slot is being leaked here.
        for (uint32_t i = DETOUR_TRAMPOLINES_PER_REGION - 1; i > 1; i--) {
            trampoline[i].ptr_remain = free;
            free = (uint8_t*)&trampoline[i];
        }
        s_default_region->free_list_head = (detour_trampoline*)free;
        goto found_region;
    }
    DETOUR_TRACE(("Couldn't find available memory region!\n"));
    return nullptr;
}
static void internal_detour_free_trampoline(detour_trampoline* trampoline) static void internal_detour_free_trampoline(detour_trampoline* trampoline)
{ {
detour_region* region = (detour_region*)((uintptr_t)trampoline & ~(uintptr_t)0xffff); detour_region* region = (detour_region*)((uintptr_t)trampoline & ~(uintptr_t)0xffff);
@ -174,7 +362,7 @@ static detour_operation* s_pending_operations_head = nullptr;
static detour_pending_thread* s_pending_threads_head = nullptr; static detour_pending_thread* s_pending_threads_head = nullptr;
static mach_error_t s_pending_error = err_none; static mach_error_t s_pending_error = err_none;
static void** s_pending_error_pointer = nullptr; static detour_func_t* s_pending_error_pointer = nullptr;
mach_error_t detour_transaction_begin() mach_error_t detour_transaction_begin()
{ {
@ -425,11 +613,13 @@ mach_error_t detour_manage_thread(thread_t thread)
return err_none; return err_none;
} }
detour_pending_thread* new_pending_thread = malloc(sizeof(detour_pending_thread)); detour_pending_thread* new_pending_thread = calloc(1, sizeof(detour_pending_thread));
if (!new_pending_thread) { if (!new_pending_thread) {
error = KERN_RESOURCE_SHORTAGE; error = KERN_RESOURCE_SHORTAGE;
fail: fail:
free(new_pending_thread); free(new_pending_thread);
// ReSharper disable once CppDFAUnusedValue
new_pending_thread = nullptr;
s_pending_error = error; s_pending_error = error;
s_pending_error_pointer = nullptr; s_pending_error_pointer = nullptr;
DETOUR_BREAK(); DETOUR_BREAK();
@ -448,3 +638,262 @@ mach_error_t detour_manage_thread(thread_t thread)
return err_none; return err_none;
} }
// Queue an attach of `detour` onto the function pointed to by *inout_pointer
// within the current transaction. Convenience wrapper around detour_attach_ex
// that discards the optional trampoline/target/detour out-parameters.
mach_error_t detour_attach(detour_func_t* inout_pointer, detour_func_t detour)
{
    return detour_attach_ex(inout_pointer, detour, nullptr, nullptr, nullptr);
}
// Queue an attach operation on the current transaction. When the transaction
// commits, calls made through *inout_pointer will run `detour`, and
// *inout_pointer will be redirected to a trampoline containing the displaced
// prologue instructions followed by a jump back into the target.
//
// inout_pointer        in/out: address of the function pointer to detour.
// detour               replacement function; must be non-null.
// out_real_trampoline  optional out: the allocated trampoline.
// out_real_target      optional out: the resolved target entry point.
// out_real_detour      optional out: the resolved detour entry point.
//
// Must be called on the thread that began the transaction. On failure the
// pending-transaction error state is recorded and the error returned; the
// optional out-parameters are reset to nullptr on every failure path.
mach_error_t detour_attach_ex(detour_func_t* inout_pointer, detour_func_t detour, detour_func_t* out_real_trampoline, detour_func_t* out_real_target, detour_func_t* out_real_detour)
{
    // Null all optional out-parameters up front so every early exit leaves
    // them in a defined state.
    if (out_real_trampoline) {
        *out_real_trampoline = nullptr;
    }
    if (out_real_target) {
        *out_real_target = nullptr;
    }
    if (out_real_detour) {
        *out_real_detour = nullptr;
    }
    if (!detour) {
        DETOUR_TRACE(("empty detour\n"));
        return KERN_INVALID_ARGUMENT;
    }
    if (!inout_pointer) {
        DETOUR_TRACE(("inout_pointer is null\n"));
        return KERN_INVALID_ARGUMENT;
    }
    // A null *target* (as opposed to a null pointer-to-pointer) poisons the
    // whole transaction, matching the other mid-transaction failures below.
    if (!(*inout_pointer)) {
        s_pending_error = KERN_INVALID_ARGUMENT;
        s_pending_error_pointer = inout_pointer;
        DETOUR_TRACE(("*inout_pointer is null (inout_pointer=%p)\n", inout_pointer));
        DETOUR_BREAK();
        return s_pending_error;
    }
    // Attaches may only be queued by the thread that owns the transaction.
    {
        const thread_t active_thread = s_transaction_thread;
        if (active_thread != mach_thread_self()) {
            DETOUR_TRACE(("transaction conflict with thread id=%u\n", active_thread));
            return detour_err_wrong_thread;
        }
    }
    // If any of the pending operations failed, then we don't need to do this.
    if (s_pending_error != err_none) {
        DETOUR_TRACE(("pending transaction error=%d\n", s_pending_error));
        return s_pending_error;
    }
    mach_error_t error = err_none;
    detour_trampoline* trampoline = nullptr;
    detour_operation* op = nullptr;
    // Resolve both pointers through any relay jumps to the real entry points.
    uint8_t* target = internal_detour_skip_jmp(*inout_pointer);
    detour = internal_detour_skip_jmp(detour);
    // Don't follow a jump if its destination is the target function.
    // This happens when the detour does nothing other than call the target.
    if (detour == (detour_func_t)target) {
        if (s_ignore_too_small) {
            goto stop;
        } else {
            DETOUR_BREAK();
            error = detour_err_too_small;
            goto fail;
        }
    }
    if (out_real_target) {
        *out_real_target = target;
    }
    if (out_real_detour) {
        *out_real_detour = detour;
    }
    op = calloc(1, sizeof(detour_operation));
    if (!op) {
        error = KERN_RESOURCE_SHORTAGE;
        // NOTE: the `fail` and `stop` labels live inside this if-block and are
        // entered via goto from many points below. `fail` records the pending
        // error and falls through into `stop`, which releases whatever was
        // allocated so far and clears the out-parameters again.
    fail:
        s_pending_error = error;
        DETOUR_BREAK();
    stop:
        if (trampoline) {
            internal_detour_free_trampoline(trampoline);
            // ReSharper disable once CppDFAUnusedValue
            trampoline = nullptr;
            if (out_real_trampoline) {
                *out_real_trampoline = nullptr;
            }
        }
        free(op);
        // ReSharper disable once CppDFAUnusedValue
        op = nullptr;
        if (out_real_detour) {
            *out_real_detour = nullptr;
        }
        if (out_real_target) {
            *out_real_target = nullptr;
        }
        s_pending_error_pointer = inout_pointer;
        return error;
    }
    trampoline = internal_detour_alloc_trampoline(target);
    if (!trampoline) {
        error = KERN_RESOURCE_SHORTAGE;
        DETOUR_BREAK();
        goto fail;
    }
    if (out_real_trampoline) {
        *out_real_trampoline = trampoline;
    }
    memset(trampoline->align, 0, sizeof(trampoline->align));
    DETOUR_TRACE(("detours: trampoline=%p, detour=%p\n", trampoline, detour));
    // Determine the number of movable target instructions.
    // Copy whole instructions from the target prologue into the trampoline
    // until at least DETOUR_SIZE_OF_JMP bytes have been displaced, recording
    // each target-offset -> trampoline-offset pair in the align table.
    uint8_t* src = target;
    uint8_t* trampoline_code = trampoline->code;
    uint8_t* trampoline_code_limit = trampoline_code + sizeof(trampoline->code);
    uint32_t offset_target = 0;
    uint32_t align_idx = 0;
    while (offset_target < DETOUR_SIZE_OF_JMP) {
        const uint8_t* curr_op = src;
        int32_t extra_len = 0;
        DETOUR_TRACE((" copy instruction src=%p, dest=%p\n", src, trampoline_code));
        src = (uint8_t*)internal_detour_copy_instruction(trampoline_code, (void**)&trampoline_code_limit, src, nullptr, &extra_len);
        DETOUR_TRACE((" after: src=%p (copied %d bytes)\n", src, (int)(src - curr_op)));
        trampoline_code += (src - curr_op) + extra_len;
        offset_target = (int32_t)(src - target);
        trampoline->align[align_idx].offset_target = offset_target;
        trampoline->align[align_idx].offset_trampoline = trampoline_code - trampoline->code;
        align_idx++;
        if (align_idx >= ARRAYSIZE(trampoline->align)) {
            break;
        }
        // Stop early if the instruction we just copied ends the function.
        if (internal_detour_does_code_end_function(curr_op)) {
            break;
        }
    }
    // Consume, but don't duplicate padding if it is needed and available.
    while (offset_target < DETOUR_SIZE_OF_JMP) {
        const uint32_t len_filler = internal_detour_is_code_filler(src);
        if (len_filler == 0) {
            break;
        }
        src += len_filler;
        offset_target = (int32_t)(src - target);
    }
#if DETOUR_DEBUG
    // Dump the align table (terminated by an all-zero entry).
    {
        DETOUR_TRACE((" detours: align ["));
        int32_t n = 0;
        for (n = 0; n < ARRAYSIZE(trampoline->align); n++) {
            if (trampoline->align[n].offset_target == 0 && trampoline->align[n].offset_trampoline == 0) {
                break;
            }
            DETOUR_TRACE((" %u/%u", trampoline->align[n].offset_target, trampoline->align[n].offset_trampoline));
        }
        DETOUR_TRACE((" ]\n"));
    }
#endif
    if (offset_target < DETOUR_SIZE_OF_JMP || align_idx > ARRAYSIZE(trampoline->align)) {
        // Too few instructions.
        error = detour_err_too_small;
        if (s_ignore_too_small) {
            goto stop;
        } else {
            DETOUR_BREAK();
            goto fail;
        }
    }
    if (trampoline_code > trampoline_code_limit) {
        DETOUR_BREAK();
    }
    // Record the displaced byte counts and stash the original prologue bytes
    // so a later detach can restore them.
    trampoline->code_size = (uint8_t)(trampoline_code - trampoline->code);
    trampoline->restore_code_size = (uint8_t)offset_target;
    memcpy(trampoline->restore_code, target, offset_target);
    if (offset_target > sizeof(trampoline->code) - DETOUR_SIZE_OF_JMP) {
        // Too many instructions.
        error = detour_err_too_large;
        DETOUR_BREAK();
        goto fail;
    }
    // Terminate the trampoline: jump back to the first non-displaced target
    // instruction, then pad the remainder with break instructions.
    trampoline->ptr_remain = target + offset_target;
    trampoline->ptr_detour = (uint8_t*)detour;
    trampoline_code = trampoline->code + trampoline->code_size;
    trampoline_code = internal_detour_gen_jmp_immediate(trampoline_code, &trampoline_code_limit, trampoline->ptr_remain);
    trampoline_code = internal_detour_gen_brk(trampoline_code, trampoline_code_limit);
    UNUSED_VARIABLE(trampoline_code);
    // Look up the target page's current protection so commit can restore it,
    // then make the page writable (VM_PROT_COPY forces a private COW copy).
    const mach_port_t port = mach_task_self();
    const mach_vm_address_t page_addr = internal_detour_round_down_to_page((uintptr_t)target);
    vm_region_submap_short_info_data_64_t region_info;
    {
        mach_vm_address_t region_addr = (mach_vm_address_t)target;
        mach_vm_size_t region_size = 0;
        natural_t nesting_depth = 99999;
        mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
        error = mach_vm_region_recurse(port, &region_addr, &region_size, &nesting_depth, (vm_region_recurse_info_t)&region_info, &count);
        if (error != err_none) {
            DETOUR_BREAK();
            goto fail;
        }
    }
    const vm_prot_t old_perm = region_info.protection;
    error = mach_vm_protect(port, page_addr, PAGE_SIZE, false, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COPY);
    if (error != err_none) {
        DETOUR_BREAK();
        goto fail;
    }
    DETOUR_TRACE(("detours: target=%p: "
                  "%02x %02x %02x %02x "
                  "%02x %02x %02x %02x "
                  "%02x %02x %02x %02x\n",
                  target,
                  target[0], target[1], target[2], target[3],
                  target[4], target[5], target[6], target[7],
                  target[8], target[9], target[10], target[11]));
    DETOUR_TRACE(("detours: trampoline =%p: "
                  "%02x %02x %02x %02x "
                  "%02x %02x %02x %02x "
                  "%02x %02x %02x %02x\n",
                  trampoline,
                  trampoline->code[0], trampoline->code[1],
                  trampoline->code[2], trampoline->code[3],
                  trampoline->code[4], trampoline->code[5],
                  trampoline->code[6], trampoline->code[7],
                  trampoline->code[8], trampoline->code[9],
                  trampoline->code[10], trampoline->code[11]));
    // Record the pending attach; the actual code patch happens at commit.
    op->kind = detour_operation_kind_attach;
    op->pointer = (uint8_t**)inout_pointer;
    op->trampoline = trampoline;
    op->target = target;
    op->perm = old_perm;
    op->next = s_pending_operations_head;
    s_pending_operations_head = op;
    return err_none;
}