/*
 *  x86 memory access helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

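/* These helpers bracket LOCK-prefixed instructions; a single global
   spinlock stands in for real per-address atomicity (hence the "broken
   thread support" note above). */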
void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

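/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at a0.  On a match,
   store ECX:EBX there and set ZF; otherwise load the operand into EDX:EAX
   and clear ZF.  A store is performed in both cases ("always do the
   store" below). */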
void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cpu_cc_compute_all(env, CC_OP);
    d = cpu_ldq_data(env, a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        cpu_stq_data(env, a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        cpu_stq_data(env, a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
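/* CMPXCHG16B: 128-bit variant, comparing RDX:RAX with the 16-byte operand
   at a0 and storing RCX:RBX on a match.  The operand must be 16-byte
   aligned; a misaligned address raises #GP. */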
void helper_cmpxchg16b(CPUX86State *env, target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    eflags = cpu_cc_compute_all(env, CC_OP);
    d0 = cpu_ldq_data(env, a0);
    d1 = cpu_ldq_data(env, a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        cpu_stq_data(env, a0, EBX);
        cpu_stq_data(env, a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        cpu_stq_data(env, a0, d0);
        cpu_stq_data(env, a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

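/* BOUND with a 16-bit operand: raise #BR if the signed index v falls
   outside the [low, high] bounds pair read from a0. */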
void helper_boundw(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldsw_data(env, a0);
    high = cpu_ldsw_data(env, a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

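/* BOUND with a 32-bit operand: the same check against 32-bit bounds. */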
void helper_boundl(CPUX86State *env, target_ulong a0, int v)
{
    int low, high;

    low = cpu_ldl_data(env, a0);
    high = cpu_ldl_data(env, a0 + 4);
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

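/* Instantiate the softmmu load/store helpers for 1-, 2-, 4- and 8-byte
   accesses (SHIFT is the log2 of the access size). */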
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* Try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c). */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUX86State *env, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(env, retaddr);
        }
        raise_exception_err(env, env->exception_index, env->error_code);
    }
}
#endif