/* Definitions of target machine for GCC for IA-32.
   Copyright (C) 1988-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* The purpose of this file is to define the characteristics of the i386,
   independent of assembler syntax or operating system.

   Three other files build on this one to describe a specific assembler
   syntax: bsd386.h, att386.h, and sun386.h.

   The actual tm.h file for a particular system should include this file,
   and then the file for the appropriate assembler syntax.

   Many macros that specify assembler syntax are omitted entirely from
   this file because they really belong in the files for particular
   assemblers.  These include RP, IP, LPREFIX, PUT_OP_SIZE, USE_STAR,
   ADDR_BEG, ADDR_END, PRINT_IREG, PRINT_SCALE, PRINT_B_I_S, and many
   that start with ASM_ or end in ASM_OP.  */

/* Redefines for option macros.
*/

#define TARGET_CMPXCHG16B	TARGET_CX16
#define TARGET_CMPXCHG16B_P(x)	TARGET_CX16_P(x)

#define TARGET_LP64	TARGET_ABI_64
#define TARGET_LP64_P(x)	TARGET_ABI_64_P(x)
#define TARGET_X32	TARGET_ABI_X32
#define TARGET_X32_P(x)	TARGET_ABI_X32_P(x)
#define TARGET_16BIT	TARGET_CODE16
#define TARGET_16BIT_P(x)	TARGET_CODE16_P(x)

/* 64-bit SSE2 can stand in for MMX; used to avoid MMX where possible.  */
#define TARGET_MMX_WITH_SSE	(TARGET_64BIT && TARGET_SSE2)

#include "config/vxworks-dummy.h"

#include "config/i386/i386-opts.h"

#define MAX_STRINGOP_ALGS 4

/* Specify what algorithm to use for stringops on known size.
   When size is unknown, the UNKNOWN_SIZE alg is used.  When size is
   known at compile time or estimated via feedback, the SIZE array
   is walked in order until MAX is greater than the estimate (or -1
   means infinity).  Corresponding ALG is used then.
   When NOALIGN is true the code guaranteeing the alignment of the memory
   block is skipped.

   For example initializer:
    {{256, loop}, {-1, rep_prefix_4_byte}}
   will use loop for blocks smaller or equal to 256 bytes, rep prefix will
   be used otherwise.  */
struct stringop_algs
{
  const enum stringop_alg unknown_size;
  const struct stringop_strategy {
    /* Several older compilers delete the default constructor because of the
       const entries (see PR100246).  Manually specifying a CTOR works around
       this issue.  Since this header is used by code compiled with the C
       compiler we must guard the addition.  */
#ifdef __cplusplus
    constexpr
    stringop_strategy (int _max = -1, enum stringop_alg _alg = libcall,
		       int _noalign = false)
      : max (_max), alg (_alg), noalign (_noalign) {}
#endif
    const int max;
    const enum stringop_alg alg;
    int noalign;
  } size [MAX_STRINGOP_ALGS];
};

/* Analog of COSTS_N_INSNS when optimizing for size.  */
#ifndef COSTS_N_BYTES
#define COSTS_N_BYTES(N) ((N) * 2)
#endif

/* Define the specific costs for a given cpu.  NB: hard_register is used
   by TARGET_REGISTER_MOVE_COST and TARGET_MEMORY_MOVE_COST to compute
   hard register move costs by register allocator.
   Relative costs of pseudo register load and store versus pseudo register
   moves in RTL expressions for TARGET_RTX_COSTS can be different from
   relative costs of hard registers to get the most efficient operations
   with pseudo registers.  */

struct processor_costs {
  /* Costs used by register allocator.  integer->integer register move
     cost is 2.  */
  struct
    {
      const int movzbl_load;	/* cost of loading using movzbl */
      const int int_load[3];	/* cost of loading integer registers
				   in QImode, HImode and SImode relative
				   to reg-reg move (2).  */
      const int int_store[3];	/* cost of storing integer register
				   in QImode, HImode and SImode */
      const int fp_move;	/* cost of reg,reg fld/fst */
      const int fp_load[3];	/* cost of loading FP register
				   in SFmode, DFmode and XFmode */
      const int fp_store[3];	/* cost of storing FP register
				   in SFmode, DFmode and XFmode */
      const int mmx_move;	/* cost of moving MMX register.  */
      const int mmx_load[2];	/* cost of loading MMX register
				   in SImode and DImode */
      const int mmx_store[2];	/* cost of storing MMX register
				   in SImode and DImode */
      const int xmm_move;	/* cost of moving XMM register.  */
      const int ymm_move;	/* cost of moving YMM register.  */
      const int zmm_move;	/* cost of moving ZMM register.  */
      const int sse_load[5];	/* cost of loading SSE register
				   in 32bit, 64bit, 128bit, 256bit and
				   512bit */
      const int sse_store[5];	/* cost of storing SSE register
				   in 32bit, 64bit, 128bit, 256bit and
				   512bit */
      const int sse_to_integer;	/* cost of moving SSE register to integer.  */
      const int integer_to_sse;	/* cost of moving integer register to SSE.  */
      const int mask_to_integer; /* cost of moving mask register to
				    integer.  */
      const int integer_to_mask; /* cost of moving integer register to
				    mask.  */
      const int mask_load[3];	/* cost of loading mask registers
				   in QImode, HImode and SImode.  */
      const int mask_store[3];	/* cost of storing mask register
				   in QImode, HImode and SImode.  */
      const int mask_move;	/* cost of moving mask register.  */
    } hard_register;

  const int add;		/* cost of an add instruction */
  const int lea;		/* cost of a lea instruction */
  const int shift_var;		/* variable shift costs */
  const int shift_const;	/* constant shift costs */
  const int mult_init[5];	/* cost of starting a multiply in
				   QImode, HImode, SImode, DImode, TImode*/
  const int mult_bit;		/* cost of multiply per each bit set */
  const int divide[5];		/* cost of a divide/mod in
				   QImode, HImode, SImode, DImode, TImode*/
  int movsx;			/* The cost of movsx operation.  */
  int movzx;			/* The cost of movzx operation.  */
  const int large_insn;		/* insns larger than this cost more */
  const int move_ratio;		/* The threshold of number of scalar
				   memory-to-memory move insns.  */
  const int clear_ratio;	/* The threshold of number of scalar
				   memory clearing insns.  */
  const int int_load[3];	/* cost of loading integer registers
				   in QImode, HImode and SImode relative
				   to reg-reg move (2).  */
  const int int_store[3];	/* cost of storing integer register
				   in QImode, HImode and SImode */
  const int sse_load[5];	/* cost of loading SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
  const int sse_store[5];	/* cost of storing SSE register
				   in 32bit, 64bit, 128bit, 256bit and 512bit */
  const int sse_unaligned_load[5];/* cost of unaligned load.  */
  const int sse_unaligned_store[5];/* cost of unaligned store.  */
  const int xmm_move, ymm_move, /* cost of moving XMM and YMM register.  */
	    zmm_move;
  const int sse_to_integer;	/* cost of moving SSE register to integer.  */
  const int gather_static, gather_per_elt; /* Cost of gather load is computed
				   as static + per_item * nelts.  */
  const int scatter_static, scatter_per_elt; /* Cost of gather store is
				   computed as static + per_item * nelts.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int prefetch_block;	/* bytes moved to cache for prefetch.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
				   operations.  */
  const int branch_cost;	/* Default value for BRANCH_COST.  */
  const int fadd;		/* cost of FADD and FSUB instructions.  */
  const int fmul;		/* cost of FMUL instruction.  */
  const int fdiv;		/* cost of FDIV instruction.  */
  const int fabs;		/* cost of FABS instruction.  */
  const int fchs;		/* cost of FCHS instruction.  */
  const int fsqrt;		/* cost of FSQRT instruction.  */
				/* Specify what algorithm
				   to use for stringops on unknown size.
				   NOTE(review): this comment appears stranded
				   here; it presumably describes the
				   memcpy/memset fields below.  */
  const int sse_op;		/* cost of cheap SSE instruction.  */
  const int addss;		/* cost of ADDSS/SD SUBSS/SD instructions.  */
  const int mulss;		/* cost of MULSS instructions.  */
  const int mulsd;		/* cost of MULSD instructions.  */
  const int fmass;		/* cost of FMASS instructions.  */
  const int fmasd;		/* cost of FMASD instructions.  */
  const int divss;		/* cost of DIVSS instructions.  */
  const int divsd;		/* cost of DIVSD instructions.  */
  const int sqrtss;		/* cost of SQRTSS instructions.  */
  const int sqrtsd;		/* cost of SQRTSD instructions.  */
  const int reassoc_int, reassoc_fp, reassoc_vec_int, reassoc_vec_fp;
				/* Specify reassociation width for integer,
				   fp, vector integer and vector fp
				   operations.  Generally should correspond
				   to number of instructions executed in
				   parallel.  See also
				   ix86_reassociation_width.  */
  struct stringop_algs *memcpy, *memset;
  const int cond_taken_branch_cost;    /* Cost of taken branch for vectorizer
					  cost model.  */
  const int cond_not_taken_branch_cost;/* Cost of not taken branch for
					  vectorizer cost model.  */

  /* The "0:0:8" label alignment specified for some processors generates
     secondary 8-byte alignment only for those label/jump/loop targets
     which have primary alignment.  */
  const char *const align_loop;		/* Loop alignment.  */
  const char *const align_jump;		/* Jump alignment.  */
  const char *const align_label;	/* Label alignment.  */
  const char *const align_func;		/* Function alignment.  */
};

extern const struct processor_costs *ix86_cost;
extern const struct processor_costs ix86_size_cost;

/* Pick the size-cost table when optimizing this insn for size,
   otherwise the per-CPU speed-cost table.  */
#define ix86_cur_cost() \
  (optimize_insn_for_size_p () ? &ix86_size_cost: ix86_cost)

/* Macros used in the machine description to test the flags.  */

/* configure can arrange to change it.  */

#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT PROCESSOR_GENERIC
#endif

#ifndef TARGET_FPMATH_DEFAULT
#define TARGET_FPMATH_DEFAULT \
  (TARGET_64BIT && TARGET_SSE ? FPMATH_SSE : FPMATH_387)
#endif

#ifndef TARGET_FPMATH_DEFAULT_P
#define TARGET_FPMATH_DEFAULT_P(x) \
  (TARGET_64BIT_P(x) && TARGET_SSE_P(x) ? FPMATH_SSE : FPMATH_387)
#endif

/* If the i387 is disabled or -miamcu is used, then do not return
   values in it.  */
#define TARGET_FLOAT_RETURNS_IN_80387 \
  (TARGET_FLOAT_RETURNS && TARGET_80387 && !TARGET_IAMCU)
#define TARGET_FLOAT_RETURNS_IN_80387_P(x) \
  (TARGET_FLOAT_RETURNS_P(x) && TARGET_80387_P(x) && !TARGET_IAMCU_P(x))

/* 64bit Sledgehammer mode.  For libgcc2 we make sure this is a
   compile-time constant.  */
#ifdef IN_LIBGCC2
#undef TARGET_64BIT
#ifdef __x86_64__
#define TARGET_64BIT 1
#else
#define TARGET_64BIT 0
#endif
#else
#ifndef TARGET_BI_ARCH
#undef TARGET_64BIT
#undef TARGET_64BIT_P
#if TARGET_64BIT_DEFAULT
#define TARGET_64BIT 1
#define TARGET_64BIT_P(x) 1
#else
#define TARGET_64BIT 0
#define TARGET_64BIT_P(x) 0
#endif
#endif
#endif

#define HAS_LONG_COND_BRANCH 1
#define HAS_LONG_UNCOND_BRANCH 1

/* True iff the currently selected tuning model is CPU.  */
#define TARGET_CPU_P(CPU) (ix86_tune == PROCESSOR_ ## CPU)

/* Feature tests against the various tunings.
*/
/* The tune-feature indices are generated from x86-tune.def via the
   DEF_TUNE X-macro: each DEF_TUNE entry contributes one enumerator.  */
enum ix86_tune_indices {
#undef DEF_TUNE
#define DEF_TUNE(tune, name, selector) tune,
#include "x86-tune.def"
#undef DEF_TUNE
X86_TUNE_LAST
};

extern unsigned char ix86_tune_features[X86_TUNE_LAST];

/* One boolean-valued TARGET_* accessor per tune feature.  */
#define TARGET_USE_LEAVE	ix86_tune_features[X86_TUNE_USE_LEAVE]
#define TARGET_PUSH_MEMORY	ix86_tune_features[X86_TUNE_PUSH_MEMORY]
#define TARGET_ZERO_EXTEND_WITH_AND \
	ix86_tune_features[X86_TUNE_ZERO_EXTEND_WITH_AND]
#define TARGET_UNROLL_STRLEN	ix86_tune_features[X86_TUNE_UNROLL_STRLEN]
#define TARGET_BRANCH_PREDICTION_HINTS \
	ix86_tune_features[X86_TUNE_BRANCH_PREDICTION_HINTS]
#define TARGET_DOUBLE_WITH_ADD	ix86_tune_features[X86_TUNE_DOUBLE_WITH_ADD]
#define TARGET_USE_SAHF		ix86_tune_features[X86_TUNE_USE_SAHF]
#define TARGET_MOVX		ix86_tune_features[X86_TUNE_MOVX]
#define TARGET_PARTIAL_REG_STALL ix86_tune_features[X86_TUNE_PARTIAL_REG_STALL]
#define TARGET_PARTIAL_FLAG_REG_STALL \
	ix86_tune_features[X86_TUNE_PARTIAL_FLAG_REG_STALL]
#define TARGET_LCP_STALL \
	ix86_tune_features[X86_TUNE_LCP_STALL]
#define TARGET_USE_HIMODE_FIOP	ix86_tune_features[X86_TUNE_USE_HIMODE_FIOP]
#define TARGET_USE_SIMODE_FIOP	ix86_tune_features[X86_TUNE_USE_SIMODE_FIOP]
#define TARGET_USE_MOV0		ix86_tune_features[X86_TUNE_USE_MOV0]
#define TARGET_USE_CLTD		ix86_tune_features[X86_TUNE_USE_CLTD]
#define TARGET_USE_XCHGB	ix86_tune_features[X86_TUNE_USE_XCHGB]
#define TARGET_SPLIT_LONG_MOVES	ix86_tune_features[X86_TUNE_SPLIT_LONG_MOVES]
#define TARGET_READ_MODIFY_WRITE ix86_tune_features[X86_TUNE_READ_MODIFY_WRITE]
#define TARGET_READ_MODIFY	ix86_tune_features[X86_TUNE_READ_MODIFY]
#define TARGET_PROMOTE_QImode	ix86_tune_features[X86_TUNE_PROMOTE_QIMODE]
#define TARGET_FAST_PREFIX	ix86_tune_features[X86_TUNE_FAST_PREFIX]
#define TARGET_SINGLE_STRINGOP	ix86_tune_features[X86_TUNE_SINGLE_STRINGOP]
#define TARGET_PREFER_KNOWN_REP_MOVSB_STOSB \
	ix86_tune_features[X86_TUNE_PREFER_KNOWN_REP_MOVSB_STOSB]
#define TARGET_MISALIGNED_MOVE_STRING_PRO_EPILOGUES \
	ix86_tune_features[X86_TUNE_MISALIGNED_MOVE_STRING_PRO_EPILOGUES]
#define TARGET_QIMODE_MATH	ix86_tune_features[X86_TUNE_QIMODE_MATH]
#define TARGET_HIMODE_MATH	ix86_tune_features[X86_TUNE_HIMODE_MATH]
#define TARGET_PROMOTE_QI_REGS	ix86_tune_features[X86_TUNE_PROMOTE_QI_REGS]
#define TARGET_PROMOTE_HI_REGS	ix86_tune_features[X86_TUNE_PROMOTE_HI_REGS]
#define TARGET_SINGLE_POP	ix86_tune_features[X86_TUNE_SINGLE_POP]
#define TARGET_DOUBLE_POP	ix86_tune_features[X86_TUNE_DOUBLE_POP]
#define TARGET_SINGLE_PUSH	ix86_tune_features[X86_TUNE_SINGLE_PUSH]
#define TARGET_DOUBLE_PUSH	ix86_tune_features[X86_TUNE_DOUBLE_PUSH]
#define TARGET_INTEGER_DFMODE_MOVES \
	ix86_tune_features[X86_TUNE_INTEGER_DFMODE_MOVES]
#define TARGET_PARTIAL_REG_DEPENDENCY \
	ix86_tune_features[X86_TUNE_PARTIAL_REG_DEPENDENCY]
#define TARGET_SSE_PARTIAL_REG_DEPENDENCY \
	ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY]
#define TARGET_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY \
	ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_FP_CONVERTS_DEPENDENCY]
#define TARGET_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY \
	ix86_tune_features[X86_TUNE_SSE_PARTIAL_REG_CONVERTS_DEPENDENCY]
#define TARGET_SSE_UNALIGNED_LOAD_OPTIMAL \
	ix86_tune_features[X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL]
#define TARGET_SSE_UNALIGNED_STORE_OPTIMAL \
	ix86_tune_features[X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL]
#define TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL \
	ix86_tune_features[X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL]
#define TARGET_SSE_SPLIT_REGS	ix86_tune_features[X86_TUNE_SSE_SPLIT_REGS]
#define TARGET_SSE_TYPELESS_STORES \
	ix86_tune_features[X86_TUNE_SSE_TYPELESS_STORES]
#define TARGET_SSE_LOAD0_BY_PXOR ix86_tune_features[X86_TUNE_SSE_LOAD0_BY_PXOR]
#define TARGET_MEMORY_MISMATCH_STALL \
	ix86_tune_features[X86_TUNE_MEMORY_MISMATCH_STALL]
#define TARGET_PROLOGUE_USING_MOVE \
	ix86_tune_features[X86_TUNE_PROLOGUE_USING_MOVE]
#define TARGET_EPILOGUE_USING_MOVE \
	ix86_tune_features[X86_TUNE_EPILOGUE_USING_MOVE]
#define TARGET_SHIFT1		ix86_tune_features[X86_TUNE_SHIFT1]
#define TARGET_USE_FFREEP	ix86_tune_features[X86_TUNE_USE_FFREEP]
#define TARGET_INTER_UNIT_MOVES_TO_VEC \
	ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_TO_VEC]
#define TARGET_INTER_UNIT_MOVES_FROM_VEC \
	ix86_tune_features[X86_TUNE_INTER_UNIT_MOVES_FROM_VEC]
#define TARGET_INTER_UNIT_CONVERSIONS \
	ix86_tune_features[X86_TUNE_INTER_UNIT_CONVERSIONS]
#define TARGET_FOUR_JUMP_LIMIT	ix86_tune_features[X86_TUNE_FOUR_JUMP_LIMIT]
#define TARGET_SCHEDULE		ix86_tune_features[X86_TUNE_SCHEDULE]
#define TARGET_USE_BT		ix86_tune_features[X86_TUNE_USE_BT]
#define TARGET_USE_INCDEC	ix86_tune_features[X86_TUNE_USE_INCDEC]
#define TARGET_PAD_RETURNS	ix86_tune_features[X86_TUNE_PAD_RETURNS]
#define TARGET_PAD_SHORT_FUNCTION \
	ix86_tune_features[X86_TUNE_PAD_SHORT_FUNCTION]
#define TARGET_EXT_80387_CONSTANTS \
	ix86_tune_features[X86_TUNE_EXT_80387_CONSTANTS]
#define TARGET_AVOID_VECTOR_DECODE \
	ix86_tune_features[X86_TUNE_AVOID_VECTOR_DECODE]
#define TARGET_TUNE_PROMOTE_HIMODE_IMUL \
	ix86_tune_features[X86_TUNE_PROMOTE_HIMODE_IMUL]
#define TARGET_SLOW_IMUL_IMM32_MEM \
	ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM32_MEM]
#define TARGET_SLOW_IMUL_IMM8	ix86_tune_features[X86_TUNE_SLOW_IMUL_IMM8]
#define TARGET_MOVE_M1_VIA_OR	ix86_tune_features[X86_TUNE_MOVE_M1_VIA_OR]
#define TARGET_NOT_UNPAIRABLE	ix86_tune_features[X86_TUNE_NOT_UNPAIRABLE]
#define TARGET_NOT_VECTORMODE	ix86_tune_features[X86_TUNE_NOT_VECTORMODE]
#define TARGET_USE_VECTOR_FP_CONVERTS \
	ix86_tune_features[X86_TUNE_USE_VECTOR_FP_CONVERTS]
#define TARGET_USE_VECTOR_CONVERTS \
	ix86_tune_features[X86_TUNE_USE_VECTOR_CONVERTS]
#define TARGET_SLOW_PSHUFB \
	ix86_tune_features[X86_TUNE_SLOW_PSHUFB]
#define TARGET_AVOID_4BYTE_PREFIXES \
	ix86_tune_features[X86_TUNE_AVOID_4BYTE_PREFIXES]
#define TARGET_USE_GATHER_2PARTS \
	ix86_tune_features[X86_TUNE_USE_GATHER_2PARTS]
#define TARGET_USE_GATHER_4PARTS \
	ix86_tune_features[X86_TUNE_USE_GATHER_4PARTS]
#define TARGET_USE_GATHER \
	ix86_tune_features[X86_TUNE_USE_GATHER]
#define TARGET_FUSE_CMP_AND_BRANCH_32 \
	ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_32]
#define TARGET_FUSE_CMP_AND_BRANCH_64 \
	ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_64]
/* Mode-dependent selector for the two macro-fusion tunings above.  */
#define TARGET_FUSE_CMP_AND_BRANCH \
	(TARGET_64BIT ? TARGET_FUSE_CMP_AND_BRANCH_64 \
	 : TARGET_FUSE_CMP_AND_BRANCH_32)
#define TARGET_FUSE_CMP_AND_BRANCH_SOFLAGS \
	ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_SOFLAGS]
#define TARGET_FUSE_ALU_AND_BRANCH \
	ix86_tune_features[X86_TUNE_FUSE_ALU_AND_BRANCH]
#define TARGET_OPT_AGU		ix86_tune_features[X86_TUNE_OPT_AGU]
#define TARGET_AVOID_LEA_FOR_ADDR \
	ix86_tune_features[X86_TUNE_AVOID_LEA_FOR_ADDR]
#define TARGET_SOFTWARE_PREFETCHING_BENEFICIAL \
	ix86_tune_features[X86_TUNE_SOFTWARE_PREFETCHING_BENEFICIAL]
#define TARGET_AVX256_SPLIT_REGS \
	ix86_tune_features[X86_TUNE_AVX256_SPLIT_REGS]
#define TARGET_GENERAL_REGS_SSE_SPILL \
	ix86_tune_features[X86_TUNE_GENERAL_REGS_SSE_SPILL]
#define TARGET_AVOID_MEM_OPND_FOR_CMOVE \
	ix86_tune_features[X86_TUNE_AVOID_MEM_OPND_FOR_CMOVE]
#define TARGET_SPLIT_MEM_OPND_FOR_FP_CONVERTS \
	ix86_tune_features[X86_TUNE_SPLIT_MEM_OPND_FOR_FP_CONVERTS]
#define TARGET_ADJUST_UNROLL \
	ix86_tune_features[X86_TUNE_ADJUST_UNROLL]
#define TARGET_AVOID_FALSE_DEP_FOR_BMI \
	ix86_tune_features[X86_TUNE_AVOID_FALSE_DEP_FOR_BMI]
#define TARGET_ONE_IF_CONV_INSN \
	ix86_tune_features[X86_TUNE_ONE_IF_CONV_INSN]
#define TARGET_AVOID_MFENCE	ix86_tune_features[X86_TUNE_AVOID_MFENCE]
#define TARGET_EMIT_VZEROUPPER \
	ix86_tune_features[X86_TUNE_EMIT_VZEROUPPER]
#define TARGET_EXPAND_ABS \
	ix86_tune_features[X86_TUNE_EXPAND_ABS]
#define TARGET_V2DF_REDUCTION_PREFER_HADDPD \
	ix86_tune_features[X86_TUNE_V2DF_REDUCTION_PREFER_HADDPD]
#define TARGET_DEST_FALSE_DEP_FOR_GLC \
	ix86_tune_features[X86_TUNE_DEST_FALSE_DEP_FOR_GLC]

/* Feature tests against the various architecture variations.
*/
enum ix86_arch_indices {
X86_ARCH_CMOV,
X86_ARCH_CMPXCHG,
X86_ARCH_CMPXCHG8B,
X86_ARCH_XADD,
X86_ARCH_BSWAP,

X86_ARCH_LAST
};

extern unsigned char ix86_arch_features[X86_ARCH_LAST];

/* One boolean-valued TARGET_* accessor per architecture feature.  */
#define TARGET_CMOV		ix86_arch_features[X86_ARCH_CMOV]
#define TARGET_CMPXCHG		ix86_arch_features[X86_ARCH_CMPXCHG]
#define TARGET_CMPXCHG8B	ix86_arch_features[X86_ARCH_CMPXCHG8B]
#define TARGET_XADD		ix86_arch_features[X86_ARCH_XADD]
#define TARGET_BSWAP		ix86_arch_features[X86_ARCH_BSWAP]

/* For sane SSE instruction set generation we need fcomi instruction.
   It is safe to enable all CMOVE instructions.  Also, RDRAND intrinsic
   expands to a sequence that includes conditional move. */
#define TARGET_CMOVE		(TARGET_CMOV || TARGET_SSE || TARGET_RDRND)

#define TARGET_FISTTP		(TARGET_SSE3 && TARGET_80387)

extern unsigned char ix86_prefetch_sse;
#define TARGET_PREFETCH_SSE	ix86_prefetch_sse

#define ASSEMBLER_DIALECT	(ix86_asm_dialect)

#define TARGET_SSE_MATH		((ix86_fpmath & FPMATH_SSE) != 0)
/* True when both SSE and x87 math are enabled at once.  */
#define TARGET_MIX_SSE_I387 \
  ((ix86_fpmath & (FPMATH_SSE | FPMATH_387)) == (FPMATH_SSE | FPMATH_387))

#define TARGET_HARD_SF_REGS	(TARGET_80387 || TARGET_MMX || TARGET_SSE)
#define TARGET_HARD_DF_REGS	(TARGET_80387 || TARGET_SSE)
#define TARGET_HARD_XF_REGS	(TARGET_80387)

#define TARGET_GNU_TLS		(ix86_tls_dialect == TLS_DIALECT_GNU)
#define TARGET_GNU2_TLS		(ix86_tls_dialect == TLS_DIALECT_GNU2)
#define TARGET_ANY_GNU_TLS	(TARGET_GNU_TLS || TARGET_GNU2_TLS)
#define TARGET_SUN_TLS		0

#ifndef TARGET_64BIT_DEFAULT
#define TARGET_64BIT_DEFAULT 0
#endif
#ifndef TARGET_TLS_DIRECT_SEG_REFS_DEFAULT
#define TARGET_TLS_DIRECT_SEG_REFS_DEFAULT 0
#endif

#define TARGET_SSP_GLOBAL_GUARD (ix86_stack_protector_guard == SSP_GLOBAL)
#define TARGET_SSP_TLS_GUARD	(ix86_stack_protector_guard == SSP_TLS)

/* Fence to use after loop using storent.
*/
extern GTY(()) tree x86_mfence;
#define FENCE_FOLLOWING_MOVNT x86_mfence

/* Once GDB has been enhanced to deal with functions without frame
   pointers, we can change this to allow for elimination of
   the frame pointer in leaf functions.  */
#define TARGET_DEFAULT 0

/* Extra bits to force.  */
#define TARGET_SUBTARGET_DEFAULT 0
#define TARGET_SUBTARGET_ISA_DEFAULT 0

/* Extra bits to force on w/ 32-bit mode.  */
#define TARGET_SUBTARGET32_DEFAULT 0
#define TARGET_SUBTARGET32_ISA_DEFAULT 0

/* Extra bits to force on w/ 64-bit mode.  */
#define TARGET_SUBTARGET64_DEFAULT 0
/* Enable MMX, SSE and SSE2 by default.  */
#define TARGET_SUBTARGET64_ISA_DEFAULT \
  (OPTION_MASK_ISA_MMX | OPTION_MASK_ISA_SSE | OPTION_MASK_ISA_SSE2)

/* Replace MACH-O, ifdefs by in-line tests, where possible.
   (a) Macros defined in config/i386/darwin.h  */
#define TARGET_MACHO 0
#define TARGET_MACHO_SYMBOL_STUBS 0
#define MACHOPIC_ATT_STUB 0
/* (b) Macros defined in config/darwin.h  */
#define MACHO_DYNAMIC_NO_PIC_P 0
#define MACHOPIC_INDIRECT 0
#define MACHOPIC_PURE 0

/* For the RDOS  */
#define TARGET_RDOS 0

/* For the Windows 64-bit ABI.  */
#define TARGET_64BIT_MS_ABI (TARGET_64BIT && ix86_cfun_abi () == MS_ABI)

/* For the Windows 32-bit ABI.  */
#define TARGET_32BIT_MS_ABI (!TARGET_64BIT && ix86_cfun_abi () == MS_ABI)

/* This is re-defined by cygming.h.  */
#define TARGET_SEH 0

/* The default abi used by target.  */
#define DEFAULT_ABI SYSV_ABI

/* The default TLS segment register used by target.  */
#define DEFAULT_TLS_SEG_REG \
  (TARGET_64BIT ? ADDR_SPACE_SEG_FS : ADDR_SPACE_SEG_GS)

/* Subtargets may reset this to 1 in order to enable 96-bit long double
   with the rounding mode forced to 53 bits.  */
#define TARGET_96_ROUND_53_LONG_DOUBLE 0

#ifndef SUBTARGET_DRIVER_SELF_SPECS
# define SUBTARGET_DRIVER_SELF_SPECS ""
#endif

#define DRIVER_SELF_SPECS SUBTARGET_DRIVER_SELF_SPECS

/* -march=native handling only makes sense with compiler running on
   an x86 or x86_64 chip.
If changing this condition, also change the condition in
   driver-i386.cc.  */
#if defined(__i386__) || defined(__x86_64__)
/* In driver-i386.cc.  */
extern const char *host_detect_local_cpu (int argc, const char **argv);
#define EXTRA_SPEC_FUNCTIONS \
  { "local_cpu_detect", host_detect_local_cpu },
#define HAVE_LOCAL_CPU_DETECT
#endif

/* Spec fragments that match (OPT_ARCH64) or don't match (OPT_ARCH32)
   a 64-bit compilation, depending on the configured default.  */
#if TARGET_64BIT_DEFAULT
#define OPT_ARCH64 "!m32"
#define OPT_ARCH32 "m32"
#else
#define OPT_ARCH64 "m64|mx32"
#define OPT_ARCH32 "m64|mx32:;"
#endif

/* Support for configure-time defaults of some command line options.
   The order here is important so that -march doesn't squash the
   tune or cpu values.  */
#define OPTION_DEFAULT_SPECS					   \
  {"tune", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
  {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"cpu", "%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}" }, \
  {"cpu_32", "%{" OPT_ARCH32 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"cpu_64", "%{" OPT_ARCH64 ":%{!mtune=*:%{!mcpu=*:%{!march=*:-mtune=%(VALUE)}}}}" }, \
  {"arch", "%{!march=*:-march=%(VALUE)}"}, \
  {"arch_32", "%{" OPT_ARCH32 ":%{!march=*:-march=%(VALUE)}}"}, \
  {"arch_64", "%{" OPT_ARCH64 ":%{!march=*:-march=%(VALUE)}}"},

/* Specs for the compiler proper */

#ifndef CC1_CPU_SPEC
#define CC1_CPU_SPEC_1 ""

#ifndef HAVE_LOCAL_CPU_DETECT
#define CC1_CPU_SPEC CC1_CPU_SPEC_1
#else
/* Resolve -march=native / -mtune=native via the host CPU detection
   spec function declared above.  */
#define ARCH_ARG "%{" OPT_ARCH64 ":64;:32}"
#define CC1_CPU_SPEC CC1_CPU_SPEC_1 \
"%{march=native:%>march=native %:local_cpu_detect(arch " ARCH_ARG ") \
  %{!mtune=*:%>mtune=native %:local_cpu_detect(tune " ARCH_ARG ")}} \
%{mtune=native:%>mtune=native %:local_cpu_detect(tune " ARCH_ARG ")}"
#endif
#endif