|  | diff -ruN gcc-4.9.2/config.sub gcc-4.9.2-riscv/config.sub | 
|  | --- gcc-4.9.2/config.sub	2013-10-01 09:50:56.000000000 -0700 | 
|  | +++ gcc-4.9.2-riscv/config.sub	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -334,6 +334,9 @@ | 
|  | ms1) | 
|  | basic_machine=mt-unknown | 
|  | ;; | 
|  | +	riscv) | 
|  | +		basic_machine=riscv-ucb | 
|  | +		;; | 
|  |  | 
|  | strongarm | thumb | xscale) | 
|  | basic_machine=arm-unknown | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/constraints.md gcc-4.9.2-riscv/gcc/config/riscv/constraints.md | 
|  | --- gcc-4.9.2/gcc/config/riscv/constraints.md	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/constraints.md	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,121 @@ | 
|  | +;; Constraint definitions for MIPS. | 
|  | +;; Copyright (C) 2006, 2007, 2008, 2010 Free Software Foundation, Inc. | 
|  | +;; | 
|  | +;; This file is part of GCC. | 
|  | +;; | 
|  | +;; GCC is free software; you can redistribute it and/or modify | 
|  | +;; it under the terms of the GNU General Public License as published by | 
|  | +;; the Free Software Foundation; either version 3, or (at your option) | 
|  | +;; any later version. | 
|  | +;; | 
|  | +;; GCC is distributed in the hope that it will be useful, | 
|  | +;; but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +;; GNU General Public License for more details. | 
|  | +;; | 
|  | +;; You should have received a copy of the GNU General Public License | 
|  | +;; along with GCC; see the file COPYING3.  If not see | 
|  | +;; <http://www.gnu.org/licenses/>. | 
|  | + | 
|  | +;; Vector register constraints | 
|  | + | 
|  | +(define_register_constraint "A" "VEC_GR_REGS" | 
|  | +  "A vector integer register.") | 
|  | + | 
|  | +(define_register_constraint "B" "VEC_FP_REGS" | 
|  | +  "A vector floating-point register.") | 
|  | + | 
|  | +;; Register constraints | 
|  | + | 
|  | +(define_register_constraint "d" "GR_REGS" | 
|  | +  "An address register.  This is equivalent to @code{r} unless | 
|  | +   generating MIPS16 code.") | 
|  | + | 
|  | +(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS" | 
|  | +  "A floating-point register (if available).") | 
|  | + | 
|  | +(define_register_constraint "h" "NO_REGS" | 
|  | +  "Formerly the @code{hi} register.  This constraint is no longer supported.") | 
|  | + | 
|  | +(define_register_constraint "b" "ALL_REGS" | 
|  | +  "@internal") | 
|  | + | 
|  | +(define_register_constraint "j" "V1_REG" | 
|  | +  "@internal") | 
|  | + | 
|  | +(define_register_constraint "z" "GR_REGS" | 
|  | +  "A floating-point condition code register.") | 
|  | + | 
|  | +;; This is a normal rather than a register constraint because we can | 
|  | +;; never use the stack pointer as a reload register. | 
|  | +(define_constraint "ks" | 
|  | +  "@internal" | 
|  | +  (and (match_code "reg") | 
|  | +       (match_test "REGNO (op) == STACK_POINTER_REGNUM"))) | 
|  | + | 
|  | +;; Integer constraints | 
|  | + | 
|  | +(define_constraint "Z" | 
|  | +  "@internal" | 
|  | +  (and (match_code "const_int") | 
|  | +       (match_test "1"))) | 
|  | + | 
|  | +(define_constraint "I" | 
|  | +  "An I-type 12-bit signed immediate." | 
|  | +  (and (match_code "const_int") | 
|  | +       (match_test "SMALL_OPERAND (ival)"))) | 
|  | + | 
|  | +(define_constraint "J" | 
|  | +  "Integer zero." | 
|  | +  (and (match_code "const_int") | 
|  | +       (match_test "ival == 0"))) | 
|  | + | 
|  | +;; Floating-point constraints | 
|  | + | 
|  | +(define_constraint "G" | 
|  | +  "Floating-point zero." | 
|  | +  (and (match_code "const_double") | 
|  | +       (match_test "op == CONST0_RTX (mode)"))) | 
|  | + | 
|  | +;; General constraints | 
|  | + | 
|  | +(define_constraint "Q" | 
|  | +  "@internal" | 
|  | +  (match_operand 0 "const_arith_operand")) | 
|  | + | 
|  | +(define_memory_constraint "YR" | 
|  | +  "An address that is held in a general-purpose register." | 
|  | +  (and (match_code "mem") | 
|  | +       (match_test "GET_CODE(XEXP(op,0)) == REG"))) | 
|  | + | 
|  | +(define_memory_constraint "R" | 
|  | +  "An address that can be used in a non-macro load or store." | 
|  | +  (and (match_code "mem") | 
|  | +       (match_test "mips_address_insns (XEXP (op, 0), mode, false) == 1"))) | 
|  | + | 
|  | +(define_constraint "S" | 
|  | +  "@internal | 
|  | +   A constant call address." | 
|  | +  (and (match_operand 0 "call_insn_operand") | 
|  | +       (match_test "CONSTANT_P (op)"))) | 
|  | + | 
|  | +(define_constraint "T" | 
|  | +  "@internal | 
|  | +   A constant @code{move_operand}." | 
|  | +  (and (match_operand 0 "move_operand") | 
|  | +       (match_test "CONSTANT_P (op)"))) | 
|  | + | 
|  | +(define_memory_constraint "W" | 
|  | +  "@internal | 
|  | +   A memory address based on a member of @code{BASE_REG_CLASS}.  This is | 
|  | +   true for all references (although it can sometimes be implicit if | 
|  | +   @samp{!TARGET_EXPLICIT_RELOCS}).  For MIPS16, it excludes stack and | 
|  | +   constant-pool references." | 
|  | +  (and (match_code "mem") | 
|  | +       (match_operand 0 "memory_operand"))) | 
|  | + | 
|  | +(define_constraint "YG" | 
|  | +  "@internal | 
|  | +   A vector zero." | 
|  | +  (and (match_code "const_vector") | 
|  | +       (match_test "op == CONST0_RTX (mode)"))) | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/crti.asm gcc-4.9.2-riscv/gcc/config/riscv/crti.asm | 
|  | --- gcc-4.9.2/gcc/config/riscv/crti.asm	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/crti.asm	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,42 @@ | 
|  | +/* Copyright (C) 2001, 2002 Free Software Foundation, Inc. | 
|  | + | 
|  | +This file is part of GCC. | 
|  | + | 
|  | +GCC is free software; you can redistribute it and/or modify it under | 
|  | +the terms of the GNU General Public License as published by the Free | 
|  | +Software Foundation; either version 3, or (at your option) any later | 
|  | +version. | 
|  | + | 
|  | +GCC is distributed in the hope that it will be useful, but WITHOUT ANY | 
|  | +WARRANTY; without even the implied warranty of MERCHANTABILITY or | 
|  | +FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License | 
|  | +for more details. | 
|  | + | 
|  | +Under Section 7 of GPL version 3, you are granted additional | 
|  | +permissions described in the GCC Runtime Library Exception, version | 
|  | +3.1, as published by the Free Software Foundation. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License and | 
|  | +a copy of the GCC Runtime Library Exception along with this program; | 
|  | +see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see | 
|  | +<http://www.gnu.org/licenses/>.  */ | 
|  | + | 
|  | +#ifdef __riscv64 | 
|  | +# define SR sd | 
|  | +#else | 
|  | +# define SR sw | 
|  | +#endif | 
|  | + | 
|  | +	.section .init,"ax",@progbits | 
|  | +	.globl	_init | 
|  | +	.type	_init,@function | 
|  | +_init: | 
|  | +	add	sp, sp, -8 | 
|  | +	SR	ra, 0(sp) | 
|  | + | 
|  | +	.section .fini,"ax",@progbits | 
|  | +	.globl	_fini | 
|  | +	.type	_fini,@function | 
|  | +_fini: | 
|  | +	add	sp, sp, -8 | 
|  | +	SR	ra, 0(sp) | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/crtn.asm gcc-4.9.2-riscv/gcc/config/riscv/crtn.asm | 
|  | --- gcc-4.9.2/gcc/config/riscv/crtn.asm	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/crtn.asm	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,38 @@ | 
|  | +/* Copyright (C) 2001, 2002 Free Software Foundation, Inc. | 
|  | + | 
|  | +This file is part of GCC. | 
|  | + | 
|  | +GCC is free software; you can redistribute it and/or modify it under | 
|  | +the terms of the GNU General Public License as published by the Free | 
|  | +Software Foundation; either version 3, or (at your option) any later | 
|  | +version. | 
|  | + | 
|  | +GCC is distributed in the hope that it will be useful, but WITHOUT ANY | 
|  | +WARRANTY; without even the implied warranty of MERCHANTABILITY or | 
|  | +FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License | 
|  | +for more details. | 
|  | + | 
|  | +Under Section 7 of GPL version 3, you are granted additional | 
|  | +permissions described in the GCC Runtime Library Exception, version | 
|  | +3.1, as published by the Free Software Foundation. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License and | 
|  | +a copy of the GCC Runtime Library Exception along with this program; | 
|  | +see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see | 
|  | +<http://www.gnu.org/licenses/>.  */ | 
|  | + | 
|  | +#ifdef __riscv64 | 
|  | +# define LR ld | 
|  | +#else | 
|  | +# define LR lw | 
|  | +#endif | 
|  | + | 
|  | +	.section .init,"ax",@progbits | 
|  | +	LR	ra, 0(sp) | 
|  | +	addi	sp, sp, 8 | 
|  | +	ret | 
|  | + | 
|  | +	.section .fini,"ax",@progbits | 
|  | +	LR	ra, 0(sp) | 
|  | +	addi	sp, sp, 8 | 
|  | +	ret | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/elf.h gcc-4.9.2-riscv/gcc/config/riscv/elf.h | 
|  | --- gcc-4.9.2/gcc/config/riscv/elf.h	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/elf.h	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,51 @@ | 
|  | +/* Target macros for mips*-elf targets. | 
|  | +   Copyright (C) 1994, 1997, 1999, 2000, 2002, 2003, 2004, 2007, 2010 | 
|  | +   Free Software Foundation, Inc. | 
|  | + | 
|  | +This file is part of GCC. | 
|  | + | 
|  | +GCC is free software; you can redistribute it and/or modify | 
|  | +it under the terms of the GNU General Public License as published by | 
|  | +the Free Software Foundation; either version 3, or (at your option) | 
|  | +any later version. | 
|  | + | 
|  | +GCC is distributed in the hope that it will be useful, | 
|  | +but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +GNU General Public License for more details. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License | 
|  | +along with GCC; see the file COPYING3.  If not see | 
|  | +<http://www.gnu.org/licenses/>.  */ | 
|  | + | 
|  | +/* MIPS assemblers don't have the usual .set foo,bar construct; | 
|  | +   .set is used for assembler options instead.  */ | 
|  | +#undef SET_ASM_OP | 
|  | +#define ASM_OUTPUT_DEF(FILE, LABEL1, LABEL2)			\ | 
|  | +  do								\ | 
|  | +    {								\ | 
|  | +      fputc ('\t', FILE);					\ | 
|  | +      assemble_name (FILE, LABEL1);				\ | 
|  | +      fputs (" = ", FILE);					\ | 
|  | +      assemble_name (FILE, LABEL2);				\ | 
|  | +      fputc ('\n', FILE);					\ | 
|  | +    }								\ | 
|  | +  while (0) | 
|  | + | 
|  | +#undef ASM_DECLARE_OBJECT_NAME | 
|  | +#define ASM_DECLARE_OBJECT_NAME mips_declare_object_name | 
|  | + | 
|  | +#undef ASM_FINISH_DECLARE_OBJECT | 
|  | +#define ASM_FINISH_DECLARE_OBJECT mips_finish_declare_object | 
|  | + | 
|  | +/* Leave the linker script to choose the appropriate libraries.  */ | 
|  | +#undef  LIB_SPEC | 
|  | +#define LIB_SPEC "" | 
|  | + | 
|  | +#undef  STARTFILE_SPEC | 
|  | +#define STARTFILE_SPEC "crti%O%s crtbegin%O%s" | 
|  | + | 
|  | +#undef  ENDFILE_SPEC | 
|  | +#define ENDFILE_SPEC "crtend%O%s crtn%O%s" | 
|  | + | 
|  | +#define NO_IMPLICIT_EXTERN_C 1 | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/generic.md gcc-4.9.2-riscv/gcc/config/riscv/generic.md | 
|  | --- gcc-4.9.2/gcc/config/riscv/generic.md	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/generic.md	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,105 @@ | 
|  | +;; Generic DFA-based pipeline description for MIPS targets | 
|  | +;;   Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc. | 
|  | +;; | 
|  | +;; This file is part of GCC. | 
|  | + | 
|  | +;; GCC is free software; you can redistribute it and/or modify it | 
|  | +;; under the terms of the GNU General Public License as published | 
|  | +;; by the Free Software Foundation; either version 3, or (at your | 
|  | +;; option) any later version. | 
|  | + | 
|  | +;; GCC is distributed in the hope that it will be useful, but WITHOUT | 
|  | +;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | 
|  | +;; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public | 
|  | +;; License for more details. | 
|  | + | 
|  | +;; You should have received a copy of the GNU General Public License | 
|  | +;; along with GCC; see the file COPYING3.  If not see | 
|  | +;; <http://www.gnu.org/licenses/>. | 
|  | + | 
|  | + | 
|  | +;; This file is derived from the old define_function_unit description. | 
|  | +;; Each reservation can be overridden on a processor-by-processor basis. | 
|  | + | 
|  | +(define_insn_reservation "generic_alu" 1 | 
|  | +  (eq_attr "type" "unknown,prefetch,prefetchx,condmove,const,arith, | 
|  | +		   shift,slt,clz,trap,multi,nop,logical,signext,move") | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_load" 3 | 
|  | +  (eq_attr "type" "load,fpload,fpidxload") | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_store" 1 | 
|  | +  (eq_attr "type" "store,fpstore,fpidxstore") | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_xfer" 2 | 
|  | +  (eq_attr "type" "mfc,mtc") | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_branch" 1 | 
|  | +  (eq_attr "type" "branch,jump,call") | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_hilo" 1 | 
|  | +  (eq_attr "type" "mfhilo,mthilo") | 
|  | +  "imuldiv*3") | 
|  | + | 
|  | +(define_insn_reservation "generic_imul" 17 | 
|  | +  (eq_attr "type" "imul,imul3,imadd") | 
|  | +  "imuldiv*17") | 
|  | + | 
|  | +(define_insn_reservation "generic_idiv" 38 | 
|  | +  (eq_attr "type" "idiv") | 
|  | +  "imuldiv*38") | 
|  | + | 
|  | +(define_insn_reservation "generic_fcvt" 1 | 
|  | +  (eq_attr "type" "fcvt") | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_fmove" 2 | 
|  | +  (eq_attr "type" "fabs,fneg,fmove") | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_fcmp" 3 | 
|  | +  (eq_attr "type" "fcmp") | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_fadd" 4 | 
|  | +  (eq_attr "type" "fadd") | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_fmul_single" 7 | 
|  | +  (and (eq_attr "type" "fmul,fmadd") | 
|  | +       (eq_attr "mode" "SF")) | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_fmul_double" 8 | 
|  | +  (and (eq_attr "type" "fmul,fmadd") | 
|  | +       (eq_attr "mode" "DF")) | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_fdiv_single" 23 | 
|  | +  (and (eq_attr "type" "fdiv,frdiv") | 
|  | +       (eq_attr "mode" "SF")) | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_fdiv_double" 36 | 
|  | +  (and (eq_attr "type" "fdiv,frdiv") | 
|  | +       (eq_attr "mode" "DF")) | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_fsqrt_single" 54 | 
|  | +  (and (eq_attr "type" "fsqrt,frsqrt") | 
|  | +       (eq_attr "mode" "SF")) | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_fsqrt_double" 112 | 
|  | +  (and (eq_attr "type" "fsqrt,frsqrt") | 
|  | +       (eq_attr "mode" "DF")) | 
|  | +  "alu") | 
|  | + | 
|  | +(define_insn_reservation "generic_frecip_fsqrt_step" 5 | 
|  | +  (eq_attr "type" "frdiv1,frdiv2,frsqrt1,frsqrt2") | 
|  | +  "alu") | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/linux64.h gcc-4.9.2-riscv/gcc/config/riscv/linux64.h | 
|  | --- gcc-4.9.2/gcc/config/riscv/linux64.h	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/linux64.h	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,58 @@ | 
|  | +/* Definitions for MIPS running Linux-based GNU systems with ELF format | 
|  | +   using n32/64 abi. | 
|  | +   Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011 | 
|  | +   Free Software Foundation, Inc. | 
|  | + | 
|  | +This file is part of GCC. | 
|  | + | 
|  | +GCC is free software; you can redistribute it and/or modify | 
|  | +it under the terms of the GNU General Public License as published by | 
|  | +the Free Software Foundation; either version 3, or (at your option) | 
|  | +any later version. | 
|  | + | 
|  | +GCC is distributed in the hope that it will be useful, | 
|  | +but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +GNU General Public License for more details. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License | 
|  | +along with GCC; see the file COPYING3.  If not see | 
|  | +<http://www.gnu.org/licenses/>.  */ | 
|  | + | 
|  | +/* Force the default endianness and ABI flags onto the command line | 
|  | +   in order to make the other specs easier to write.  */ | 
|  | +#undef DRIVER_SELF_SPECS | 
|  | +#define DRIVER_SELF_SPECS \ | 
|  | +  LINUX_DRIVER_SELF_SPECS \ | 
|  | +  " %{!EB:%{!EL:%(endian_spec)}}" \ | 
|  | +  " %{" OPT_ARCH32 ": -m32} %{" OPT_ARCH64 ": -m64}" \ | 
|  | + | 
|  | +#undef LIB_SPEC | 
|  | +#define LIB_SPEC "\ | 
|  | +%{pthread:-lpthread} \ | 
|  | +%{shared:-lc} \ | 
|  | +%{!shared: \ | 
|  | +  %{profile:-lc_p} %{!profile:-lc}}" | 
|  | + | 
|  | +#define GLIBC_DYNAMIC_LINKER32 "/lib32/ld.so.1" | 
|  | +#define GLIBC_DYNAMIC_LINKER64 "/lib/ld.so.1" | 
|  | + | 
|  | +#undef LINK_SPEC | 
|  | +#define LINK_SPEC "\ | 
|  | +%{G*} %{EB} %{EL} %{mips1} %{mips2} %{mips3} %{mips4} \ | 
|  | +%{shared} \ | 
|  | + %(endian_spec) \ | 
|  | +  %{!shared: \ | 
|  | +    %{!static: \ | 
|  | +      %{rdynamic:-export-dynamic} \ | 
|  | +      %{" OPT_ARCH64 ": -dynamic-linker " LINUX_DYNAMIC_LINKER64 "} \ | 
|  | +      %{" OPT_ARCH32 ": -dynamic-linker " LINUX_DYNAMIC_LINKER32 "}} \ | 
|  | +    %{static:-static}} \ | 
|  | +%{" OPT_ARCH64 ":-melf64%{EB:b}%{EL:l}riscv} \ | 
|  | +%{" OPT_ARCH32 ":-melf32%{EB:b}%{EL:l}riscv}" | 
|  | + | 
|  | +/* GNU/Linux doesn't use the same floating-point format that IRIX uses | 
|  | +   for long double.  There's no need to override this here, since | 
|  | +   ieee_quad_format is the default, but let's put this here to make | 
|  | +   sure nobody thinks we just forgot to set it to something else.  */ | 
|  | +#define MIPS_TFMODE_FORMAT mips_quad_format | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/linux.h gcc-4.9.2-riscv/gcc/config/riscv/linux.h | 
|  | --- gcc-4.9.2/gcc/config/riscv/linux.h	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/linux.h	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,115 @@ | 
|  | +/* Definitions for MIPS running Linux-based GNU systems with ELF format. | 
|  | +   Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, | 
|  | +   2007, 2008, 2010, 2011 Free Software Foundation, Inc. | 
|  | + | 
|  | +This file is part of GCC. | 
|  | + | 
|  | +GCC is free software; you can redistribute it and/or modify | 
|  | +it under the terms of the GNU General Public License as published by | 
|  | +the Free Software Foundation; either version 3, or (at your option) | 
|  | +any later version. | 
|  | + | 
|  | +GCC is distributed in the hope that it will be useful, | 
|  | +but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +GNU General Public License for more details. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License | 
|  | +along with GCC; see the file COPYING3.  If not see | 
|  | +<http://www.gnu.org/licenses/>.  */ | 
|  | + | 
|  | +#undef WCHAR_TYPE | 
|  | +#define WCHAR_TYPE "int" | 
|  | + | 
|  | +#undef WCHAR_TYPE_SIZE | 
|  | +#define WCHAR_TYPE_SIZE 32 | 
|  | + | 
|  | +#undef ASM_DECLARE_OBJECT_NAME | 
|  | +#define ASM_DECLARE_OBJECT_NAME mips_declare_object_name | 
|  | + | 
|  | +#undef TARGET_VERSION | 
|  | +#if TARGET_ENDIAN_DEFAULT == 0 | 
|  | +#define TARGET_VERSION fprintf (stderr, " (RISC-V LE Linux/ELF)"); | 
|  | +#else | 
|  | +#define TARGET_VERSION fprintf (stderr, " (RISC-V BE Linux/ELF)"); | 
|  | +#endif | 
|  | + | 
|  | +/* If we don't set MASK_ABICALLS, we can't default to PIC.  */ | 
|  | +#undef TARGET_DEFAULT | 
|  | +#define TARGET_DEFAULT MASK_ABICALLS | 
|  | + | 
|  | +#define TARGET_OS_CPP_BUILTINS()				\ | 
|  | +  do {								\ | 
|  | +    LINUX_TARGET_OS_CPP_BUILTINS();				\ | 
|  | +    /* The GNU C++ standard library requires this.  */		\ | 
|  | +    if (c_dialect_cxx ())					\ | 
|  | +      builtin_define ("_GNU_SOURCE");				\ | 
|  | +  } while (0) | 
|  | + | 
|  | +#undef SUBTARGET_CPP_SPEC | 
|  | +#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}" | 
|  | + | 
|  | +/* A standard GNU/Linux mapping.  On most targets, it is included in | 
|  | +   CC1_SPEC itself by config/linux.h, but mips.h overrides CC1_SPEC | 
|  | +   and provides this hook instead.  */ | 
|  | +#undef SUBTARGET_CC1_SPEC | 
|  | +#define SUBTARGET_CC1_SPEC "%{profile:-p}" | 
|  | + | 
|  | +#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1" | 
|  | + | 
|  | +/* Borrowed from sparc/linux.h */ | 
|  | +#undef LINK_SPEC | 
|  | +#define LINK_SPEC \ | 
|  | + "%(endian_spec) \ | 
|  | +  %{shared:-shared} \ | 
|  | +  %{!shared: \ | 
|  | +    %{!static: \ | 
|  | +      %{rdynamic:-export-dynamic} \ | 
|  | +      -dynamic-linker " LINUX_DYNAMIC_LINKER "} \ | 
|  | +      %{static:-static}}" | 
|  | + | 
|  | +/* The MIPS assembler has different syntax for .set. We set it to | 
|  | +   .dummy to trap any errors.  */ | 
|  | +#undef SET_ASM_OP | 
|  | +#define SET_ASM_OP "\t.dummy\t" | 
|  | + | 
|  | +#undef ASM_OUTPUT_DEF | 
|  | +#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2)				\ | 
|  | + do {									\ | 
|  | +	fputc ( '\t', FILE);						\ | 
|  | +	assemble_name (FILE, LABEL1);					\ | 
|  | +	fputs ( " = ", FILE);						\ | 
|  | +	assemble_name (FILE, LABEL2);					\ | 
|  | +	fputc ( '\n', FILE);						\ | 
|  | + } while (0) | 
|  | + | 
|  | +/* The glibc _mcount stub will save $v0 for us.  Don't mess with saving | 
|  | +   it, since ASM_OUTPUT_REG_PUSH/ASM_OUTPUT_REG_POP do not work in the | 
|  | +   presence of $gp-relative calls.  */ | 
|  | +#undef ASM_OUTPUT_REG_PUSH | 
|  | +#undef ASM_OUTPUT_REG_POP | 
|  | + | 
|  | +#undef LIB_SPEC | 
|  | +#define LIB_SPEC "\ | 
|  | +%{pthread:-lpthread} \ | 
|  | +%{shared:-lc} \ | 
|  | +%{!shared: \ | 
|  | +  %{profile:-lc_p} %{!profile:-lc}}" | 
|  | + | 
|  | +#define MD_UNWIND_SUPPORT "config/riscv/linux-unwind.h" | 
|  | + | 
|  | +/* -march=native handling only makes sense with compiler running on | 
|  | +   a RISC-V machine.  */ | 
|  | +#define MARCH_MTUNE_NATIVE_SPECS "" | 
|  | + | 
|  | +#define LINUX_DRIVER_SELF_SPECS \ | 
|  | +  MARCH_MTUNE_NATIVE_SPECS | 
|  | + | 
|  | +#undef DRIVER_SELF_SPECS | 
|  | +#define DRIVER_SELF_SPECS \ | 
|  | +  LINUX_DRIVER_SELF_SPECS | 
|  | + | 
|  | +/* Similar to standard Linux, but adding -ffast-math support.  */ | 
|  | +#undef  ENDFILE_SPEC | 
|  | +#define ENDFILE_SPEC \ | 
|  | +   "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s" | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/linux-unwind.h gcc-4.9.2-riscv/gcc/config/riscv/linux-unwind.h | 
|  | --- gcc-4.9.2/gcc/config/riscv/linux-unwind.h	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/linux-unwind.h	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,120 @@ | 
|  | +/* DWARF2 EH unwinding support for MIPS Linux. | 
|  | +   Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. | 
|  | + | 
|  | +This file is part of GCC. | 
|  | + | 
|  | +GCC is free software; you can redistribute it and/or modify | 
|  | +it under the terms of the GNU General Public License as published by | 
|  | +the Free Software Foundation; either version 3, or (at your option) | 
|  | +any later version. | 
|  | + | 
|  | +GCC is distributed in the hope that it will be useful, | 
|  | +but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +GNU General Public License for more details. | 
|  | + | 
|  | +Under Section 7 of GPL version 3, you are granted additional | 
|  | +permissions described in the GCC Runtime Library Exception, version | 
|  | +3.1, as published by the Free Software Foundation. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License and | 
|  | +a copy of the GCC Runtime Library Exception along with this program; | 
|  | +see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see | 
|  | +<http://www.gnu.org/licenses/>.  */ | 
|  | + | 
|  | +#ifndef inhibit_libc | 
|  | +/* Do code reading to identify a signal frame, and set the frame | 
|  | +   state data appropriately.  See unwind-dw2.c for the structs.  */ | 
|  | + | 
|  | +#include <signal.h> | 
|  | +#include <asm/unistd.h> | 
|  | + | 
|  | +/* The third parameter to the signal handler points to something with | 
|  | + * this structure defined in asm/ucontext.h, but the name clashes with | 
|  | + * struct ucontext from sys/ucontext.h so this private copy is used.  */ | 
|  | +typedef struct _sig_ucontext { | 
|  | +    unsigned long         uc_flags; | 
|  | +    struct _sig_ucontext  *uc_link; | 
|  | +    stack_t               uc_stack; | 
|  | +    struct sigcontext uc_mcontext; | 
|  | +    sigset_t      uc_sigmask; | 
|  | +} _sig_ucontext_t; | 
|  | + | 
|  | +#define MD_FALLBACK_FRAME_STATE_FOR mips_fallback_frame_state | 
|  | + | 
|  | +static _Unwind_Reason_Code | 
|  | +mips_fallback_frame_state (struct _Unwind_Context *context, | 
|  | +			   _Unwind_FrameState *fs) | 
|  | +{ | 
|  | +  u_int32_t *pc = (u_int32_t *) context->ra; | 
|  | +  struct sigcontext *sc; | 
|  | +  _Unwind_Ptr new_cfa, reg_offset; | 
|  | +  int i; | 
|  | + | 
|  | +  /* 24021061 li v0, 0x1061 (rt_sigreturn)*/ | 
|  | +  /* 0000000c syscall    */ | 
|  | +  /*    or */ | 
|  | +  /* 24021017 li v0, 0x1017 (sigreturn) */ | 
|  | +  /* 0000000c syscall  */ | 
|  | +  if (pc[1] != 0x0000000c) | 
|  | +    return _URC_END_OF_STACK; | 
|  | +#if _MIPS_SIM == _ABIO32 | 
|  | +  if (pc[0] == (0x24020000 | __NR_sigreturn)) | 
|  | +    { | 
|  | +      struct sigframe { | 
|  | +	u_int32_t ass[4];  /* Argument save space for o32.  */ | 
|  | +	u_int32_t trampoline[2]; | 
|  | +	struct sigcontext sigctx; | 
|  | +      } *rt_ = context->cfa; | 
|  | +      sc = &rt_->sigctx; | 
|  | +    } | 
|  | +  else | 
|  | +#endif | 
|  | +  if (pc[0] == (0x24020000 | __NR_rt_sigreturn)) | 
|  | +    { | 
|  | +      struct rt_sigframe { | 
|  | +	u_int32_t ass[4];  /* Argument save space for o32.  */ | 
|  | +	u_int32_t trampoline[2]; | 
|  | +	struct siginfo info; | 
|  | +	_sig_ucontext_t uc; | 
|  | +      } *rt_ = context->cfa; | 
|  | +      sc = &rt_->uc.uc_mcontext; | 
|  | +    } | 
|  | +  else | 
|  | +    return _URC_END_OF_STACK; | 
|  | + | 
|  | +  new_cfa = (_Unwind_Ptr) sc; | 
|  | +  fs->regs.cfa_how = CFA_REG_OFFSET; | 
|  | +  fs->regs.cfa_reg = STACK_POINTER_REGNUM; | 
|  | +  fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa; | 
|  | + | 
|  | +  /* On o32 Linux, the register save slots in the sigcontext are | 
|  | +     eight bytes.  We need the lower half of each register slot, | 
|  | +     so slide our view of the structure back four bytes.  */ | 
|  | +#if _MIPS_SIM == _ABIO32 && defined __MIPSEB__ | 
|  | +  reg_offset = 4; | 
|  | +#else | 
|  | +  reg_offset = 0; | 
|  | +#endif | 
|  | + | 
|  | +  for (i = 0; i < 32; i++) { | 
|  | +    fs->regs.reg[i].how = REG_SAVED_OFFSET; | 
|  | +    fs->regs.reg[i].loc.offset | 
|  | +      = (_Unwind_Ptr)&(sc->sc_regs[i]) + reg_offset - new_cfa; | 
|  | +  } | 
|  | +  /* "PC & -2" points to the faulting instruction, but the unwind code | 
|  | +     searches for "(ADDR & -2) - 1".  (See MASK_RETURN_ADDR for the source | 
|  | +     of the -2 mask.)  Adding 2 here ensures that "(ADDR & -2) - 1" is the | 
|  | +     address of the second byte of the faulting instruction. | 
|  | + | 
|  | +     Note that setting fs->signal_frame would not work.  As the comment | 
|  | +     above MASK_RETURN_ADDR explains, MIPS unwinders must search for an | 
|  | +     odd-valued address.  */ | 
|  | +  fs->regs.reg[DWARF_ALT_FRAME_RETURN_COLUMN].how = REG_SAVED_VAL_OFFSET; | 
|  | +  fs->regs.reg[DWARF_ALT_FRAME_RETURN_COLUMN].loc.offset | 
|  | +    = (_Unwind_Ptr)(sc->sc_pc) + 2 - new_cfa; | 
|  | +  fs->retaddr_column = DWARF_ALT_FRAME_RETURN_COLUMN; | 
|  | + | 
|  | +  return _URC_NO_REASON; | 
|  | +} | 
|  | +#endif | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/opcode-riscv.h gcc-4.9.2-riscv/gcc/config/riscv/opcode-riscv.h | 
|  | --- gcc-4.9.2/gcc/config/riscv/opcode-riscv.h	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/opcode-riscv.h	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,253 @@ | 
|  | +/* riscv.h.  RISC-V opcode list for GDB, the GNU debugger. | 
|  | +   Copyright 2011 | 
|  | +   Free Software Foundation, Inc. | 
|  | +   Contributed by Andrew Waterman | 
|  | + | 
|  | +This file is part of GDB, GAS, and the GNU binutils. | 
|  | + | 
|  | +GDB, GAS, and the GNU binutils are free software; you can redistribute | 
|  | +them and/or modify them under the terms of the GNU General Public | 
|  | +License as published by the Free Software Foundation; either version | 
|  | +1, or (at your option) any later version. | 
|  | + | 
|  | +GDB, GAS, and the GNU binutils are distributed in the hope that they | 
|  | +will be useful, but WITHOUT ANY WARRANTY; without even the implied | 
|  | +warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See | 
|  | +the GNU General Public License for more details. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License | 
|  | +along with this file; see the file COPYING.  If not, write to the Free | 
|  | +Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */ | 
|  | + | 
|  | +#ifndef _RISCV_H_ | 
|  | +#define _RISCV_H_ | 
|  | + | 
|  | +/* RVC fields */ | 
|  | + | 
|  | +#define OP_MASK_COP		0x1f | 
|  | +#define OP_SH_COP		0 | 
|  | +#define OP_MASK_CRD		0x1f | 
|  | +#define OP_SH_CRD		5 | 
|  | +#define OP_MASK_CRS2	0x1f | 
|  | +#define OP_SH_CRS2	5 | 
|  | +#define OP_MASK_CRS1	0x1f | 
|  | +#define OP_SH_CRS1	10 | 
|  | +#define OP_MASK_CRDS		0x7 | 
|  | +#define OP_SH_CRDS		13 | 
|  | +#define OP_MASK_CRS2S	0x7 | 
|  | +#define OP_SH_CRS2S	13 | 
|  | +#define OP_MASK_CRS2BS	0x7 | 
|  | +#define OP_SH_CRS2BS	5 | 
|  | +#define OP_MASK_CRS1S	0x7 | 
|  | +#define OP_SH_CRS1S	10 | 
|  | +#define OP_MASK_CIMM6	0x3f | 
|  | +#define OP_SH_CIMM6	10 | 
|  | +#define OP_MASK_CIMM5	0x1f | 
|  | +#define OP_SH_CIMM5	5 | 
|  | +#define OP_MASK_CIMM10	0x3ff | 
|  | +#define OP_SH_CIMM10	5 | 
|  | + | 
|  | +static const char rvc_rs1_regmap[8] = { 20, 21, 2, 3, 4, 5, 6, 7 }; | 
|  | +#define rvc_rd_regmap rvc_rs1_regmap | 
|  | +#define rvc_rs2b_regmap rvc_rs1_regmap | 
|  | +static const char rvc_rs2_regmap[8] = { 20, 21, 2, 3, 4, 5, 6, 0 }; | 
|  | + | 
|  | +#define RVC_JUMP_BITS 10 | 
|  | +#define RVC_JUMP_ALIGN_BITS 1 | 
|  | +#define RVC_JUMP_ALIGN (1 << RVC_JUMP_ALIGN_BITS) | 
|  | +#define RVC_JUMP_REACH ((1ULL<<RVC_JUMP_BITS)*RVC_JUMP_ALIGN) | 
|  | + | 
|  | +#define RVC_BRANCH_BITS 5 | 
|  | +#define RVC_BRANCH_ALIGN_BITS RVC_JUMP_ALIGN_BITS | 
|  | +#define RVC_BRANCH_ALIGN (1 << RVC_BRANCH_ALIGN_BITS) | 
|  | +#define RVC_BRANCH_REACH ((1ULL<<RVC_BRANCH_BITS)*RVC_BRANCH_ALIGN) | 
|  | + | 
|  | +#define RISCV_JTYPE(insn, target) \ | 
|  | +  ((MATCH_ ## insn) | (((target) & ((1<<RISCV_JUMP_BITS)-1)) << OP_SH_TARGET)) | 
|  | +#define RISCV_LTYPE(insn, rd, bigimm) \ | 
|  | +  ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | (((bigimm) & ((1<<RISCV_BIGIMM_BITS)-1)) << OP_SH_BIGIMMEDIATE)) | 
|  | +#define RISCV_ITYPE(insn, rd, rs1, imm) \ | 
|  | +  ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS) | (((imm) & (RISCV_IMM_REACH-1)) << OP_SH_IMMEDIATE)) | 
|  | +#define RISCV_RTYPE(insn, rd, rs1, rs2) \ | 
|  | +  ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS) | ((rs2) << OP_SH_RT)) | 
|  | + | 
|  | +#define RISCV_NOP RISCV_ITYPE(ADDI, 0, 0, 0) | 
|  | + | 
|  | +#define RISCV_JUMP_TARGET(address) ((address) >> RISCV_JUMP_ALIGN_BITS) | 
|  | +#define RISCV_CONST_HIGH_PART(VALUE) \ | 
|  | +  (((VALUE) + (RISCV_IMM_REACH/2)) & ~(RISCV_IMM_REACH-1)) | 
|  | +#define RISCV_CONST_LOW_PART(VALUE) ((VALUE) - RISCV_CONST_HIGH_PART (VALUE)) | 
|  | +#define RISCV_LUI_HIGH_PART(VALUE) (RISCV_CONST_HIGH_PART(VALUE) >> RISCV_IMM_BITS) | 
|  | + | 
|  | +/* RV fields */ | 
|  | + | 
|  | +#define OP_MASK_OP		0x7f | 
|  | +#define OP_SH_OP		0 | 
|  | +#define OP_MASK_RT		0x1f | 
|  | +#define OP_SH_RT		17 | 
|  | +#define OP_MASK_FT		0x1f | 
|  | +#define OP_SH_FT		17 | 
|  | +#define OP_MASK_RS		0x1f | 
|  | +#define OP_SH_RS		22 | 
|  | +#define OP_MASK_FS		0x1f | 
|  | +#define OP_SH_FS		22 | 
|  | +#define OP_MASK_FR		0x1f | 
|  | +#define OP_SH_FR		12 | 
|  | +#define OP_MASK_RD		0x1f | 
|  | +#define OP_SH_RD		27 | 
|  | +#define OP_MASK_FD		0x1f | 
|  | +#define OP_SH_FD		27 | 
|  | +#define OP_MASK_SHAMT		0x3f | 
|  | +#define OP_SH_SHAMT		10 | 
|  | +#define OP_MASK_SHAMTW		0x1f | 
|  | +#define OP_SH_SHAMTW	10 | 
|  | +#define OP_MASK_RM		0x7 | 
|  | +#define OP_SH_RM	9 | 
|  | + | 
|  | +static const char * const riscv_rm[8] = | 
|  | +  { "rne", "rtz", "rdn", "rup", "rmm", 0, 0, "dyn" }; | 
|  | + | 
|  | +#define OP_MASK_VRD		0x1f | 
|  | +#define OP_SH_VRD		27 | 
|  | +#define OP_MASK_VRS		0x1f | 
|  | +#define OP_SH_VRS		22 | 
|  | +#define OP_MASK_VRT		0x1f | 
|  | +#define OP_SH_VRT		17 | 
|  | +#define OP_MASK_VRR		0x1f | 
|  | +#define OP_SH_VRR		12 | 
|  | + | 
|  | +#define OP_MASK_VFD		0x1f | 
|  | +#define OP_SH_VFD		27 | 
|  | +#define OP_MASK_VFS		0x1f | 
|  | +#define OP_SH_VFS		22 | 
|  | +#define OP_MASK_VFT		0x1f | 
|  | +#define OP_SH_VFT		17 | 
|  | +#define OP_MASK_VFR		0x1f | 
|  | +#define OP_SH_VFR		12 | 
|  | + | 
|  | +#define OP_MASK_IMMNGPR         0x3f | 
|  | +#define OP_SH_IMMNGPR           10 | 
|  | +#define OP_MASK_IMMNFPR         0x3f | 
|  | +#define OP_SH_IMMNFPR           16 | 
|  | +#define OP_MASK_IMMSEGNELM      0x1f | 
|  | +#define OP_SH_IMMSEGNELM        17 | 
|  | +#define OP_MASK_IMMSEGSTNELM    0x1f | 
|  | +#define OP_SH_IMMSEGSTNELM      12 | 
|  | + | 
|  | +#define LINK_REG 1 | 
|  | + | 
|  | +#define RISCV_JUMP_BITS 25 | 
|  | +#define RISCV_JUMP_ALIGN_BITS 1 | 
|  | +#define RISCV_JUMP_ALIGN (1 << RISCV_JUMP_ALIGN_BITS) | 
|  | +#define RISCV_JUMP_REACH ((1ULL<<RISCV_JUMP_BITS)*RISCV_JUMP_ALIGN) | 
|  | + | 
|  | +#define OP_MASK_TARGET		((1<<RISCV_JUMP_BITS)-1) | 
|  | +#define OP_SH_TARGET		7 | 
|  | + | 
|  | +#define RISCV_IMM_BITS 12 | 
|  | +#define RISCV_IMMLO_BITS 7 | 
|  | +#define RISCV_IMMHI_BITS (RISCV_IMM_BITS - RISCV_IMMLO_BITS) | 
|  | +#define RISCV_BIGIMM_BITS (32-RISCV_IMM_BITS) | 
|  | +#define RISCV_IMM_REACH (1LL<<RISCV_IMM_BITS) | 
|  | +#define RISCV_BIGIMM_REACH (1LL<<RISCV_BIGIMM_BITS) | 
|  | +#define RISCV_BRANCH_BITS RISCV_IMM_BITS | 
|  | +#define RISCV_BRANCH_ALIGN_BITS RISCV_JUMP_ALIGN_BITS | 
|  | +#define RISCV_BRANCH_ALIGN (1 << RISCV_BRANCH_ALIGN_BITS) | 
|  | +#define RISCV_BRANCH_REACH (RISCV_IMM_REACH*RISCV_BRANCH_ALIGN) | 
|  | + | 
|  | +#define OP_MASK_BIGIMMEDIATE	((1<<RISCV_BIGIMM_BITS)-1) | 
|  | +#define OP_SH_BIGIMMEDIATE		7 | 
|  | +#define OP_MASK_IMMEDIATE	((1<<RISCV_IMM_BITS)-1) | 
|  | +#define OP_SH_IMMEDIATE		10 | 
|  | +#define OP_MASK_IMMLO ((1<<RISCV_IMMLO_BITS)-1) | 
|  | +#define OP_SH_IMMLO   10 | 
|  | +#define OP_MASK_IMMHI ((1<<(RISCV_IMM_BITS-RISCV_IMMLO_BITS))-1) | 
|  | +#define OP_SH_IMMHI   27 | 
|  | + | 
|  | +#include "riscv-opc.h" | 
|  | + | 
|  | +/* This structure holds information for a particular instruction.  */ | 
|  | + | 
|  | +struct riscv_opcode | 
|  | +{ | 
|  | +  /* The name of the instruction.  */ | 
|  | +  const char *name; | 
|  | +  /* A string describing the arguments for this instruction.  */ | 
|  | +  const char *args; | 
|  | +  /* The basic opcode for the instruction.  When assembling, this | 
|  | +     opcode is modified by the arguments to produce the actual opcode | 
|  | +     that is used.  If pinfo is INSN_MACRO, then this is 0.  */ | 
|  | +  unsigned long match; | 
|  | +  /* If pinfo is not INSN_MACRO, then this is a bit mask for the | 
|  | +     relevant portions of the opcode when disassembling.  If the | 
|  | +     actual opcode anded with the match field equals the opcode field, | 
|  | +     then we have found the correct instruction.  If pinfo is | 
|  | +     INSN_MACRO, then this field is the macro identifier.  */ | 
|  | +  unsigned long mask; | 
|  | +  /* For a macro, this is INSN_MACRO.  Otherwise, it is a collection | 
|  | +     of bits describing the instruction, notably any relevant hazard | 
|  | +     information.  */ | 
|  | +  unsigned long pinfo; | 
|  | +}; | 
|  | + | 
|  | +#define INSN_WRITE_GPR_D            0x00000001 | 
|  | +#define INSN_WRITE_GPR_RA           0x00000004 | 
|  | +#define INSN_WRITE_FPR_D            0x00000008 | 
|  | +#define INSN_READ_GPR_S             0x00000040 | 
|  | +#define INSN_READ_GPR_T             0x00000080 | 
|  | +#define INSN_READ_FPR_S             0x00000100 | 
|  | +#define INSN_READ_FPR_T             0x00000200 | 
|  | +#define INSN_READ_FPR_R        	    0x00000400 | 
|  | +/* Instruction is a simple alias (I.E. "move" for daddu/addu/or) */ | 
|  | +#define	INSN_ALIAS		    0x00001000 | 
|  | +/* Instruction is actually a macro.  It should be ignored by the | 
|  | +   disassembler, and requires special treatment by the assembler.  */ | 
|  | +#define INSN_MACRO                  0xffffffff | 
|  | + | 
|  | +/* These are the bits which may be set in the pinfo2 field of an | 
|  | +   instruction. */ | 
|  | + | 
|  | +/* MIPS ISA defines, use instead of hardcoding ISA level.  */ | 
|  | + | 
|  | +#define       ISA_UNKNOWN     0               /* Gas internal use.  */ | 
|  | +#define       ISA_RV32        1 | 
|  | +#define       ISA_RV64        2 | 
|  | + | 
|  | +#define CPU_UNKNOWN    0 | 
|  | +#define CPU_ROCKET32 132 | 
|  | +#define CPU_ROCKET64 164 | 
|  | + | 
|  | +/* This is a list of macro expanded instructions. | 
|  | + | 
|  | +   _I appended means immediate | 
|  | +   _A appended means address | 
|  | +   _AB appended means address with base register | 
|  | +   _D appended means 64 bit floating point constant | 
|  | +   _S appended means 32 bit floating point constant.  */ | 
|  | + | 
|  | +enum | 
|  | +{ | 
|  | +  M_LA_AB, | 
|  | +  M_J, | 
|  | +  M_LI, | 
|  | +  M_NUM_MACROS | 
|  | +}; | 
|  | + | 
|  | + | 
|  | +/* The order of overloaded instructions matters.  Label arguments and | 
|  | +   register arguments look the same. Instructions that can have either | 
|  | +   for arguments must appear in the correct order in this table for the | 
|  | +   assembler to pick the right one. In other words, entries with | 
|  | +   immediate operands must appear after the same instruction with | 
|  | +   registers. | 
|  | + | 
|  | +   Many instructions are shorthand for other instructions (i.e., the | 
|  | +   jal <register> instruction is short for jalr <register>).  */ | 
|  | + | 
|  | +extern const struct riscv_opcode riscv_builtin_opcodes[]; | 
|  | +extern const int bfd_riscv_num_builtin_opcodes; | 
|  | +extern struct riscv_opcode *riscv_opcodes; | 
|  | +extern int bfd_riscv_num_opcodes; | 
|  | +#define NUMOPCODES bfd_riscv_num_opcodes | 
|  | + | 
|  | +#endif /* _RISCV_H_ */ | 
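|  |  | 
|  | The RISCV_CONST_HIGH_PART and RISCV_CONST_LOW_PART macros defined earlier in this header implement the usual LUI/ADDI split of a constant.  As a rough stand-alone C sketch (illustration only, not part of the patch), the rounding by RISCV_IMM_REACH/2 is what keeps the low part inside the signed 12-bit immediate range: | 
|  |  | 
|  | #include <stdio.h> | 
|  | #include <stdint.h> | 
|  |  | 
|  | #define IMM_REACH (1LL << 12)   /* RISCV_IMM_REACH, per the definitions above */ | 
|  | #define CONST_HIGH_PART(v) (((v) + (IMM_REACH / 2)) & ~(IMM_REACH - 1)) | 
|  | #define CONST_LOW_PART(v)  ((v) - CONST_HIGH_PART (v)) | 
|  |  | 
|  | int main (void) | 
|  | { | 
|  |   int64_t v = 0x12345FFF; | 
|  |   /* high = 0x12346000 and low = -1, so the constant would be materialized | 
|  |      as "lui rd, 0x12346" followed by "addi rd, rd, -1".  */ | 
|  |   printf ("high=0x%llx low=%lld\n", | 
|  |           (unsigned long long) CONST_HIGH_PART (v), | 
|  |           (long long) CONST_LOW_PART (v)); | 
|  |   return 0; | 
|  | } | 
|  |  | 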
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/predicates.md gcc-4.9.2-riscv/gcc/config/riscv/predicates.md | 
|  | --- gcc-4.9.2/gcc/config/riscv/predicates.md	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/predicates.md	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,191 @@ | 
|  | +;; Predicate definitions for MIPS. | 
|  | +;; Copyright (C) 2004, 2007, 2008 Free Software Foundation, Inc. | 
|  | +;; | 
|  | +;; This file is part of GCC. | 
|  | +;; | 
|  | +;; GCC is free software; you can redistribute it and/or modify | 
|  | +;; it under the terms of the GNU General Public License as published by | 
|  | +;; the Free Software Foundation; either version 3, or (at your option) | 
|  | +;; any later version. | 
|  | +;; | 
|  | +;; GCC is distributed in the hope that it will be useful, | 
|  | +;; but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +;; GNU General Public License for more details. | 
|  | +;; | 
|  | +;; You should have received a copy of the GNU General Public License | 
|  | +;; along with GCC; see the file COPYING3.  If not see | 
|  | +;; <http://www.gnu.org/licenses/>. | 
|  | + | 
|  | +(define_predicate "const_arith_operand" | 
|  | +  (and (match_code "const_int") | 
|  | +       (match_test "SMALL_OPERAND (INTVAL (op))"))) | 
|  | + | 
|  | +(define_predicate "arith_operand" | 
|  | +  (ior (match_operand 0 "const_arith_operand") | 
|  | +       (match_operand 0 "register_operand"))) | 
|  | + | 
|  | +(define_predicate "sle_operand" | 
|  | +  (and (match_code "const_int") | 
|  | +       (match_test "SMALL_OPERAND (INTVAL (op) + 1)"))) | 
|  | + | 
|  | +(define_predicate "sleu_operand" | 
|  | +  (and (match_operand 0 "sle_operand") | 
|  | +       (match_test "INTVAL (op) + 1 != 0"))) | 
|  | + | 
|  | +(define_predicate "const_0_operand" | 
|  | +  (and (match_code "const_int,const_double,const_vector") | 
|  | +       (match_test "op == CONST0_RTX (GET_MODE (op))"))) | 
|  | + | 
|  | +(define_predicate "reg_or_0_operand" | 
|  | +  (ior (match_operand 0 "const_0_operand") | 
|  | +       (match_operand 0 "register_operand"))) | 
|  | + | 
|  | +(define_predicate "const_1_operand" | 
|  | +  (and (match_code "const_int,const_double,const_vector") | 
|  | +       (match_test "op == CONST1_RTX (GET_MODE (op))"))) | 
|  | + | 
|  | +(define_predicate "reg_or_1_operand" | 
|  | +  (ior (match_operand 0 "const_1_operand") | 
|  | +       (match_operand 0 "register_operand"))) | 
|  | + | 
|  | +;; This is used for indexing into vectors, and hence only accepts const_int. | 
|  | +(define_predicate "const_0_or_1_operand" | 
|  | +  (and (match_code "const_int") | 
|  | +       (ior (match_test "op == CONST0_RTX (GET_MODE (op))") | 
|  | +	    (match_test "op == CONST1_RTX (GET_MODE (op))")))) | 
|  | + | 
|  | +(define_special_predicate "pc_or_label_operand" | 
|  | +  (match_code "pc,label_ref")) | 
|  | + | 
|  | +(define_predicate "const_call_insn_operand" | 
|  | +  (match_code "const,symbol_ref,label_ref") | 
|  | +{ | 
|  | +  enum mips_symbol_type symbol_type; | 
|  | + | 
|  | +  if (!mips_symbolic_constant_p (op, &symbol_type)) | 
|  | +    return false; | 
|  | + | 
|  | +  if (symbol_type == SYMBOL_ABSOLUTE) | 
|  | +    { | 
|  | +      if (GET_CODE (op) == SYMBOL_REF) | 
|  | +	{ | 
|  | +	  if (flag_pic && !riscv_symbol_binds_local_p (op)) | 
|  | +	    return false; | 
|  | +	  if (SYMBOL_REF_LONG_CALL_P (op)) | 
|  | +	    return false; | 
|  | +	} | 
|  | +      return true; | 
|  | +    } | 
|  | + | 
|  | +  return false; | 
|  | +}) | 
|  | + | 
|  | +(define_predicate "call_insn_operand" | 
|  | +  (ior (match_operand 0 "const_call_insn_operand") | 
|  | +       (match_operand 0 "register_operand"))) | 
|  | + | 
|  | +;; A legitimate CONST_INT operand that takes more than one instruction | 
|  | +;; to load. | 
|  | +(define_predicate "splittable_const_int_operand" | 
|  | +  (match_code "const_int") | 
|  | +{ | 
|  | +  /* Don't handle multi-word moves this way; we don't want to introduce | 
|  | +     the individual word-mode moves until after reload.  */ | 
|  | +  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD) | 
|  | +    return false; | 
|  | + | 
|  | +  /* Otherwise check whether the constant can be loaded in a single | 
|  | +     instruction.  */ | 
|  | +  return !LUI_INT (op) && !SMALL_INT (op); | 
|  | +}) | 
|  | + | 
|  | +(define_predicate "move_operand" | 
|  | +  (match_operand 0 "general_operand") | 
|  | +{ | 
|  | +  enum mips_symbol_type symbol_type; | 
|  | + | 
|  | +  /* The thinking here is as follows: | 
|  | + | 
|  | +     (1) The move expanders should split complex load sequences into | 
|  | +	 individual instructions.  Those individual instructions can | 
|  | +	 then be optimized by all rtl passes. | 
|  | + | 
|  | +     (2) The target of pre-reload load sequences should not be used | 
|  | +	 to store temporary results.  If the target register is only | 
|  | +	 assigned one value, reload can rematerialize that value | 
|  | +	 on demand, rather than spill it to the stack. | 
|  | + | 
|  | +     (3) If we allowed pre-reload passes like combine and cse to recreate | 
|  | +	 complex load sequences, we would want to be able to split the | 
|  | +	 sequences before reload as well, so that the pre-reload scheduler | 
|  | +	 can see the individual instructions.  This falls foul of (2); | 
|  | +	 the splitter would be forced to reuse the target register for | 
|  | +	 intermediate results. | 
|  | + | 
|  | +     (4) We want to define complex load splitters for combine.  These | 
|  | +	 splitters can request a temporary scratch register, which avoids | 
|  | +	 the problem in (2).  They allow things like: | 
|  | + | 
|  | +	      (set (reg T1) (high SYM)) | 
|  | +	      (set (reg T2) (low (reg T1) SYM)) | 
|  | +	      (set (reg X) (plus (reg T2) (const_int OFFSET))) | 
|  | + | 
|  | +	 to be combined into: | 
|  | + | 
|  | +	      (set (reg T3) (high SYM+OFFSET)) | 
|  | +	      (set (reg X) (lo_sum (reg T3) SYM+OFFSET)) | 
|  | + | 
|  | +	 if T2 is only used this once.  */ | 
|  | +  switch (GET_CODE (op)) | 
|  | +    { | 
|  | +    case CONST_INT: | 
|  | +      return !splittable_const_int_operand (op, mode); | 
|  | + | 
|  | +    case CONST: | 
|  | +    case SYMBOL_REF: | 
|  | +    case LABEL_REF: | 
|  | +      return (mips_symbolic_constant_p (op, &symbol_type) | 
|  | +	      && !mips_split_p[symbol_type]); | 
|  | + | 
|  | +    case HIGH: | 
|  | +      op = XEXP (op, 0); | 
|  | +      return mips_symbolic_constant_p (op, &symbol_type); | 
|  | + | 
|  | +    default: | 
|  | +      return true; | 
|  | +    } | 
|  | +}) | 
|  | + | 
|  | +(define_predicate "consttable_operand" | 
|  | +  (match_test "CONSTANT_P (op)")) | 
|  | + | 
|  | +(define_predicate "symbolic_operand" | 
|  | +  (match_code "const,symbol_ref,label_ref") | 
|  | +{ | 
|  | +  enum mips_symbol_type type; | 
|  | +  return mips_symbolic_constant_p (op, &type); | 
|  | +}) | 
|  | + | 
|  | +(define_predicate "absolute_symbolic_operand" | 
|  | +  (match_code "const,symbol_ref,label_ref") | 
|  | +{ | 
|  | +  enum mips_symbol_type type; | 
|  | +  return (mips_symbolic_constant_p (op, &type) | 
|  | +	  && type == SYMBOL_ABSOLUTE); | 
|  | +}) | 
|  | + | 
|  | +(define_predicate "symbol_ref_operand" | 
|  | +  (match_code "symbol_ref")) | 
|  | + | 
|  | +(define_predicate "equality_operator" | 
|  | +  (match_code "eq,ne")) | 
|  | + | 
|  | +(define_predicate "order_operator" | 
|  | +  (match_code "eq,ne,lt,ltu,le,leu,ge,geu,gt,gtu")) | 
|  | + | 
|  | +(define_predicate "fp_order_operator" | 
|  | +  (match_code "eq,lt,le,gt,ge")) | 
|  | + | 
|  | +(define_predicate "fp_unorder_operator" | 
|  | +  (match_code "ordered,unordered")) | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/riscv.c gcc-4.9.2-riscv/gcc/config/riscv/riscv.c | 
|  | --- gcc-4.9.2/gcc/config/riscv/riscv.c	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/riscv.c	2014-12-02 18:04:50.111949590 -0800 | 
|  | @@ -0,0 +1,5958 @@ | 
|  | +/* Subroutines used for MIPS code generation. | 
|  | +   Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998, | 
|  | +   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, | 
|  | +   2011 | 
|  | +   Free Software Foundation, Inc. | 
|  | +   Contributed by A. Lichnewsky, lich@inria.inria.fr. | 
|  | +   Changes by Michael Meissner, meissner@osf.org. | 
|  | +   64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and | 
|  | +   Brendan Eich, brendan@microunity.com. | 
|  | + | 
|  | +This file is part of GCC. | 
|  | + | 
|  | +GCC is free software; you can redistribute it and/or modify | 
|  | +it under the terms of the GNU General Public License as published by | 
|  | +the Free Software Foundation; either version 3, or (at your option) | 
|  | +any later version. | 
|  | + | 
|  | +GCC is distributed in the hope that it will be useful, | 
|  | +but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +GNU General Public License for more details. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License | 
|  | +along with GCC; see the file COPYING3.  If not see | 
|  | +<http://www.gnu.org/licenses/>.  */ | 
|  | + | 
|  | +#include "config.h" | 
|  | +#include "system.h" | 
|  | +#include "coretypes.h" | 
|  | +#include "tm.h" | 
|  | +#include "rtl.h" | 
|  | +#include "regs.h" | 
|  | +#include "hard-reg-set.h" | 
|  | +#include "insn-config.h" | 
|  | +#include "conditions.h" | 
|  | +#include "insn-attr.h" | 
|  | +#include "recog.h" | 
|  | +#include "output.h" | 
|  | +#include "tree.h" | 
|  | +#include "function.h" | 
|  | +#include "expr.h" | 
|  | +#include "optabs.h" | 
|  | +#include "libfuncs.h" | 
|  | +#include "flags.h" | 
|  | +#include "reload.h" | 
|  | +#include "tm_p.h" | 
|  | +#include "ggc.h" | 
|  | +#include "gstab.h" | 
|  | +#include "hashtab.h" | 
|  | +#include "debug.h" | 
|  | +#include "target.h" | 
|  | +#include "target-def.h" | 
|  | +#include "integrate.h" | 
|  | +#include "langhooks.h" | 
|  | +#include "cfglayout.h" | 
|  | +#include "sched-int.h" | 
|  | +#include "gimple.h" | 
|  | +#include "bitmap.h" | 
|  | +#include "diagnostic.h" | 
|  | +#include "target-globals.h" | 
|  | +#include <stdint.h> | 
|  | + | 
|  | +/*----------------------------------------------------------------------*/ | 
|  | +/* RISCV_SYSCFG_VLEN_MAX                                                */ | 
|  | +/*----------------------------------------------------------------------*/ | 
|  | +/* Eventually we want to include syscfg.h here so that we can use the | 
|  | +   common definition of RISCV_SYSCFG_VLEN_MAX, but for now it is not | 
|  | +   clear how to do this. syscfg.h is in libgloss, which is not used when | 
|  | +   building the actual cross-compiler. We kind of want to use the | 
|  | +   "version" in sims - the one for native programs instead of RISC-V | 
|  | +   programs. Even if we could include syscfg.h though, we would still | 
|  | +   need to figure out a way to include it in the mips-riscv.md since the | 
|  | +   machine description file also refers to these modes. */ | 
|  | + | 
|  | +#define RISCV_SYSCFG_VLEN_MAX 32 | 
|  | + | 
|  | +/*----------------------------------------------------------------------*/ | 
|  | +/* MIPS_RISCV_VECTOR_MODE_NAME                                          */ | 
|  | +/*----------------------------------------------------------------------*/ | 
|  | +/* This is a helper macro which creates a RISC-V vector mode name from | 
|  | +   the given inner_mode. It does this by concatenating a 'V' prefix, the | 
|  | +   maximum RISC-V vector length, and the inner mode together. For | 
|  | +   example, MIPS_RISCV_VECTOR_MODE_NAME(SI) should expand to V32SI if | 
|  | +   the RISC-V maximum vector length is 32. We need to use the nested | 
|  | +   macros to make sure RISCV_SYSCFG_VLEN_MAX is expanded _before_ | 
|  | +   concatenation. */ | 
|  | + | 
|  | +#define MIPS_RISCV_VECTOR_MODE_NAME_H2( res_ ) res_ | 
|  | + | 
|  | +#define MIPS_RISCV_VECTOR_MODE_NAME_H1( arg0_, arg1_ ) \ | 
|  | +  MIPS_RISCV_VECTOR_MODE_NAME_H2( V ## arg0_ ## arg1_ ## mode ) | 
|  | + | 
|  | +#define MIPS_RISCV_VECTOR_MODE_NAME_H0( arg0_, arg1_ ) \ | 
|  | +  MIPS_RISCV_VECTOR_MODE_NAME_H1( arg0_, arg1_ ) | 
|  | + | 
|  | +#define MIPS_RISCV_VECTOR_MODE_NAME( inner_mode_ ) \ | 
|  | +  MIPS_RISCV_VECTOR_MODE_NAME_H0( RISCV_SYSCFG_VLEN_MAX, inner_mode_ ) | 
|  | + | 
|  | +/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF.  */ | 
|  | +#define UNSPEC_ADDRESS_P(X)					\ | 
|  | +  (GET_CODE (X) == UNSPEC					\ | 
|  | +   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\ | 
|  | +   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES) | 
|  | + | 
|  | +/* Extract the symbol or label from UNSPEC wrapper X.  */ | 
|  | +#define UNSPEC_ADDRESS(X) \ | 
|  | +  XVECEXP (X, 0, 0) | 
|  | + | 
|  | +/* Extract the symbol type from UNSPEC wrapper X.  */ | 
|  | +#define UNSPEC_ADDRESS_TYPE(X) \ | 
|  | +  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST)) | 
|  | + | 
|  | +/* The maximum distance between the top of the stack frame and the | 
|  | +   value $sp has when we save and restore registers. | 
|  | + | 
|  | +   The value for normal-mode code must be a SMALL_OPERAND and must | 
|  | +   preserve the maximum stack alignment.  We therefore use a value | 
|  | +   of 0x7ff0 in this case. | 
|  | + | 
|  | +   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by | 
|  | +   up to 0x7f8 bytes and can usually save or restore all the registers | 
|  | +   that we need to save or restore.  (Note that we can only use these | 
|  | +   instructions for o32, for which the stack alignment is 8 bytes.) | 
|  | + | 
|  | +   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and | 
|  | +   RESTORE are not available.  We can then use unextended instructions | 
|  | +   to save and restore registers, and to allocate and deallocate the top | 
|  | +   part of the frame.  */ | 
|  | +#define MIPS_MAX_FIRST_STACK_STEP (RISCV_IMM_REACH/2 - 16) | 
|  | + | 
|  | +/* True if INSN is a mips.md pattern or asm statement.  */ | 
|  | +#define USEFUL_INSN_P(INSN)						\ | 
|  | +  (NONDEBUG_INSN_P (INSN)						\ | 
|  | +   && GET_CODE (PATTERN (INSN)) != USE					\ | 
|  | +   && GET_CODE (PATTERN (INSN)) != CLOBBER				\ | 
|  | +   && GET_CODE (PATTERN (INSN)) != ADDR_VEC				\ | 
|  | +   && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC) | 
|  | + | 
|  | +/* True if bit BIT is set in VALUE.  */ | 
|  | +#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0) | 
|  | + | 
|  | +/* Classifies an address. | 
|  | + | 
|  | +   ADDRESS_REG | 
|  | +       A natural register + offset address.  The register satisfies | 
|  | +       mips_valid_base_register_p and the offset is a const_arith_operand. | 
|  | + | 
|  | +   ADDRESS_LO_SUM | 
|  | +       A LO_SUM rtx.  The first operand is a valid base register and | 
|  | +       the second operand is a symbolic address. | 
|  | + | 
|  | +   ADDRESS_CONST_INT | 
|  | +       A signed 16-bit constant address. | 
|  | + | 
|  | +   ADDRESS_SYMBOLIC: | 
|  | +       A constant symbolic address.  */ | 
|  | +enum mips_address_type { | 
|  | +  ADDRESS_REG, | 
|  | +  ADDRESS_LO_SUM, | 
|  | +  ADDRESS_CONST_INT, | 
|  | +  ADDRESS_SYMBOLIC | 
|  | +}; | 
|  | + | 
|  | +/* Macros to create an enumeration identifier for a function prototype.  */ | 
|  | +#define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B | 
|  | +#define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C | 
|  | +#define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D | 
|  | +#define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E | 
|  | + | 
|  | +/* Classifies the prototype of a built-in function.  */ | 
|  | +enum mips_function_type { | 
|  | +#define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST, | 
|  | +#include "config/riscv/riscv-ftypes.def" | 
|  | +#undef DEF_MIPS_FTYPE | 
|  | +  MIPS_MAX_FTYPE_MAX | 
|  | +}; | 
|  | + | 
|  | +/* Specifies how a built-in function should be converted into rtl.  */ | 
|  | +enum mips_builtin_type { | 
|  | +  /* The function corresponds directly to an .md pattern.  The return | 
|  | +     value is mapped to operand 0 and the arguments are mapped to | 
|  | +     operands 1 and above.  */ | 
|  | +  MIPS_BUILTIN_DIRECT, | 
|  | + | 
|  | +  /* The function corresponds directly to an .md pattern.  There is no return | 
|  | +     value and the arguments are mapped to operands 0 and above.  */ | 
|  | +  MIPS_BUILTIN_DIRECT_NO_TARGET | 
|  | +}; | 
|  | + | 
|  | +/* Information about a function's frame layout.  */ | 
|  | +struct GTY(())  mips_frame_info { | 
|  | +  /* The size of the frame in bytes.  */ | 
|  | +  HOST_WIDE_INT total_size; | 
|  | + | 
|  | +  /* Bit X is set if the function saves or restores GPR X.  */ | 
|  | +  unsigned int mask; | 
|  | + | 
|  | +  /* Likewise FPR X.  */ | 
|  | +  unsigned int fmask; | 
|  | + | 
|  | +  /* Offsets of fixed-point and floating-point save areas from frame bottom */ | 
|  | +  HOST_WIDE_INT gp_sp_offset; | 
|  | +  HOST_WIDE_INT fp_sp_offset; | 
|  | + | 
|  | +  /* Offset of virtual frame pointer from stack pointer/frame bottom */ | 
|  | +  HOST_WIDE_INT frame_pointer_offset; | 
|  | + | 
|  | +  /* Offset of hard frame pointer from stack pointer/frame bottom */ | 
|  | +  HOST_WIDE_INT hard_frame_pointer_offset; | 
|  | + | 
|  | +  /* The offset of arg_pointer_rtx from the bottom of the frame.  */ | 
|  | +  HOST_WIDE_INT arg_pointer_offset; | 
|  | +}; | 
|  | + | 
|  | +struct GTY(())  machine_function { | 
|  | +  /* The number of extra stack bytes taken up by register varargs. | 
|  | +     This area is allocated by the callee at the very top of the frame.  */ | 
|  | +  int varargs_size; | 
|  | + | 
|  | +  /* The current frame information, calculated by mips_compute_frame_info.  */ | 
|  | +  struct mips_frame_info frame; | 
|  | +}; | 
|  | + | 
|  | +/* Information about a single argument.  */ | 
|  | +struct mips_arg_info { | 
|  | +  /* True if the argument is passed in a floating-point register, or | 
|  | +     would have been if we hadn't run out of registers.  */ | 
|  | +  bool fpr_p; | 
|  | + | 
|  | +  /* The number of words passed in registers, rounded up.  */ | 
|  | +  unsigned int reg_words; | 
|  | + | 
|  | +  /* For EABI, the offset of the first register from GP_ARG_FIRST or | 
|  | +     FP_ARG_FIRST.  For other ABIs, the offset of the first register from | 
|  | +     the start of the ABI's argument structure (see the CUMULATIVE_ARGS | 
|  | +     comment for details). | 
|  | + | 
|  | +     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely | 
|  | +     on the stack.  */ | 
|  | +  unsigned int reg_offset; | 
|  | + | 
|  | +  /* The number of words that must be passed on the stack, rounded up.  */ | 
|  | +  unsigned int stack_words; | 
|  | + | 
|  | +  /* The offset from the start of the stack overflow area of the argument's | 
|  | +     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */ | 
|  | +  unsigned int stack_offset; | 
|  | +}; | 
|  | + | 
|  | +/* Information about an address described by mips_address_type. | 
|  | + | 
|  | +   ADDRESS_CONST_INT | 
|  | +       No fields are used. | 
|  | + | 
|  | +   ADDRESS_REG | 
|  | +       REG is the base register and OFFSET is the constant offset. | 
|  | + | 
|  | +   ADDRESS_LO_SUM | 
|  | +       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE | 
|  | +       is the type of symbol it references. | 
|  | + | 
|  | +   ADDRESS_SYMBOLIC | 
|  | +       SYMBOL_TYPE is the type of symbol that the address references.  */ | 
|  | +struct mips_address_info { | 
|  | +  enum mips_address_type type; | 
|  | +  rtx reg; | 
|  | +  rtx offset; | 
|  | +  enum mips_symbol_type symbol_type; | 
|  | +}; | 
|  | + | 
|  | +/* One stage in a constant building sequence.  These sequences have | 
|  | +   the form: | 
|  | + | 
|  | +	A = VALUE[0] | 
|  | +	A = A CODE[1] VALUE[1] | 
|  | +	A = A CODE[2] VALUE[2] | 
|  | +	... | 
|  | + | 
|  | +   where A is an accumulator, each CODE[i] is a binary rtl operation | 
|  | +   and each VALUE[i] is a constant integer.  CODE[0] is undefined.  */ | 
|  | +struct mips_integer_op { | 
|  | +  enum rtx_code code; | 
|  | +  unsigned HOST_WIDE_INT value; | 
|  | +}; | 
|  | + | 
|  | +/* The largest number of operations needed by riscv_build_integer to | 
|  | +   load an integer constant; the bound is deliberately generous.  */ | 
|  | +#define MIPS_MAX_INTEGER_OPS 32 | 
|  | + | 
|  | +/* Costs of various operations on the different architectures.  */ | 
|  | + | 
|  | +struct mips_rtx_cost_data | 
|  | +{ | 
|  | +  unsigned short fp_add; | 
|  | +  unsigned short fp_mult_sf; | 
|  | +  unsigned short fp_mult_df; | 
|  | +  unsigned short fp_div_sf; | 
|  | +  unsigned short fp_div_df; | 
|  | +  unsigned short int_mult_si; | 
|  | +  unsigned short int_mult_di; | 
|  | +  unsigned short int_div_si; | 
|  | +  unsigned short int_div_di; | 
|  | +  unsigned short branch_cost; | 
|  | +  unsigned short memory_latency; | 
|  | +}; | 
|  | + | 
|  | +/* Global variables for machine-dependent things.  */ | 
|  | + | 
|  | +/* The number of file directives written by mips_output_filename.  */ | 
|  | +int num_source_filenames; | 
|  | + | 
|  | +/* The name that appeared in the last .file directive written by | 
|  | +   mips_output_filename, or "" if mips_output_filename hasn't | 
|  | +   written anything yet.  */ | 
|  | +const char *current_function_file = ""; | 
|  | + | 
|  | +/* Arrays that map GCC register numbers to debugger register numbers.  */ | 
|  | +int mips_dbx_regno[FIRST_PSEUDO_REGISTER]; | 
|  | +int mips_dwarf_regno[FIRST_PSEUDO_REGISTER]; | 
|  | + | 
|  | +/* The processor that we should tune the code for.  */ | 
|  | +enum processor mips_tune; | 
|  | + | 
|  | +/* Which cost information to use.  */ | 
|  | +static const struct mips_rtx_cost_data *mips_cost; | 
|  | + | 
|  | +/* Index [M][R] is true if register R is allowed to hold a value of mode M.  */ | 
|  | +bool mips_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER]; | 
|  | + | 
|  | +static GTY (()) int mips_output_filename_first_time = 1; | 
|  | + | 
|  | +/* mips_split_p[X] is true if symbols of type X can be split by | 
|  | +   mips_split_symbol.  */ | 
|  | +bool mips_split_p[NUM_SYMBOL_TYPES]; | 
|  | + | 
|  | +/* mips_lo_relocs[X] is the relocation to use when a symbol of type X | 
|  | +   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or | 
|  | +   if they are matched by a special .md file pattern.  */ | 
|  | +static const char *mips_lo_relocs[NUM_SYMBOL_TYPES]; | 
|  | + | 
|  | +/* Likewise for HIGHs.  */ | 
|  | +static const char *mips_hi_relocs[NUM_SYMBOL_TYPES]; | 
|  | + | 
|  | +/* Target state for MIPS16.  */ | 
|  | +struct target_globals *mips16_globals; | 
|  | + | 
|  | +/* Index R is the smallest register class that contains register R.  */ | 
|  | +const enum reg_class mips_regno_to_class[FIRST_PSEUDO_REGISTER] = { | 
|  | +  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS, | 
|  | +  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS, | 
|  | +  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS, | 
|  | +  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS, | 
|  | +  GR_REGS,	V1_REG, 	GR_REGS,	GR_REGS, | 
|  | +  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS, | 
|  | +  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS, | 
|  | +  GR_REGS,	GR_REGS,	GR_REGS,	GR_REGS, | 
|  | +  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS, | 
|  | +  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS, | 
|  | +  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS, | 
|  | +  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS, | 
|  | +  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS, | 
|  | +  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS, | 
|  | +  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS, | 
|  | +  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS, | 
|  | +  VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS, | 
|  | +  VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS, | 
|  | +  VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS, | 
|  | +  VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS, | 
|  | +  VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS, | 
|  | +  VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS, | 
|  | +  VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS, | 
|  | +  VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS,	VEC_GR_REGS, | 
|  | +  VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS, | 
|  | +  VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS, | 
|  | +  VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS, | 
|  | +  VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS, | 
|  | +  VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS, | 
|  | +  VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS, | 
|  | +  VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS, | 
|  | +  VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS,	VEC_FP_REGS, | 
|  | +  FRAME_REGS,	FRAME_REGS,	NO_REGS,	NO_REGS, | 
|  | +}; | 
|  | + | 
|  | +/* The value of TARGET_ATTRIBUTE_TABLE.  */ | 
|  | +static const struct attribute_spec mips_attribute_table[] = { | 
|  | +  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ | 
|  | +  { "long_call",   0, 0, false, true,  true,  NULL }, | 
|  | +  { "far",     	   0, 0, false, true,  true,  NULL }, | 
|  | +  { "near",        0, 0, false, true,  true,  NULL }, | 
|  | +  { "utfunc",      0, 0, true,  false, false, NULL }, | 
|  | +  { NULL,	   0, 0, false, false, false, NULL } | 
|  | +}; | 
|  | + | 
|  | +/* A table describing all the processors GCC knows about.  Names are | 
|  | +   matched in the order listed.  */ | 
|  | +static const struct mips_cpu_info mips_cpu_info_table[] = { | 
|  | +  /* Entries for generic ISAs.  */ | 
|  | +  { "rocket", PROCESSOR_ROCKET, 0 }, | 
|  | +}; | 
|  | + | 
|  | +/* Default costs.  If these are used for a processor we should look | 
|  | +   up the actual costs.  */ | 
|  | +#define DEFAULT_COSTS COSTS_N_INSNS (8),  /* fp_add */       \ | 
|  | +                      COSTS_N_INSNS (8),  /* fp_mult_sf */   \ | 
|  | +                      COSTS_N_INSNS (8),  /* fp_mult_df */   \ | 
|  | +                      COSTS_N_INSNS (20), /* fp_div_sf */    \ | 
|  | +                      COSTS_N_INSNS (20), /* fp_div_df */    \ | 
|  | +                      COSTS_N_INSNS (10), /* int_mult_si */  \ | 
|  | +                      COSTS_N_INSNS (10), /* int_mult_di */  \ | 
|  | +                      COSTS_N_INSNS (69), /* int_div_si */   \ | 
|  | +                      COSTS_N_INSNS (69), /* int_div_di */   \ | 
|  | +                                       2, /* branch_cost */  \ | 
|  | +                                       7  /* memory_latency */ | 
|  | + | 
|  | +/* Floating-point costs for processors without an FPU.  Just assume that | 
|  | +   all floating-point libcalls are very expensive.  */ | 
|  | +#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */       \ | 
|  | +                      COSTS_N_INSNS (256), /* fp_mult_sf */   \ | 
|  | +                      COSTS_N_INSNS (256), /* fp_mult_df */   \ | 
|  | +                      COSTS_N_INSNS (256), /* fp_div_sf */    \ | 
|  | +                      COSTS_N_INSNS (256)  /* fp_div_df */ | 
|  | + | 
|  | +/* Costs to use when optimizing for size.  */ | 
|  | +static const struct mips_rtx_cost_data mips_rtx_cost_optimize_size = { | 
|  | +  COSTS_N_INSNS (1),            /* fp_add */ | 
|  | +  COSTS_N_INSNS (1),            /* fp_mult_sf */ | 
|  | +  COSTS_N_INSNS (1),            /* fp_mult_df */ | 
|  | +  COSTS_N_INSNS (1),            /* fp_div_sf */ | 
|  | +  COSTS_N_INSNS (1),            /* fp_div_df */ | 
|  | +  COSTS_N_INSNS (1),            /* int_mult_si */ | 
|  | +  COSTS_N_INSNS (1),            /* int_mult_di */ | 
|  | +  COSTS_N_INSNS (1),            /* int_div_si */ | 
|  | +  COSTS_N_INSNS (1),            /* int_div_di */ | 
|  | +		   2,           /* branch_cost */ | 
|  | +		   4            /* memory_latency */ | 
|  | +}; | 
|  | + | 
|  | +/* Costs to use when optimizing for speed, indexed by processor.  */ | 
|  | +static const struct mips_rtx_cost_data | 
|  | +  mips_rtx_cost_data[NUM_PROCESSOR_VALUES] = { | 
|  | +  { /* Rocket */ DEFAULT_COSTS}, | 
|  | +}; | 
|  | + | 
|  | +static int mips_register_move_cost (enum machine_mode, reg_class_t, | 
|  | +				    reg_class_t); | 
|  | +static unsigned int mips_function_arg_boundary (enum machine_mode, const_tree); | 
|  | + | 
|  | +/* Predicates to test for presence of "near" and "far"/"long_call" | 
|  | +   attributes on the given TYPE.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_near_type_p (const_tree type) | 
|  | +{ | 
|  | +  return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL; | 
|  | +} | 
|  | + | 
|  | +static bool | 
|  | +mips_far_type_p (const_tree type) | 
|  | +{ | 
|  | +  return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL | 
|  | +	  || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_COMP_TYPE_ATTRIBUTES.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_comp_type_attributes (const_tree type1, const_tree type2) | 
|  | +{ | 
|  | +  /* Disallow mixed near/far attributes.  */ | 
|  | +  if (mips_far_type_p (type1) && mips_near_type_p (type2)) | 
|  | +    return 0; | 
|  | +  if (mips_near_type_p (type1) && mips_far_type_p (type2)) | 
|  | +    return 0; | 
|  | +  return 1; | 
|  | +} | 
|  | + | 
|  | +/* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR | 
|  | +   and *OFFSET_PTR.  Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr) | 
|  | +{ | 
|  | +  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT) | 
|  | +    { | 
|  | +      *base_ptr = XEXP (x, 0); | 
|  | +      *offset_ptr = INTVAL (XEXP (x, 1)); | 
|  | +    } | 
|  | +  else | 
|  | +    { | 
|  | +      *base_ptr = x; | 
|  | +      *offset_ptr = 0; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Fill CODES with a sequence of rtl operations to load VALUE. | 
|  | +   Return the number of operations needed.  */ | 
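|  | +/* For example (illustrative), the constant 0x12345678 is neither a | 
|  | +   SMALL_OPERAND nor a LUI_OPERAND, so it is built in two operations: | 
|  | +   LUI 0x12345000 followed by ADDI 0x678.  */ | 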
|  | + | 
|  | +static int | 
|  | +riscv_build_integer_simple (struct mips_integer_op *codes, HOST_WIDE_INT value) | 
|  | +{ | 
|  | +  HOST_WIDE_INT low_part = RISCV_CONST_LOW_PART (value); | 
|  | +  int cost = INT_MAX, alt_cost; | 
|  | +  struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS]; | 
|  | + | 
|  | +  if (SMALL_OPERAND (value) || LUI_OPERAND (value)) | 
|  | +    { | 
|  | +      /* Simply ADDI or LUI */ | 
|  | +      codes[0].code = UNKNOWN; | 
|  | +      codes[0].value = value; | 
|  | +      return 1; | 
|  | +    } | 
|  | + | 
|  | +  /* End with ADDI */ | 
|  | +  if (low_part != 0) | 
|  | +    { | 
|  | +      cost = 1 + riscv_build_integer_simple (codes, value - low_part); | 
|  | +      codes[cost-1].code = PLUS; | 
|  | +      codes[cost-1].value = low_part; | 
|  | +    } | 
|  | + | 
|  | +  /* End with XORI */ | 
|  | +  if (low_part < 0) | 
|  | +    { | 
|  | +      alt_cost = 1 + riscv_build_integer_simple (alt_codes, value ^ low_part); | 
|  | +      alt_codes[alt_cost-1].code = XOR; | 
|  | +      alt_codes[alt_cost-1].value = low_part; | 
|  | +      if (alt_cost < cost) | 
|  | +	cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes)); | 
|  | +    } | 
|  | + | 
|  | +  /* Eliminate trailing zeros and end with SLLI */ | 
|  | +  if ((value & 1) == 0) | 
|  | +    { | 
|  | +      int shift = __builtin_ctzl(value); | 
|  | +      alt_cost = 1 + riscv_build_integer_simple (alt_codes, value >> shift); | 
|  | +      alt_codes[alt_cost-1].code = ASHIFT; | 
|  | +      alt_codes[alt_cost-1].value = shift; | 
|  | +      if (alt_cost < cost) | 
|  | +	cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes)); | 
|  | +    } | 
|  | + | 
|  | +  gcc_assert (cost <= MIPS_MAX_INTEGER_OPS); | 
|  | +  return cost; | 
|  | +} | 
|  | + | 
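|  | +/* Like riscv_build_integer_simple, but also consider shifting out the | 
|  | +   constant's leading zeros (or filling them with ones) and restoring | 
|  | +   them with a trailing SRLI.  For example (illustrative), 0xffffffff | 
|  | +   can be built in two operations: ADDI of -1 followed by SRLI by 32.  */ | 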
|  | +static int | 
|  | +riscv_build_integer (struct mips_integer_op *codes, HOST_WIDE_INT value) | 
|  | +{ | 
|  | +  int cost = riscv_build_integer_simple (codes, value); | 
|  | + | 
|  | +  /* Eliminate leading zeros and end with SRLI */ | 
|  | +  if (value > 0 && cost > 2) | 
|  | +    { | 
|  | +      struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS]; | 
|  | +      int alt_cost, shift; | 
|  | + | 
|  | +      shift = __builtin_clzl(value); | 
|  | +      alt_cost = 1 + riscv_build_integer_simple (alt_codes, value << shift); | 
|  | +      alt_codes[alt_cost-1].code = LSHIFTRT; | 
|  | +      alt_codes[alt_cost-1].value = shift; | 
|  | +      if (alt_cost < cost) | 
|  | +	cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes)); | 
|  | + | 
|  | +      /* Also try filling discarded bits with 1s */ | 
|  | +      shift = __builtin_clzl(value); | 
|  | +      alt_cost = 1 + riscv_build_integer_simple (alt_codes, | 
|  | +			value << shift | ((1L<<shift)-1)); | 
|  | +      alt_codes[alt_cost-1].code = LSHIFTRT; | 
|  | +      alt_codes[alt_cost-1].value = shift; | 
|  | +      if (alt_cost < cost) | 
|  | +	cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes)); | 
|  | +    } | 
|  | + | 
|  | +  return cost; | 
|  | +} | 
|  | + | 
|  | +/* Return true if X is a thread-local symbol.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_tls_symbol_p (const_rtx x) | 
|  | +{ | 
|  | +  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0; | 
|  | +} | 
|  | + | 
|  | +bool | 
|  | +riscv_symbol_binds_local_p (const_rtx x) | 
|  | +{ | 
|  | +  if (SYMBOL_REF_DECL (x)) | 
|  | +    { | 
|  | +      if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (x))) | 
|  | +	return true; /* Force local binding of memset etc. */ | 
|  | +      return targetm.binds_local_p (SYMBOL_REF_DECL (x)); | 
|  | +    } | 
|  | +  return SYMBOL_REF_LOCAL_P (x); | 
|  | +} | 
|  | + | 
|  | +/* Return the method that should be used to access SYMBOL_REF or | 
|  | +   LABEL_REF X.  */ | 
|  | + | 
|  | +static enum mips_symbol_type | 
|  | +mips_classify_symbol (const_rtx x) | 
|  | +{ | 
|  | +  if (mips_tls_symbol_p (x)) | 
|  | +    return SYMBOL_TLS; | 
|  | +  return SYMBOL_ABSOLUTE; | 
|  | +} | 
|  | + | 
|  | +/* Classify the base of symbolic expression X.  */ | 
|  | + | 
|  | +static enum mips_symbol_type | 
|  | +mips_classify_symbolic_expression (rtx x) | 
|  | +{ | 
|  | +  rtx offset; | 
|  | + | 
|  | +  split_const (x, &x, &offset); | 
|  | +  if (UNSPEC_ADDRESS_P (x)) | 
|  | +    return UNSPEC_ADDRESS_TYPE (x); | 
|  | + | 
|  | +  return mips_classify_symbol (x); | 
|  | +} | 
|  | + | 
|  | +/* Return true if OFFSET is within the range [0, ALIGN), where ALIGN | 
|  | +   is the alignment in bytes of SYMBOL_REF X.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset) | 
|  | +{ | 
|  | +  HOST_WIDE_INT align; | 
|  | + | 
|  | +  align = SYMBOL_REF_DECL (x) ? DECL_ALIGN_UNIT (SYMBOL_REF_DECL (x)) : 1; | 
|  | +  return IN_RANGE (offset, 0, align - 1); | 
|  | +} | 
|  | + | 
|  | +/* Return true if X is a valid symbolic constant.  If it is, store the | 
|  | +   type of the symbol in *SYMBOL_TYPE.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type) | 
|  | +{ | 
|  | +  rtx offset; | 
|  | + | 
|  | +  split_const (x, &x, &offset); | 
|  | +  if (UNSPEC_ADDRESS_P (x)) | 
|  | +    { | 
|  | +      *symbol_type = UNSPEC_ADDRESS_TYPE (x); | 
|  | +      x = UNSPEC_ADDRESS (x); | 
|  | +    } | 
|  | +  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF) | 
|  | +    *symbol_type = mips_classify_symbol (x); | 
|  | +  else | 
|  | +    return false; | 
|  | + | 
|  | +  if (offset == const0_rtx) | 
|  | +    return true; | 
|  | + | 
|  | +  if (flag_pic) | 
|  | +    /* Load the base address from the GOT, then add the offset.  The offset | 
|  | +       calculation can usually be folded into the load or store instruction.  */ | 
|  | +    return false; | 
|  | + | 
|  | +  /* Check whether a nonzero offset is valid for the underlying | 
|  | +     relocations.  */ | 
|  | +  switch (*symbol_type) | 
|  | +    { | 
|  | +    case SYMBOL_ABSOLUTE: | 
|  | +      /* If the target has 64-bit pointers and the object file only | 
|  | +	 supports 32-bit symbols, the values of those symbols will be | 
|  | +	 sign-extended.  In this case we can't allow an arbitrary offset | 
|  | +	 in case the 32-bit value X + OFFSET has a different sign from X.  */ | 
|  | +      return Pmode == SImode || offset_within_block_p (x, INTVAL (offset)); | 
|  | + | 
|  | +    case SYMBOL_TPREL: | 
|  | +      /* There is no carry between the HI and LO REL relocations, so the | 
|  | +	 offset is only valid if we know it won't lead to such a carry.  */ | 
|  | +      return mips_offset_within_alignment_p (x, INTVAL (offset)); | 
|  | + | 
|  | +    case SYMBOL_TLS: | 
|  | +      return false; | 
|  | +    } | 
|  | +  gcc_unreachable (); | 
|  | +} | 
|  | + | 
|  | +/* Return the number of instructions needed to load a symbol of type TYPE | 
|  | +   into a register, or 0 if such symbols cannot be used as constants. | 
|  | +   MODE is the mode of the value being accessed.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode) | 
|  | +{ | 
|  | +  switch (type) | 
|  | +    { | 
|  | +    case SYMBOL_ABSOLUTE: | 
|  | +    case SYMBOL_TPREL: | 
|  | +      /* One of LUI or AUIPC, followed by one of ADDI, LD, or LW. */ | 
|  | +      return 2; | 
|  | + | 
|  | +    case SYMBOL_TLS: | 
|  | +      /* We don't treat a bare TLS symbol as a constant.  */ | 
|  | +      return 0; | 
|  | +    } | 
|  | +  gcc_unreachable (); | 
|  | +} | 
|  | + | 
|  | +/* A for_each_rtx callback.  Stop the search if *X references a | 
|  | +   thread-local symbol.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED) | 
|  | +{ | 
|  | +  return mips_tls_symbol_p (*x); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_cannot_force_const_mem (rtx x) | 
|  | +{ | 
|  | +  enum mips_symbol_type type; | 
|  | +  rtx base, offset; | 
|  | + | 
|  | +  /* There is no assembler syntax for expressing an address-sized | 
|  | +     high part.  */ | 
|  | +  if (GET_CODE (x) == HIGH) | 
|  | +    return true; | 
|  | + | 
|  | +  /* As an optimization, reject constants that mips_legitimize_move | 
|  | +     can expand inline. | 
|  | + | 
|  | +     Suppose we have a multi-instruction sequence that loads constant C | 
|  | +     into register R.  If R does not get allocated a hard register, and | 
|  | +     R is used in an operand that allows both registers and memory | 
|  | +     references, reload will consider forcing C into memory and using | 
|  | +     one of the instruction's memory alternatives.  Returning false | 
|  | +     here will force it to use an input reload instead.  */ | 
|  | +  if (CONST_INT_P (x) && LEGITIMATE_CONSTANT_P (x)) | 
|  | +    return true; | 
|  | + | 
|  | +  split_const (x, &base, &offset); | 
|  | +  if (mips_symbolic_constant_p (base, &type)) | 
|  | +    { | 
|  | +      /* The same optimization as for CONST_INT.  */ | 
|  | +      if (SMALL_INT (offset) && mips_symbol_insns (type, MAX_MACHINE_MODE) > 0) | 
|  | +	return true; | 
|  | +    } | 
|  | + | 
|  | +  /* TLS symbols must be computed by mips_legitimize_move.  */ | 
|  | +  if (for_each_rtx (&x, &mips_tls_symbol_ref_1, NULL)) | 
|  | +    return true; | 
|  | + | 
|  | +  return false; | 
|  | +} | 
|  | + | 
|  | +/* Return true if register REGNO is a valid base register for mode MODE. | 
|  | +   STRICT_P is true if REG_OK_STRICT is in effect.  */ | 
|  | + | 
|  | +int | 
|  | +mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode ATTRIBUTE_UNUSED, | 
|  | +			       bool strict_p) | 
|  | +{ | 
|  | +  if (!HARD_REGISTER_NUM_P (regno)) | 
|  | +    { | 
|  | +      if (!strict_p) | 
|  | +	return true; | 
|  | +      regno = reg_renumber[regno]; | 
|  | +    } | 
|  | + | 
|  | +  /* These fake registers will be eliminated to either the stack or | 
|  | +     hard frame pointer, both of which are usually valid base registers. | 
|  | +     Reload deals with the cases where the eliminated form isn't valid.  */ | 
|  | +  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM) | 
|  | +    return true; | 
|  | + | 
|  | +  return GP_REG_P (regno); | 
|  | +} | 
|  | + | 
|  | +/* Return true if X is a valid base register for mode MODE. | 
|  | +   STRICT_P is true if REG_OK_STRICT is in effect.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p) | 
|  | +{ | 
|  | +  if (!strict_p && GET_CODE (x) == SUBREG) | 
|  | +    x = SUBREG_REG (x); | 
|  | + | 
|  | +  return (REG_P (x) | 
|  | +	  && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p)); | 
|  | +} | 
|  | + | 
|  | +/* Return true if, for every base register BASE_REG, (plus BASE_REG X) | 
|  | +   can address a value of mode MODE.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_valid_offset_p (rtx x, enum machine_mode mode) | 
|  | +{ | 
|  | +  /* Check that X is a signed 12-bit number.  */ | 
|  | +  if (!const_arith_operand (x, Pmode)) | 
|  | +    return false; | 
|  | + | 
|  | +  /* We may need to split multiword moves, so make sure that every word | 
|  | +     is accessible.  */ | 
|  | +  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD | 
|  | +      && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD)) | 
|  | +    return false; | 
|  | + | 
|  | +  return true; | 
|  | +} | 
|  | + | 
|  | +/* Return true if a LO_SUM can address a value of mode MODE when the | 
|  | +   LO_SUM symbol has type SYMBOL_TYPE.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_valid_lo_sum_p (enum mips_symbol_type symbol_type, enum machine_mode mode) | 
|  | +{ | 
|  | +  /* Check that symbols of type SYMBOL_TYPE can be used to access values | 
|  | +     of mode MODE.  */ | 
|  | +  if (mips_symbol_insns (symbol_type, mode) == 0) | 
|  | +    return false; | 
|  | + | 
|  | +  /* Check that there is a known low-part relocation.  */ | 
|  | +  if (mips_lo_relocs[symbol_type] == NULL) | 
|  | +    return false; | 
|  | + | 
|  | +  /* We may need to split multiword moves, so make sure that each word | 
|  | +     can be accessed without inducing a carry.  */ | 
|  | +  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD | 
|  | +      && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode)) | 
|  | +    return false; | 
|  | + | 
|  | +  return true; | 
|  | +} | 
|  | + | 
|  | +/* Return true if X is a valid address for machine mode MODE.  If it is, | 
|  | +   fill in INFO appropriately.  STRICT_P is true if REG_OK_STRICT is in | 
|  | +   effect.  */ | 
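|  | +/* For example (illustrative), (plus (reg sp) (const_int 16)) is accepted | 
|  | +   as ADDRESS_REG, whereas a bare SYMBOL_REF is classified as | 
|  | +   ADDRESS_SYMBOLIC but rejected, so it must first be legitimized into a | 
|  | +   HIGH/LO_SUM pair.  */ | 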
|  | + | 
|  | +static bool | 
|  | +mips_classify_address (struct mips_address_info *info, rtx x, | 
|  | +		       enum machine_mode mode, bool strict_p) | 
|  | +{ | 
|  | +  switch (GET_CODE (x)) | 
|  | +    { | 
|  | +    case REG: | 
|  | +    case SUBREG: | 
|  | +      info->type = ADDRESS_REG; | 
|  | +      info->reg = x; | 
|  | +      info->offset = const0_rtx; | 
|  | +      return mips_valid_base_register_p (info->reg, mode, strict_p); | 
|  | + | 
|  | +    case PLUS: | 
|  | +      info->type = ADDRESS_REG; | 
|  | +      info->reg = XEXP (x, 0); | 
|  | +      info->offset = XEXP (x, 1); | 
|  | +      return (mips_valid_base_register_p (info->reg, mode, strict_p) | 
|  | +	      && mips_valid_offset_p (info->offset, mode)); | 
|  | + | 
|  | +    case LO_SUM: | 
|  | +      info->type = ADDRESS_LO_SUM; | 
|  | +      info->reg = XEXP (x, 0); | 
|  | +      info->offset = XEXP (x, 1); | 
|  | +      /* We have to trust the creator of the LO_SUM to do something vaguely | 
|  | +	 sane.  Target-independent code that creates a LO_SUM should also | 
|  | +	 create and verify the matching HIGH.  Target-independent code that | 
|  | +	 adds an offset to a LO_SUM must prove that the offset will not | 
|  | +	 induce a carry.  Failure to do either of these things would be | 
|  | +	 a bug, and we are not required to check for it here.  The | 
|  | +	 backend itself should only create LO_SUMs for valid symbolic | 
|  | +	 constants, with the high part being a HIGH.  */ | 
|  | +      info->symbol_type | 
|  | +	= mips_classify_symbolic_expression (info->offset); | 
|  | +      return (mips_valid_base_register_p (info->reg, mode, strict_p) | 
|  | +	      && mips_valid_lo_sum_p (info->symbol_type, mode)); | 
|  | + | 
|  | +    case CONST_INT: | 
|  | +      /* Small-integer addresses don't occur very often, but they | 
|  | +	 are legitimate if x0 is a valid base register.  */ | 
|  | +      info->type = ADDRESS_CONST_INT; | 
|  | +      return SMALL_INT (x); | 
|  | + | 
|  | +    case CONST: | 
|  | +    case LABEL_REF: | 
|  | +    case SYMBOL_REF: | 
|  | +      info->type = ADDRESS_SYMBOLIC; | 
|  | +      return false; | 
|  | + | 
|  | +    default: | 
|  | +      return false; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_LEGITIMATE_ADDRESS_P.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p) | 
|  | +{ | 
|  | +  struct mips_address_info addr; | 
|  | + | 
|  | +  return mips_classify_address (&addr, x, mode, strict_p); | 
|  | +} | 
|  | + | 
|  | +/* Return the number of instructions needed to load or store a value | 
|  | +   of mode MODE at address X.  Return 0 if X isn't valid for MODE. | 
|  | +   Assume that multiword moves may need to be split into word moves | 
|  | +   if MIGHT_SPLIT_P, otherwise assume that a single load or store is | 
|  | +   enough.  */ | 
|  | + | 
|  | +int | 
|  | +mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p) | 
|  | +{ | 
|  | +  struct mips_address_info addr; | 
|  | + | 
|  | +  if (mips_classify_address (&addr, x, mode, false)) | 
|  | +    { | 
|  | +      int factor = 1; | 
|  | + | 
|  | +      /* BLKmode is used for single unaligned loads and stores and should | 
|  | +         not count as a multiword mode. */ | 
|  | +      if (mode != BLKmode && might_split_p) | 
|  | +        factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; | 
|  | + | 
|  | +      if (addr.type == ADDRESS_SYMBOLIC) | 
|  | +	factor *= mips_symbol_insns (addr.symbol_type, mode); | 
|  | + | 
|  | +      return factor; | 
|  | +    } | 
|  | + | 
|  | +  return 0; | 
|  | +} | 
|  | + | 
|  | +/* Return the number of instructions needed to load constant X. | 
|  | +   Return 0 if X isn't a valid constant.  */ | 
|  | + | 
|  | +int | 
|  | +mips_const_insns (rtx x) | 
|  | +{ | 
|  | +  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS]; | 
|  | +  enum mips_symbol_type symbol_type; | 
|  | +  rtx offset; | 
|  | + | 
|  | +  switch (GET_CODE (x)) | 
|  | +    { | 
|  | +    case HIGH: | 
|  | +      if (!mips_symbolic_constant_p (XEXP (x, 0), &symbol_type) | 
|  | +	  || !mips_split_p[symbol_type]) | 
|  | +	return 0; | 
|  | + | 
|  | +      /* This is simply an LUI. */ | 
|  | +      return 1; | 
|  | + | 
|  | +    case CONST_INT: | 
|  | +      return riscv_build_integer (codes, INTVAL (x)); | 
|  | + | 
|  | +    case CONST_DOUBLE: | 
|  | +    case CONST_VECTOR: | 
|  | +      /* Allow zeros, which we can load with x0.  */ | 
|  | +      return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0; | 
|  | + | 
|  | +    case CONST: | 
|  | +      /* See if we can refer to X directly.  */ | 
|  | +      if (mips_symbolic_constant_p (x, &symbol_type)) | 
|  | +	return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE); | 
|  | + | 
|  | +      /* Otherwise try splitting the constant into a base and offset. | 
|  | +	 If the offset is a 12-bit value, we can load the base address | 
|  | +	 into a register and then use ADDI to add in the offset. | 
|  | +	 If the offset is larger, we can load the base and offset | 
|  | +	 into separate registers and add them together with ADD. | 
|  | +	 However, the latter is only possible before reload; during | 
|  | +	 and after reload, we must have the option of forcing the | 
|  | +	 constant into the pool instead.  */ | 
|  | +      split_const (x, &x, &offset); | 
|  | +      if (offset != 0) | 
|  | +	{ | 
|  | +	  int n = mips_const_insns (x); | 
|  | +	  if (n != 0) | 
|  | +	    { | 
|  | +	      if (SMALL_INT (offset)) | 
|  | +		return n + 1; | 
|  | +	      else if (!targetm.cannot_force_const_mem (x)) | 
|  | +		return n + 1 + riscv_build_integer (codes, INTVAL (offset)); | 
|  | +	    } | 
|  | +	} | 
|  | +      return 0; | 
|  | + | 
|  | +    case SYMBOL_REF: | 
|  | +    case LABEL_REF: | 
|  | +      return mips_symbol_insns (mips_classify_symbol (x), MAX_MACHINE_MODE); | 
|  | + | 
|  | +    default: | 
|  | +      return 0; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* X is a doubleword constant that can be handled by splitting it into | 
|  | +   two words and loading each word separately.  Return the number of | 
|  | +   instructions required to do this.  */ | 
|  | + | 
|  | +int | 
|  | +mips_split_const_insns (rtx x) | 
|  | +{ | 
|  | +  unsigned int low, high; | 
|  | + | 
|  | +  low = mips_const_insns (mips_subword (x, false)); | 
|  | +  high = mips_const_insns (mips_subword (x, true)); | 
|  | +  gcc_assert (low > 0 && high > 0); | 
|  | +  return low + high; | 
|  | +} | 
|  | + | 
|  | +/* Return the number of instructions needed to implement INSN, | 
|  | +   given that it loads from or stores to MEM.  */ | 
|  | + | 
|  | +int | 
|  | +mips_load_store_insns (rtx mem, rtx insn) | 
|  | +{ | 
|  | +  enum machine_mode mode; | 
|  | +  bool might_split_p; | 
|  | +  rtx set; | 
|  | + | 
|  | +  gcc_assert (MEM_P (mem)); | 
|  | +  mode = GET_MODE (mem); | 
|  | + | 
|  | +  /* Try to prove that INSN does not need to be split.  */ | 
|  | +  might_split_p = true; | 
|  | +  if (GET_MODE_BITSIZE (mode) == 64) | 
|  | +    { | 
|  | +      set = single_set (insn); | 
|  | +      if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set))) | 
|  | +	might_split_p = false; | 
|  | +    } | 
|  | + | 
|  | +  return mips_address_insns (XEXP (mem, 0), mode, might_split_p); | 
|  | +} | 
|  | + | 
|  | +/* Emit a move from SRC to DEST.  Assume that the move expanders can | 
|  | +   handle all moves if !can_create_pseudo_p ().  The distinction is | 
|  | +   important because, unlike emit_move_insn, the move expanders know | 
|  | +   how to force Pmode objects into the constant pool even when the | 
|  | +   constant pool address is not itself legitimate.  */ | 
|  | + | 
|  | +rtx | 
|  | +mips_emit_move (rtx dest, rtx src) | 
|  | +{ | 
|  | +  return (can_create_pseudo_p () | 
|  | +	  ? emit_move_insn (dest, src) | 
|  | +	  : emit_move_insn_1 (dest, src)); | 
|  | +} | 
|  | + | 
|  | +/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)).  */ | 
|  | + | 
|  | +static void | 
|  | +mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1) | 
|  | +{ | 
|  | +  emit_insn (gen_rtx_SET (VOIDmode, target, | 
|  | +			  gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1))); | 
|  | +} | 
|  | + | 
|  | +/* Compute (CODE OP0 OP1) and store the result in a new register | 
|  | +   of mode MODE.  Return that new register.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1) | 
|  | +{ | 
|  | +  rtx reg; | 
|  | + | 
|  | +  reg = gen_reg_rtx (mode); | 
|  | +  mips_emit_binary (code, reg, op0, op1); | 
|  | +  return reg; | 
|  | +} | 
|  | + | 
|  | +/* Copy VALUE to a register and return that register.  If new pseudos | 
|  | +   are allowed, copy it into a new register, otherwise use DEST.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_force_temporary (rtx dest, rtx value) | 
|  | +{ | 
|  | +  if (can_create_pseudo_p ()) | 
|  | +    return force_reg (Pmode, value); | 
|  | +  else | 
|  | +    { | 
|  | +      mips_emit_move (dest, value); | 
|  | +      return dest; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE, | 
|  | +   then add CONST_INT OFFSET to the result.  */ | 
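|  | +/* For example (illustrative), wrapping a SYMBOL_REF with type SYMBOL_TPREL | 
|  | +   and offset 4 yields | 
|  | +   (const (plus (unspec [sym] UNSPEC_ADDRESS_FIRST + SYMBOL_TPREL) | 
|  | +                (const_int 4))).  */ | 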
|  | + | 
|  | +static rtx | 
|  | +mips_unspec_address_offset (rtx base, rtx offset, | 
|  | +			    enum mips_symbol_type symbol_type) | 
|  | +{ | 
|  | +  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base), | 
|  | +			 UNSPEC_ADDRESS_FIRST + symbol_type); | 
|  | +  if (offset != const0_rtx) | 
|  | +    base = gen_rtx_PLUS (Pmode, base, offset); | 
|  | +  return gen_rtx_CONST (Pmode, base); | 
|  | +} | 
|  | + | 
|  | +/* Return an UNSPEC address with underlying address ADDRESS and symbol | 
|  | +   type SYMBOL_TYPE.  */ | 
|  | + | 
|  | +rtx | 
|  | +mips_unspec_address (rtx address, enum mips_symbol_type symbol_type) | 
|  | +{ | 
|  | +  rtx base, offset; | 
|  | + | 
|  | +  split_const (address, &base, &offset); | 
|  | +  return mips_unspec_address_offset (base, offset, symbol_type); | 
|  | +} | 
|  | + | 
|  | +/* If OP is an UNSPEC address, return the address to which it refers, | 
|  | +   otherwise return OP itself.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_strip_unspec_address (rtx op) | 
|  | +{ | 
|  | +  rtx base, offset; | 
|  | + | 
|  | +  split_const (op, &base, &offset); | 
|  | +  if (UNSPEC_ADDRESS_P (base)) | 
|  | +    op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset)); | 
|  | +  return op; | 
|  | +} | 
|  | + | 
|  | +/* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the | 
|  | +   high part to BASE and return the result.  Just return BASE otherwise. | 
|  | +   TEMP is as for mips_force_temporary. | 
|  | + | 
|  | +   The returned expression can be used as the first operand to a LO_SUM.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_unspec_offset_high (rtx temp, rtx base, rtx addr, | 
|  | +			 enum mips_symbol_type symbol_type) | 
|  | +{ | 
|  | +  if (mips_split_p[symbol_type]) | 
|  | +    { | 
|  | +      addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type)); | 
|  | +      addr = mips_force_temporary (temp, addr); | 
|  | +      base = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base)); | 
|  | +    } | 
|  | +  return base; | 
|  | +} | 
|  | + | 
|  | +/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise | 
|  | +   it appears in a MEM of that mode.  Return true if ADDR is a legitimate | 
|  | +   constant in that context and can be split into high and low parts. | 
|  | +   If so, and if LOW_OUT is nonnull, emit the high part and store the | 
|  | +   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise. | 
|  | + | 
|  | +   TEMP is as for mips_force_temporary and is used to load the high | 
|  | +   part into a register. | 
|  | + | 
|  | +   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be | 
|  | +   a legitimate SET_SRC for an .md pattern, otherwise the low part | 
|  | +   is guaranteed to be a legitimate address for mode MODE.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out) | 
|  | +{ | 
|  | +  enum mips_symbol_type symbol_type; | 
|  | +  rtx high; | 
|  | + | 
|  | +  if (!(GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)) | 
|  | +    { | 
|  | +      if (mips_symbolic_constant_p (addr, &symbol_type) | 
|  | +	  && mips_symbol_insns (symbol_type, mode) > 0 | 
|  | +	  && mips_split_p[symbol_type]) | 
|  | +	{ | 
|  | +	  if (low_out) | 
|  | +	    { | 
|  | +	      high = gen_rtx_HIGH (Pmode, copy_rtx (addr)); | 
|  | +	      high = mips_force_temporary (temp, high); | 
|  | +	      *low_out = gen_rtx_LO_SUM (Pmode, high, addr); | 
|  | +	    } | 
|  | +	  return true; | 
|  | +	} | 
|  | +    } | 
|  | +  return false; | 
|  | +} | 
|  | + | 
|  | +/* Return a legitimate address for REG + OFFSET.  TEMP is as for | 
|  | +   mips_force_temporary; it is only needed when OFFSET is not a | 
|  | +   SMALL_OPERAND.  */ | 
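|  | +/* For example (illustrative, assuming RISCV_CONST_HIGH_PART rounds to the | 
|  | +   nearest multiple of 0x1000), an offset of 0x12945 is split into a high | 
|  | +   part of 0x13000 and a low part of -0x6bb, keeping the low part a valid | 
|  | +   12-bit immediate.  */ | 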
|  | + | 
|  | +static rtx | 
|  | +mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset) | 
|  | +{ | 
|  | +  if (!SMALL_OPERAND (offset)) | 
|  | +    { | 
|  | +      rtx high; | 
|  | + | 
|  | +      /* Leave OFFSET as a 12-bit offset and put the excess in HIGH. | 
|  | +         The addition inside the macro RISCV_CONST_HIGH_PART may cause an | 
|  | +         overflow, so we need to force a sign-extension check.  */ | 
|  | +      high = gen_int_mode (RISCV_CONST_HIGH_PART (offset), Pmode); | 
|  | +      offset = RISCV_CONST_LOW_PART (offset); | 
|  | +      high = mips_force_temporary (temp, high); | 
|  | +      reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg)); | 
|  | +    } | 
|  | +  return plus_constant (reg, offset); | 
|  | +} | 
|  | + | 
|  | +/* Load an entry from the GOT. */ | 
|  | +static rtx riscv_got_load(rtx dest, rtx sym) | 
|  | +{ | 
|  | +  return (Pmode == DImode ? gen_got_loaddi(dest, sym) : gen_got_loadsi(dest, sym)); | 
|  | +} | 
|  | +static rtx riscv_got_load_tls_gd(rtx dest, rtx sym) | 
|  | +{ | 
|  | +  return (Pmode == DImode ? gen_got_load_tls_gddi(dest, sym) : gen_got_load_tls_gdsi(dest, sym)); | 
|  | +} | 
|  | +static rtx riscv_got_load_tls_ie(rtx dest, rtx sym) | 
|  | +{ | 
|  | +  return (Pmode == DImode ? gen_got_load_tls_iedi(dest, sym) : gen_got_load_tls_iesi(dest, sym)); | 
|  | +} | 
|  | + | 
|  | +/* The __tls_get_addr symbol.  */ | 
|  | +static GTY(()) rtx mips_tls_symbol; | 
|  | + | 
|  | +/* Return an instruction sequence that calls __tls_get_addr.  SYM is | 
|  | +   the TLS symbol we are referencing and V0 is an RTX for the return | 
|  | +   value location.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_call_tls_get_addr (rtx sym, rtx v0) | 
|  | +{ | 
|  | +  rtx insn, a0; | 
|  | + | 
|  | +  a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST); | 
|  | + | 
|  | +  if (!mips_tls_symbol) | 
|  | +    mips_tls_symbol = init_one_libfunc ("__tls_get_addr"); | 
|  | + | 
|  | +  start_sequence (); | 
|  | + | 
|  | +  emit_insn (riscv_got_load_tls_gd(a0, sym)); | 
|  | +  insn = mips_expand_call (false, v0, mips_tls_symbol, const0_rtx); | 
|  | +  RTL_CONST_CALL_P (insn) = 1; | 
|  | +  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0); | 
|  | +  insn = get_insns (); | 
|  | + | 
|  | +  end_sequence (); | 
|  | + | 
|  | +  return insn; | 
|  | +} | 
|  | + | 
|  | +/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return | 
|  | +   its address.  The return value will be both a valid address and a valid | 
|  | +   SET_SRC (either a REG or a LO_SUM).  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_legitimize_tls_address (rtx loc) | 
|  | +{ | 
|  | +  rtx dest, insn, v0, tp, tmp1; | 
|  | +  enum tls_model model; | 
|  | + | 
|  | +  model = SYMBOL_REF_TLS_MODEL (loc); | 
|  | +  /* Only TARGET_ABICALLS code can have more than one module; other | 
|  | +     code must be static and should not use a GOT.  All TLS models | 
|  | +     reduce to local exec in this situation.  */ | 
|  | +  if (!TARGET_ABICALLS) | 
|  | +    model = TLS_MODEL_LOCAL_EXEC; | 
|  | + | 
|  | +  switch (model) | 
|  | +    { | 
|  | +    case TLS_MODEL_LOCAL_DYNAMIC: | 
|  | +      /* We don't support LDM TLS, so fall through.  */ | 
|  | +    case TLS_MODEL_GLOBAL_DYNAMIC: | 
|  | +      v0 = gen_rtx_REG (Pmode, GP_RETURN); | 
|  | +      insn = mips_call_tls_get_addr (loc, v0); | 
|  | +      dest = gen_reg_rtx (Pmode); | 
|  | +      emit_libcall_block (insn, dest, v0, loc); | 
|  | +      break; | 
|  | + | 
|  | +    case TLS_MODEL_INITIAL_EXEC: | 
|  | +      tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); | 
|  | +      tmp1 = gen_reg_rtx (Pmode); | 
|  | +      emit_insn (riscv_got_load_tls_ie(tmp1, loc)); | 
|  | +      dest = gen_reg_rtx (Pmode); | 
|  | +      emit_insn (gen_add3_insn (dest, tmp1, tp)); | 
|  | +      break; | 
|  | + | 
|  | +    case TLS_MODEL_LOCAL_EXEC: | 
|  | +      tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); | 
|  | +      tmp1 = mips_unspec_offset_high (NULL, tp, loc, SYMBOL_TPREL); | 
|  | +      dest = gen_rtx_LO_SUM (Pmode, tmp1, | 
|  | +			     mips_unspec_address (loc, SYMBOL_TPREL)); | 
|  | +      break; | 
|  | + | 
|  | +    default: | 
|  | +      gcc_unreachable (); | 
|  | +    } | 
|  | +  return dest; | 
|  | +} | 
|  | + | 
|  | +/* If X is not a valid address for mode MODE, force it into a register.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_force_address (rtx x, enum machine_mode mode) | 
|  | +{ | 
|  | +  if (!mips_legitimate_address_p (mode, x, false)) | 
|  | +    x = force_reg (Pmode, x); | 
|  | +  return x; | 
|  | +} | 
|  | + | 
|  | +/* This function is used to implement LEGITIMIZE_ADDRESS.  If X can | 
|  | +   be legitimized in a way that the generic machinery might not expect, | 
|  | +   return a new address, otherwise return NULL.  MODE is the mode of | 
|  | +   the memory being accessed.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, | 
|  | +			 enum machine_mode mode) | 
|  | +{ | 
|  | +  rtx addr; | 
|  | + | 
|  | +  if (mips_tls_symbol_p (x)) | 
|  | +    return mips_legitimize_tls_address (x); | 
|  | + | 
|  | +  /* See if the address can split into a high part and a LO_SUM.  */ | 
|  | +  if (mips_split_symbol (NULL, x, mode, &addr)) | 
|  | +    return mips_force_address (addr, mode); | 
|  | + | 
|  | +  /* Handle BASE + OFFSET using mips_add_offset.  */ | 
|  | +  if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)) | 
|  | +      && INTVAL (XEXP (x, 1)) != 0) | 
|  | +    { | 
|  | +      rtx base = XEXP (x, 0); | 
|  | +      HOST_WIDE_INT offset = INTVAL (XEXP (x, 1)); | 
|  | + | 
|  | +      if (!mips_valid_base_register_p (base, mode, false)) | 
|  | +	base = copy_to_mode_reg (Pmode, base); | 
|  | +      addr = mips_add_offset (NULL, base, offset); | 
|  | +      return mips_force_address (addr, mode); | 
|  | +    } | 
|  | + | 
|  | +  return x; | 
|  | +} | 
|  | + | 
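|  | +/* Return the cost, in instructions, of loading VAL by splitting it into | 
|  | +   two 32-bit halves, loading the halves, and recombining them with a | 
|  | +   shift and an add (see riscv_split_integer below).  */ | 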
|  | +static int | 
|  | +riscv_split_integer_cost (HOST_WIDE_INT val) | 
|  | +{ | 
|  | +  int cost = 0; | 
|  | +  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS]; | 
|  | +  int32_t loval = val, hival = (val - (int32_t)val) >> 32; | 
|  | + | 
|  | +  cost += riscv_build_integer(codes, loval); | 
|  | +  if (loval != hival) | 
|  | +    cost += riscv_build_integer(codes, hival); | 
|  | +  return cost + 2; | 
|  | +} | 
|  | + | 
|  | +/* Try to split a 64b integer into 32b parts, then reassemble. */ | 
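|  | +/* For example (illustrative), 0x1234567887654321 is split into | 
|  | +   loval = 0x87654321 (negative once sign-extended) and hival = 0x12345679; | 
|  | +   (hival << 32) + loval then reconstructs the original value.  */ | 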
|  | + | 
|  | +static rtx | 
|  | +riscv_split_integer (HOST_WIDE_INT val, enum machine_mode mode) | 
|  | +{ | 
|  | +  int32_t loval = val, hival = (val - (int32_t)val) >> 32; | 
|  | +  rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode); | 
|  | + | 
|  | +  mips_move_integer (hi, hi, hival); | 
|  | +  mips_move_integer (lo, lo, loval); | 
|  | + | 
|  | +  hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32)); | 
|  | +  hi = force_reg (mode, hi); | 
|  | + | 
|  | +  return gen_rtx_fmt_ee (PLUS, mode, hi, lo); | 
|  | +} | 
|  | + | 
|  | +/* Load VALUE into DEST.  TEMP is as for mips_force_temporary.  */ | 
|  | + | 
|  | +void | 
|  | +mips_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value) | 
|  | +{ | 
|  | +  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS]; | 
|  | +  enum machine_mode mode; | 
|  | +  unsigned int i, num_ops; | 
|  | +  rtx x; | 
|  | + | 
|  | +  mode = GET_MODE (dest); | 
|  | +  num_ops = riscv_build_integer (codes, value); | 
|  | + | 
|  | +  if (can_create_pseudo_p () && num_ops >= riscv_split_integer_cost (value)) | 
|  | +    x = riscv_split_integer (value, mode); | 
|  | +  else | 
|  | +    { | 
|  | +      /* Apply each binary operation to X. */ | 
|  | +      x = GEN_INT (codes[0].value); | 
|  | + | 
|  | +      for (i = 1; i < num_ops; i++) | 
|  | +        { | 
|  | +          if (!can_create_pseudo_p ()) | 
|  | +            { | 
|  | +              emit_insn (gen_rtx_SET (VOIDmode, temp, x)); | 
|  | +              x = temp; | 
|  | +            } | 
|  | +          else | 
|  | +            x = force_reg (mode == HImode ? SImode : mode, x); | 
|  | + | 
|  | +          x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value)); | 
|  | +        } | 
|  | +    } | 
|  | + | 
|  | +  emit_insn (gen_rtx_SET (VOIDmode, dest, x)); | 
|  | +} | 
|  | + | 
|  | +/* Subroutine of mips_legitimize_move.  Move constant SRC into register | 
|  | +   DEST given that SRC satisfies immediate_operand but doesn't satisfy | 
|  | +   move_operand.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src) | 
|  | +{ | 
|  | +  rtx base, offset; | 
|  | + | 
|  | +  /* Split moves of big integers into smaller pieces.  */ | 
|  | +  if (splittable_const_int_operand (src, mode)) | 
|  | +    { | 
|  | +      mips_move_integer (dest, dest, INTVAL (src)); | 
|  | +      return; | 
|  | +    } | 
|  | + | 
|  | +  /* Split moves of symbolic constants into high/low pairs.  */ | 
|  | +  if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src)) | 
|  | +    { | 
|  | +      emit_insn (gen_rtx_SET (VOIDmode, dest, src)); | 
|  | +      return; | 
|  | +    } | 
|  | + | 
|  | +  /* Generate the appropriate access sequences for TLS symbols.  */ | 
|  | +  if (mips_tls_symbol_p (src)) | 
|  | +    { | 
|  | +      mips_emit_move (dest, mips_legitimize_tls_address (src)); | 
|  | +      return; | 
|  | +    } | 
|  | + | 
|  | +  /* If we have (const (plus symbol offset)), and that expression cannot | 
|  | +     be forced into memory, load the symbol first and add in the offset. | 
|  | +     Prefer to do this even if the constant _can_ be | 
|  | +     forced into memory, as it usually produces better code.  */ | 
|  | +  split_const (src, &base, &offset); | 
|  | +  if (offset != const0_rtx | 
|  | +      && (targetm.cannot_force_const_mem (src) | 
|  | +	  || can_create_pseudo_p ())) | 
|  | +    { | 
|  | +      base = mips_force_temporary (dest, base); | 
|  | +      mips_emit_move (dest, mips_add_offset (NULL, base, INTVAL (offset))); | 
|  | +      return; | 
|  | +    } | 
|  | + | 
|  | +  src = force_const_mem (mode, src); | 
|  | + | 
|  | +  /* When using explicit relocs, constant pool references are sometimes | 
|  | +     not legitimate addresses.  */ | 
|  | +  mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0)); | 
|  | +  mips_emit_move (dest, src); | 
|  | +} | 
|  | + | 
|  | +/* If (set DEST SRC) is not a valid move instruction, emit an equivalent | 
|  | +   sequence that is valid.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src) | 
|  | +{ | 
|  | +  if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode)) | 
|  | +    { | 
|  | +      mips_emit_move (dest, force_reg (mode, src)); | 
|  | +      return true; | 
|  | +    } | 
|  | + | 
|  | +  /* We need to deal with constants that would be legitimate | 
|  | +     immediate_operands but aren't legitimate move_operands.  */ | 
|  | +  if (CONSTANT_P (src) && !move_operand (src, mode)) | 
|  | +    { | 
|  | +      mips_legitimize_const_move (mode, dest, src); | 
|  | +      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src)); | 
|  | +      return true; | 
|  | +    } | 
|  | +  return false; | 
|  | +} | 
|  | + | 
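|  | +/* Legitimize a vector move in which DEST or SRC is a MEM whose address | 
|  | +   is not a bare register: compute the reg+offset address into a scratch | 
|  | +   register and rewrite the access to use it.  Return true if a | 
|  | +   replacement sequence was emitted, false otherwise.  */ | 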
|  | +bool | 
|  | +mips_legitimize_vector_move (enum machine_mode mode, rtx dest, rtx src) | 
|  | +{ | 
|  | +  bool dest_mem, dest_mem_reg; | 
|  | +  bool src_mem, src_mem_reg; | 
|  | + | 
|  | +  dest_mem = (GET_CODE(dest) == MEM); | 
|  | +  dest_mem_reg = dest_mem && GET_CODE(XEXP(dest, 0)) == REG; | 
|  | + | 
|  | +  src_mem = (GET_CODE(src) == MEM); | 
|  | +  src_mem_reg = src_mem && GET_CODE(XEXP(src, 0)) == REG; | 
|  | + | 
|  | +  if (dest_mem && !dest_mem_reg) | 
|  | +  { | 
|  | +    rtx add, scratch, base, move; | 
|  | +    HOST_WIDE_INT offset; | 
|  | + | 
|  | +    mips_split_plus(XEXP(dest,0), &base, &offset); | 
|  | + | 
|  | +    scratch = gen_reg_rtx(Pmode); | 
|  | +    add = gen_add3_insn(scratch, base, GEN_INT(offset)); | 
|  | +    emit_insn(add); | 
|  | + | 
|  | +    switch (mode) | 
|  | +    { | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(DI): | 
|  | +        move = gen_movv32di(gen_rtx_MEM(mode, scratch), src); | 
|  | +        break; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(SI): | 
|  | +        move = gen_movv32si(gen_rtx_MEM(mode, scratch), src); | 
|  | +        break; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(HI): | 
|  | +        move = gen_movv32hi(gen_rtx_MEM(mode, scratch), src); | 
|  | +        break; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(QI): | 
|  | +        move = gen_movv32qi(gen_rtx_MEM(mode, scratch), src); | 
|  | +        break; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(DF): | 
|  | +        move = gen_movv32df(gen_rtx_MEM(mode, scratch), src); | 
|  | +        break; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(SF): | 
|  | +        move = gen_movv32sf(gen_rtx_MEM(mode, scratch), src); | 
|  | +        break; | 
|  | +      default: | 
|  | +        gcc_unreachable(); | 
|  | +    } | 
|  | + | 
|  | +    emit_insn(move); | 
|  | + | 
|  | +    return true; | 
|  | +  } | 
|  | + | 
|  | +  if (src_mem && !src_mem_reg) | 
|  | +  { | 
|  | +    rtx add, scratch, base, move; | 
|  | +    HOST_WIDE_INT offset; | 
|  | + | 
|  | +    mips_split_plus(XEXP(src,0), &base, &offset); | 
|  | + | 
|  | +    scratch = gen_reg_rtx(Pmode); | 
|  | +    add = gen_add3_insn(scratch, base, GEN_INT(offset)); | 
|  | +    emit_insn(add); | 
|  | + | 
|  | +    switch (mode) | 
|  | +    { | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(DI): | 
|  | +        move = gen_movv32di(dest, gen_rtx_MEM(mode, scratch)); | 
|  | +        break; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(SI): | 
|  | +        move = gen_movv32si(dest, gen_rtx_MEM(mode, scratch)); | 
|  | +        break; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(HI): | 
|  | +        move = gen_movv32hi(dest, gen_rtx_MEM(mode, scratch)); | 
|  | +        break; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(QI): | 
|  | +        move = gen_movv32qi(dest, gen_rtx_MEM(mode, scratch)); | 
|  | +        break; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(DF): | 
|  | +        move = gen_movv32df(dest, gen_rtx_MEM(mode, scratch)); | 
|  | +        break; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(SF): | 
|  | +        move = gen_movv32sf(dest, gen_rtx_MEM(mode, scratch)); | 
|  | +        break; | 
|  | +      default: | 
|  | +        gcc_unreachable(); | 
|  | +    } | 
|  | + | 
|  | +    emit_insn(move); | 
|  | + | 
|  | +    return true; | 
|  | +  } | 
|  | + | 
|  | +  return false; | 
|  | +} | 
|  | + | 
|  | +/* The cost of loading values from the constant pool.  It should be | 
|  | +   larger than the cost of any constant we want to synthesize inline.  */ | 
|  | +#define CONSTANT_POOL_COST COSTS_N_INSNS (8) | 
|  | + | 
|  | +/* Return true if there is an instruction that implements CODE and if | 
|  | +   that instruction accepts X as an immediate operand.  */ | 
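|  | +/* For example (illustrative), a LE comparison against the immediate 100 | 
|  | +   is accepted because it can be rewritten as an SLTI against 101.  */ | 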
|  | + | 
|  | +static int | 
|  | +mips_immediate_operand_p (int code, HOST_WIDE_INT x) | 
|  | +{ | 
|  | +  switch (code) | 
|  | +    { | 
|  | +    case ASHIFT: | 
|  | +    case ASHIFTRT: | 
|  | +    case LSHIFTRT: | 
|  | +      /* All shift counts are truncated to a valid constant.  */ | 
|  | +      return true; | 
|  | + | 
|  | +    case AND: | 
|  | +    case IOR: | 
|  | +    case XOR: | 
|  | +    case PLUS: | 
|  | +    case LT: | 
|  | +    case LTU: | 
|  | +      /* These instructions take 12-bit signed immediates.  */ | 
|  | +      return SMALL_OPERAND (x); | 
|  | + | 
|  | +    case LE: | 
|  | +      /* We add 1 to the immediate and use SLT.  */ | 
|  | +      return SMALL_OPERAND (x + 1); | 
|  | + | 
|  | +    case LEU: | 
|  | +      /* Likewise SLTU, but reject the always-true case.  */ | 
|  | +      return SMALL_OPERAND (x + 1) && x + 1 != 0; | 
|  | + | 
|  | +    case GE: | 
|  | +    case GEU: | 
|  | +      /* We can emulate an immediate of 1 by using GT/GTU against x0. */ | 
|  | +      return x == 1; | 
|  | + | 
|  | +    default: | 
|  | +      /* By default assume that x0 can be used for 0.  */ | 
|  | +      return x == 0; | 
|  | +    } | 
|  | +} | 
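|  | + | 
|  | +/* For example, (LE, 2046) is accepted because we can add 1 and use SLT | 
|  | +   with 2047, whereas (LE, 2047) is rejected because 2048 does not fit | 
|  | +   in a 12-bit signed immediate and must be forced into a register.  */ | 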
|  | + | 
|  | +/* Return the cost of binary operation X, given that the instruction | 
|  | +   sequence for a word-sized or smaller operation has cost SINGLE_COST | 
|  | +   and that the sequence of a double-word operation has cost DOUBLE_COST. | 
|  | +   If SPEED is true, optimize for speed otherwise optimize for size.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_binary_cost (rtx x, int single_cost, int double_cost, bool speed) | 
|  | +{ | 
|  | +  if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2) | 
|  | +    single_cost = double_cost; | 
|  | + | 
|  | +  return (single_cost | 
|  | +	  + rtx_cost (XEXP (x, 0), SET, speed) | 
|  | +	  + rtx_cost (XEXP (x, 1), GET_CODE (x), speed)); | 
|  | +} | 
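|  | + | 
|  | +/* For example, a DImode AND on a 32-bit target is charged the double-word | 
|  | +   cost of two single-word operations, plus the cost of its two operands.  */ | 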
|  | + | 
|  | +/* Return the cost of floating-point multiplications of mode MODE.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_fp_mult_cost (enum machine_mode mode) | 
|  | +{ | 
|  | +  return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf; | 
|  | +} | 
|  | + | 
|  | +/* Return the cost of floating-point divisions of mode MODE.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_fp_div_cost (enum machine_mode mode) | 
|  | +{ | 
|  | +  return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf; | 
|  | +} | 
|  | + | 
|  | +/* Return the cost of sign-extending OP to mode MODE, not including the | 
|  | +   cost of OP itself.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_sign_extend_cost (enum machine_mode mode, rtx op) | 
|  | +{ | 
|  | +  if (MEM_P (op)) | 
|  | +    /* Extended loads are as cheap as unextended ones.  */ | 
|  | +    return 0; | 
|  | + | 
|  | +  if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) | 
|  | +    /* A sign extension from SImode to DImode in 64-bit mode is free.  */ | 
|  | +    return 0; | 
|  | + | 
|  | +  /* We need to use a shift left and a shift right.  */ | 
|  | +  return COSTS_N_INSNS (2); | 
|  | +} | 
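|  | + | 
|  | +/* For example, sign-extending an SImode register to DImode on a 64-bit | 
|  | +   target is free, whereas sign-extending a QImode or HImode register | 
|  | +   needs a shift-left/shift-right pair.  */ | 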
|  | + | 
|  | +/* Return the cost of zero-extending OP to mode MODE, not including the | 
|  | +   cost of OP itself.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_zero_extend_cost (enum machine_mode mode, rtx op) | 
|  | +{ | 
|  | +  if (MEM_P (op)) | 
|  | +    /* Extended loads are as cheap as unextended ones.  */ | 
|  | +    return 0; | 
|  | + | 
|  | +  if ((TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode) | 
|  | +      || ((mode == DImode || mode == SImode) && GET_MODE (op) == HImode)) | 
|  | +    /* We need a shift left and a shift right.  */ | 
|  | +    return COSTS_N_INSNS (2); | 
|  | + | 
|  | +  /* We can use ANDI.  */ | 
|  | +  return COSTS_N_INSNS (1); | 
|  | +} | 
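|  | + | 
|  | +/* For example, zero-extending a QImode register costs a single ANDI with | 
|  | +   0xff, whereas zero-extending an HImode register costs two shifts, since | 
|  | +   the 0xffff mask does not fit in a 12-bit immediate.  */ | 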
|  | + | 
|  | +/* Implement TARGET_RTX_COSTS.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_rtx_costs (rtx x, int code, int outer_code, int *total, bool speed) | 
|  | +{ | 
|  | +  enum machine_mode mode = GET_MODE (x); | 
|  | +  bool float_mode_p = FLOAT_MODE_P (mode); | 
|  | +  int cost; | 
|  | +  rtx addr; | 
|  | + | 
|  | +  /* The cost of a COMPARE is hard to define for MIPS.  COMPAREs don't | 
|  | +     appear in the instruction stream, and the cost of a comparison is | 
|  | +     really the cost of the branch or scc condition.  At the time of | 
|  | +     writing, GCC only uses an explicit outer COMPARE code when optabs | 
|  | +     is testing whether a constant is expensive enough to force into a | 
|  | +     register.  We want optabs to pass such constants through the MIPS | 
|  | +     expanders instead, so make all constants very cheap here.  */ | 
|  | +  if (outer_code == COMPARE) | 
|  | +    { | 
|  | +      gcc_assert (CONSTANT_P (x)); | 
|  | +      *total = 0; | 
|  | +      return true; | 
|  | +    } | 
|  | + | 
|  | +  switch (code) | 
|  | +    { | 
|  | +    case CONST_INT: | 
|  | +      /* Treat *clear_upper32-style ANDs as having zero cost in the | 
|  | +	 second operand.  The cost is entirely in the first operand. | 
|  | + | 
|  | +	 ??? This is needed because we would otherwise try to CSE | 
|  | +	 the constant operand.  Although that's the right thing for | 
|  | +	 instructions that continue to be a register operation throughout | 
|  | +	 compilation, it is disastrous for instructions that could | 
|  | +	 later be converted into a memory operation.  */ | 
|  | +      if (TARGET_64BIT | 
|  | +	  && outer_code == AND | 
|  | +	  && UINTVAL (x) == 0xffffffff) | 
|  | +	{ | 
|  | +	  *total = 0; | 
|  | +	  return true; | 
|  | +	} | 
|  | + | 
|  | +      /* When not optimizing for size, we care more about the cost | 
|  | +         of hot code, and hot code is often in a loop.  If a constant | 
|  | +         operand needs to be forced into a register, we will often be | 
|  | +         able to hoist the constant load out of the loop, so the load | 
|  | +         should not contribute to the cost.  */ | 
|  | +      if (speed || mips_immediate_operand_p (outer_code, INTVAL (x))) | 
|  | +        { | 
|  | +          *total = 0; | 
|  | +          return true; | 
|  | +        } | 
|  | +      /* Fall through.  */ | 
|  | + | 
|  | +    case CONST: | 
|  | +    case SYMBOL_REF: | 
|  | +    case LABEL_REF: | 
|  | +    case CONST_DOUBLE: | 
|  | +      cost = mips_const_insns (x); | 
|  | +      if (cost > 0) | 
|  | +	{ | 
|  | +	  /* If the constant is likely to be stored in a GPR, SETs of | 
|  | +	     single-insn constants are as cheap as register sets; we | 
|  | +	     never want to CSE them. | 
|  | + | 
|  | +	     Don't reduce the cost of storing a floating-point zero in | 
|  | +	     FPRs.  If we have a zero in an FPR for other reasons, we | 
|  | +	     can get better cfg-cleanup and delayed-branch results by | 
|  | +	     using it consistently, rather than using $0 sometimes and | 
|  | +	     an FPR at other times.  Also, moves between floating-point | 
|  | +	     registers are sometimes cheaper than (D)MTC1 $0.  */ | 
|  | +	  if (cost == 1 | 
|  | +	      && outer_code == SET | 
|  | +	      && !(float_mode_p && TARGET_HARD_FLOAT)) | 
|  | +	    cost = 0; | 
|  | +	  /* When code loads a constant N>1 times, we rarely want to CSE | 
|  | +	     the constant itself.  It is usually better to have N copies | 
|  | +	     of the last operation in the sequence and one shared copy of | 
|  | +	     the other operations. | 
|  | + | 
|  | +	     Also, if we have a CONST_INT, we don't know whether it is | 
|  | +	     for a word or doubleword operation, so we cannot rely on | 
|  | +	     the result of riscv_build_integer.  */ | 
|  | +	  else if (outer_code == SET || mode == VOIDmode) | 
|  | +	    cost = 1; | 
|  | +	  *total = COSTS_N_INSNS (cost); | 
|  | +	  return true; | 
|  | +	} | 
|  | +      /* The value will need to be fetched from the constant pool.  */ | 
|  | +      *total = CONSTANT_POOL_COST; | 
|  | +      return true; | 
|  | + | 
|  | +    case MEM: | 
|  | +      /* If the address is legitimate, return the number of | 
|  | +	 instructions it needs.  */ | 
|  | +      addr = XEXP (x, 0); | 
|  | +      cost = mips_address_insns (addr, mode, true); | 
|  | +      if (cost > 0) | 
|  | +	{ | 
|  | +	  *total = COSTS_N_INSNS (cost + (speed ? mips_cost->memory_latency : 1)); | 
|  | +	  return true; | 
|  | +	} | 
|  | +      /* Otherwise use the default handling.  */ | 
|  | +      return false; | 
|  | + | 
|  | +    case FFS: | 
|  | +      *total = COSTS_N_INSNS (6); | 
|  | +      return false; | 
|  | + | 
|  | +    case NOT: | 
|  | +      *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1); | 
|  | +      return false; | 
|  | + | 
|  | +    case AND: | 
|  | +      /* Check for a *clear_upper32 pattern and treat it like a zero | 
|  | +	 extension.  See the pattern's comment for details.  */ | 
|  | +      if (TARGET_64BIT | 
|  | +	  && mode == DImode | 
|  | +	  && CONST_INT_P (XEXP (x, 1)) | 
|  | +	  && UINTVAL (XEXP (x, 1)) == 0xffffffff) | 
|  | +	{ | 
|  | +	  *total = (mips_zero_extend_cost (mode, XEXP (x, 0)) | 
|  | +		    + rtx_cost (XEXP (x, 0), SET, speed)); | 
|  | +	  return true; | 
|  | +	} | 
|  | +      /* Fall through.  */ | 
|  | + | 
|  | +    case IOR: | 
|  | +    case XOR: | 
|  | +      /* Double-word operations use two single-word operations.  */ | 
|  | +      *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2), | 
|  | +				 speed); | 
|  | +      return true; | 
|  | + | 
|  | +    case ASHIFT: | 
|  | +    case ASHIFTRT: | 
|  | +    case LSHIFTRT: | 
|  | +    case ROTATE: | 
|  | +    case ROTATERT: | 
|  | +      if (CONSTANT_P (XEXP (x, 1))) | 
|  | +	*total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4), | 
|  | +				   speed); | 
|  | +      else | 
|  | +	*total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12), | 
|  | +				   speed); | 
|  | +      return true; | 
|  | + | 
|  | +    case ABS: | 
|  | +      if (float_mode_p) | 
|  | +        *total = mips_cost->fp_add; | 
|  | +      else | 
|  | +        *total = COSTS_N_INSNS (4); | 
|  | +      return false; | 
|  | + | 
|  | +    case LO_SUM: | 
|  | +      /* Low-part immediates need a single instruction.  */ | 
|  | +      *total = (COSTS_N_INSNS (1) | 
|  | +		+ rtx_cost (XEXP (x, 0), SET, speed)); | 
|  | +      return true; | 
|  | + | 
|  | +    case LT: | 
|  | +    case LTU: | 
|  | +    case LE: | 
|  | +    case LEU: | 
|  | +    case GT: | 
|  | +    case GTU: | 
|  | +    case GE: | 
|  | +    case GEU: | 
|  | +    case EQ: | 
|  | +    case NE: | 
|  | +    case UNORDERED: | 
|  | +    case LTGT: | 
|  | +      /* Branch comparisons have VOIDmode, so use the first operand's | 
|  | +	 mode instead.  */ | 
|  | +      mode = GET_MODE (XEXP (x, 0)); | 
|  | +      if (FLOAT_MODE_P (mode)) | 
|  | +	{ | 
|  | +	  *total = mips_cost->fp_add; | 
|  | +	  return false; | 
|  | +	} | 
|  | +      *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4), | 
|  | +				 speed); | 
|  | +      return true; | 
|  | + | 
|  | +    case MINUS: | 
|  | +      if (float_mode_p | 
|  | +	  && !HONOR_NANS (mode) | 
|  | +	  && !HONOR_SIGNED_ZEROS (mode)) | 
|  | +	{ | 
|  | +	  /* See if we can use NMADD or NMSUB.  See mips.md for the | 
|  | +	     associated patterns.  */ | 
|  | +	  rtx op0 = XEXP (x, 0); | 
|  | +	  rtx op1 = XEXP (x, 1); | 
|  | +	  if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG) | 
|  | +	    { | 
|  | +	      *total = (mips_fp_mult_cost (mode) | 
|  | +			+ rtx_cost (XEXP (XEXP (op0, 0), 0), SET, speed) | 
|  | +			+ rtx_cost (XEXP (op0, 1), SET, speed) | 
|  | +			+ rtx_cost (op1, SET, speed)); | 
|  | +	      return true; | 
|  | +	    } | 
|  | +	  if (GET_CODE (op1) == MULT) | 
|  | +	    { | 
|  | +	      *total = (mips_fp_mult_cost (mode) | 
|  | +			+ rtx_cost (op0, SET, speed) | 
|  | +			+ rtx_cost (XEXP (op1, 0), SET, speed) | 
|  | +			+ rtx_cost (XEXP (op1, 1), SET, speed)); | 
|  | +	      return true; | 
|  | +	    } | 
|  | +	} | 
|  | +      /* Fall through.  */ | 
|  | + | 
|  | +    case PLUS: | 
|  | +      if (float_mode_p) | 
|  | +	{ | 
|  | +	  /* If this is part of a MADD or MSUB, treat the PLUS as | 
|  | +	     being free.  */ | 
|  | +	  if (GET_CODE (XEXP (x, 0)) == MULT) | 
|  | +	    *total = 0; | 
|  | +	  else | 
|  | +	    *total = mips_cost->fp_add; | 
|  | +	  return false; | 
|  | +	} | 
|  | + | 
|  | +      /* Double-word operations require three single-word operations and | 
|  | +	 an SLTU.  */ | 
|  | +      *total = mips_binary_cost (x, COSTS_N_INSNS (1), | 
|  | +				 COSTS_N_INSNS (4), | 
|  | +				 speed); | 
|  | +      return true; | 
|  | + | 
|  | +    case NEG: | 
|  | +      if (float_mode_p | 
|  | +	  && !HONOR_NANS (mode) | 
|  | +	  && HONOR_SIGNED_ZEROS (mode)) | 
|  | +	{ | 
|  | +	  /* See if we can use NMADD or NMSUB.  See mips.md for the | 
|  | +	     associated patterns.  */ | 
|  | +	  rtx op = XEXP (x, 0); | 
|  | +	  if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS) | 
|  | +	      && GET_CODE (XEXP (op, 0)) == MULT) | 
|  | +	    { | 
|  | +	      *total = (mips_fp_mult_cost (mode) | 
|  | +			+ rtx_cost (XEXP (XEXP (op, 0), 0), SET, speed) | 
|  | +			+ rtx_cost (XEXP (XEXP (op, 0), 1), SET, speed) | 
|  | +			+ rtx_cost (XEXP (op, 1), SET, speed)); | 
|  | +	      return true; | 
|  | +	    } | 
|  | +	} | 
|  | + | 
|  | +      if (float_mode_p) | 
|  | +	*total = mips_cost->fp_add; | 
|  | +      else | 
|  | +	*total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1); | 
|  | +      return false; | 
|  | + | 
|  | +    case MULT: | 
|  | +      if (float_mode_p) | 
|  | +	*total = mips_fp_mult_cost (mode); | 
|  | +      else if (mode == DImode && !TARGET_64BIT) | 
|  | +	/* We use a MUL and a MULH[[S]U]. */ | 
|  | +	*total = mips_cost->int_mult_si * 2; | 
|  | +      else if (!speed) | 
|  | +	*total = 1; | 
|  | +      else if (mode == DImode) | 
|  | +	*total = mips_cost->int_mult_di; | 
|  | +      else | 
|  | +	*total = mips_cost->int_mult_si; | 
|  | +      return false; | 
|  | + | 
|  | +    case DIV: | 
|  | +      /* Check for a reciprocal.  */ | 
|  | +      if (float_mode_p | 
|  | +	  && flag_unsafe_math_optimizations | 
|  | +	  && XEXP (x, 0) == CONST1_RTX (mode)) | 
|  | +	{ | 
|  | +	  if (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT) | 
|  | +	    /* An rsqrt<mode>a or rsqrt<mode>b pattern.  Count the | 
|  | +	       division as being free.  */ | 
|  | +	    *total = rtx_cost (XEXP (x, 1), SET, speed); | 
|  | +	  else | 
|  | +	    *total = (mips_fp_div_cost (mode) | 
|  | +		      + rtx_cost (XEXP (x, 1), SET, speed)); | 
|  | +	  return true; | 
|  | +	} | 
|  | +      /* Fall through.  */ | 
|  | + | 
|  | +    case SQRT: | 
|  | +    case MOD: | 
|  | +      if (float_mode_p) | 
|  | +	{ | 
|  | +	  *total = mips_fp_div_cost (mode); | 
|  | +	  return false; | 
|  | +	} | 
|  | +      /* Fall through.  */ | 
|  | + | 
|  | +    case UDIV: | 
|  | +    case UMOD: | 
|  | +      if (!speed) | 
|  | +	*total = 1; | 
|  | +      else if (mode == DImode) | 
|  | +        *total = mips_cost->int_div_di; | 
|  | +      else | 
|  | +	*total = mips_cost->int_div_si; | 
|  | +      return false; | 
|  | + | 
|  | +    case SIGN_EXTEND: | 
|  | +      *total = mips_sign_extend_cost (mode, XEXP (x, 0)); | 
|  | +      return false; | 
|  | + | 
|  | +    case ZERO_EXTEND: | 
|  | +      *total = mips_zero_extend_cost (mode, XEXP (x, 0)); | 
|  | +      return false; | 
|  | + | 
|  | +    case FLOAT: | 
|  | +    case UNSIGNED_FLOAT: | 
|  | +    case FIX: | 
|  | +    case FLOAT_EXTEND: | 
|  | +    case FLOAT_TRUNCATE: | 
|  | +      *total = mips_cost->fp_add; | 
|  | +      return false; | 
|  | + | 
|  | +    default: | 
|  | +      return false; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_ADDRESS_COST.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_address_cost (rtx addr, bool speed ATTRIBUTE_UNUSED) | 
|  | +{ | 
|  | +  return mips_address_insns (addr, SImode, false); | 
|  | +} | 
|  | + | 
|  | +/* Return one word of double-word value OP, taking into account the fixed | 
|  | +   endianness of certain registers.  HIGH_P is true to select the high part, | 
|  | +   false to select the low part.  */ | 
|  | + | 
|  | +rtx | 
|  | +mips_subword (rtx op, bool high_p) | 
|  | +{ | 
|  | +  unsigned int byte, offset; | 
|  | +  enum machine_mode mode; | 
|  | + | 
|  | +  mode = GET_MODE (op); | 
|  | +  if (mode == VOIDmode) | 
|  | +    mode = TARGET_64BIT ? TImode : DImode; | 
|  | + | 
|  | +  if (TARGET_BIG_ENDIAN ? !high_p : high_p) | 
|  | +    byte = UNITS_PER_WORD; | 
|  | +  else | 
|  | +    byte = 0; | 
|  | + | 
|  | +  if (FP_REG_RTX_P (op)) | 
|  | +    { | 
|  | +      /* Paired FPRs are always ordered little-endian.  */ | 
|  | +      offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0); | 
|  | +      return gen_rtx_REG (word_mode, REGNO (op) + offset); | 
|  | +    } | 
|  | + | 
|  | +  if (MEM_P (op)) | 
|  | +    return adjust_address (op, word_mode, byte); | 
|  | + | 
|  | +  return simplify_gen_subreg (word_mode, op, mode, byte); | 
|  | +} | 
|  | + | 
|  | +/* Return true if a 64-bit move from SRC to DEST should be split into two.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_split_64bit_move_p (rtx dest, rtx src) | 
|  | +{ | 
|  | +  /* All 64b moves are legal in 64b mode.  All 64b FPR <-> FPR and | 
|  | +     FPR <-> MEM moves are legal in 32b mode, too.  Although | 
|  | +     FPR <-> GPR moves are not available in general in 32b mode, | 
|  | +     we can at least load 0 into an FPR with fcvt.d.w fpr, x0. */ | 
|  | +  return !(TARGET_64BIT | 
|  | +	   || (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest)) | 
|  | +	   || (FP_REG_RTX_P (dest) && MEM_P (src)) | 
|  | +	   || (FP_REG_RTX_P (src) && MEM_P (dest)) | 
|  | +	   || (FP_REG_RTX_P(dest) && src == CONST0_RTX(GET_MODE(src)))); | 
|  | +} | 
|  | + | 
|  | +/* Split a doubleword move from SRC to DEST.  On 32-bit targets, | 
|  | +   this function handles 64-bit moves for which mips_split_64bit_move_p | 
|  | +   holds.  For 64-bit targets, this function handles 128-bit moves.  */ | 
|  | + | 
|  | +void | 
|  | +mips_split_doubleword_move (rtx dest, rtx src) | 
|  | +{ | 
|  | +  rtx low_dest; | 
|  | + | 
|  | +  /* The operation can be split into two normal moves.  Decide in | 
|  | +     which order to do them.  */ | 
|  | +  low_dest = mips_subword (dest, false); | 
|  | +  if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src)) | 
|  | +    { | 
|  | +      mips_emit_move (mips_subword (dest, true), mips_subword (src, true)); | 
|  | +      mips_emit_move (low_dest, mips_subword (src, false)); | 
|  | +    } | 
|  | +  else | 
|  | +    { | 
|  | +      mips_emit_move (low_dest, mips_subword (src, false)); | 
|  | +      mips_emit_move (mips_subword (dest, true), mips_subword (src, true)); | 
|  | +    } | 
|  | +} | 
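|  | + | 
|  | +/* For example, when the low word of DEST overlaps SRC, the high halves | 
|  | +   are moved first so that the overlapping part of SRC is read before it | 
|  | +   is overwritten.  */ | 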
|  | + | 
|  | +/* Return the appropriate instructions to move SRC into DEST.  Assume | 
|  | +   that SRC is operand 1 and DEST is operand 0.  */ | 
|  | + | 
|  | +const char * | 
|  | +mips_output_move (rtx dest, rtx src) | 
|  | +{ | 
|  | +  enum rtx_code dest_code, src_code; | 
|  | +  enum machine_mode mode; | 
|  | +  enum mips_symbol_type symbol_type; | 
|  | +  bool dbl_p; | 
|  | + | 
|  | +  dest_code = GET_CODE (dest); | 
|  | +  src_code = GET_CODE (src); | 
|  | +  mode = GET_MODE (dest); | 
|  | +  dbl_p = (GET_MODE_SIZE (mode) == 8); | 
|  | + | 
|  | +  if (dbl_p && mips_split_64bit_move_p (dest, src)) | 
|  | +    return "#"; | 
|  | + | 
|  | +  if ((src_code == REG && GP_REG_P (REGNO (src))) | 
|  | +      || (src == CONST0_RTX (mode))) | 
|  | +    { | 
|  | +      if (dest_code == REG) | 
|  | +	{ | 
|  | +	  if (GP_REG_P (REGNO (dest))) | 
|  | +	    return "move\t%0,%z1"; | 
|  | + | 
|  | +	  if (FP_REG_P (REGNO (dest))) | 
|  | +	    { | 
|  | +	      if (!dbl_p) | 
|  | +		return "mxtf.s\t%0,%z1"; | 
|  | +	      if (TARGET_64BIT) | 
|  | +		return "mxtf.d\t%0,%z1"; | 
|  | +	      /* In RV32, we can emulate mxtf.d %0, x0 using fcvt.d.w.  */ | 
|  | +	      gcc_assert (src == CONST0_RTX (mode)); | 
|  | +	      return "fcvt.d.w\t%0,x0"; | 
|  | +	    } | 
|  | +	} | 
|  | +      if (dest_code == MEM) | 
|  | +	switch (GET_MODE_SIZE (mode)) | 
|  | +	  { | 
|  | +	  case 1: return "sb\t%z1,%0"; | 
|  | +	  case 2: return "sh\t%z1,%0"; | 
|  | +	  case 4: return "sw\t%z1,%0"; | 
|  | +	  case 8: return "sd\t%z1,%0"; | 
|  | +	  } | 
|  | +    } | 
|  | +  if (dest_code == REG && GP_REG_P (REGNO (dest))) | 
|  | +    { | 
|  | +      if (src_code == REG) | 
|  | +	{ | 
|  | +	  if (FP_REG_P (REGNO (src))) | 
|  | +	    return dbl_p ? "mftx.d\t%0,%1" : "mftx.s\t%0,%1"; | 
|  | +	} | 
|  | + | 
|  | +      if (src_code == MEM) | 
|  | +	switch (GET_MODE_SIZE (mode)) | 
|  | +	  { | 
|  | +	  case 1: return "lbu\t%0,%1"; | 
|  | +	  case 2: return "lhu\t%0,%1"; | 
|  | +	  case 4: return "lw\t%0,%1"; | 
|  | +	  case 8: return "ld\t%0,%1"; | 
|  | +	  } | 
|  | + | 
|  | +      if (src_code == CONST_INT) | 
|  | +	return "li\t%0,%1\t\t\t# %X1"; | 
|  | + | 
|  | +      if (src_code == HIGH) | 
|  | +	return "lui\t%0,%h1"; | 
|  | + | 
|  | +      if (mips_symbolic_constant_p (src, &symbol_type) | 
|  | +	  && mips_lo_relocs[symbol_type] != 0) | 
|  | +	{ | 
|  | +	  /* A signed 12-bit constant formed by applying a relocation | 
|  | +	     operator to a symbolic address.  */ | 
|  | +	  gcc_assert (!mips_split_p[symbol_type]); | 
|  | +	  return "li\t%0,%R1"; | 
|  | +	} | 
|  | + | 
|  | +      if (symbolic_operand (src, VOIDmode)) | 
|  | +	{ | 
|  | +	  gcc_assert (flag_pic); | 
|  | +	  return "la\t%0,%1"; | 
|  | +	} | 
|  | +    } | 
|  | +  if (src_code == REG && FP_REG_P (REGNO (src))) | 
|  | +    { | 
|  | +      if (dest_code == REG && FP_REG_P (REGNO (dest))) | 
|  | +	return dbl_p ? "fsgnj.d\t%0,%1,%1" : "fsgnj.s\t%0,%1,%1"; | 
|  | + | 
|  | +      if (dest_code == MEM) | 
|  | +	return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0"; | 
|  | +    } | 
|  | +  if (dest_code == REG && FP_REG_P (REGNO (dest))) | 
|  | +    { | 
|  | +      if (src_code == MEM) | 
|  | +	return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1"; | 
|  | +    } | 
|  | +  gcc_unreachable (); | 
|  | +} | 
|  | + | 
|  | +/* Return true if CMP1 is a suitable second operand for integer ordering | 
|  | +   test CODE.  See also the *sCC patterns in mips.md.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_int_order_operand_ok_p (enum rtx_code code, rtx cmp1) | 
|  | +{ | 
|  | +  switch (code) | 
|  | +    { | 
|  | +    case GT: | 
|  | +    case GTU: | 
|  | +      return reg_or_0_operand (cmp1, VOIDmode); | 
|  | + | 
|  | +    case GE: | 
|  | +    case GEU: | 
|  | +      return cmp1 == const1_rtx; | 
|  | + | 
|  | +    case LT: | 
|  | +    case LTU: | 
|  | +      return arith_operand (cmp1, VOIDmode); | 
|  | + | 
|  | +    case LE: | 
|  | +      return sle_operand (cmp1, VOIDmode); | 
|  | + | 
|  | +    case LEU: | 
|  | +      return sleu_operand (cmp1, VOIDmode); | 
|  | + | 
|  | +    default: | 
|  | +      gcc_unreachable (); | 
|  | +    } | 
|  | +} | 
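|  | + | 
|  | +/* For example, (GE x 1) is accepted because it is equivalent to (GT x 0), | 
|  | +   which can compare against the hard-wired zero register.  */ | 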
|  | + | 
|  | +/* Return true if *CMP1 (of mode MODE) is a valid second operand for | 
|  | +   integer ordering test *CODE, or if an equivalent combination can | 
|  | +   be formed by adjusting *CODE and *CMP1.  When returning true, update | 
|  | +   *CODE and *CMP1 with the chosen code and operand, otherwise leave | 
|  | +   them alone.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1, | 
|  | +				  enum machine_mode mode) | 
|  | +{ | 
|  | +  HOST_WIDE_INT plus_one; | 
|  | + | 
|  | +  if (mips_int_order_operand_ok_p (*code, *cmp1)) | 
|  | +    return true; | 
|  | + | 
|  | +  if (CONST_INT_P (*cmp1)) | 
|  | +    switch (*code) | 
|  | +      { | 
|  | +      case LE: | 
|  | +	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode); | 
|  | +	if (INTVAL (*cmp1) < plus_one) | 
|  | +	  { | 
|  | +	    *code = LT; | 
|  | +	    *cmp1 = force_reg (mode, GEN_INT (plus_one)); | 
|  | +	    return true; | 
|  | +	  } | 
|  | +	break; | 
|  | + | 
|  | +      case LEU: | 
|  | +	plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode); | 
|  | +	if (plus_one != 0) | 
|  | +	  { | 
|  | +	    *code = LTU; | 
|  | +	    *cmp1 = force_reg (mode, GEN_INT (plus_one)); | 
|  | +	    return true; | 
|  | +	  } | 
|  | +	break; | 
|  | + | 
|  | +      default: | 
|  | +	break; | 
|  | +      } | 
|  | +  return false; | 
|  | +} | 
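|  | + | 
|  | +/* For example, if (LE x 2047) cannot be handled directly, it is rewritten | 
|  | +   as (LT x 2048) with 2048 forced into a register.  */ | 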
|  | + | 
|  | +/* Compare CMP0 and CMP1 using ordering test CODE and store the result | 
|  | +   in TARGET.  CMP0 and TARGET are register_operands.  If INVERT_PTR | 
|  | +   is nonnull, it's OK to set TARGET to the inverse of the result and | 
|  | +   flip *INVERT_PTR instead.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_emit_int_order_test (enum rtx_code code, bool *invert_ptr, | 
|  | +			  rtx target, rtx cmp0, rtx cmp1) | 
|  | +{ | 
|  | +  enum machine_mode mode; | 
|  | + | 
|  | +  /* First see if there is a MIPS instruction that can do this operation. | 
|  | +     If not, try doing the same for the inverse operation.  If that also | 
|  | +     fails, force CMP1 into a register and try again.  */ | 
|  | +  mode = GET_MODE (cmp0); | 
|  | +  if (mips_canonicalize_int_order_test (&code, &cmp1, mode)) | 
|  | +    mips_emit_binary (code, target, cmp0, cmp1); | 
|  | +  else | 
|  | +    { | 
|  | +      enum rtx_code inv_code = reverse_condition (code); | 
|  | +      if (!mips_canonicalize_int_order_test (&inv_code, &cmp1, mode)) | 
|  | +	{ | 
|  | +	  cmp1 = force_reg (mode, cmp1); | 
|  | +	  mips_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1); | 
|  | +	} | 
|  | +      else if (invert_ptr == 0) | 
|  | +	{ | 
|  | +	  rtx inv_target; | 
|  | + | 
|  | +	  inv_target = mips_force_binary (GET_MODE (target), | 
|  | +					  inv_code, cmp0, cmp1); | 
|  | +	  mips_emit_binary (XOR, target, inv_target, const1_rtx); | 
|  | +	} | 
|  | +      else | 
|  | +	{ | 
|  | +	  *invert_ptr = !*invert_ptr; | 
|  | +	  mips_emit_binary (inv_code, target, cmp0, cmp1); | 
|  | +	} | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Return a register that is zero iff CMP0 and CMP1 are equal. | 
|  | +   The register will have the same mode as CMP0.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_zero_if_equal (rtx cmp0, rtx cmp1) | 
|  | +{ | 
|  | +  if (cmp1 == const0_rtx) | 
|  | +    return cmp0; | 
|  | + | 
|  | +  return expand_binop (GET_MODE (cmp0), sub_optab, | 
|  | +		       cmp0, cmp1, 0, 0, OPTAB_DIRECT); | 
|  | +} | 
|  | + | 
|  | +/* Convert *CODE into a code that can be used in a floating-point | 
|  | +   scc instruction (C.cond.fmt).  Return true if the values of | 
|  | +   the condition code registers will be inverted, with 0 indicating | 
|  | +   that the condition holds.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_reversed_fp_cond (enum rtx_code *code) | 
|  | +{ | 
|  | +  switch (*code) | 
|  | +    { | 
|  | +    case NE: | 
|  | +    case LTGT: | 
|  | +    case ORDERED: | 
|  | +      *code = reverse_condition_maybe_unordered (*code); | 
|  | +      return true; | 
|  | + | 
|  | +    default: | 
|  | +      return false; | 
|  | +    } | 
|  | +} | 
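|  | + | 
|  | +/* For example, an NE comparison is emitted as its EQ counterpart, and the | 
|  | +   branch or conditional move then tests that the comparison result is | 
|  | +   zero.  */ | 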
|  | + | 
|  | +/* Convert a comparison into something that can be used in a branch or | 
|  | +   conditional move.  On entry, *OP0 and *OP1 are the values being | 
|  | +   compared and *CODE is the code used to compare them. | 
|  | + | 
|  | +   Update *CODE, *OP0 and *OP1 so that they describe the final comparison. | 
|  | +   If NEED_EQ_NE_P, then only EQ or NE comparisons against zero are possible, | 
|  | +   otherwise any standard branch condition can be used.  The standard branch | 
|  | +   conditions are: | 
|  | + | 
|  | +      - EQ or NE between two registers. | 
|  | +      - any comparison between a register and zero.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p) | 
|  | +{ | 
|  | +  rtx cmp_op0 = *op0; | 
|  | +  rtx cmp_op1 = *op1; | 
|  | + | 
|  | +  if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT) | 
|  | +    { | 
|  | +      if (!need_eq_ne_p && *op1 == const0_rtx) | 
|  | +	; | 
|  | +      else if (*code == EQ || *code == NE) | 
|  | +	{ | 
|  | +	  if (need_eq_ne_p) | 
|  | +	    { | 
|  | +	      *op0 = mips_zero_if_equal (cmp_op0, cmp_op1); | 
|  | +	      *op1 = const0_rtx; | 
|  | +	    } | 
|  | +	  else | 
|  | +	    *op1 = force_reg (GET_MODE (cmp_op0), cmp_op1); | 
|  | +	} | 
|  | +      else | 
|  | +	{ | 
|  | +	  /* The comparison needs a separate scc instruction.  Store the | 
|  | +	     result of the scc in *OP0 and compare it against zero.  */ | 
|  | +	  bool invert = false; | 
|  | +	  *op0 = gen_reg_rtx (GET_MODE (cmp_op0)); | 
|  | +	  mips_emit_int_order_test (*code, &invert, *op0, cmp_op0, cmp_op1); | 
|  | +	  *code = (invert ? EQ : NE); | 
|  | +	  *op1 = const0_rtx; | 
|  | +	} | 
|  | +    } | 
|  | +  else | 
|  | +    { | 
|  | +      enum rtx_code cmp_code; | 
|  | + | 
|  | +      /* Floating-point tests use a separate C.cond.fmt comparison to | 
|  | +	 set a condition code register.  The branch or conditional move | 
|  | +	 will then compare that register against zero. | 
|  | + | 
|  | +	 Set CMP_CODE to the code of the comparison instruction and | 
|  | +	 *CODE to the code that the branch or move should use.  */ | 
|  | +      cmp_code = *code; | 
|  | +      *code = mips_reversed_fp_cond (&cmp_code) ? EQ : NE; | 
|  | +      *op0 = gen_reg_rtx (SImode); | 
|  | +      *op1 = const0_rtx; | 
|  | +      mips_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1); | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2] | 
|  | +   and OPERANDS[3].  Store the result in OPERANDS[0]. | 
|  | + | 
|  | +   On 64-bit targets, the mode of the comparison and target will always be | 
|  | +   SImode, thus possibly narrower than that of the comparison's operands.  */ | 
|  | + | 
|  | +void | 
|  | +mips_expand_scc (rtx operands[]) | 
|  | +{ | 
|  | +  rtx target = operands[0]; | 
|  | +  enum rtx_code code = GET_CODE (operands[1]); | 
|  | +  rtx op0 = operands[2]; | 
|  | +  rtx op1 = operands[3]; | 
|  | + | 
|  | +  gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT); | 
|  | + | 
|  | +  if (code == EQ || code == NE) | 
|  | +    { | 
|  | +      rtx zie = mips_zero_if_equal (op0, op1); | 
|  | +      mips_emit_binary (code, target, zie, const0_rtx); | 
|  | +    } | 
|  | +  else | 
|  | +    mips_emit_int_order_test (code, 0, target, op0, op1); | 
|  | +} | 
|  | + | 
|  | +/* Compare OPERANDS[1] with OPERANDS[2] using comparison code | 
|  | +   CODE and jump to OPERANDS[3] if the condition holds.  */ | 
|  | + | 
|  | +void | 
|  | +mips_expand_conditional_branch (rtx *operands) | 
|  | +{ | 
|  | +  enum rtx_code code = GET_CODE (operands[0]); | 
|  | +  rtx op0 = operands[1]; | 
|  | +  rtx op1 = operands[2]; | 
|  | +  rtx condition; | 
|  | + | 
|  | +  mips_emit_compare (&code, &op0, &op1, false); | 
|  | +  condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1); | 
|  | +  emit_jump_insn (gen_condjump (condition, operands[3])); | 
|  | +} | 
|  | + | 
|  | +/* Perform the comparison in OPERANDS[1].  Move OPERANDS[2] into OPERANDS[0] | 
|  | +   if the condition holds, otherwise move OPERANDS[3] into OPERANDS[0].  */ | 
|  | + | 
|  | +void | 
|  | +mips_expand_conditional_move (rtx *operands) | 
|  | +{ | 
|  | +  rtx cond; | 
|  | +  enum rtx_code code = GET_CODE (operands[1]); | 
|  | +  rtx op0 = XEXP (operands[1], 0); | 
|  | +  rtx op1 = XEXP (operands[1], 1); | 
|  | + | 
|  | +  mips_emit_compare (&code, &op0, &op1, true); | 
|  | +  cond = gen_rtx_fmt_ee (code, GET_MODE (op0), op0, op1); | 
|  | +  emit_insn (gen_rtx_SET (VOIDmode, operands[0], | 
|  | +			  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]), cond, | 
|  | +						operands[2], operands[3]))); | 
|  | +} | 
|  | + | 
|  | +/* Initialize *CUM for a call to a function of type FNTYPE.  */ | 
|  | + | 
|  | +void | 
|  | +mips_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype ATTRIBUTE_UNUSED) | 
|  | +{ | 
|  | +  memset (cum, 0, sizeof (*cum)); | 
|  | +} | 
|  | + | 
|  | +/* Fill INFO with information about a single argument.  CUM is the | 
|  | +   cumulative state for earlier arguments.  MODE is the mode of this | 
|  | +   argument and TYPE is its type (if known).  NAMED is true if this | 
|  | +   is a named (fixed) argument rather than a variable one.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_get_arg_info (struct mips_arg_info *info, const CUMULATIVE_ARGS *cum, | 
|  | +		   enum machine_mode mode, const_tree type, bool named) | 
|  | +{ | 
|  | +  bool doubleword_aligned_p; | 
|  | +  unsigned int num_bytes, num_words, max_regs; | 
|  | + | 
|  | +  /* Work out the size of the argument.  */ | 
|  | +  num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode); | 
|  | +  num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD; | 
|  | + | 
|  | +  /* Scalar, complex and vector floating-point types are passed in | 
|  | +     floating-point registers, as long as this is a named rather | 
|  | +     than a variable argument.  */ | 
|  | +  info->fpr_p = (named | 
|  | +		 && (type == 0 || FLOAT_TYPE_P (type)) | 
|  | +		 && (GET_MODE_CLASS (mode) == MODE_FLOAT | 
|  | +		     || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT | 
|  | +		     || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT) | 
|  | +		 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE); | 
|  | + | 
|  | +  /* ??? According to the ABI documentation, the real and imaginary | 
|  | +     parts of complex floats should be passed in individual registers. | 
|  | +     The real and imaginary parts of stack arguments are supposed | 
|  | +     to be contiguous and there should be an extra word of padding | 
|  | +     at the end. | 
|  | + | 
|  | +     This has two problems.  First, it makes it impossible to use a | 
|  | +     single "void *" va_list type, since register and stack arguments | 
|  | +     are passed differently.  (At the time of writing, MIPSpro cannot | 
|  | +     handle complex float varargs correctly.)  Second, it's unclear | 
|  | +     what should happen when there is only one register free. | 
|  | + | 
|  | +     For now, we assume that named complex floats should go into FPRs | 
|  | +     if there are two FPRs free, otherwise they should be passed in the | 
|  | +     same way as a struct containing two floats.  */ | 
|  | +  if (info->fpr_p | 
|  | +      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT | 
|  | +      && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE) | 
|  | +    { | 
|  | +      if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1) | 
|  | +        info->fpr_p = false; | 
|  | +      else | 
|  | +        num_words = 2; | 
|  | +    } | 
|  | + | 
|  | +  /* See whether the argument has doubleword alignment.  */ | 
|  | +  doubleword_aligned_p = (mips_function_arg_boundary (mode, type) | 
|  | +			  > BITS_PER_WORD); | 
|  | + | 
|  | +  /* Set REG_OFFSET to the register count we're interested in. | 
|  | +     The EABI allocates the floating-point registers separately, | 
|  | +     but the other ABIs allocate them like integer registers.  */ | 
|  | +  info->reg_offset = cum->num_gprs; | 
|  | + | 
|  | +  /* Advance to an even register if the argument is doubleword-aligned.  */ | 
|  | +  if (doubleword_aligned_p) | 
|  | +    info->reg_offset += info->reg_offset & 1; | 
|  | + | 
|  | +  /* Work out the offset of a stack argument.  */ | 
|  | +  info->stack_offset = cum->stack_words; | 
|  | +  if (doubleword_aligned_p) | 
|  | +    info->stack_offset += info->stack_offset & 1; | 
|  | + | 
|  | +  max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset; | 
|  | + | 
|  | +  /* Partition the argument between registers and stack.  */ | 
|  | +  info->reg_words = MIN (num_words, max_regs); | 
|  | +  info->stack_words = num_words - info->reg_words; | 
|  | +} | 
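|  | + | 
|  | +/* For example, a named double argument can be assigned to a floating-point | 
|  | +   register, whereas a variadic double is passed like an integer, since | 
|  | +   INFO->fpr_p requires NAMED.  */ | 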
|  | + | 
|  | +/* INFO describes a register argument that has the normal format for the | 
|  | +   argument's mode.  Return the register it uses, assuming that FPRs are | 
|  | +   available if HARD_FLOAT_P.  */ | 
|  | + | 
|  | +static unsigned int | 
|  | +mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p) | 
|  | +{ | 
|  | +  if (!info->fpr_p || !hard_float_p) | 
|  | +    return GP_ARG_FIRST + info->reg_offset; | 
|  | +  else | 
|  | +    return FP_ARG_FIRST + info->reg_offset; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_FUNCTION_ARG.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, | 
|  | +		   const_tree type, bool named) | 
|  | +{ | 
|  | +  struct mips_arg_info info; | 
|  | + | 
|  | +  if (mode == VOIDmode) | 
|  | +    return NULL; | 
|  | + | 
|  | +  mips_get_arg_info (&info, cum, mode, type, named); | 
|  | + | 
|  | +  /* Return straight away if the whole argument is passed on the stack.  */ | 
|  | +  if (info.reg_offset == MAX_ARGS_IN_REGISTERS) | 
|  | +    return NULL; | 
|  | + | 
|  | +  /* The n32 and n64 ABIs say that if any 64-bit chunk of the structure | 
|  | +     contains a double in its entirety, then that 64-bit chunk is passed | 
|  | +     in a floating-point register.  */ | 
|  | +  if (TARGET_HARD_FLOAT | 
|  | +      && named | 
|  | +      && type != 0 | 
|  | +      && TREE_CODE (type) == RECORD_TYPE | 
|  | +      && TYPE_SIZE_UNIT (type) | 
|  | +      && host_integerp (TYPE_SIZE_UNIT (type), 1)) | 
|  | +    { | 
|  | +      tree field; | 
|  | + | 
|  | +      /* First check to see if there is any such field.  */ | 
|  | +      for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) | 
|  | +	if (TREE_CODE (field) == FIELD_DECL | 
|  | +	    && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)) | 
|  | +	    && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD | 
|  | +	    && host_integerp (bit_position (field), 0) | 
|  | +	    && int_bit_position (field) % BITS_PER_WORD == 0) | 
|  | +	  break; | 
|  | + | 
|  | +      if (field != 0) | 
|  | +	{ | 
|  | +	  /* Now handle the special case by returning a PARALLEL | 
|  | +	     indicating where each 64-bit chunk goes.  INFO.REG_WORDS | 
|  | +	     chunks are passed in registers.  */ | 
|  | +	  unsigned int i; | 
|  | +	  HOST_WIDE_INT bitpos; | 
|  | +	  rtx ret; | 
|  | + | 
|  | +	  /* assign_parms checks the mode of ENTRY_PARM, so we must | 
|  | +	     use the actual mode here.  */ | 
|  | +	  ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words)); | 
|  | + | 
|  | +	  bitpos = 0; | 
|  | +	  field = TYPE_FIELDS (type); | 
|  | +	  for (i = 0; i < info.reg_words; i++) | 
|  | +	    { | 
|  | +	      rtx reg; | 
|  | + | 
|  | +	      for (; field; field = DECL_CHAIN (field)) | 
|  | +		if (TREE_CODE (field) == FIELD_DECL | 
|  | +		    && int_bit_position (field) >= bitpos) | 
|  | +		  break; | 
|  | + | 
|  | +	      if (field | 
|  | +		  && int_bit_position (field) == bitpos | 
|  | +		  && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)) | 
|  | +		  && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD) | 
|  | +		reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i); | 
|  | +	      else | 
|  | +		reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i); | 
|  | + | 
|  | +	      XVECEXP (ret, 0, i) | 
|  | +		= gen_rtx_EXPR_LIST (VOIDmode, reg, | 
|  | +				     GEN_INT (bitpos / BITS_PER_UNIT)); | 
|  | + | 
|  | +	      bitpos += BITS_PER_WORD; | 
|  | +	    } | 
|  | +	  return ret; | 
|  | +	} | 
|  | +    } | 
|  | + | 
|  | +  /* Handle the n32/n64 conventions for passing complex floating-point | 
|  | +     arguments in FPR pairs.  The real part goes in the lower register | 
|  | +     and the imaginary part goes in the upper register.  */ | 
|  | +  if (info.fpr_p | 
|  | +      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) | 
|  | +    { | 
|  | +      rtx real, imag; | 
|  | +      enum machine_mode inner; | 
|  | +      unsigned int regno; | 
|  | + | 
|  | +      inner = GET_MODE_INNER (mode); | 
|  | +      regno = FP_ARG_FIRST + info.reg_offset; | 
|  | +      if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner)) | 
|  | +	{ | 
|  | +	  /* Real part in registers, imaginary part on stack.  */ | 
|  | +	  gcc_assert (info.stack_words == info.reg_words); | 
|  | +	  return gen_rtx_REG (inner, regno); | 
|  | +	} | 
|  | +      else | 
|  | +	{ | 
|  | +	  gcc_assert (info.stack_words == 0); | 
|  | +	  real = gen_rtx_EXPR_LIST (VOIDmode, | 
|  | +				    gen_rtx_REG (inner, regno), | 
|  | +				    const0_rtx); | 
|  | +	  imag = gen_rtx_EXPR_LIST (VOIDmode, | 
|  | +				    gen_rtx_REG (inner, | 
|  | +						 regno + info.reg_words / 2), | 
|  | +				    GEN_INT (GET_MODE_SIZE (inner))); | 
|  | +	  return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag)); | 
|  | +	} | 
|  | +    } | 
|  | + | 
|  | +  return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT)); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_FUNCTION_ARG_ADVANCE.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode, | 
|  | +			   const_tree type, bool named) | 
|  | +{ | 
|  | +  struct mips_arg_info info; | 
|  | + | 
|  | +  mips_get_arg_info (&info, cum, mode, type, named); | 
|  | + | 
|  | +  /* Advance the register count.  This has the effect of setting | 
|  | +     num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned | 
|  | +     argument required us to skip the final GPR and pass the whole | 
|  | +     argument on the stack.  */ | 
|  | +  cum->num_gprs = info.reg_offset + info.reg_words; | 
|  | + | 
|  | +  /* Advance the stack word count.  */ | 
|  | +  if (info.stack_words > 0) | 
|  | +    cum->stack_words = info.stack_offset + info.stack_words; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_ARG_PARTIAL_BYTES.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_arg_partial_bytes (CUMULATIVE_ARGS *cum, | 
|  | +			enum machine_mode mode, tree type, bool named) | 
|  | +{ | 
|  | +  struct mips_arg_info info; | 
|  | + | 
|  | +  mips_get_arg_info (&info, cum, mode, type, named); | 
|  | +  return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_FUNCTION_ARG_BOUNDARY.  Every parameter gets at | 
|  | +   least PARM_BOUNDARY bits of alignment, but will be given anything up | 
|  | +   to STACK_BOUNDARY bits if the type requires it.  */ | 
|  | + | 
|  | +static unsigned int | 
|  | +mips_function_arg_boundary (enum machine_mode mode, const_tree type) | 
|  | +{ | 
|  | +  unsigned int alignment; | 
|  | + | 
|  | +  alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode); | 
|  | +  if (alignment < PARM_BOUNDARY) | 
|  | +    alignment = PARM_BOUNDARY; | 
|  | +  if (alignment > STACK_BOUNDARY) | 
|  | +    alignment = STACK_BOUNDARY; | 
|  | +  return alignment; | 
|  | +} | 
|  | + | 
|  | +/* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return | 
|  | +   upward rather than downward.  In other words, return true if the | 
|  | +   first byte of the stack slot has useful data, false if the last | 
|  | +   byte does.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_pad_arg_upward (enum machine_mode mode, const_tree type) | 
|  | +{ | 
|  | +  /* On little-endian targets, the first byte of every stack argument | 
|  | +     is passed in the first byte of the stack slot.  */ | 
|  | +  if (!BYTES_BIG_ENDIAN) | 
|  | +    return true; | 
|  | + | 
|  | +  /* Otherwise, integral types are padded downward: the last byte of a | 
|  | +     stack argument is passed in the last byte of the stack slot.  */ | 
|  | +  if (type != 0 | 
|  | +      ? (INTEGRAL_TYPE_P (type) | 
|  | +	 || POINTER_TYPE_P (type) | 
|  | +	 || FIXED_POINT_TYPE_P (type)) | 
|  | +      : (SCALAR_INT_MODE_P (mode) | 
|  | +	 || ALL_SCALAR_FIXED_POINT_MODE_P (mode))) | 
|  | +    return false; | 
|  | + | 
|  | +  return true; | 
|  | +} | 
|  | + | 
|  | +/* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...).  Return !BYTES_BIG_ENDIAN | 
|  | +   if the least significant byte of the register has useful data.  Return | 
|  | +   the opposite if the most significant byte does.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_pad_reg_upward (enum machine_mode mode, tree type) | 
|  | +{ | 
|  | +  /* No shifting is required for floating-point arguments.  */ | 
|  | +  if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT) | 
|  | +    return !BYTES_BIG_ENDIAN; | 
|  | + | 
|  | +  /* Otherwise, apply the same padding to register arguments as we do | 
|  | +     to stack arguments.  */ | 
|  | +  return mips_pad_arg_upward (mode, type); | 
|  | +} | 
|  | + | 
|  | +/* See whether VALTYPE is a record whose fields should be returned in | 
|  | +   floating-point registers.  If so, return the number of fields and | 
|  | +   list them in FIELDS (which should have two elements).  Return 0 | 
|  | +   otherwise. | 
|  | + | 
|  | +   For n32 & n64, a structure with one or two fields is returned in | 
|  | +   floating-point registers as long as every field has a floating-point | 
|  | +   type.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_fpr_return_fields (const_tree valtype, tree *fields) | 
|  | +{ | 
|  | +  tree field; | 
|  | +  int i; | 
|  | + | 
|  | +  if (TREE_CODE (valtype) != RECORD_TYPE) | 
|  | +    return 0; | 
|  | + | 
|  | +  i = 0; | 
|  | +  for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field)) | 
|  | +    { | 
|  | +      if (TREE_CODE (field) != FIELD_DECL) | 
|  | +	continue; | 
|  | + | 
|  | +      if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))) | 
|  | +	return 0; | 
|  | + | 
|  | +      if (i == 2) | 
|  | +	return 0; | 
|  | + | 
|  | +      fields[i++] = field; | 
|  | +    } | 
|  | +  return i; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_RETURN_IN_MSB.  For n32 & n64, we should return | 
|  | +   a value in the most significant part of $2/$3 if: | 
|  | + | 
|  | +      - the target is big-endian; | 
|  | + | 
|  | +      - the value has a structure or union type (we generalize this to | 
|  | +	cover aggregates from other languages too); and | 
|  | + | 
|  | +      - the structure is not returned in floating-point registers.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_return_in_msb (const_tree valtype) | 
|  | +{ | 
|  | +  tree fields[2]; | 
|  | + | 
|  | +  return (TARGET_BIG_ENDIAN | 
|  | +	  && AGGREGATE_TYPE_P (valtype) | 
|  | +	  && mips_fpr_return_fields (valtype, fields) == 0); | 
|  | +} | 
|  | + | 
|  | +/* Return true if the function return value MODE will get returned in a | 
|  | +   floating-point register.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_return_mode_in_fpr_p (enum machine_mode mode) | 
|  | +{ | 
|  | +  return ((GET_MODE_CLASS (mode) == MODE_FLOAT | 
|  | +	   || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT | 
|  | +	   || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) | 
|  | +	  && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE); | 
|  | +} | 
|  | + | 
|  | +/* Return the representation of an FPR return register when the | 
|  | +   value being returned in FP_RETURN has mode VALUE_MODE and the | 
|  | +   return type itself has mode TYPE_MODE.  On NewABI targets, | 
|  | +   the two modes may be different for structures like: | 
|  | + | 
|  | +       struct __attribute__((packed)) foo { float f; } | 
|  | + | 
|  | +   where we return the SFmode value of "f" in FP_RETURN, but where | 
|  | +   the structure itself has mode BLKmode.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_return_fpr_single (enum machine_mode type_mode, | 
|  | +			enum machine_mode value_mode) | 
|  | +{ | 
|  | +  rtx x; | 
|  | + | 
|  | +  x = gen_rtx_REG (value_mode, FP_RETURN); | 
|  | +  if (type_mode != value_mode) | 
|  | +    { | 
|  | +      x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx); | 
|  | +      x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x)); | 
|  | +    } | 
|  | +  return x; | 
|  | +} | 
|  | + | 
|  | +/* Return a composite value in a pair of floating-point registers. | 
|  | +   MODE1 and OFFSET1 are the mode and byte offset for the first value, | 
|  | +   likewise MODE2 and OFFSET2 for the second.  MODE is the mode of the | 
|  | +   complete value. | 
|  | + | 
|  | +   For n32 & n64, $f0 always holds the first value and $f2 the second. | 
|  | +   Otherwise the values are packed together as closely as possible.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_return_fpr_pair (enum machine_mode mode, | 
|  | +		      enum machine_mode mode1, HOST_WIDE_INT offset1, | 
|  | +		      enum machine_mode mode2, HOST_WIDE_INT offset2) | 
|  | +{ | 
|  | +  return gen_rtx_PARALLEL | 
|  | +    (mode, | 
|  | +     gen_rtvec (2, | 
|  | +		gen_rtx_EXPR_LIST (VOIDmode, | 
|  | +				   gen_rtx_REG (mode1, FP_RETURN), | 
|  | +				   GEN_INT (offset1)), | 
|  | +		gen_rtx_EXPR_LIST (VOIDmode, | 
|  | +				   gen_rtx_REG (mode2, FP_RETURN + 1), | 
|  | +				   GEN_INT (offset2)))); | 
|  | + | 
|  | +} | 
|  | + | 
|  | +/* Implement FUNCTION_VALUE and LIBCALL_VALUE.  For normal calls, | 
|  | +   VALTYPE is the return type and MODE is VOIDmode.  For libcalls, | 
|  | +   VALTYPE is null and MODE is the mode of the return value.  */ | 
|  | + | 
|  | +rtx | 
|  | +mips_function_value (const_tree valtype, const_tree func, enum machine_mode mode) | 
|  | +{ | 
|  | +  if (valtype) | 
|  | +    { | 
|  | +      tree fields[2]; | 
|  | +      int unsigned_p; | 
|  | + | 
|  | +      mode = TYPE_MODE (valtype); | 
|  | +      unsigned_p = TYPE_UNSIGNED (valtype); | 
|  | + | 
|  | +      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes | 
|  | +	 return values, promote the mode here too.  */ | 
|  | +      mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1); | 
|  | + | 
|  | +      /* Handle structures whose fields are returned in $f0/$f2.  */ | 
|  | +      switch (mips_fpr_return_fields (valtype, fields)) | 
|  | +	{ | 
|  | +	case 1: | 
|  | +	  return mips_return_fpr_single (mode, | 
|  | +					 TYPE_MODE (TREE_TYPE (fields[0]))); | 
|  | + | 
|  | +	case 2: | 
|  | +	  return mips_return_fpr_pair (mode, | 
|  | +				       TYPE_MODE (TREE_TYPE (fields[0])), | 
|  | +				       int_byte_position (fields[0]), | 
|  | +				       TYPE_MODE (TREE_TYPE (fields[1])), | 
|  | +				       int_byte_position (fields[1])); | 
|  | +	} | 
|  | + | 
|  | +      /* If a value is passed in the most significant part of a register, see | 
|  | +	 whether we have to round the mode up to a whole number of words.  */ | 
|  | +      if (mips_return_in_msb (valtype)) | 
|  | +	{ | 
|  | +	  HOST_WIDE_INT size = int_size_in_bytes (valtype); | 
|  | +	  if (size % UNITS_PER_WORD != 0) | 
|  | +	    { | 
|  | +	      size += UNITS_PER_WORD - size % UNITS_PER_WORD; | 
|  | +	      mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0); | 
|  | +	    } | 
|  | +	} | 
|  | + | 
|  | +      /* Only use FPRs for scalar, complex or vector types.  */ | 
|  | +      if (!FLOAT_TYPE_P (valtype)) | 
|  | +	return gen_rtx_REG (mode, GP_RETURN); | 
|  | +    } | 
|  | + | 
|  | +  /* Handle long doubles for n32 & n64.  */ | 
|  | +  if (mode == TFmode) | 
|  | +    return mips_return_fpr_pair (mode, | 
|  | +    			     DImode, 0, | 
|  | +    			     DImode, GET_MODE_SIZE (mode) / 2); | 
|  | + | 
|  | +  if (mips_return_mode_in_fpr_p (mode)) | 
|  | +    { | 
|  | +      if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) | 
|  | +        return mips_return_fpr_pair (mode, | 
|  | +    				 GET_MODE_INNER (mode), 0, | 
|  | +    				 GET_MODE_INNER (mode), | 
|  | +    				 GET_MODE_SIZE (mode) / 2); | 
|  | +      else | 
|  | +        return gen_rtx_REG (mode, FP_RETURN); | 
|  | +    } | 
|  | + | 
|  | +  return gen_rtx_REG (mode, GP_RETURN); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_RETURN_IN_MEMORY.  Scalars and small structures | 
|  | +   that fit in two registers are returned in v0/v1. */ | 
|  | + | 
|  | +static bool | 
|  | +mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED) | 
|  | +{ | 
|  | +  return !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD); | 
|  | +} | 
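|  | + | 
|  | +/* For example, on a 64-bit target a 16-byte structure is returned in | 
|  | +   registers, while a 17-byte structure is returned in memory.  */ | 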
|  | + | 
|  | +/* Implement TARGET_SETUP_INCOMING_VARARGS.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode, | 
|  | +			     tree type, int *pretend_size ATTRIBUTE_UNUSED, | 
|  | +			     int no_rtl) | 
|  | +{ | 
|  | +  CUMULATIVE_ARGS local_cum; | 
|  | +  int gp_saved; | 
|  | + | 
|  | +  /* The caller has advanced CUM up to, but not beyond, the last named | 
|  | +     argument.  Advance a local copy of CUM past the last "real" named | 
|  | +     argument, to find out how many registers are left over.  */ | 
|  | +  local_cum = *cum; | 
|  | +  mips_function_arg_advance (&local_cum, mode, type, true); | 
|  | + | 
|  | +  /* Find out how many registers we need to save.  */ | 
|  | +  gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs; | 
|  | + | 
|  | +  if (!no_rtl && gp_saved > 0) | 
|  | +    { | 
|  | +      rtx ptr, mem; | 
|  | + | 
|  | +      ptr = plus_constant (virtual_incoming_args_rtx, | 
|  | +			   REG_PARM_STACK_SPACE (cfun->decl) | 
|  | +			   - gp_saved * UNITS_PER_WORD); | 
|  | +      mem = gen_frame_mem (BLKmode, ptr); | 
|  | +      set_mem_alias_set (mem, get_varargs_alias_set ()); | 
|  | + | 
|  | +      move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST, | 
|  | +			   mem, gp_saved); | 
|  | +    } | 
|  | +  if (REG_PARM_STACK_SPACE (cfun->decl) == 0) | 
|  | +    cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_EXPAND_BUILTIN_VA_START.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_va_start (tree valist, rtx nextarg) | 
|  | +{ | 
|  | +  nextarg = plus_constant (nextarg, -cfun->machine->varargs_size); | 
|  | +  std_expand_builtin_va_start (valist, nextarg); | 
|  | +} | 
|  | + | 
|  | +/* Expand a call.  RESULT is where the result will go (null for "call"s | 
|  | +   and "sibcall"s), ADDR is the address of the function, ARGS_SIZE is | 
|  | +   the size of the arguments and SIBCALL_P is true if the call is a | 
|  | +   sibling call. | 
|  | + | 
|  | +   Return the call itself.  */ | 
|  | + | 
|  | +rtx | 
|  | +mips_expand_call (bool sibcall_p, rtx result, rtx addr, rtx args_size) | 
|  | +{ | 
|  | +  rtx pattern, insn; | 
|  | + | 
|  | +  if (!call_insn_operand (addr, VOIDmode)) | 
|  | +    { | 
|  | +      rtx reg = MIPS_EPILOGUE_TEMP (Pmode, true); | 
|  | +      mips_emit_move (reg, addr); | 
|  | +      addr = reg; | 
|  | +    } | 
|  | + | 
|  | +  if (result == 0) | 
|  | +    { | 
|  | +      rtx (*fn) (rtx, rtx); | 
|  | + | 
|  | +      if (sibcall_p) | 
|  | +	fn = gen_sibcall_internal; | 
|  | +      else | 
|  | +	fn = gen_call_internal; | 
|  | + | 
|  | +      pattern = fn (addr, args_size); | 
|  | +    } | 
|  | +  else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2) | 
|  | +    { | 
|  | +      /* Handle return values created by mips_return_fpr_pair.  */ | 
|  | +      rtx (*fn) (rtx, rtx, rtx, rtx); | 
|  | +      rtx reg1, reg2; | 
|  | + | 
|  | +      if (sibcall_p) | 
|  | +	fn = gen_sibcall_value_multiple_internal; | 
|  | +      else | 
|  | +	fn = gen_call_value_multiple_internal; | 
|  | + | 
|  | +      reg1 = XEXP (XVECEXP (result, 0, 0), 0); | 
|  | +      reg2 = XEXP (XVECEXP (result, 0, 1), 0); | 
|  | +      pattern = fn (reg1, addr, args_size, reg2); | 
|  | +    } | 
|  | +  else | 
|  | +    { | 
|  | +      rtx (*fn) (rtx, rtx, rtx); | 
|  | + | 
|  | +      if (sibcall_p) | 
|  | +	fn = gen_sibcall_value_internal; | 
|  | +      else | 
|  | +	fn = gen_call_value_internal; | 
|  | + | 
|  | +      /* Handle return values created by mips_return_fpr_single.  */ | 
|  | +      if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1) | 
|  | +	result = XEXP (XVECEXP (result, 0, 0), 0); | 
|  | +      pattern = fn (result, addr, args_size); | 
|  | +    } | 
|  | + | 
|  | +  insn = emit_call_insn (pattern); | 
|  | + | 
|  | +  if (TARGET_USE_GOT) | 
|  | +    { | 
|  | +      /* See the comment above load_call<mode> for details.  */ | 
|  | +      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), | 
|  | +	       gen_rtx_REG (Pmode, GOT_VERSION_REGNUM)); | 
|  | +      emit_insn (gen_update_got_version ()); | 
|  | +    } | 
|  | +  return insn; | 
|  | +} | 
|  | + | 
|  | +/* Emit straight-line code to move LENGTH bytes from SRC to DEST. | 
|  | +   Assume that the areas do not overlap.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length) | 
|  | +{ | 
|  | +  HOST_WIDE_INT offset, delta; | 
|  | +  unsigned HOST_WIDE_INT bits; | 
|  | +  int i; | 
|  | +  enum machine_mode mode; | 
|  | +  rtx *regs; | 
|  | + | 
|  | +  bits = MAX (BITS_PER_UNIT, | 
|  | +	      MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest)))); | 
|  | + | 
|  | +  mode = mode_for_size (bits, MODE_INT, 0); | 
|  | +  delta = bits / BITS_PER_UNIT; | 
|  | + | 
|  | +  /* Allocate a buffer for the temporary registers.  */ | 
|  | +  regs = XALLOCAVEC (rtx, length / delta); | 
|  | + | 
|  | +  /* Load as many BITS-sized chunks as possible into temporary registers. | 
|  | +     The chunk size chosen above ensures that plain aligned loads suffice.  */ | 
|  | +  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) | 
|  | +    { | 
|  | +      regs[i] = gen_reg_rtx (mode); | 
|  | +      mips_emit_move (regs[i], adjust_address (src, mode, offset)); | 
|  | +    } | 
|  | + | 
|  | +  /* Copy the chunks to the destination.  */ | 
|  | +  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++) | 
|  | +      mips_emit_move (adjust_address (dest, mode, offset), regs[i]); | 
|  | + | 
|  | +  /* Mop up any left-over bytes.  */ | 
|  | +  if (offset < length) | 
|  | +    { | 
|  | +      src = adjust_address (src, BLKmode, offset); | 
|  | +      dest = adjust_address (dest, BLKmode, offset); | 
|  | +      move_by_pieces (dest, src, length - offset, | 
|  | +		      MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0); | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Helper function for doing a loop-based block operation on memory | 
|  | +   reference MEM.  Each iteration of the loop will operate on LENGTH | 
|  | +   bytes of MEM. | 
|  | + | 
|  | +   Create a new base register for use within the loop and point it to | 
|  | +   the start of MEM.  Create a new memory reference that uses this | 
|  | +   register.  Store them in *LOOP_REG and *LOOP_MEM respectively.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length, | 
|  | +		       rtx *loop_reg, rtx *loop_mem) | 
|  | +{ | 
|  | +  *loop_reg = copy_addr_to_reg (XEXP (mem, 0)); | 
|  | + | 
|  | +  /* Although the new mem does not refer to a known location, | 
|  | +     it does keep up to LENGTH bytes of alignment.  */ | 
|  | +  *loop_mem = change_address (mem, BLKmode, *loop_reg); | 
|  | +  set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT)); | 
|  | +} | 
|  | + | 
|  | +/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER | 
|  | +   bytes at a time.  LENGTH must be at least BYTES_PER_ITER.  Assume that | 
|  | +   the memory regions do not overlap.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length, | 
|  | +		      HOST_WIDE_INT bytes_per_iter) | 
|  | +{ | 
|  | +  rtx label, src_reg, dest_reg, final_src, test; | 
|  | +  HOST_WIDE_INT leftover; | 
|  | + | 
|  | +  leftover = length % bytes_per_iter; | 
|  | +  length -= leftover; | 
|  | + | 
|  | +  /* Create registers and memory references for use within the loop.  */ | 
|  | +  mips_adjust_block_mem (src, bytes_per_iter, &src_reg, &src); | 
|  | +  mips_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest); | 
|  | + | 
|  | +  /* Calculate the value that SRC_REG should have after the last iteration | 
|  | +     of the loop.  */ | 
|  | +  final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length), | 
|  | +				   0, 0, OPTAB_WIDEN); | 
|  | + | 
|  | +  /* Emit the start of the loop.  */ | 
|  | +  label = gen_label_rtx (); | 
|  | +  emit_label (label); | 
|  | + | 
|  | +  /* Emit the loop body.  */ | 
|  | +  mips_block_move_straight (dest, src, bytes_per_iter); | 
|  | + | 
|  | +  /* Move on to the next block.  */ | 
|  | +  mips_emit_move (src_reg, plus_constant (src_reg, bytes_per_iter)); | 
|  | +  mips_emit_move (dest_reg, plus_constant (dest_reg, bytes_per_iter)); | 
|  | + | 
|  | +  /* Emit the loop condition.  */ | 
|  | +  test = gen_rtx_NE (VOIDmode, src_reg, final_src); | 
|  | +  if (Pmode == DImode) | 
|  | +    emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label)); | 
|  | +  else | 
|  | +    emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label)); | 
|  | + | 
|  | +  /* Mop up any left-over bytes.  */ | 
|  | +  if (leftover) | 
|  | +    mips_block_move_straight (dest, src, leftover); | 
|  | +} | 
|  | + | 
|  | +/* Expand a movmemsi instruction, which copies LENGTH bytes from | 
|  | +   memory reference SRC to memory reference DEST.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_expand_block_move (rtx dest, rtx src, rtx length) | 
|  | +{ | 
|  | +  if (CONST_INT_P (length)) | 
|  | +    { | 
|  | +      HOST_WIDE_INT factor, align; | 
|  | + | 
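|  | +      /* Less-aligned copies need FACTOR times as many memory accesses, | 
|  | +	 so scale the size limits down to compensate.  */ | 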
|  | +      align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD); | 
|  | +      factor = BITS_PER_WORD / align; | 
|  | + | 
|  | +      if (INTVAL (length) <= MIPS_MAX_MOVE_BYTES_STRAIGHT / factor) | 
|  | +	{ | 
|  | +	  mips_block_move_straight (dest, src, INTVAL (length)); | 
|  | +	  return true; | 
|  | +	} | 
|  | +      else if (optimize && align >= BITS_PER_WORD) | 
|  | +	{ | 
|  | +	  mips_block_move_loop (dest, src, INTVAL (length), | 
|  | +				MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER / factor); | 
|  | +	  return true; | 
|  | +	} | 
|  | +    } | 
|  | +  return false; | 
|  | +} | 
|  | + | 
|  | +/* Return true if X is a MEM with the same size as MODE.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_mem_fits_mode_p (enum machine_mode mode, rtx x) | 
|  | +{ | 
|  | +  rtx size; | 
|  | + | 
|  | +  if (!MEM_P (x)) | 
|  | +    return false; | 
|  | + | 
|  | +  size = MEM_SIZE (x); | 
|  | +  return size && INTVAL (size) == GET_MODE_SIZE (mode); | 
|  | +} | 
|  | + | 
|  | +/* (Re-)Initialize mips_split_p, mips_lo_relocs and mips_hi_relocs.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_init_relocs (void) | 
|  | +{ | 
|  | +  memset (mips_split_p, '\0', sizeof (mips_split_p)); | 
|  | +  memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs)); | 
|  | +  memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs)); | 
|  | + | 
|  | +  if (!flag_pic) | 
|  | +    { | 
|  | +      mips_split_p[SYMBOL_ABSOLUTE] = true; | 
|  | +      mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi("; | 
|  | +      mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo("; | 
|  | +    } | 
|  | + | 
|  | +  mips_split_p[SYMBOL_TPREL] = true; | 
|  | +  mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi("; | 
|  | +  mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo("; | 
|  | +} | 
|  | + | 
|  | +/* Print symbolic operand OP, which is part of a HIGH or LO_SUM. | 
|  | +   RELOCS is the array of relocations to use.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_print_operand_reloc (FILE *file, rtx op, const char **relocs) | 
|  | +{ | 
|  | +  enum mips_symbol_type symbol_type; | 
|  | +  const char *p; | 
|  | + | 
|  | +  symbol_type = mips_classify_symbolic_expression (op); | 
|  | +  gcc_assert (relocs[symbol_type]); | 
|  | + | 
|  | +  fputs (relocs[symbol_type], file); | 
|  | +  output_addr_const (file, mips_strip_unspec_address (op)); | 
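|  | +  /* Close every parenthesis opened by the relocation prefix.  */ | 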
|  | +  for (p = relocs[symbol_type]; *p != 0; p++) | 
|  | +    if (*p == '(') | 
|  | +      fputc (')', file); | 
|  | +} | 
|  | + | 
|  | +/* PRINT_OPERAND prefix LETTER refers to the integer branch instruction | 
|  | +   associated with condition CODE.  Print the condition part of the | 
|  | +   opcode to FILE.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter) | 
|  | +{ | 
|  | +  switch (code) | 
|  | +    { | 
|  | +    case EQ: | 
|  | +    case NE: | 
|  | +    case GT: | 
|  | +    case GE: | 
|  | +    case LT: | 
|  | +    case LE: | 
|  | +    case GTU: | 
|  | +    case GEU: | 
|  | +    case LTU: | 
|  | +    case LEU: | 
|  | +      /* Conveniently, the MIPS names for these conditions are the same | 
|  | +	 as their RTL equivalents.  */ | 
|  | +      fputs (GET_RTX_NAME (code), file); | 
|  | +      break; | 
|  | + | 
|  | +    default: | 
|  | +      output_operand_lossage ("'%%%c' is not a valid operand prefix", letter); | 
|  | +      break; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_PRINT_OPERAND.  The MIPS-specific operand codes are: | 
|  | + | 
|  | +   'X'	Print CONST_INT OP in hexadecimal format. | 
|  | +   'x'	Print the low 16 bits of CONST_INT OP in hexadecimal format. | 
|  | +   'd'	Print CONST_INT OP in decimal. | 
|  | +   'm'	Print one less than CONST_INT OP in decimal. | 
|  | +   'h'	Print the high-part relocation associated with OP, after stripping | 
|  | +	  any outermost HIGH. | 
|  | +   'R'	Print the low-part relocation associated with OP. | 
|  | +   'C'	Print the integer branch condition for comparison OP. | 
|  | +   'N'	Print the inverse of the integer branch condition for comparison OP. | 
|  | +   'S'	Print the integer branch condition for the swapped form of | 
|  | +	  comparison OP. | 
|  | +   'T'	Print 'f' for (eq:CC ...), 't' for (ne:CC ...), | 
|  | +	      'z' for (eq:?I ...), 'n' for (ne:?I ...). | 
|  | +   't'	Like 'T', but with the EQ/NE cases reversed. | 
|  | +   'Z'	Print OP and a comma. | 
|  | +   'D'	Print the second part of a double-word register or memory operand. | 
|  | +   'L'	Print the low-order register in a double-word register operand. | 
|  | +   'M'	Print the high-order register in a double-word register operand. | 
|  | +   'y'	Print the register that holds the address in memory operand OP. | 
|  | +   'z'	Print $0 if OP is zero, otherwise print OP normally.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_print_operand (FILE *file, rtx op, int letter) | 
|  | +{ | 
|  | +  enum rtx_code code; | 
|  | + | 
|  | +  gcc_assert (op); | 
|  | +  code = GET_CODE (op); | 
|  | + | 
|  | +  switch (letter) | 
|  | +    { | 
|  | +    case 'X': | 
|  | +      if (CONST_INT_P (op)) | 
|  | +	fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op)); | 
|  | +      else | 
|  | +	output_operand_lossage ("invalid use of '%%%c'", letter); | 
|  | +      break; | 
|  | + | 
|  | +    case 'x': | 
|  | +      if (CONST_INT_P (op)) | 
|  | +	fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff); | 
|  | +      else | 
|  | +	output_operand_lossage ("invalid use of '%%%c'", letter); | 
|  | +      break; | 
|  | + | 
|  | +    case 'd': | 
|  | +      if (CONST_INT_P (op)) | 
|  | +	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op)); | 
|  | +      else | 
|  | +	output_operand_lossage ("invalid use of '%%%c'", letter); | 
|  | +      break; | 
|  | + | 
|  | +    case 'm': | 
|  | +      if (CONST_INT_P (op)) | 
|  | +	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1); | 
|  | +      else | 
|  | +	output_operand_lossage ("invalid use of '%%%c'", letter); | 
|  | +      break; | 
|  | + | 
|  | +    case 'h': | 
|  | +      if (code == HIGH) | 
|  | +	op = XEXP (op, 0); | 
|  | +      mips_print_operand_reloc (file, op, mips_hi_relocs); | 
|  | +      break; | 
|  | + | 
|  | +    case 'R': | 
|  | +      mips_print_operand_reloc (file, op, mips_lo_relocs); | 
|  | +      break; | 
|  | + | 
|  | +    case 'C': | 
|  | +      mips_print_int_branch_condition (file, code, letter); | 
|  | +      break; | 
|  | + | 
|  | +    case 'N': | 
|  | +      mips_print_int_branch_condition (file, reverse_condition (code), letter); | 
|  | +      break; | 
|  | + | 
|  | +    case 'S': | 
|  | +      mips_print_int_branch_condition (file, swap_condition (code), letter); | 
|  | +      break; | 
|  | + | 
|  | +    case 'T': | 
|  | +    case 't': | 
|  | +      { | 
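|  | +	/* Select from "zfnt": bit 0 is set for CCmode comparisons, bit 1 | 
|  | +	   when the comparison matches the sense of the letter.  */ | 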
|  | +	int truth = (code == NE) == (letter == 'T'); | 
|  | +	fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file); | 
|  | +      } | 
|  | +      break; | 
|  | + | 
|  | +    case 'Z': | 
|  | +      mips_print_operand (file, op, 0); | 
|  | +      fputc (',', file); | 
|  | +      break; | 
|  | + | 
|  | +    default: | 
|  | +      switch (code) | 
|  | +	{ | 
|  | +	case REG: | 
|  | +	  { | 
|  | +	    unsigned int regno = REGNO (op); | 
|  | +	    if ((letter == 'M' && TARGET_LITTLE_ENDIAN) | 
|  | +		|| (letter == 'L' && TARGET_BIG_ENDIAN) | 
|  | +		|| letter == 'D') | 
|  | +	      regno++; | 
|  | +	    else if (letter && letter != 'z' && letter != 'M' && letter != 'L') | 
|  | +	      output_operand_lossage ("invalid use of '%%%c'", letter); | 
|  | +	    fprintf (file, "%s", reg_names[regno]); | 
|  | +	  } | 
|  | +	  break; | 
|  | + | 
|  | +	case MEM: | 
|  | +	  if (letter == 'y') | 
|  | +	    fprintf (file, "%s", reg_names[REGNO (XEXP (op, 0))]); | 
|  | +	  else if (letter == 'D') | 
|  | +	    output_address (plus_constant (XEXP (op, 0), 4)); | 
|  | +	  else if (letter && letter != 'z') | 
|  | +	    output_operand_lossage ("invalid use of '%%%c'", letter); | 
|  | +	  else | 
|  | +	    output_address (XEXP (op, 0)); | 
|  | +	  break; | 
|  | + | 
|  | +	default: | 
|  | +	  if (letter == 'z' && op == CONST0_RTX (GET_MODE (op))) | 
|  | +	    fputs (reg_names[GP_REG_FIRST], file); | 
|  | +	  else if (letter && letter != 'z') | 
|  | +	    output_operand_lossage ("invalid use of '%%%c'", letter); | 
|  | +	  else | 
|  | +	    output_addr_const (file, mips_strip_unspec_address (op)); | 
|  | +	  break; | 
|  | +	} | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_PRINT_OPERAND_ADDRESS.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_print_operand_address (FILE *file, rtx x) | 
|  | +{ | 
|  | +  struct mips_address_info addr; | 
|  | + | 
|  | +  if (mips_classify_address (&addr, x, word_mode, true)) | 
|  | +    switch (addr.type) | 
|  | +      { | 
|  | +      case ADDRESS_REG: | 
|  | +	mips_print_operand (file, addr.offset, 0); | 
|  | +	fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]); | 
|  | +	return; | 
|  | + | 
|  | +      case ADDRESS_LO_SUM: | 
|  | +	mips_print_operand_reloc (file, addr.offset, mips_lo_relocs); | 
|  | +	fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]); | 
|  | +	return; | 
|  | + | 
|  | +      case ADDRESS_CONST_INT: | 
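|  | +	/* A bare constant is printed as an offset from hard register 0.  */ | 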
|  | +	output_addr_const (file, x); | 
|  | +	fprintf (file, "(%s)", reg_names[GP_REG_FIRST]); | 
|  | +	return; | 
|  | + | 
|  | +      case ADDRESS_SYMBOLIC: | 
|  | +	output_addr_const (file, mips_strip_unspec_address (x)); | 
|  | +	return; | 
|  | +      } | 
|  | +  gcc_unreachable (); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_ENCODE_SECTION_INFO.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_encode_section_info (tree decl, rtx rtl, int first) | 
|  | +{ | 
|  | +  default_encode_section_info (decl, rtl, first); | 
|  | + | 
|  | +  if (TREE_CODE (decl) == FUNCTION_DECL) | 
|  | +    { | 
|  | +      rtx symbol = XEXP (rtl, 0); | 
|  | +      tree type = TREE_TYPE (decl); | 
|  | + | 
|  | +      /* Encode whether the symbol is short or long.  */ | 
|  | +      if ((TARGET_LONG_CALLS && !mips_near_type_p (type)) | 
|  | +	  || mips_far_type_p (type)) | 
|  | +	SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* The MIPS debug format wants all automatic variables and arguments | 
|  | +   to be in terms of the virtual frame pointer (stack pointer before | 
|  | +   any adjustment in the function), while the MIPS 3.0 linker wants | 
|  | +   the frame pointer to be the stack pointer after the initial | 
|  | +   adjustment.  So, we do the adjustment here.  The arg pointer (which | 
|  | +   is eliminated) points to the virtual frame pointer, while the frame | 
|  | +   pointer (which may be eliminated) points to the stack pointer after | 
|  | +   the initial adjustments.  */ | 
|  | + | 
|  | +HOST_WIDE_INT | 
|  | +mips_debugger_offset (rtx addr, HOST_WIDE_INT offset) | 
|  | +{ | 
|  | +  rtx offset2 = const0_rtx; | 
|  | +  rtx reg = eliminate_constant_term (addr, &offset2); | 
|  | + | 
|  | +  if (offset == 0) | 
|  | +    offset = INTVAL (offset2); | 
|  | + | 
|  | +  if (reg == stack_pointer_rtx) | 
|  | +    offset -= cfun->machine->frame.total_size; | 
|  | +  else | 
|  | +    gcc_assert (reg == frame_pointer_rtx || reg == hard_frame_pointer_rtx); | 
|  | + | 
|  | +  return offset; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_ASM_OUTPUT_SOURCE_FILENAME.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_output_filename (FILE *stream, const char *name) | 
|  | +{ | 
|  | +  /* If we are emitting DWARF-2, let dwarf2out handle the ".file" | 
|  | +     directives.  */ | 
|  | +  if (write_symbols == DWARF2_DEBUG) | 
|  | +    return; | 
|  | +  else if (mips_output_filename_first_time) | 
|  | +    { | 
|  | +      mips_output_filename_first_time = 0; | 
|  | +      num_source_filenames += 1; | 
|  | +      current_function_file = name; | 
|  | +      fprintf (stream, "\t.file\t%d ", num_source_filenames); | 
|  | +      output_quoted_string (stream, name); | 
|  | +      putc ('\n', stream); | 
|  | +    } | 
|  | +  /* If we are emitting stabs, let dbxout.c handle this (except for | 
|  | +     the mips_output_filename_first_time case).  */ | 
|  | +  else if (write_symbols == DBX_DEBUG) | 
|  | +    return; | 
|  | +  else if (name != current_function_file | 
|  | +	   && strcmp (name, current_function_file) != 0) | 
|  | +    { | 
|  | +      num_source_filenames += 1; | 
|  | +      current_function_file = name; | 
|  | +      fprintf (stream, "\t.file\t%d ", num_source_filenames); | 
|  | +      output_quoted_string (stream, name); | 
|  | +      putc ('\n', stream); | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL.  */ | 
|  | + | 
|  | +static void ATTRIBUTE_UNUSED | 
|  | +mips_output_dwarf_dtprel (FILE *file, int size, rtx x) | 
|  | +{ | 
|  | +  switch (size) | 
|  | +    { | 
|  | +    case 4: | 
|  | +      fputs ("\t.dtprelword\t", file); | 
|  | +      break; | 
|  | + | 
|  | +    case 8: | 
|  | +      fputs ("\t.dtpreldword\t", file); | 
|  | +      break; | 
|  | + | 
|  | +    default: | 
|  | +      gcc_unreachable (); | 
|  | +    } | 
|  | +  output_addr_const (file, x); | 
|  | +  fputs ("+0x8000", file); | 
|  | +} | 
|  | + | 
|  | +/* Implement ASM_OUTPUT_ASCII.  */ | 
|  | + | 
|  | +void | 
|  | +mips_output_ascii (FILE *stream, const char *string, size_t len) | 
|  | +{ | 
|  | +  size_t i; | 
|  | +  int cur_pos; | 
|  | + | 
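|  | +  /* Column position just after the opening quote of the ".ascii" | 
|  | +     directive below.  */ | 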
|  | +  cur_pos = 17; | 
|  | +  fprintf (stream, "\t.ascii\t\""); | 
|  | +  for (i = 0; i < len; i++) | 
|  | +    { | 
|  | +      int c; | 
|  | + | 
|  | +      c = (unsigned char) string[i]; | 
|  | +      if (ISPRINT (c)) | 
|  | +	{ | 
|  | +	  if (c == '\\' || c == '\"') | 
|  | +	    { | 
|  | +	      putc ('\\', stream); | 
|  | +	      cur_pos++; | 
|  | +	    } | 
|  | +	  putc (c, stream); | 
|  | +	  cur_pos++; | 
|  | +	} | 
|  | +      else | 
|  | +	{ | 
|  | +	  fprintf (stream, "\\%03o", c); | 
|  | +	  cur_pos += 4; | 
|  | +	} | 
|  | + | 
|  | +      if (cur_pos > 72 && i+1 < len) | 
|  | +	{ | 
|  | +	  cur_pos = 17; | 
|  | +	  fprintf (stream, "\"\n\t.ascii\t\""); | 
|  | +	} | 
|  | +    } | 
|  | +  fprintf (stream, "\"\n"); | 
|  | +} | 
|  | + | 
|  | +/* Emit either a label, .comm, or .lcomm directive.  STREAM is the output | 
|  | +   file, NAME is the name of the symbol, INIT_STRING is the string that | 
|  | +   should be written before the symbol and FINAL_STRING is the string | 
|  | +   that should be written after it.  FINAL_STRING is a printf format that | 
|  | +   consumes the remaining arguments.  */ | 
|  | + | 
|  | +void | 
|  | +mips_declare_object (FILE *stream, const char *name, const char *init_string, | 
|  | +		     const char *final_string, ...) | 
|  | +{ | 
|  | +  va_list ap; | 
|  | + | 
|  | +  fputs (init_string, stream); | 
|  | +  assemble_name (stream, name); | 
|  | +  va_start (ap, final_string); | 
|  | +  vfprintf (stream, final_string, ap); | 
|  | +  va_end (ap); | 
|  | +} | 
|  | + | 
|  | +/* Declare a common object of SIZE bytes using asm directive INIT_STRING. | 
|  | +   NAME is the name of the object and ALIGN is the required alignment | 
|  | +   in bytes.  TAKES_ALIGNMENT_P is true if the directive takes a third | 
|  | +   alignment argument.  */ | 
|  | + | 
|  | +void | 
|  | +mips_declare_common_object (FILE *stream, const char *name, | 
|  | +			    const char *init_string, | 
|  | +			    unsigned HOST_WIDE_INT size, | 
|  | +			    unsigned int align, bool takes_alignment_p) | 
|  | +{ | 
|  | +  if (!takes_alignment_p) | 
|  | +    { | 
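|  | +      /* The directive cannot take an alignment argument, so round SIZE | 
|  | +	 up to a multiple of the alignment instead.  */ | 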
|  | +      size += (align / BITS_PER_UNIT) - 1; | 
|  | +      size -= size % (align / BITS_PER_UNIT); | 
|  | +      mips_declare_object (stream, name, init_string, | 
|  | +			   "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size); | 
|  | +    } | 
|  | +  else | 
|  | +    mips_declare_object (stream, name, init_string, | 
|  | +			 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n", | 
|  | +			 size, align / BITS_PER_UNIT); | 
|  | +} | 
|  | + | 
|  | +/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON.  This is like the elfos.h | 
|  | +   version, but emits the directive via mips_declare_common_object.  */ | 
|  | + | 
|  | +void | 
|  | +mips_output_aligned_decl_common (FILE *stream, tree decl ATTRIBUTE_UNUSED, | 
|  | +				 const char *name, | 
|  | +				 unsigned HOST_WIDE_INT size, | 
|  | +				 unsigned int align) | 
|  | +{ | 
|  | +  mips_declare_common_object (stream, name, "\n\t.comm\t", size, align, true); | 
|  | +} | 
|  | + | 
|  | +#ifdef ASM_OUTPUT_SIZE_DIRECTIVE | 
|  | +extern int size_directive_output; | 
|  | + | 
|  | +/* Implement ASM_DECLARE_OBJECT_NAME.  This is like most of the standard ELF | 
|  | +   definitions except that it uses mips_declare_object to emit the label.  */ | 
|  | + | 
|  | +void | 
|  | +mips_declare_object_name (FILE *stream, const char *name, tree decl) | 
|  | +{ | 
|  | +#ifdef ASM_OUTPUT_TYPE_DIRECTIVE | 
|  | +  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object"); | 
|  | +#endif | 
|  | + | 
|  | +  size_directive_output = 0; | 
|  | +  if (!flag_inhibit_size_directive && DECL_SIZE (decl)) | 
|  | +    { | 
|  | +      HOST_WIDE_INT size; | 
|  | + | 
|  | +      size_directive_output = 1; | 
|  | +      size = int_size_in_bytes (TREE_TYPE (decl)); | 
|  | +      ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); | 
|  | +    } | 
|  | + | 
|  | +  mips_declare_object (stream, name, "", ":\n"); | 
|  | +} | 
|  | + | 
|  | +/* Implement ASM_FINISH_DECLARE_OBJECT.  This is generic ELF stuff.  */ | 
|  | + | 
|  | +void | 
|  | +mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end) | 
|  | +{ | 
|  | +  const char *name; | 
|  | + | 
|  | +  name = XSTR (XEXP (DECL_RTL (decl), 0), 0); | 
|  | +  if (!flag_inhibit_size_directive | 
|  | +      && DECL_SIZE (decl) != 0 | 
|  | +      && !at_end | 
|  | +      && top_level | 
|  | +      && DECL_INITIAL (decl) == error_mark_node | 
|  | +      && !size_directive_output) | 
|  | +    { | 
|  | +      HOST_WIDE_INT size; | 
|  | + | 
|  | +      size_directive_output = 1; | 
|  | +      size = int_size_in_bytes (TREE_TYPE (decl)); | 
|  | +      ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); | 
|  | +    } | 
|  | +} | 
|  | +#endif | 
|  | + | 
|  | +/* Make the last instruction frame-related and note that it performs | 
|  | +   the operation described by FRAME_PATTERN.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_set_frame_expr (rtx frame_pattern) | 
|  | +{ | 
|  | +  rtx insn; | 
|  | + | 
|  | +  insn = get_last_insn (); | 
|  | +  RTX_FRAME_RELATED_P (insn) = 1; | 
|  | +  REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR, | 
|  | +				      frame_pattern, | 
|  | +				      REG_NOTES (insn)); | 
|  | +} | 
|  | + | 
|  | +/* Return a frame-related rtx that stores REG at MEM. | 
|  | +   REG must be a single register.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_frame_set (rtx mem, rtx reg) | 
|  | +{ | 
|  | +  rtx set; | 
|  | + | 
|  | +  set = gen_rtx_SET (VOIDmode, mem, reg); | 
|  | +  RTX_FRAME_RELATED_P (set) = 1; | 
|  | + | 
|  | +  return set; | 
|  | +} | 
|  | + | 
|  | +/* Return true if the current function must save register REGNO.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_save_reg_p (unsigned int regno) | 
|  | +{ | 
|  | +  bool call_saved = !global_regs[regno] && !call_really_used_regs[regno]; | 
|  | +  bool might_clobber = crtl->saves_all_registers | 
|  | +		       || df_regs_ever_live_p (regno) | 
|  | +		       || (regno == HARD_FRAME_POINTER_REGNUM | 
|  | +			   && frame_pointer_needed); | 
|  | + | 
|  | +  return (call_saved && might_clobber) | 
|  | +	 || (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return); | 
|  | +} | 
|  | + | 
|  | +/* Populate the current function's mips_frame_info structure. | 
|  | + | 
|  | +   MIPS stack frames look like: | 
|  | + | 
|  | +	+-------------------------------+ | 
|  | +	|                               | | 
|  | +	|  incoming stack arguments     | | 
|  | +	|                               | | 
|  | +	+-------------------------------+ | 
|  | +	|                               | | 
|  | +	|  caller-allocated save area   | | 
|  | +      A |  for register arguments       | | 
|  | +	|                               | | 
|  | +	+-------------------------------+ <-- incoming stack pointer | 
|  | +	|                               | | 
|  | +	|  callee-allocated save area   | | 
|  | +      B |  for arguments that are       | | 
|  | +	|  split between registers and  | | 
|  | +	|  the stack                    | | 
|  | +	|                               | | 
|  | +	+-------------------------------+ <-- arg_pointer_rtx | 
|  | +	|                               | | 
|  | +      C |  callee-allocated save area   | | 
|  | +	|  for register varargs         | | 
|  | +	|                               | | 
|  | +	+-------------------------------+ <-- hard_frame_pointer_rtx; | 
|  | +	|                               |     stack_pointer_rtx + gp_sp_offset | 
|  | +	|  GPR save area                |       + UNITS_PER_WORD | 
|  | +	|                               | | 
|  | +	+-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset | 
|  | +	|                               |       + UNITS_PER_HWFPVALUE | 
|  | +	|  FPR save area                | | 
|  | +	|                               | | 
|  | +	+-------------------------------+ <-- frame_pointer_rtx (virtual) | 
|  | +	|                               | | 
|  | +	|  local variables              | | 
|  | +	|                               | | 
|  | +      P +-------------------------------+ | 
|  | +	|                               | | 
|  | +	|  outgoing stack arguments     | | 
|  | +	|                               | | 
|  | +	+-------------------------------+ | 
|  | +	|                               | | 
|  | +	|  caller-allocated save area   | | 
|  | +	|  for register arguments       | | 
|  | +	|                               | | 
|  | +	+-------------------------------+ <-- stack_pointer_rtx | 
|  | + | 
|  | +   At least two of A, B and C will be empty. | 
|  | + | 
|  | +   Dynamic stack allocations such as alloca insert data at point P. | 
|  | +   They decrease stack_pointer_rtx but leave frame_pointer_rtx and | 
|  | +   hard_frame_pointer_rtx unchanged.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_compute_frame_info (void) | 
|  | +{ | 
|  | +  struct mips_frame_info *frame; | 
|  | +  HOST_WIDE_INT offset; | 
|  | +  unsigned int regno, i; | 
|  | + | 
|  | +  frame = &cfun->machine->frame; | 
|  | +  memset (frame, 0, sizeof (*frame)); | 
|  | + | 
|  | +  /* Find out which GPRs we need to save.  */ | 
|  | +  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++) | 
|  | +    if (mips_save_reg_p (regno)) | 
|  | +      frame->mask |= 1 << (regno - GP_REG_FIRST); | 
|  | + | 
|  | +  /* If this function calls eh_return, we must also save and restore the | 
|  | +     EH data registers.  */ | 
|  | +  if (crtl->calls_eh_return) | 
|  | +    for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++) | 
|  | +      frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST); | 
|  | + | 
|  | +  /* Find out which FPRs we need to save.  This loop must iterate over | 
|  | +     the same space as its companion in mips_for_each_saved_gpr_and_fpr.  */ | 
|  | +  if (TARGET_HARD_FLOAT) | 
|  | +    for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) | 
|  | +      if (mips_save_reg_p (regno)) | 
|  | +        frame->fmask |= 1 << (regno - FP_REG_FIRST); | 
|  | + | 
|  | +  /* At the bottom of the frame are any outgoing stack arguments. */ | 
|  | +  offset = crtl->outgoing_args_size; | 
|  | +  /* Next are local stack variables. */ | 
|  | +  offset += MIPS_STACK_ALIGN (get_frame_size ()); | 
|  | +  /* The virtual frame pointer points above the local variables. */ | 
|  | +  frame->frame_pointer_offset = offset; | 
|  | +  /* Next are the callee-saved FPRs. */ | 
|  | +  if (frame->fmask) | 
|  | +    { | 
|  | +      unsigned num_saved = __builtin_popcount(frame->fmask); | 
|  | +      offset += MIPS_STACK_ALIGN (num_saved * UNITS_PER_FPREG); | 
|  | +      frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE; | 
|  | +    } | 
|  | +  /* Next are the callee-saved GPRs. */ | 
|  | +  if (frame->mask) | 
|  | +    { | 
|  | +      unsigned num_saved = __builtin_popcount(frame->mask); | 
|  | +      offset += MIPS_STACK_ALIGN (num_saved * UNITS_PER_WORD); | 
|  | +      frame->gp_sp_offset = offset - UNITS_PER_WORD; | 
|  | +    } | 
|  | +  /* The hard frame pointer points above the callee-saved GPRs. */ | 
|  | +  frame->hard_frame_pointer_offset = offset; | 
|  | +  /* Above the hard frame pointer is the callee-allocated varargs save area.  */ | 
|  | +  offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size); | 
|  | +  frame->arg_pointer_offset = offset; | 
|  | +  /* Next is the callee-allocated area for pretend stack arguments.  */ | 
|  | +  offset += crtl->args.pretend_args_size; | 
|  | +  frame->total_size = offset; | 
|  | +  /* Above that lie the incoming stack pointer and any incoming arguments.  */ | 
|  | +} | 
|  | + | 
|  | +/* Make sure that we're not trying to eliminate to the wrong hard frame | 
|  | +   pointer.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to) | 
|  | +{ | 
|  | +  return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM); | 
|  | +} | 
|  | + | 
|  | +/* Implement INITIAL_ELIMINATION_OFFSET.  FROM is either the frame pointer | 
|  | +   or argument pointer.  TO is either the stack pointer or hard frame | 
|  | +   pointer.  */ | 
|  | + | 
|  | +HOST_WIDE_INT | 
|  | +mips_initial_elimination_offset (int from, int to) | 
|  | +{ | 
|  | +  HOST_WIDE_INT src, dest; | 
|  | + | 
|  | +  mips_compute_frame_info (); | 
|  | + | 
|  | +  if (to == HARD_FRAME_POINTER_REGNUM) | 
|  | +    dest = cfun->machine->frame.hard_frame_pointer_offset; | 
|  | +  else if (to == STACK_POINTER_REGNUM) | 
|  | +    dest = 0; /* The stack pointer is the base of all offsets.  */ | 
|  | +  else | 
|  | +    gcc_unreachable (); | 
|  | + | 
|  | +  if (from == FRAME_POINTER_REGNUM) | 
|  | +    src = cfun->machine->frame.frame_pointer_offset; | 
|  | +  else if (from == ARG_POINTER_REGNUM) | 
|  | +    src = cfun->machine->frame.arg_pointer_offset; | 
|  | +  else | 
|  | +    gcc_unreachable (); | 
|  | + | 
|  | +  return src - dest; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_extra_live_on_entry (bitmap regs) | 
|  | +{ | 
|  | +  if (TARGET_USE_GOT) | 
|  | +    { | 
|  | +      /* See the comment above load_call<mode> for details.  */ | 
|  | +      bitmap_set_bit (regs, GOT_VERSION_REGNUM); | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Implement RETURN_ADDR_RTX.  We do not support moving back to a | 
|  | +   previous frame.  */ | 
|  | + | 
|  | +rtx | 
|  | +mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED) | 
|  | +{ | 
|  | +  if (count != 0) | 
|  | +    return const0_rtx; | 
|  | + | 
|  | +  return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM); | 
|  | +} | 
|  | + | 
|  | +/* Emit code to change the current function's return address to | 
|  | +   ADDRESS.  SCRATCH is available as a scratch register, if needed. | 
|  | +   ADDRESS and SCRATCH are both word-mode GPRs.  */ | 
|  | + | 
|  | +void | 
|  | +mips_set_return_address (rtx address, rtx scratch) | 
|  | +{ | 
|  | +  rtx slot_address; | 
|  | + | 
|  | +  gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM)); | 
|  | +  slot_address = mips_add_offset (scratch, stack_pointer_rtx, | 
|  | +				  cfun->machine->frame.gp_sp_offset); | 
|  | +  mips_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address); | 
|  | +} | 
|  | + | 
|  | +/* A function to save or restore a register.  The first argument is the | 
|  | +   register and the second is the stack slot.  */ | 
|  | +typedef void (*mips_save_restore_fn) (rtx, rtx); | 
|  | + | 
|  | +/* Use FN to save or restore register REGNO.  MODE is the register's | 
|  | +   mode and OFFSET is the offset of its save slot from the current | 
|  | +   stack pointer.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_save_restore_reg (enum machine_mode mode, int regno, | 
|  | +		       HOST_WIDE_INT offset, mips_save_restore_fn fn) | 
|  | +{ | 
|  | +  rtx mem; | 
|  | + | 
|  | +  mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset)); | 
|  | +  fn (gen_rtx_REG (mode, regno), mem); | 
|  | +} | 
|  | + | 
|  | +/* Call FN for each register that is saved by the current function. | 
|  | +   SP_OFFSET is the offset of the current stack pointer from the start | 
|  | +   of the frame.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset, | 
|  | +				 mips_save_restore_fn fn) | 
|  | +{ | 
|  | +  HOST_WIDE_INT offset; | 
|  | +  int regno; | 
|  | + | 
|  | +  /* Save or restore the return address and other callee-saved GPRs.  */ | 
|  | +  offset = cfun->machine->frame.gp_sp_offset - sp_offset; | 
|  | +  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++) | 
|  | +    if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST)) | 
|  | +      { | 
|  | +        mips_save_restore_reg (word_mode, regno, offset, fn); | 
|  | +        offset -= UNITS_PER_WORD; | 
|  | +      } | 
|  | + | 
|  | +  /* This loop must iterate over the same space as its companion in | 
|  | +     mips_compute_frame_info.  */ | 
|  | +  offset = cfun->machine->frame.fp_sp_offset - sp_offset; | 
|  | +  for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) | 
|  | +    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST)) | 
|  | +      { | 
|  | +	mips_save_restore_reg (DFmode, regno, offset, fn); | 
|  | +	offset -= GET_MODE_SIZE (DFmode); | 
|  | +      } | 
|  | +} | 
|  | + | 
|  | +/* Return true if a move between register REGNO and its save slot (MEM) | 
|  | +   can be done in a single move.  LOAD_P is true if we are loading | 
|  | +   from the slot, false if we are storing to it.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_direct_save_slot_move_p (unsigned int regno, rtx mem, bool load_p) | 
|  | +{ | 
|  | +  return mips_secondary_reload_class (REGNO_REG_CLASS (regno), | 
|  | +				      GET_MODE (mem), mem, load_p) == NO_REGS; | 
|  | +} | 
|  | + | 
|  | +/* Emit a move from SRC to DEST, given that one of them is a register | 
|  | +   save slot and that the other is a register.  TEMP is a temporary | 
|  | +   GPR of the same mode that is available if need be.  */ | 
|  | + | 
|  | +void | 
|  | +mips_emit_save_slot_move (rtx dest, rtx src, rtx temp) | 
|  | +{ | 
|  | +  unsigned int regno; | 
|  | +  rtx mem; | 
|  | + | 
|  | +  if (REG_P (src)) | 
|  | +    { | 
|  | +      regno = REGNO (src); | 
|  | +      mem = dest; | 
|  | +    } | 
|  | +  else | 
|  | +    { | 
|  | +      regno = REGNO (dest); | 
|  | +      mem = src; | 
|  | +    } | 
|  | + | 
|  | +  if (mips_direct_save_slot_move_p (regno, mem, mem == src)) | 
|  | +    mips_emit_move (dest, src); | 
|  | +  else | 
|  | +    { | 
|  | +      gcc_assert (!reg_overlap_mentioned_p (dest, temp)); | 
|  | +      mips_emit_move (temp, src); | 
|  | +      mips_emit_move (dest, temp); | 
|  | +    } | 
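|  | +  /* When storing to a save slot, describe the store to the unwinder.  */ | 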
|  | +  if (MEM_P (dest)) | 
|  | +    mips_set_frame_expr (mips_frame_set (dest, src)); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_OUTPUT_FUNCTION_PROLOGUE.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_output_function_prologue (FILE *file ATTRIBUTE_UNUSED, | 
|  | +                               HOST_WIDE_INT size ATTRIBUTE_UNUSED) | 
|  | +{ | 
|  | +  const char *fnname; | 
|  | + | 
|  | +  /* Get the function name the same way that toplev.c does before calling | 
|  | +     assemble_start_function.  This is needed so that the name used here | 
|  | +     exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */ | 
|  | +  fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0); | 
|  | + | 
|  | +  ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, fnname, "function"); | 
|  | +  assemble_name (asm_out_file, fnname); | 
|  | +  fputs (":\n", asm_out_file); | 
|  | +} | 
|  | + | 
|  | +/* Save register REG to MEM.  Make the instruction frame-related.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_save_reg (rtx reg, rtx mem) | 
|  | +{ | 
|  | +  mips_emit_save_slot_move (mem, reg, MIPS_PROLOGUE_TEMP (GET_MODE (reg))); | 
|  | +} | 
|  | + | 
|  | + | 
|  | +/* Expand the "prologue" pattern.  */ | 
|  | + | 
|  | +void | 
|  | +mips_expand_prologue (void) | 
|  | +{ | 
|  | +  const struct mips_frame_info *frame; | 
|  | +  HOST_WIDE_INT size; | 
|  | +  rtx insn; | 
|  | + | 
|  | +  frame = &cfun->machine->frame; | 
|  | +  size = frame->total_size; | 
|  | + | 
|  | +  if (flag_stack_usage) | 
|  | +    current_function_static_stack_size = size; | 
|  | + | 
|  | +  /* Save the registers.  Allocate up to MIPS_MAX_FIRST_STACK_STEP | 
|  | +     bytes beforehand; this is enough to cover the register save area | 
|  | +     without going out of range.  */ | 
|  | +  if ((frame->mask | frame->fmask) != 0) | 
|  | +    { | 
|  | +      HOST_WIDE_INT step1; | 
|  | + | 
|  | +      step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP); | 
|  | +      insn = gen_add3_insn (stack_pointer_rtx, | 
|  | +			    stack_pointer_rtx, | 
|  | +			    GEN_INT (-step1)); | 
|  | +      RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; | 
|  | +      size -= step1; | 
|  | +      mips_for_each_saved_gpr_and_fpr (size, mips_save_reg); | 
|  | +    } | 
|  | + | 
|  | +  /* Set up the frame pointer, if we're using one.  */ | 
|  | +  if (frame_pointer_needed) | 
|  | +    { | 
|  | +      insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx, | 
|  | +                            GEN_INT (frame->hard_frame_pointer_offset - size)); | 
|  | +      RTX_FRAME_RELATED_P (emit_insn (insn)) = 1; | 
|  | +    } | 
|  | + | 
|  | +  /* Allocate the rest of the frame.  */ | 
|  | +  if (size > 0) | 
|  | +    { | 
|  | +      if (SMALL_OPERAND (-size)) | 
|  | +	RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx, | 
|  | +						       stack_pointer_rtx, | 
|  | +						       GEN_INT (-size)))) = 1; | 
|  | +      else | 
|  | +	{ | 
|  | +	  mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size)); | 
|  | +	  emit_insn (gen_sub3_insn (stack_pointer_rtx, | 
|  | +				    stack_pointer_rtx, | 
|  | +				    MIPS_PROLOGUE_TEMP (Pmode))); | 
|  | + | 
|  | +	  /* Describe the combined effect of the previous instructions.  */ | 
|  | +	  mips_set_frame_expr | 
|  | +	    (gen_rtx_SET (VOIDmode, stack_pointer_rtx, | 
|  | +			  plus_constant (stack_pointer_rtx, -size))); | 
|  | +	} | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Emit instructions to restore register REG from slot MEM.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_restore_reg (rtx reg, rtx mem) | 
|  | +{ | 
|  | +  mips_emit_save_slot_move (reg, mem, MIPS_EPILOGUE_TEMP (GET_MODE (reg), false)); | 
|  | +} | 
|  | + | 
|  | +static void | 
|  | +mips_restore_reg_sibcall (rtx reg, rtx mem) | 
|  | +{ | 
|  | +  mips_emit_save_slot_move (reg, mem, MIPS_EPILOGUE_TEMP (GET_MODE (reg), true)); | 
|  | +} | 
|  | + | 
|  | +/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P | 
|  | +   says which.  */ | 
|  | + | 
|  | +static bool riscv_in_utfunc = false; | 
|  | + | 
|  | +void | 
|  | +mips_expand_epilogue (bool sibcall_p) | 
|  | +{ | 
|  | +  const struct mips_frame_info *frame; | 
|  | +  HOST_WIDE_INT step1, step2; | 
|  | + | 
|  | +  if (!sibcall_p && mips_can_use_return_insn ()) | 
|  | +    { | 
|  | +      if (riscv_in_utfunc) | 
|  | +        emit_insn(gen_riscv_stop()); | 
|  | + | 
|  | +      emit_jump_insn (gen_return ()); | 
|  | +      return; | 
|  | +    } | 
|  | + | 
|  | +  /* Split the frame into two.  STEP1 is the amount of stack we should | 
|  | +     deallocate before restoring the registers.  STEP2 is the amount we | 
|  | +     should deallocate afterwards. | 
|  | + | 
|  | +     Start off by assuming that no registers need to be restored.  */ | 
|  | +  frame = &cfun->machine->frame; | 
|  | +  step1 = frame->total_size; | 
|  | +  step2 = 0; | 
|  | + | 
|  | +  /* Move past any dynamic stack allocations. */ | 
|  | +  if (cfun->calls_alloca) | 
|  | +    { | 
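|  | +      /* Restore the stack pointer from the hard frame pointer, undoing | 
|  | +	 any adjustment made by alloca.  */ | 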
|  | +      rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset); | 
|  | +      if (!SMALL_INT (adjust)) | 
|  | +	{ | 
|  | +	  mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode, sibcall_p), adjust); | 
|  | +	  adjust = MIPS_EPILOGUE_TEMP (Pmode, sibcall_p); | 
|  | +	} | 
|  | + | 
|  | +      emit_insn (gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx, adjust)); | 
|  | +    } | 
|  | + | 
|  | +  /* If we need to restore registers, deallocate as much stack as | 
|  | +     possible in the second step without going out of range.  */ | 
|  | +  if ((frame->mask | frame->fmask) != 0) | 
|  | +    { | 
|  | +      step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP); | 
|  | +      step1 -= step2; | 
|  | +    } | 
|  | + | 
|  | +  /* Deallocate the first part of the frame by adding STEP1 to the | 
|  | +     stack pointer.  */ | 
|  | +  if (step1 > 0) | 
|  | +    { | 
|  | +      /* Get an rtx for STEP1 that we can add to the stack pointer.  */ | 
|  | +      rtx adjust = GEN_INT (step1); | 
|  | +      if (!SMALL_OPERAND (step1)) | 
|  | +	{ | 
|  | +	  mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode, sibcall_p), adjust); | 
|  | +	  adjust = MIPS_EPILOGUE_TEMP (Pmode, sibcall_p); | 
|  | +	} | 
|  | + | 
|  | +      emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust)); | 
|  | +    } | 
|  | + | 
|  | +  /* Restore the registers.  */ | 
|  | +  mips_for_each_saved_gpr_and_fpr (frame->total_size - step2, | 
|  | +    sibcall_p ? mips_restore_reg_sibcall : mips_restore_reg); | 
|  | + | 
|  | +  /* Deallocate the final bit of the frame.  */ | 
|  | +  if (step2 > 0) | 
|  | +    emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, | 
|  | +			      GEN_INT (step2))); | 
|  | + | 
|  | +  /* Add in the __builtin_eh_return stack adjustment.  */ | 
|  | +  if (crtl->calls_eh_return) | 
|  | +    emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, | 
|  | +			      EH_RETURN_STACKADJ_RTX)); | 
|  | + | 
|  | +  if (!sibcall_p) | 
|  | +    emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM))); | 
|  | +} | 
|  | + | 
|  | +/* Return nonzero if this function is known to have a null epilogue. | 
|  | +   This allows the optimizer to omit jumps to jumps if no stack | 
|  | +   was created.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_can_use_return_insn (void) | 
|  | +{ | 
|  | +  return reload_completed && cfun->machine->frame.total_size == 0; | 
|  | +} | 
|  | + | 
|  | +/* Return true if register REGNO can store a value of mode MODE. | 
|  | +   The result of this function is cached in mips_hard_regno_mode_ok.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode) | 
|  | +{ | 
|  | +  unsigned int size; | 
|  | +  enum mode_class mclass; | 
|  | + | 
|  | +  if (VECTOR_MODE_P (mode)) | 
|  | +    { | 
|  | +      switch (mode) | 
|  | +	{ | 
|  | +	case MIPS_RISCV_VECTOR_MODE_NAME(DI): | 
|  | +	case MIPS_RISCV_VECTOR_MODE_NAME(SI): | 
|  | +	case MIPS_RISCV_VECTOR_MODE_NAME(HI): | 
|  | +	case MIPS_RISCV_VECTOR_MODE_NAME(QI): | 
|  | +	  return VEC_GP_REG_P (regno); | 
|  | + | 
|  | +	case MIPS_RISCV_VECTOR_MODE_NAME(DF): | 
|  | +	case MIPS_RISCV_VECTOR_MODE_NAME(SF): | 
|  | +	  return VEC_FP_REG_P (regno); | 
|  | + | 
|  | +	default: | 
|  | +	  return false; | 
|  | +	} | 
|  | +    } | 
|  | + | 
|  | +  if (mode == CCmode) | 
|  | +    return GP_REG_P (regno); | 
|  | + | 
|  | +  size = GET_MODE_SIZE (mode); | 
|  | +  mclass = GET_MODE_CLASS (mode); | 
|  | + | 
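|  | +  /* Any GPR can hold a value of at most word size; larger values must | 
|  | +     start in an even-numbered register.  */ | 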
|  | +  if (GP_REG_P (regno)) | 
|  | +    return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD; | 
|  | + | 
|  | +  if (FP_REG_P (regno)) | 
|  | +    { | 
|  | +      /* Allow TFmode for CCmode reloads.  */ | 
|  | +      if (mode == TFmode) | 
|  | +	return true; | 
|  | + | 
|  | +      if (mclass == MODE_FLOAT | 
|  | +	  || mclass == MODE_COMPLEX_FLOAT | 
|  | +	  || mclass == MODE_VECTOR_FLOAT) | 
|  | +	return size <= UNITS_PER_FPVALUE; | 
|  | + | 
|  | +      /* Allow integer modes that fit into a single register.  We need | 
|  | +	 to put integers into FPRs when using instructions like CVT | 
|  | +	 and TRUNC.  There's no point allowing sizes smaller than a word, | 
|  | +	 because the FPU has no appropriate load/store instructions.  */ | 
|  | +      if (mclass == MODE_INT) | 
|  | +	return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG; | 
|  | +    } | 
|  | + | 
|  | +  if (regno == GOT_VERSION_REGNUM) | 
|  | +    return mode == SImode; | 
|  | + | 
|  | +  return false; | 
|  | +} | 
|  | + | 
|  | +/* Implement HARD_REGNO_NREGS.  */ | 
|  | + | 
|  | +unsigned int | 
|  | +mips_hard_regno_nregs (int regno, enum machine_mode mode) | 
|  | +{ | 
|  | +  if (VECTOR_MODE_P (mode)) | 
|  | +    return 1; | 
|  | + | 
|  | +  if (VEC_GP_REG_P (regno) || VEC_FP_REG_P (regno)) | 
|  | +    return 1; | 
|  | + | 
|  | +  if (FP_REG_P (regno)) | 
|  | +    return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG; | 
|  | + | 
|  | +  /* All other registers are word-sized.  */ | 
|  | +  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD; | 
|  | +} | 
|  | + | 
|  | +/* Implement CLASS_MAX_NREGS, taking the maximum of the cases | 
|  | +   in mips_hard_regno_nregs.  */ | 
|  | + | 
|  | +int | 
|  | +mips_class_max_nregs (enum reg_class rclass, enum machine_mode mode) | 
|  | +{ | 
|  | +  int size; | 
|  | +  HARD_REG_SET left; | 
|  | + | 
|  | +  if (VECTOR_MODE_P (mode)) | 
|  | +    return 1; | 
|  | + | 
|  | +  if (rclass == VEC_GR_REGS || rclass == VEC_FP_REGS) | 
|  | +    return 1; | 
|  | + | 
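|  | +  /* Start with a size larger than any register, then clamp it to the | 
|  | +     smallest kind of register in the class.  */ | 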
|  | +  size = 0x8000; | 
|  | +  COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]); | 
|  | +  if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS])) | 
|  | +    { | 
|  | +      size = MIN (size, UNITS_PER_FPREG); | 
|  | +      AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]); | 
|  | +    } | 
|  | +  if (!hard_reg_set_empty_p (left)) | 
|  | +    size = MIN (size, UNITS_PER_WORD); | 
|  | +  return (GET_MODE_SIZE (mode) + size - 1) / size; | 
|  | +} | 
|  | + | 
|  | +/* Implement CANNOT_CHANGE_MODE_CLASS.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED, | 
|  | +			       enum machine_mode to ATTRIBUTE_UNUSED, | 
|  | +			       enum reg_class rclass) | 
|  | +{ | 
|  | +  /* There are several problems with changing the modes of values | 
|  | +     in floating-point registers: | 
|  | + | 
|  | +     - When a multi-word value is stored in paired floating-point | 
|  | +       registers, the first register always holds the low word. | 
|  | +       We therefore can't allow FPRs to change between single-word | 
|  | +       and multi-word modes on big-endian targets. | 
|  | + | 
|  | +     - GCC assumes that each word of a multiword register can be accessed | 
|  | +       individually using SUBREGs.  This is not true for floating-point | 
|  | +       registers if they are bigger than a word. | 
|  | + | 
|  | +     - Loading a 32-bit value into a 64-bit floating-point register | 
|  | +       will not sign-extend the value, despite what LOAD_EXTEND_OP says. | 
|  | +       We can't allow FPRs to change from SImode to a wider mode on | 
|  | +       64-bit targets. | 
|  | + | 
|  | +     - If the FPU has already interpreted a value in one format, we must | 
|  | +       not ask it to treat the value as having a different format. | 
|  | + | 
|  | +     We therefore disallow all mode changes involving FPRs.  */ | 
|  | +  return reg_classes_intersect_p (FP_REGS, rclass); | 
|  | +} | 
|  | + | 
|  | +/* Return true if moves in mode MODE can use the FPU's mov.fmt instruction.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_mode_ok_for_mov_fmt_p (enum machine_mode mode) | 
|  | +{ | 
|  | +  switch (mode) | 
|  | +    { | 
|  | +    case SFmode: | 
|  | +    case DFmode: | 
|  | +      return TARGET_HARD_FLOAT; | 
|  | + | 
|  | +    default: | 
|  | +      return false; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Implement MODES_TIEABLE_P.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2) | 
|  | +{ | 
|  | +  /* FPRs allow no mode punning, so it's not worth tying modes if we'd | 
|  | +     prefer to put one of them in FPRs.  */ | 
|  | +  return (mode1 == mode2 | 
|  | +	  || (!mips_mode_ok_for_mov_fmt_p (mode1) | 
|  | +	      && !mips_mode_ok_for_mov_fmt_p (mode2))); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */ | 
|  | + | 
|  | +static reg_class_t | 
|  | +mips_preferred_reload_class (rtx x, reg_class_t rclass) | 
|  | +{ | 
|  | +  if (reg_class_subset_p (FP_REGS, rclass) | 
|  | +      && mips_mode_ok_for_mov_fmt_p (GET_MODE (x))) | 
|  | +    return FP_REGS; | 
|  | + | 
|  | +  if (reg_class_subset_p (GR_REGS, rclass)) | 
|  | +    rclass = GR_REGS; | 
|  | + | 
|  | +  return rclass; | 
|  | +} | 
|  | + | 
|  | +/* RCLASS is a class involved in a REGISTER_MOVE_COST calculation. | 
|  | +   Return a "canonical" class to represent it in later calculations.  */ | 
|  | + | 
|  | +static reg_class_t | 
|  | +mips_canonicalize_move_class (reg_class_t rclass) | 
|  | +{ | 
|  | +  if (reg_class_subset_p (rclass, GENERAL_REGS)) | 
|  | +    rclass = GENERAL_REGS; | 
|  | + | 
|  | +  return rclass; | 
|  | +} | 
|  | + | 
|  | +/* Return the cost of moving a value from a register of class FROM to | 
|  | +   a GPR.  Return 0 for classes that are unions of other classes handled | 
|  | +   by this function.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_move_to_gpr_cost (reg_class_t from) | 
|  | +{ | 
|  | +  switch (from) | 
|  | +    { | 
|  | +    case GENERAL_REGS: | 
|  | +      return 1; | 
|  | + | 
|  | +    case FP_REGS: | 
|  | +      /* FP->int moves can cause recoupling on decoupled implementations.  */ | 
|  | +      return 4; | 
|  | + | 
|  | +    default: | 
|  | +      return 0; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Return the cost of moving a value from a GPR to a register of | 
|  | +   class TO.  Return 0 for classes that are unions of other classes | 
|  | +   handled by this function.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_move_from_gpr_cost (reg_class_t to) | 
|  | +{ | 
|  | +  switch (to) | 
|  | +    { | 
|  | +    case GENERAL_REGS: | 
|  | +    case FP_REGS: | 
|  | +      return 1; | 
|  | + | 
|  | +    default: | 
|  | +      return 0; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_REGISTER_MOVE_COST.  Return 0 for classes that are the | 
|  | +   maximum of the move costs for subclasses; regclass will work out | 
|  | +   the maximum for us.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_register_move_cost (enum machine_mode mode, | 
|  | +			 reg_class_t from, reg_class_t to) | 
|  | +{ | 
|  | +  int cost1, cost2; | 
|  | + | 
|  | +  from = mips_canonicalize_move_class (from); | 
|  | +  to = mips_canonicalize_move_class (to); | 
|  | + | 
|  | +  /* Handle moves that can be done without using general-purpose registers.  */ | 
|  | +  if (from == FP_REGS) | 
|  | +    if (to == FP_REGS && mips_mode_ok_for_mov_fmt_p (mode)) | 
|  | +      /* fsgnj.fmt.  */ | 
|  | +      return 1; | 
|  | + | 
|  | +  /* Handle cases in which only one class deviates from the ideal.  */ | 
|  | +  if (from == GENERAL_REGS) | 
|  | +    return mips_move_from_gpr_cost (to); | 
|  | +  if (to == GENERAL_REGS) | 
|  | +    return mips_move_to_gpr_cost (from); | 
|  | + | 
|  | +  /* Handle cases that require a GPR temporary.  */ | 
|  | +  cost1 = mips_move_to_gpr_cost (from); | 
|  | +  if (cost1 != 0) | 
|  | +    { | 
|  | +      cost2 = mips_move_from_gpr_cost (to); | 
|  | +      if (cost2 != 0) | 
|  | +	return cost1 + cost2; | 
|  | +    } | 
|  | + | 
|  | +  return 0; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_MEMORY_MOVE_COST.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in) | 
|  | +{ | 
|  | +  return (mips_cost->memory_latency | 
|  | +	  + memory_move_secondary_cost (mode, rclass, in)); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_IRA_COVER_CLASSES.  */ | 
|  | + | 
|  | +static const reg_class_t * | 
|  | +mips_ira_cover_classes (void) | 
|  | +{ | 
|  | +  static const reg_class_t no_acc_classes[] = { | 
|  | +    GR_REGS, FP_REGS, VEC_GR_REGS, VEC_FP_REGS, | 
|  | +    LIM_REG_CLASSES | 
|  | +  }; | 
|  | + | 
|  | +  return no_acc_classes; | 
|  | +} | 
|  | + | 
|  | +/* Return the register class required for a secondary register when | 
|  | +   copying between one of the registers in RCLASS and value X, which | 
|  | +   has mode MODE.  X is the source of the move if IN_P, otherwise it | 
|  | +   is the destination.  Return NO_REGS if no secondary register is | 
|  | +   needed.  */ | 
|  | + | 
|  | +enum reg_class | 
|  | +mips_secondary_reload_class (enum reg_class rclass, | 
|  | +			     enum machine_mode mode, rtx x, | 
|  | +			     bool in_p ATTRIBUTE_UNUSED) | 
|  | +{ | 
|  | +  int regno; | 
|  | + | 
|  | +  regno = true_regnum (x); | 
|  | + | 
|  | +  if (reg_class_subset_p (rclass, FP_REGS)) | 
|  | +    { | 
|  | +      if (MEM_P (x) | 
|  | +	  && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)) | 
|  | +	/* In this case we can use lwc1, swc1, ldc1 or sdc1.  We'll use | 
|  | +	   pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported.  */ | 
|  | +	return NO_REGS; | 
|  | + | 
|  | +      if (GP_REG_P (regno) || x == CONST0_RTX (mode)) | 
|  | +	/* In this case we can use mtc1, mfc1, dmtc1 or dmfc1.  */ | 
|  | +	return NO_REGS; | 
|  | + | 
|  | +      if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x)) | 
|  | +	/* We can force the constant to memory and use lwc1 | 
|  | +	   and ldc1.  As above, we will use pairs of lwc1s if | 
|  | +	   ldc1 is not supported.  */ | 
|  | +	return NO_REGS; | 
|  | + | 
|  | +      if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode)) | 
|  | +	/* In this case we can use mov.fmt.  */ | 
|  | +	return NO_REGS; | 
|  | + | 
|  | +      /* Otherwise, we need to reload through an integer register.  */ | 
|  | +      return GR_REGS; | 
|  | +    } | 
|  | +  if (FP_REG_P (regno)) | 
|  | +    return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS; | 
|  | + | 
|  | +  return NO_REGS; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_MODE_REP_EXTENDED.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep) | 
|  | +{ | 
|  | +  /* On 64-bit targets, SImode register values are sign-extended to DImode.  */ | 
|  | +  if (TARGET_64BIT && mode == SImode && mode_rep == DImode) | 
|  | +    return SIGN_EXTEND; | 
|  | + | 
|  | +  return UNKNOWN; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_VALID_POINTER_MODE.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_valid_pointer_mode (enum machine_mode mode) | 
|  | +{ | 
|  | +  return mode == SImode || (TARGET_64BIT && mode == DImode); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_VECTOR_MODE_SUPPORTED_P.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_vector_mode_supported_p (enum machine_mode mode) | 
|  | +{ | 
|  | +  switch (mode) | 
|  | +    { | 
|  | +    case MIPS_RISCV_VECTOR_MODE_NAME(DI): | 
|  | +    case MIPS_RISCV_VECTOR_MODE_NAME(SI): | 
|  | +    case MIPS_RISCV_VECTOR_MODE_NAME(HI): | 
|  | +    case MIPS_RISCV_VECTOR_MODE_NAME(QI): | 
|  | +    case MIPS_RISCV_VECTOR_MODE_NAME(DF): | 
|  | +    case MIPS_RISCV_VECTOR_MODE_NAME(SF): | 
|  | +      return true; | 
|  | + | 
|  | +    default: | 
|  | +      return false; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_scalar_mode_supported_p (enum machine_mode mode) | 
|  | +{ | 
|  | +  if (ALL_FIXED_POINT_MODE_P (mode) | 
|  | +      && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD) | 
|  | +    return true; | 
|  | + | 
|  | +  return default_scalar_mode_supported_p (mode); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE.  */ | 
|  | + | 
|  | +static enum machine_mode | 
|  | +mips_preferred_simd_mode (enum machine_mode mode ATTRIBUTE_UNUSED) | 
|  | +{ | 
|  | +  return word_mode; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_INIT_LIBFUNCS.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_init_libfuncs (void) | 
|  | +{ | 
|  | +} | 
|  | + | 
|  | +/* Return the assembly code for INSN, which has the operands given by | 
|  | +   OPERANDS, and which branches to OPERANDS[0] if some condition is true. | 
|  | +   BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0] | 
|  | +   is in range of a direct branch.  BRANCH_IF_FALSE is an inverted | 
|  | +   version of BRANCH_IF_TRUE.  */ | 
|  | + | 
|  | +const char * | 
|  | +mips_output_conditional_branch (rtx insn, rtx *operands, | 
|  | +				const char *branch_if_true, | 
|  | +				const char *branch_if_false) | 
|  | +{ | 
|  | +  unsigned int length; | 
|  | +  rtx taken, not_taken; | 
|  | + | 
|  | +  gcc_assert (LABEL_P (operands[0])); | 
|  | + | 
|  | +  length = get_attr_length (insn); | 
|  | +  if (length <= 4) | 
|  | +    return branch_if_true; | 
|  | + | 
|  | +  /* Generate a reversed branch around a direct jump.  This fallback does | 
|  | +     not use branch-likely instructions.  */ | 
|  | +  not_taken = gen_label_rtx (); | 
|  | +  taken = operands[0]; | 
|  | + | 
|  | +  /* Generate the reversed branch to NOT_TAKEN.  */ | 
|  | +  operands[0] = not_taken; | 
|  | +  output_asm_insn (branch_if_false, operands); | 
|  | + | 
|  | +  /* Output the unconditional branch to TAKEN.  */ | 
|  | +  output_asm_insn ("j\t%0", &taken); | 
|  | + | 
|  | +  /* Output NOT_TAKEN.  */ | 
|  | +  targetm.asm_out.internal_label (asm_out_file, "L", | 
|  | +				  CODE_LABEL_NUMBER (not_taken)); | 
|  | +  return ""; | 
|  | +} | 
|  | + | 
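|  | +/* Sketch of the long-branch fallback emitted above (label numbering and | 
|  | +   operand syntax are illustrative only).  For an out-of-range | 
|  | +   "beq %1,%2,%0" the output is roughly: | 
|  | + | 
|  | +	bne	%1,%2,.Ln	(BRANCH_IF_FALSE around the jump) | 
|  | +	j	%0		(direct jump to the real target) | 
|  | +   .Ln:				(NOT_TAKEN label) | 
|  | + | 
|  | +   so the conditional branch itself always stays in range.  */ | 
|  | + | 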
|  | +/* Implement TARGET_SCHED_ADJUST_COST.  We assume that anti and output | 
|  | +   dependencies have no cost. */ | 
|  | + | 
|  | +static int | 
|  | +mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link, | 
|  | +		  rtx dep ATTRIBUTE_UNUSED, int cost) | 
|  | +{ | 
|  | +  if (REG_NOTE_KIND (link) != 0) | 
|  | +    return 0; | 
|  | +  return cost; | 
|  | +} | 
|  | + | 
|  | +/* Return the number of instructions that can be issued per cycle.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_issue_rate (void) | 
|  | +{ | 
|  | +  switch (mips_tune) | 
|  | +    { | 
|  | +    case PROCESSOR_ROCKET: | 
|  | +      return 1; | 
|  | + | 
|  | +    default: | 
|  | +      return 1; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* This structure describes a single built-in function.  */ | 
|  | +struct mips_builtin_description { | 
|  | +  /* The code of the main .md file instruction.  See mips_builtin_type | 
|  | +     for more information.  */ | 
|  | +  enum insn_code icode; | 
|  | + | 
|  | +  /* The name of the built-in function.  */ | 
|  | +  const char *name; | 
|  | + | 
|  | +  /* Specifies how the function should be expanded.  */ | 
|  | +  enum mips_builtin_type builtin_type; | 
|  | + | 
|  | +  /* The function's prototype.  */ | 
|  | +  enum mips_function_type function_type; | 
|  | + | 
|  | +  /* Whether the function is available.  */ | 
|  | +  unsigned int (*avail) (void); | 
|  | +}; | 
|  | + | 
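|  | +/* Availability predicates for the built-in functions defined below. | 
|  | +   The MIPS "cache" builtin is never available on this target; the | 
|  | +   RISC-V vector builtins always are.  */ | 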
|  | +static unsigned int | 
|  | +mips_builtin_avail_cache (void) | 
|  | +{ | 
|  | +  return 0; | 
|  | +} | 
|  | + | 
|  | +static unsigned int | 
|  | +mips_builtin_avail_riscv (void) | 
|  | +{ | 
|  | +  return 1; | 
|  | +} | 
|  | + | 
|  | +/* Construct a mips_builtin_description from the given arguments. | 
|  | + | 
|  | +   INSN is the name of the associated instruction pattern, without the | 
|  | +   leading CODE_FOR_mips_. | 
|  | + | 
|  | +   NAME is the name of the function itself, without the leading | 
|  | +   "__builtin_mips_". | 
|  | + | 
|  | +   BUILTIN_TYPE and FUNCTION_TYPE are mips_builtin_description fields. | 
|  | + | 
|  | +   AVAIL is the name of the availability predicate, without the leading | 
|  | +   mips_builtin_avail_.  */ | 
|  | +#define MIPS_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL)	\ | 
|  | +  { CODE_FOR_mips_ ## INSN, "__builtin_mips_" NAME,			\ | 
|  | +    BUILTIN_TYPE, FUNCTION_TYPE, mips_builtin_avail_ ## AVAIL } | 
|  | + | 
|  | +/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT function | 
|  | +   mapped to instruction CODE_FOR_mips_<INSN>.  FUNCTION_TYPE and AVAIL | 
|  | +   are as for MIPS_BUILTIN.  */ | 
|  | +#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)			\ | 
|  | +  MIPS_BUILTIN (INSN, #INSN, MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL) | 
|  | + | 
|  | +/* Define __builtin_mips_<INSN>, which is a MIPS_BUILTIN_DIRECT_NO_TARGET | 
|  | +   function mapped to instruction CODE_FOR_mips_<INSN>.  FUNCTION_TYPE | 
|  | +   and AVAIL are as for MIPS_BUILTIN.  */ | 
|  | +#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)		\ | 
|  | +  MIPS_BUILTIN (INSN, #INSN, MIPS_BUILTIN_DIRECT_NO_TARGET,		\ | 
|  | +		FUNCTION_TYPE, AVAIL) | 
|  | + | 
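|  | +/* As a purely illustrative expansion of the macros above, the table | 
|  | +   entry | 
|  | + | 
|  | +     DIRECT_BUILTIN (riscv_vload_vsi, MIPS_VSI_FTYPE_CPOINTER, riscv) | 
|  | + | 
|  | +   stands for | 
|  | + | 
|  | +     { CODE_FOR_mips_riscv_vload_vsi, "__builtin_mips_riscv_vload_vsi", | 
|  | +       MIPS_BUILTIN_DIRECT, MIPS_VSI_FTYPE_CPOINTER, | 
|  | +       mips_builtin_avail_riscv }  */ | 
|  | + | 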
|  | +static const struct mips_builtin_description mips_builtins[] = { | 
|  | +  DIRECT_NO_TARGET_BUILTIN (cache, MIPS_VOID_FTYPE_SI_CVPOINTER, cache), | 
|  | + | 
|  | +  DIRECT_BUILTIN (riscv_vload_vdi, MIPS_VDI_FTYPE_CPOINTER, riscv), | 
|  | +  DIRECT_BUILTIN (riscv_vload_vsi, MIPS_VSI_FTYPE_CPOINTER, riscv), | 
|  | +  DIRECT_BUILTIN (riscv_vload_vhi, MIPS_VHI_FTYPE_CPOINTER, riscv), | 
|  | +  DIRECT_BUILTIN (riscv_vload_vqi, MIPS_VQI_FTYPE_CPOINTER, riscv), | 
|  | +  DIRECT_BUILTIN (riscv_vload_vdf, MIPS_VDF_FTYPE_CPOINTER, riscv), | 
|  | +  DIRECT_BUILTIN (riscv_vload_vsf, MIPS_VSF_FTYPE_CPOINTER, riscv), | 
|  | + | 
|  | +  DIRECT_BUILTIN (riscv_vload_strided_vdi, MIPS_VDI_FTYPE_CPOINTER_DI, riscv), | 
|  | +  DIRECT_BUILTIN (riscv_vload_strided_vsi, MIPS_VSI_FTYPE_CPOINTER_DI, riscv), | 
|  | +  DIRECT_BUILTIN (riscv_vload_strided_vhi, MIPS_VHI_FTYPE_CPOINTER_DI, riscv), | 
|  | +  DIRECT_BUILTIN (riscv_vload_strided_vqi, MIPS_VQI_FTYPE_CPOINTER_DI, riscv), | 
|  | +  DIRECT_BUILTIN (riscv_vload_strided_vdf, MIPS_VDF_FTYPE_CPOINTER_DI, riscv), | 
|  | +  DIRECT_BUILTIN (riscv_vload_strided_vsf, MIPS_VSF_FTYPE_CPOINTER_DI, riscv), | 
|  | + | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_vdi, MIPS_VOID_FTYPE_VDI_POINTER, riscv), | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_vsi, MIPS_VOID_FTYPE_VSI_POINTER, riscv), | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_vhi, MIPS_VOID_FTYPE_VHI_POINTER, riscv), | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_vqi, MIPS_VOID_FTYPE_VQI_POINTER, riscv), | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_vdf, MIPS_VOID_FTYPE_VDF_POINTER, riscv), | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_vsf, MIPS_VOID_FTYPE_VSF_POINTER, riscv), | 
|  | + | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_strided_vdi, MIPS_VOID_FTYPE_VDI_POINTER_DI, riscv), | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_strided_vsi, MIPS_VOID_FTYPE_VSI_POINTER_DI, riscv), | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_strided_vhi, MIPS_VOID_FTYPE_VHI_POINTER_DI, riscv), | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_strided_vqi, MIPS_VOID_FTYPE_VQI_POINTER_DI, riscv), | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_strided_vdf, MIPS_VOID_FTYPE_VDF_POINTER_DI, riscv), | 
|  | +  DIRECT_NO_TARGET_BUILTIN (riscv_vstore_strided_vsf, MIPS_VOID_FTYPE_VSF_POINTER_DI, riscv), | 
|  | +}; | 
|  | + | 
|  | +/* Index I is the function declaration for mips_builtins[I], or null if the | 
|  | +   function isn't defined on this target.  */ | 
|  | +static GTY(()) tree mips_builtin_decls[ARRAY_SIZE (mips_builtins)]; | 
|  | + | 
|  | +/* MODE is a vector mode whose elements have type TYPE.  Return the type | 
|  | +   of the vector itself.  */ | 
|  | + | 
|  | +static tree | 
|  | +mips_builtin_vector_type (tree type, enum machine_mode mode) | 
|  | +{ | 
|  | +  static tree types[2 * (int) MAX_MACHINE_MODE]; | 
|  | +  int mode_index; | 
|  | + | 
|  | +  mode_index = (int) mode; | 
|  | + | 
|  | +  if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type)) | 
|  | +    mode_index += MAX_MACHINE_MODE; | 
|  | + | 
|  | +  if (types[mode_index] == NULL_TREE) | 
|  | +    types[mode_index] = build_vector_type_for_mode (type, mode); | 
|  | +  return types[mode_index]; | 
|  | +} | 
|  | + | 
|  | +/* Return a type for 'const volatile void *'.  */ | 
|  | + | 
|  | +static tree | 
|  | +mips_build_cvpointer_type (void) | 
|  | +{ | 
|  | +  static tree cache; | 
|  | + | 
|  | +  if (cache == NULL_TREE) | 
|  | +    cache = build_pointer_type (build_qualified_type | 
|  | +				(void_type_node, | 
|  | +				 TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)); | 
|  | +  return cache; | 
|  | +} | 
|  | + | 
|  | +/* Source-level argument types.  */ | 
|  | +#define MIPS_ATYPE_VOID void_type_node | 
|  | +#define MIPS_ATYPE_INT integer_type_node | 
|  | +#define MIPS_ATYPE_POINTER ptr_type_node | 
|  | +#define MIPS_ATYPE_CPOINTER const_ptr_type_node | 
|  | +#define MIPS_ATYPE_CVPOINTER mips_build_cvpointer_type () | 
|  | + | 
|  | +/* Standard mode-based argument types.  */ | 
|  | +#define MIPS_ATYPE_UQI unsigned_intQI_type_node | 
|  | +#define MIPS_ATYPE_SI intSI_type_node | 
|  | +#define MIPS_ATYPE_USI unsigned_intSI_type_node | 
|  | +#define MIPS_ATYPE_DI intDI_type_node | 
|  | +#define MIPS_ATYPE_UDI unsigned_intDI_type_node | 
|  | +#define MIPS_ATYPE_SF float_type_node | 
|  | +#define MIPS_ATYPE_DF double_type_node | 
|  | + | 
|  | +/* Vector argument types.  */ | 
|  | +#define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode) | 
|  | +#define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode) | 
|  | +#define MIPS_ATYPE_V2SI mips_builtin_vector_type (intSI_type_node, V2SImode) | 
|  | +#define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode) | 
|  | +#define MIPS_ATYPE_V4HI mips_builtin_vector_type (intHI_type_node, V4HImode) | 
|  | +#define MIPS_ATYPE_V8QI mips_builtin_vector_type (intQI_type_node, V8QImode) | 
|  | +#define MIPS_ATYPE_UV2SI					\ | 
|  | +  mips_builtin_vector_type (unsigned_intSI_type_node, V2SImode) | 
|  | +#define MIPS_ATYPE_UV4HI					\ | 
|  | +  mips_builtin_vector_type (unsigned_intHI_type_node, V4HImode) | 
|  | +#define MIPS_ATYPE_UV8QI					\ | 
|  | +  mips_builtin_vector_type (unsigned_intQI_type_node, V8QImode) | 
|  | + | 
|  | +#define MIPS_ATYPE_VDI \ | 
|  | +  mips_builtin_vector_type (intDI_type_node, \ | 
|  | +    MIPS_RISCV_VECTOR_MODE_NAME(DI)) | 
|  | + | 
|  | +#define MIPS_ATYPE_VSI \ | 
|  | +  mips_builtin_vector_type (intSI_type_node, \ | 
|  | +    MIPS_RISCV_VECTOR_MODE_NAME(SI)) | 
|  | + | 
|  | +#define MIPS_ATYPE_VHI \ | 
|  | +  mips_builtin_vector_type (intHI_type_node, \ | 
|  | +    MIPS_RISCV_VECTOR_MODE_NAME(HI)) | 
|  | + | 
|  | +#define MIPS_ATYPE_VQI \ | 
|  | +  mips_builtin_vector_type (intQI_type_node, \ | 
|  | +    MIPS_RISCV_VECTOR_MODE_NAME(QI)) | 
|  | + | 
|  | +#define MIPS_ATYPE_VDF \ | 
|  | +  mips_builtin_vector_type (double_type_node, \ | 
|  | +    MIPS_RISCV_VECTOR_MODE_NAME(DF)) | 
|  | + | 
|  | +#define MIPS_ATYPE_VSF \ | 
|  | +  mips_builtin_vector_type (float_type_node, \ | 
|  | +    MIPS_RISCV_VECTOR_MODE_NAME(SF)) | 
|  | + | 
|  | +/* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists | 
|  | +   their associated MIPS_ATYPEs.  */ | 
|  | +#define MIPS_FTYPE_ATYPES1(A, B) \ | 
|  | +  MIPS_ATYPE_##A, MIPS_ATYPE_##B | 
|  | + | 
|  | +#define MIPS_FTYPE_ATYPES2(A, B, C) \ | 
|  | +  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C | 
|  | + | 
|  | +#define MIPS_FTYPE_ATYPES3(A, B, C, D) \ | 
|  | +  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D | 
|  | + | 
|  | +#define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \ | 
|  | +  MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \ | 
|  | +  MIPS_ATYPE_##E | 
|  | + | 
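|  | +/* Illustrative expansion (not part of the original sources): | 
|  | +   MIPS_FTYPE_ATYPES2 (VOID, SI, CVPOINTER) expands to | 
|  | + | 
|  | +     void_type_node, intSI_type_node, mips_build_cvpointer_type () | 
|  | + | 
|  | +   which is exactly the argument list that build_function_type_list | 
|  | +   receives in mips_build_function_type below.  */ | 
|  | + | 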
|  | +/* Return the function type associated with function prototype TYPE.  */ | 
|  | + | 
|  | +static tree | 
|  | +mips_build_function_type (enum mips_function_type type) | 
|  | +{ | 
|  | +  static tree types[(int) MIPS_MAX_FTYPE_MAX]; | 
|  | + | 
|  | +  if (types[(int) type] == NULL_TREE) | 
|  | +    switch (type) | 
|  | +      { | 
|  | +#define DEF_MIPS_FTYPE(NUM, ARGS)					\ | 
|  | +  case MIPS_FTYPE_NAME##NUM ARGS:					\ | 
|  | +    types[(int) type]							\ | 
|  | +      = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS,		\ | 
|  | +				  NULL_TREE);				\ | 
|  | +    break; | 
|  | +#include "config/riscv/riscv-ftypes.def" | 
|  | +#undef DEF_MIPS_FTYPE | 
|  | +      default: | 
|  | +	gcc_unreachable (); | 
|  | +      } | 
|  | + | 
|  | +  return types[(int) type]; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_INIT_BUILTINS.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_init_builtins (void) | 
|  | +{ | 
|  | +  const struct mips_builtin_description *d; | 
|  | +  unsigned int i; | 
|  | + | 
|  | +  /* Iterate over mips_builtins, creating a declaration for each built-in | 
|  | +     function that is available on this target.  */ | 
|  | +  for (i = 0; i < ARRAY_SIZE (mips_builtins); i++) | 
|  | +    { | 
|  | +      d = &mips_builtins[i]; | 
|  | +      if (d->avail ()) | 
|  | +	mips_builtin_decls[i] | 
|  | +	  = add_builtin_function (d->name, | 
|  | +				  mips_build_function_type (d->function_type), | 
|  | +				  i, BUILT_IN_MD, NULL, NULL); | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_BUILTIN_DECL.  */ | 
|  | + | 
|  | +static tree | 
|  | +mips_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED) | 
|  | +{ | 
|  | +  if (code >= ARRAY_SIZE (mips_builtins)) | 
|  | +    return error_mark_node; | 
|  | +  return mips_builtin_decls[code]; | 
|  | +} | 
|  | + | 
|  | +/* Take argument ARGNO from EXP's argument list and convert it into a | 
|  | +   form suitable for input operand OPNO of instruction ICODE.  Return the | 
|  | +   value.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_prepare_builtin_arg (enum insn_code icode, | 
|  | +			  unsigned int opno, tree exp, unsigned int argno) | 
|  | +{ | 
|  | +  tree arg; | 
|  | +  rtx value; | 
|  | +  enum machine_mode mode; | 
|  | + | 
|  | +  arg = CALL_EXPR_ARG (exp, argno); | 
|  | +  value = expand_normal (arg); | 
|  | +  mode = insn_data[icode].operand[opno].mode; | 
|  | +  if (!insn_data[icode].operand[opno].predicate (value, mode)) | 
|  | +    { | 
|  | +      /* We need to get the mode from ARG for two reasons: | 
|  | + | 
|  | +	   - to cope with address operands, where MODE is the mode of the | 
|  | +	     memory, rather than of VALUE itself. | 
|  | + | 
|  | +	   - to cope with special predicates like pmode_register_operand, | 
|  | +	     where MODE is VOIDmode.  */ | 
|  | +      value = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (arg)), value); | 
|  | + | 
|  | +      /* Check the predicate again.  */ | 
|  | +      if (!insn_data[icode].operand[opno].predicate (value, mode)) | 
|  | +	{ | 
|  | +	  error ("invalid argument to built-in function"); | 
|  | +	  return const0_rtx; | 
|  | +	} | 
|  | +    } | 
|  | + | 
|  | +  return value; | 
|  | +} | 
|  | + | 
|  | +/* Return an rtx suitable for output operand OP of instruction ICODE. | 
|  | +   If TARGET is non-null, try to use it where possible.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target) | 
|  | +{ | 
|  | +  enum machine_mode mode; | 
|  | + | 
|  | +  mode = insn_data[icode].operand[op].mode; | 
|  | +  if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode)) | 
|  | +    target = gen_reg_rtx (mode); | 
|  | + | 
|  | +  return target; | 
|  | +} | 
|  | + | 
|  | +/* Expand a MIPS_BUILTIN_DIRECT or MIPS_BUILTIN_DIRECT_NO_TARGET function; | 
|  | +   HAS_TARGET_P says which.  EXP is the CALL_EXPR that calls the function | 
|  | +   and ICODE is the code of the associated .md pattern.  TARGET, if nonnull, | 
|  | +   suggests a good place to put the result.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp, | 
|  | +			    bool has_target_p) | 
|  | +{ | 
|  | +  rtx ops[MAX_RECOG_OPERANDS]; | 
|  | +  int opno, argno; | 
|  | + | 
|  | +  /* Map any target to operand 0.  */ | 
|  | +  opno = 0; | 
|  | +  if (has_target_p) | 
|  | +    { | 
|  | +      target = mips_prepare_builtin_target (icode, opno, target); | 
|  | +      ops[opno] = target; | 
|  | +      opno++; | 
|  | +    } | 
|  | + | 
|  | +  /* Map the arguments to the other operands.  The n_operands value | 
|  | +     for an expander includes match_dups and match_scratches as well as | 
|  | +     match_operands, so n_operands is only an upper bound on the number | 
|  | +     of arguments to the expander function.  */ | 
|  | +  gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands); | 
|  | +  for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++) | 
|  | +    ops[opno] = mips_prepare_builtin_arg (icode, opno, exp, argno); | 
|  | + | 
|  | +  switch (opno) | 
|  | +    { | 
|  | +    case 2: | 
|  | +      emit_insn (GEN_FCN (icode) (ops[0], ops[1])); | 
|  | +      break; | 
|  | + | 
|  | +    case 3: | 
|  | +      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2])); | 
|  | +      break; | 
|  | + | 
|  | +    case 4: | 
|  | +      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3])); | 
|  | +      break; | 
|  | + | 
|  | +    default: | 
|  | +      gcc_unreachable (); | 
|  | +    } | 
|  | +  return target; | 
|  | +} | 
|  | + | 
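|  | +/* Illustrative walk-through (using the definitions above): a call such as | 
|  | + | 
|  | +     v = __builtin_mips_riscv_vload_vsi (p); | 
|  | + | 
|  | +   reaches mips_expand_builtin_direct with HAS_TARGET_P true, so operand 0 | 
|  | +   becomes the target register, operand 1 the expanded P, and the switch | 
|  | +   above emits GEN_FCN (icode) (ops[0], ops[1]).  */ | 
|  | + | 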
|  | +/* Implement TARGET_EXPAND_BUILTIN.  */ | 
|  | + | 
|  | +static rtx | 
|  | +mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, | 
|  | +		     enum machine_mode mode ATTRIBUTE_UNUSED, | 
|  | +		     int ignore ATTRIBUTE_UNUSED) | 
|  | +{ | 
|  | +  tree fndecl; | 
|  | +  unsigned int fcode, avail; | 
|  | +  const struct mips_builtin_description *d; | 
|  | + | 
|  | +  fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); | 
|  | +  fcode = DECL_FUNCTION_CODE (fndecl); | 
|  | +  gcc_assert (fcode < ARRAY_SIZE (mips_builtins)); | 
|  | +  d = &mips_builtins[fcode]; | 
|  | +  avail = d->avail (); | 
|  | +  gcc_assert (avail != 0); | 
|  | +  switch (d->builtin_type) | 
|  | +    { | 
|  | +    case MIPS_BUILTIN_DIRECT: | 
|  | +      return mips_expand_builtin_direct (d->icode, target, exp, true); | 
|  | + | 
|  | +    case MIPS_BUILTIN_DIRECT_NO_TARGET: | 
|  | +      return mips_expand_builtin_direct (d->icode, target, exp, false); | 
|  | +    } | 
|  | +  gcc_unreachable (); | 
|  | +} | 
|  | + | 
|  | +/* This structure records that the current function has a LO_SUM | 
|  | +   involving SYMBOL_REF or LABEL_REF BASE and that MAX_OFFSET is | 
|  | +   the largest offset applied to BASE by all such LO_SUMs.  */ | 
|  | +struct mips_lo_sum_offset { | 
|  | +  rtx base; | 
|  | +  HOST_WIDE_INT offset; | 
|  | +}; | 
|  | + | 
|  | +/* Return a hash value for SYMBOL_REF or LABEL_REF BASE.  */ | 
|  | + | 
|  | +static hashval_t | 
|  | +mips_hash_base (rtx base) | 
|  | +{ | 
|  | +  int do_not_record_p; | 
|  | + | 
|  | +  return hash_rtx (base, GET_MODE (base), &do_not_record_p, NULL, false); | 
|  | +} | 
|  | + | 
|  | +/* Hash-table callbacks for mips_lo_sum_offsets.  */ | 
|  | + | 
|  | +static hashval_t | 
|  | +mips_lo_sum_offset_hash (const void *entry) | 
|  | +{ | 
|  | +  return mips_hash_base (((const struct mips_lo_sum_offset *) entry)->base); | 
|  | +} | 
|  | + | 
|  | +static int | 
|  | +mips_lo_sum_offset_eq (const void *entry, const void *value) | 
|  | +{ | 
|  | +  return rtx_equal_p (((const struct mips_lo_sum_offset *) entry)->base, | 
|  | +		      (const_rtx) value); | 
|  | +} | 
|  | + | 
|  | +/* Look up symbolic constant X in HTAB, which is a hash table of | 
|  | +   mips_lo_sum_offsets.  If OPTION is NO_INSERT, return true if X can be | 
|  | +   paired with a recorded LO_SUM, otherwise record X in the table.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_lo_sum_offset_lookup (htab_t htab, rtx x, enum insert_option option) | 
|  | +{ | 
|  | +  rtx base, offset; | 
|  | +  void **slot; | 
|  | +  struct mips_lo_sum_offset *entry; | 
|  | + | 
|  | +  /* Split X into a base and offset.  */ | 
|  | +  split_const (x, &base, &offset); | 
|  | +  if (UNSPEC_ADDRESS_P (base)) | 
|  | +    base = UNSPEC_ADDRESS (base); | 
|  | + | 
|  | +  /* Look up the base in the hash table.  */ | 
|  | +  slot = htab_find_slot_with_hash (htab, base, mips_hash_base (base), option); | 
|  | +  if (slot == NULL) | 
|  | +    return false; | 
|  | + | 
|  | +  entry = (struct mips_lo_sum_offset *) *slot; | 
|  | +  if (option == INSERT) | 
|  | +    { | 
|  | +      if (entry == NULL) | 
|  | +	{ | 
|  | +	  entry = XNEW (struct mips_lo_sum_offset); | 
|  | +	  entry->base = base; | 
|  | +	  entry->offset = INTVAL (offset); | 
|  | +	  *slot = entry; | 
|  | +	} | 
|  | +      else | 
|  | +	{ | 
|  | +	  if (INTVAL (offset) > entry->offset) | 
|  | +	    entry->offset = INTVAL (offset); | 
|  | +	} | 
|  | +    } | 
|  | +  return INTVAL (offset) <= entry->offset; | 
|  | +} | 
|  | + | 
|  | +/* A for_each_rtx callback for which DATA is a mips_lo_sum_offset hash table. | 
|  | +   Record every LO_SUM in *LOC.  */ | 
|  | + | 
|  | +static int | 
|  | +mips_record_lo_sum (rtx *loc, void *data) | 
|  | +{ | 
|  | +  if (GET_CODE (*loc) == LO_SUM) | 
|  | +    mips_lo_sum_offset_lookup ((htab_t) data, XEXP (*loc, 1), INSERT); | 
|  | +  return 0; | 
|  | +} | 
|  | + | 
|  | +/* Return true if INSN is a SET of an orphaned high-part relocation. | 
|  | +   HTAB is a hash table of mips_lo_sum_offsets that describes all the | 
|  | +   LO_SUMs in the current function.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_orphaned_high_part_p (htab_t htab, rtx insn) | 
|  | +{ | 
|  | +  rtx x, set; | 
|  | + | 
|  | +  set = single_set (insn); | 
|  | +  if (set) | 
|  | +    { | 
|  | +      /* Check for %his.  */ | 
|  | +      x = SET_SRC (set); | 
|  | +      if (GET_CODE (x) == HIGH | 
|  | +	  && absolute_symbolic_operand (XEXP (x, 0), VOIDmode)) | 
|  | +	return !mips_lo_sum_offset_lookup (htab, XEXP (x, 0), NO_INSERT); | 
|  | +    } | 
|  | +  return false; | 
|  | +} | 
|  | + | 
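|  | +/* Sketch of the situation handled above (the RTL is illustrative): if a | 
|  | +   function still contains | 
|  | + | 
|  | +     (set (reg X) (high (symbol_ref "sym"))) | 
|  | + | 
|  | +   but the first pass in mips_reorg_process_insns recorded no LO_SUM for | 
|  | +   "sym" that covers it, the HIGH sets no useful bits on its own and the | 
|  | +   instruction can simply be deleted.  */ | 
|  | + | 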
|  | +/* Delete any high-part relocations whose partnering low parts are dead. */ | 
|  | + | 
|  | +static void | 
|  | +mips_reorg_process_insns (void) | 
|  | +{ | 
|  | +  rtx insn, next_insn; | 
|  | +  htab_t htab; | 
|  | + | 
|  | +  /* Force all instructions to be split into their final form.  */ | 
|  | +  split_all_insns_noflow (); | 
|  | + | 
|  | +  /* Recalculate instruction lengths without taking nops into account.  */ | 
|  | +  shorten_branches (get_insns ()); | 
|  | + | 
|  | +  htab = htab_create (37, mips_lo_sum_offset_hash, | 
|  | +		      mips_lo_sum_offset_eq, free); | 
|  | + | 
|  | +  /* Make a first pass over the instructions, recording all the LO_SUMs.  */ | 
|  | +  for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn)) | 
|  | +    if (USEFUL_INSN_P (insn)) | 
|  | +      for_each_rtx (&PATTERN (insn), mips_record_lo_sum, htab); | 
|  | + | 
|  | +  /* Make a second pass over the instructions, deleting any orphaned | 
|  | +     high-part relocations found by the first pass.  */ | 
|  | +  for (insn = get_insns (); insn != 0; insn = next_insn) | 
|  | +    { | 
|  | +      next_insn = NEXT_INSN (insn); | 
|  | +      if (USEFUL_INSN_P (insn)) | 
|  | +	{ | 
|  | +	  /* INSN is a single instruction.  Delete it if it's an | 
|  | +	     orphaned high-part relocation.  */ | 
|  | +	  if (mips_orphaned_high_part_p (htab, insn)) | 
|  | +	    delete_insn (insn); | 
|  | +	} | 
|  | +    } | 
|  | + | 
|  | +  htab_delete (htab); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_MACHINE_DEPENDENT_REORG.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_reorg (void) | 
|  | +{ | 
|  | +  mips_reorg_process_insns (); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Generate rtl rather than asm text | 
|  | +   in order to avoid duplicating too much logic from elsewhere.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, | 
|  | +		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, | 
|  | +		      tree function) | 
|  | +{ | 
|  | +  rtx this_rtx, temp1, temp2, insn, fnaddr; | 
|  | +  bool use_sibcall_p; | 
|  | + | 
|  | +  /* Pretend to be a post-reload pass while generating rtl.  */ | 
|  | +  reload_completed = 1; | 
|  | + | 
|  | +  /* Mark the end of the (empty) prologue.  */ | 
|  | +  emit_note (NOTE_INSN_PROLOGUE_END); | 
|  | + | 
|  | +  /* Determine if we can use a sibcall to call FUNCTION directly.  */ | 
|  | +  fnaddr = XEXP (DECL_RTL (function), 0); | 
|  | +  use_sibcall_p = const_call_insn_operand (fnaddr, Pmode); | 
|  | + | 
|  | +  /* We need two temporary registers in some cases.  */ | 
|  | +  temp1 = gen_rtx_REG (Pmode, 2); | 
|  | +  temp2 = gen_rtx_REG (Pmode, 3); | 
|  | + | 
|  | +  /* Find out which register contains the "this" pointer.  */ | 
|  | +  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)) | 
|  | +    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1); | 
|  | +  else | 
|  | +    this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST); | 
|  | + | 
|  | +  /* Add DELTA to THIS_RTX.  */ | 
|  | +  if (delta != 0) | 
|  | +    { | 
|  | +      rtx offset = GEN_INT (delta); | 
|  | +      if (!SMALL_OPERAND (delta)) | 
|  | +	{ | 
|  | +	  mips_emit_move (temp1, offset); | 
|  | +	  offset = temp1; | 
|  | +	} | 
|  | +      emit_insn (gen_add3_insn (this_rtx, this_rtx, offset)); | 
|  | +    } | 
|  | + | 
|  | +  /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX.  */ | 
|  | +  if (vcall_offset != 0) | 
|  | +    { | 
|  | +      rtx addr; | 
|  | + | 
|  | +      /* Set TEMP1 to *THIS_RTX.  */ | 
|  | +      mips_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx)); | 
|  | + | 
|  | +      /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET.  */ | 
|  | +      addr = mips_add_offset (temp2, temp1, vcall_offset); | 
|  | + | 
|  | +      /* Load the offset and add it to THIS_RTX.  */ | 
|  | +      mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr)); | 
|  | +      emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1)); | 
|  | +    } | 
|  | + | 
|  | +  /* Jump to the target function.  Use a sibcall if direct jumps are | 
|  | +     allowed, otherwise load the address into a register first.  */ | 
|  | +  if (use_sibcall_p) | 
|  | +    { | 
|  | +      insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx)); | 
|  | +      SIBLING_CALL_P (insn) = 1; | 
|  | +    } | 
|  | +  else | 
|  | +    { | 
|  | +      mips_emit_move(temp1, fnaddr); | 
|  | +      emit_jump_insn (gen_indirect_jump (temp1)); | 
|  | +    } | 
|  | + | 
|  | +  /* Run just enough of rest_of_compilation.  This sequence was | 
|  | +     "borrowed" from alpha.c.  */ | 
|  | +  insn = get_insns (); | 
|  | +  insn_locators_alloc (); | 
|  | +  split_all_insns_noflow (); | 
|  | +  shorten_branches (insn); | 
|  | +  final_start_function (insn, file, 1); | 
|  | +  final (insn, file, 1); | 
|  | +  final_end_function (); | 
|  | + | 
|  | +  /* Clean up the vars set above.  Note that final_end_function resets | 
|  | +     the global pointer for us.  */ | 
|  | +  reload_completed = 0; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_SET_CURRENT_FUNCTION.  Decide whether the current | 
|  | +   function has the "utfunc" attribute and, if that has changed since the | 
|  | +   previous function, reinitialize the register set accordingly.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_set_current_function (tree fndecl) | 
|  | +{ | 
|  | +  bool utfunc | 
|  | +    = fndecl && lookup_attribute ("utfunc", DECL_ATTRIBUTES (fndecl)) != NULL; | 
|  | +  if (riscv_in_utfunc != utfunc) | 
|  | +    reinit_regs (); | 
|  | +  riscv_in_utfunc = utfunc; | 
|  | +} | 
|  | + | 
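|  | +/* Source-level usage, inferred from the attribute lookup above (the | 
|  | +   declaration below is only an example): | 
|  | + | 
|  | +     void __attribute__ ((utfunc)) handler (void); | 
|  | + | 
|  | +   Functions marked this way get the alternative register usage set up in | 
|  | +   mips_conditional_register_usage.  */ | 
|  | + | 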
|  | +/* Allocate a chunk of memory for per-function machine-dependent data.  */ | 
|  | + | 
|  | +static struct machine_function * | 
|  | +mips_init_machine_status (void) | 
|  | +{ | 
|  | +  return ggc_alloc_cleared_machine_function (); | 
|  | +} | 
|  | + | 
|  | +/* Return the mips_cpu_info entry for the processor or ISA given | 
|  | +   by CPU_STRING.  Return null if the string isn't recognized. | 
|  | + | 
|  | +   A similar function exists in GAS.  */ | 
|  | + | 
|  | +static const struct mips_cpu_info * | 
|  | +mips_parse_cpu (const char *cpu_string) | 
|  | +{ | 
|  | +  unsigned int i; | 
|  | + | 
|  | +  for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++) | 
|  | +    if (strcmp (mips_cpu_info_table[i].name, cpu_string) == 0) | 
|  | +      return mips_cpu_info_table + i; | 
|  | + | 
|  | +  return NULL; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_HANDLE_OPTION.  */ | 
|  | + | 
|  | +static bool | 
|  | +mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED) | 
|  | +{ | 
|  | +  switch (code) | 
|  | +    { | 
|  | +    case OPT_mtune_: | 
|  | +      return mips_parse_cpu (arg) != 0; | 
|  | + | 
|  | +    default: | 
|  | +      return true; | 
|  | +    } | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_OPTION_OVERRIDE.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_option_override (void) | 
|  | +{ | 
|  | +  int i, start, regno, mode; | 
|  | +  const struct mips_cpu_info *info; | 
|  | + | 
|  | +#ifdef SUBTARGET_OVERRIDE_OPTIONS | 
|  | +  SUBTARGET_OVERRIDE_OPTIONS; | 
|  | +#endif | 
|  | + | 
|  | +  info = mips_parse_cpu (MIPS_CPU_STRING_DEFAULT); | 
|  | +  gcc_assert (info); | 
|  | +  mips_tune = info->cpu; | 
|  | + | 
|  | +  if (mips_tune_string != 0) | 
|  | +    { | 
|  | +      const struct mips_cpu_info *tune = mips_parse_cpu (mips_tune_string); | 
|  | +      if (tune) | 
|  | +	mips_tune = tune->cpu; | 
|  | +    } | 
|  | + | 
|  | +  flag_pcc_struct_return = 0; | 
|  | + | 
|  | +  /* Decide which rtx_costs structure to use.  */ | 
|  | +  if (optimize_size) | 
|  | +    mips_cost = &mips_rtx_cost_optimize_size; | 
|  | +  else | 
|  | +    mips_cost = &mips_rtx_cost_data[mips_tune]; | 
|  | + | 
|  | +  /* If the user hasn't specified a branch cost, use the processor's | 
|  | +     default.  */ | 
|  | +  if (mips_branch_cost == 0) | 
|  | +    mips_branch_cost = mips_cost->branch_cost; | 
|  | + | 
|  | +  if (flag_pic) | 
|  | +    target_flags |= MASK_ABICALLS; | 
|  | + | 
|  | +  /* Prefer a call to memcpy over inline code when optimizing for size, | 
|  | +     though see MOVE_RATIO in riscv.h.  */ | 
|  | +  if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0) | 
|  | +    target_flags |= MASK_MEMCPY; | 
|  | + | 
|  | +#ifdef MIPS_TFMODE_FORMAT | 
|  | +  REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT; | 
|  | +#endif | 
|  | + | 
|  | +  /* .cfi_* directives generate a read-only section, so fall back on | 
|  | +     manual .eh_frame creation if we need the section to be writable.  */ | 
|  | +  if (TARGET_WRITABLE_EH_FRAME) | 
|  | +    flag_dwarf2_cfi_asm = 0; | 
|  | + | 
|  | +  /* Set up array to map GCC register number to debug register number. | 
|  | +     Ignore the special purpose register numbers.  */ | 
|  | + | 
|  | +  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | 
|  | +    { | 
|  | +      mips_dbx_regno[i] = INVALID_REGNUM; | 
|  | +      if (GP_REG_P (i) || FP_REG_P (i)) | 
|  | +	mips_dwarf_regno[i] = i; | 
|  | +      else | 
|  | +	mips_dwarf_regno[i] = INVALID_REGNUM; | 
|  | +    } | 
|  | + | 
|  | +  start = GP_DBX_FIRST - GP_REG_FIRST; | 
|  | +  for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++) | 
|  | +    mips_dbx_regno[i] = i + start; | 
|  | + | 
|  | +  start = FP_DBX_FIRST - FP_REG_FIRST; | 
|  | +  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++) | 
|  | +    mips_dbx_regno[i] = i + start; | 
|  | + | 
|  | +  /* Set up mips_hard_regno_mode_ok.  */ | 
|  | +  for (mode = 0; mode < MAX_MACHINE_MODE; mode++) | 
|  | +    for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) | 
|  | +      mips_hard_regno_mode_ok[mode][regno] | 
|  | +	= mips_hard_regno_mode_ok_p (regno, (enum machine_mode) mode); | 
|  | + | 
|  | +  /* Function to allocate machine-dependent function status.  */ | 
|  | +  init_machine_status = &mips_init_machine_status; | 
|  | + | 
|  | +  targetm.min_anchor_offset = -RISCV_IMM_REACH/2; | 
|  | +  targetm.max_anchor_offset = RISCV_IMM_REACH/2-1; | 
|  | + | 
|  | +  targetm.const_anchor = RISCV_IMM_REACH/2; | 
|  | + | 
|  | +  mips_init_relocs (); | 
|  | + | 
|  | +  restore_target_globals (&default_target_globals); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_OPTION_OPTIMIZATION_TABLE.  */ | 
|  | +static const struct default_options mips_option_optimization_table[] = | 
|  | +  { | 
|  | +    { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 }, | 
|  | +    { OPT_LEVELS_NONE, 0, NULL, 0 } | 
|  | +  }; | 
|  | + | 
|  | +/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_conditional_register_usage (void) | 
|  | +{ | 
|  | +  int regno; | 
|  | + | 
|  | +  if (!TARGET_HARD_FLOAT) | 
|  | +    { | 
|  | +      for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++) | 
|  | +	fixed_regs[regno] = call_used_regs[regno] = 1; | 
|  | +    } | 
|  | + | 
|  | +  if (riscv_in_utfunc) | 
|  | +  { | 
|  | +    for (regno = CALLEE_SAVED_GP_REG_FIRST; | 
|  | +         regno <= CALLEE_SAVED_GP_REG_LAST; regno++) | 
|  | +    { | 
|  | +      call_used_regs[regno] = 1; | 
|  | +      call_really_used_regs[regno] = 1; | 
|  | +    } | 
|  | + | 
|  | +    call_used_regs[RETURN_ADDR_REGNUM] = 1; | 
|  | +    call_really_used_regs[RETURN_ADDR_REGNUM] = 1; | 
|  | + | 
|  | +    for (regno = CALLEE_SAVED_FP_REG_FIRST; | 
|  | +         regno <= CALLEE_SAVED_FP_REG_LAST; regno++) | 
|  | +    { | 
|  | +      call_used_regs[regno] = 1; | 
|  | +      call_really_used_regs[regno] = 1; | 
|  | +    } | 
|  | +  } | 
|  | +  else | 
|  | +  { | 
|  | +    for (regno = CALLEE_SAVED_GP_REG_FIRST; | 
|  | +         regno <= CALLEE_SAVED_GP_REG_LAST; regno++) | 
|  | +    { | 
|  | +      call_used_regs[regno] = 0; | 
|  | +      call_really_used_regs[regno] = 0; | 
|  | +    } | 
|  | + | 
|  | +    call_used_regs[GP_REG_FIRST + 28] = 1; | 
|  | + | 
|  | +    call_used_regs[RETURN_ADDR_REGNUM] = 0; | 
|  | +    call_really_used_regs[RETURN_ADDR_REGNUM] = 0; | 
|  | + | 
|  | +    for (regno = CALLEE_SAVED_FP_REG_FIRST; | 
|  | +         regno <= CALLEE_SAVED_FP_REG_LAST; regno++) | 
|  | +    { | 
|  | +      call_used_regs[regno] = 0; | 
|  | +      call_really_used_regs[regno] = 0; | 
|  | +    } | 
|  | +  } | 
|  | +} | 
|  | + | 
|  | +/* Initialize vector TARGET to VALS.  */ | 
|  | + | 
|  | +void | 
|  | +mips_expand_vector_init (rtx target, rtx vals) | 
|  | +{ | 
|  | +  enum machine_mode mode; | 
|  | +  enum machine_mode inner; | 
|  | +  unsigned int i, n_elts; | 
|  | +  rtx mem; | 
|  | + | 
|  | +  mode = GET_MODE (target); | 
|  | +  inner = GET_MODE_INNER (mode); | 
|  | +  n_elts = GET_MODE_NUNITS (mode); | 
|  | + | 
|  | +  gcc_assert (VECTOR_MODE_P (mode)); | 
|  | + | 
|  | +  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0); | 
|  | +  for (i = 0; i < n_elts; i++) | 
|  | +    emit_move_insn (adjust_address_nv (mem, inner, i * GET_MODE_SIZE (inner)), | 
|  | +                    XVECEXP (vals, 0, i)); | 
|  | + | 
|  | +  emit_move_insn (target, mem); | 
|  | +} | 
|  | + | 
|  | +/* Implement EPILOGUE_USES.  */ | 
|  | + | 
|  | +bool | 
|  | +mips_epilogue_uses (unsigned int regno) | 
|  | +{ | 
|  | +  /* Say that the epilogue uses the return address register.  Note that | 
|  | +     in the case of sibcalls, the values "used by the epilogue" are | 
|  | +     considered live at the start of the called function.  */ | 
|  | +  if (regno == RETURN_ADDR_REGNUM) | 
|  | +    return true; | 
|  | + | 
|  | +  /* If using a GOT, say that the epilogue also uses GOT_VERSION_REGNUM. | 
|  | +     See the comment above load_call<mode> for details.  */ | 
|  | +  if (TARGET_USE_GOT && (regno) == GOT_VERSION_REGNUM) | 
|  | +    return true; | 
|  | + | 
|  | +  return false; | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_TRAMPOLINE_INIT.  */ | 
|  | + | 
|  | +static void | 
|  | +mips_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) | 
|  | +{ | 
|  | +  rtx addr, end_addr, mem; | 
|  | +  rtx trampoline[8]; | 
|  | +  unsigned int i, j; | 
|  | +  HOST_WIDE_INT static_chain_offset, target_function_offset; | 
|  | + | 
|  | +  /* Work out the offsets of the pointers from the start of the | 
|  | +     trampoline code.  */ | 
|  | +  static_chain_offset = TRAMPOLINE_CODE_SIZE; | 
|  | +  target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode); | 
|  | + | 
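|  | +  /* Resulting layout, assuming TRAMPOLINE_CODE_SIZE is 16 (the four | 
|  | +     32-bit instructions emitted below): | 
|  | + | 
|  | +       bytes 0-15             code sequence | 
|  | +       bytes 16 onwards       static chain pointer | 
|  | +       then                   target function address  */ | 
|  | + | 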
|  | +  /* Get pointers to the beginning and end of the code block.  */ | 
|  | +  addr = force_reg (Pmode, XEXP (m_tramp, 0)); | 
|  | +  end_addr = mips_force_binary (Pmode, PLUS, addr, GEN_INT (TRAMPOLINE_CODE_SIZE)); | 
|  | + | 
|  | +#define OP(X) gen_int_mode (X, SImode) | 
|  | +#define MATCH_LREG ((Pmode) == DImode ? MATCH_LD : MATCH_LW) | 
|  | + | 
|  | +  /* auipc   $static_chain, 0x0 | 
|  | +     l[wd]   $temp, target_function_offset($static_chain) | 
|  | +     l[wd]   $static_chain, static_chain_offset($static_chain) | 
|  | +     jr      $temp | 
|  | +  */ | 
|  | +  i = 0; | 
|  | + | 
|  | +  trampoline[i++] = OP (RISCV_LTYPE (AUIPC, STATIC_CHAIN_REGNUM, 0)); | 
|  | +  trampoline[i++] = OP (RISCV_ITYPE (LREG, MIPS_PROLOGUE_TEMP_REGNUM, | 
|  | +    			  STATIC_CHAIN_REGNUM, target_function_offset)); | 
|  | +  trampoline[i++] = OP (RISCV_ITYPE (LREG, STATIC_CHAIN_REGNUM, | 
|  | +    			  STATIC_CHAIN_REGNUM, static_chain_offset)); | 
|  | +  trampoline[i++] = OP (RISCV_ITYPE (JALR_J, 0, MIPS_PROLOGUE_TEMP_REGNUM, 0)); | 
|  | + | 
|  | +  gcc_assert (i * 4 == TRAMPOLINE_CODE_SIZE); | 
|  | + | 
|  | +#undef MATCH_LREG | 
|  | +#undef OP | 
|  | + | 
|  | +  /* Copy the trampoline code.  Leave any padding uninitialized.  */ | 
|  | +  for (j = 0; j < i; j++) | 
|  | +    { | 
|  | +      mem = adjust_address (m_tramp, SImode, j * GET_MODE_SIZE (SImode)); | 
|  | +      mips_emit_move (mem, trampoline[j]); | 
|  | +    } | 
|  | + | 
|  | +  /* Set up the static chain pointer field.  */ | 
|  | +  mem = adjust_address (m_tramp, ptr_mode, static_chain_offset); | 
|  | +  mips_emit_move (mem, chain_value); | 
|  | + | 
|  | +  /* Set up the target function field.  */ | 
|  | +  mem = adjust_address (m_tramp, ptr_mode, target_function_offset); | 
|  | +  mips_emit_move (mem, XEXP (DECL_RTL (fndecl), 0)); | 
|  | + | 
|  | +  /* Flush the code part of the trampoline.  */ | 
|  | +  emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE))); | 
|  | +  emit_insn (gen_clear_cache (addr, end_addr)); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_SHIFT_TRUNCATION_MASK. */ | 
|  | + | 
|  | +static unsigned HOST_WIDE_INT | 
|  | +mips_shift_truncation_mask (enum machine_mode mode) | 
|  | +{ | 
|  | +  return GET_MODE_BITSIZE (mode) - 1; | 
|  | +} | 
|  | + | 
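|  | +/* Return the assembly template for a move of a MODE value from SRC to | 
|  | +   DEST, where each operand is either a vector register or a memory | 
|  | +   reference.  */ | 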
|  | +const char * | 
|  | +mips_riscv_output_vector_move (enum machine_mode mode, rtx dest, rtx src) | 
|  | +{ | 
|  | +  bool dest_mem, dest_vgp_reg, dest_vfp_reg; | 
|  | +  bool src_mem, src_vgp_reg, src_vfp_reg; | 
|  | + | 
|  | +  dest_mem = (GET_CODE(dest) == MEM); | 
|  | +  dest_vgp_reg = (GET_CODE(dest) == REG) && VEC_GP_REG_P(REGNO(dest)); | 
|  | +  dest_vfp_reg = (GET_CODE(dest) == REG) && VEC_FP_REG_P(REGNO(dest)); | 
|  | + | 
|  | +  src_mem = (GET_CODE(src) == MEM); | 
|  | +  src_vgp_reg = (GET_CODE(src) == REG) && VEC_GP_REG_P(REGNO(src)); | 
|  | +  src_vfp_reg = (GET_CODE(src) == REG) && VEC_FP_REG_P(REGNO(src)); | 
|  | + | 
|  | +  if (dest_vgp_reg && src_vgp_reg) | 
|  | +    return "vmvv\t%0,%1"; | 
|  | + | 
|  | +  if (dest_vfp_reg && src_vfp_reg) | 
|  | +    return "vfmvv\t%0,%1"; | 
|  | + | 
|  | +  if (dest_vgp_reg && src_mem) | 
|  | +  { | 
|  | +    switch (mode) | 
|  | +    { | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(DI): return "vld\t%0,%y1"; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(SI): return "vlw\t%0,%y1"; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(HI): return "vlh\t%0,%y1"; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(QI): return "vlb\t%0,%y1"; | 
|  | +      default: gcc_unreachable(); | 
|  | +    } | 
|  | +  } | 
|  | + | 
|  | +  if (dest_vfp_reg && src_mem) | 
|  | +  { | 
|  | +    switch (mode) | 
|  | +    { | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(DF): return "vfld\t%0,%y1"; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(SF): return "vflw\t%0,%y1"; | 
|  | +      default: gcc_unreachable(); | 
|  | +    } | 
|  | +  } | 
|  | + | 
|  | +  if (dest_mem && src_vgp_reg) | 
|  | +  { | 
|  | +    switch (mode) | 
|  | +    { | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(DI): return "vsd\t%1,%y0"; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(SI): return "vsw\t%1,%y0"; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(HI): return "vsh\t%1,%y0"; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(QI): return "vsb\t%1,%y0"; | 
|  | +      default: gcc_unreachable(); | 
|  | +    } | 
|  | +  } | 
|  | + | 
|  | +  if (dest_mem && src_vfp_reg) | 
|  | +  { | 
|  | +    switch (mode) | 
|  | +    { | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(DF): return "vfsd\t%1,%y0"; | 
|  | +      case MIPS_RISCV_VECTOR_MODE_NAME(SF): return "vfsw\t%1,%y0"; | 
|  | +      default: gcc_unreachable(); | 
|  | +    } | 
|  | +  } | 
|  | + | 
|  | +  gcc_unreachable(); | 
|  | +} | 
|  | + | 
|  | +/* Implement TARGET_ASM_FUNCTION_RODATA_SECTION. | 
|  | + | 
|  | +   The complication here is that, with the combination TARGET_ABICALLS | 
|  | +   && !TARGET_ABSOLUTE_ABICALLS && !TARGET_GPWORD, jump tables will use | 
|  | +   absolute addresses, and should therefore not be included in the | 
|  | +   read-only part of a DSO.  Handle such cases by selecting a normal | 
|  | +   data section instead of a read-only one.  The logic apes that in | 
|  | +   default_function_rodata_section.  */ | 
|  | + | 
|  | +static section * | 
|  | +mips_function_rodata_section (tree decl) | 
|  | +{ | 
|  | +  if (!TARGET_ABICALLS) | 
|  | +    return default_function_rodata_section (decl); | 
|  | + | 
|  | +  if (decl && DECL_SECTION_NAME (decl)) | 
|  | +    { | 
|  | +      const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl)); | 
|  | +      if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0) | 
|  | +	{ | 
|  | +	  char *rname = ASTRDUP (name); | 
|  | +	  rname[14] = 'd'; | 
|  | +	  return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl); | 
|  | +	} | 
|  | +      else if (flag_function_sections | 
|  | +	       && flag_data_sections | 
|  | +	       && strncmp (name, ".text.", 6) == 0) | 
|  | +	{ | 
|  | +	  char *rname = ASTRDUP (name); | 
|  | +	  memcpy (rname + 1, "data", 4); | 
|  | +	  return get_section (rname, SECTION_WRITE, decl); | 
|  | +	} | 
|  | +    } | 
|  | +  return data_section; | 
|  | +} | 
|  | + | 
|  | + | 
|  | + | 
|  | +/* Initialize the GCC target structure.  */ | 
|  | +#undef TARGET_ASM_ALIGNED_HI_OP | 
|  | +#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" | 
|  | +#undef TARGET_ASM_ALIGNED_SI_OP | 
|  | +#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t" | 
|  | +#undef TARGET_ASM_ALIGNED_DI_OP | 
|  | +#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t" | 
|  | + | 
|  | +#undef TARGET_OPTION_OVERRIDE | 
|  | +#define TARGET_OPTION_OVERRIDE mips_option_override | 
|  | +#undef TARGET_OPTION_OPTIMIZATION_TABLE | 
|  | +#define TARGET_OPTION_OPTIMIZATION_TABLE mips_option_optimization_table | 
|  | + | 
|  | +#undef TARGET_LEGITIMIZE_ADDRESS | 
|  | +#define TARGET_LEGITIMIZE_ADDRESS mips_legitimize_address | 
|  | + | 
|  | +#undef TARGET_ASM_FUNCTION_PROLOGUE | 
|  | +#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue | 
|  | +#undef TARGET_ASM_FUNCTION_RODATA_SECTION | 
|  | +#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section | 
|  | + | 
|  | +#undef TARGET_SCHED_ADJUST_COST | 
|  | +#define TARGET_SCHED_ADJUST_COST mips_adjust_cost | 
|  | +#undef TARGET_SCHED_ISSUE_RATE | 
|  | +#define TARGET_SCHED_ISSUE_RATE mips_issue_rate | 
|  | + | 
|  | +#undef TARGET_DEFAULT_TARGET_FLAGS | 
|  | +#define TARGET_DEFAULT_TARGET_FLAGS		\ | 
|  | +  (TARGET_DEFAULT				\ | 
|  | +   | TARGET_CPU_DEFAULT				\ | 
|  | +   | (TARGET_64BIT_DEFAULT ? 0 : MASK_32BIT)	\ | 
|  | +   | TARGET_ENDIAN_DEFAULT) | 
|  | +#undef TARGET_HANDLE_OPTION | 
|  | +#define TARGET_HANDLE_OPTION mips_handle_option | 
|  | + | 
|  | +#undef TARGET_FUNCTION_OK_FOR_SIBCALL | 
|  | +#define TARGET_FUNCTION_OK_FOR_SIBCALL hook_bool_tree_tree_true | 
|  | + | 
|  | +#undef TARGET_SET_CURRENT_FUNCTION | 
|  | +#define TARGET_SET_CURRENT_FUNCTION mips_set_current_function | 
|  | + | 
|  | +#undef TARGET_VALID_POINTER_MODE | 
|  | +#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode | 
|  | +#undef TARGET_REGISTER_MOVE_COST | 
|  | +#define TARGET_REGISTER_MOVE_COST mips_register_move_cost | 
|  | +#undef TARGET_MEMORY_MOVE_COST | 
|  | +#define TARGET_MEMORY_MOVE_COST mips_memory_move_cost | 
|  | +#undef TARGET_RTX_COSTS | 
|  | +#define TARGET_RTX_COSTS mips_rtx_costs | 
|  | +#undef TARGET_ADDRESS_COST | 
|  | +#define TARGET_ADDRESS_COST mips_address_cost | 
|  | + | 
|  | +#undef TARGET_MACHINE_DEPENDENT_REORG | 
|  | +#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg | 
|  | + | 
|  | +#undef  TARGET_PREFERRED_RELOAD_CLASS | 
|  | +#define TARGET_PREFERRED_RELOAD_CLASS mips_preferred_reload_class | 
|  | + | 
|  | +#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE | 
|  | +#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true | 
|  | + | 
|  | +#undef TARGET_INIT_LIBFUNCS | 
|  | +#define TARGET_INIT_LIBFUNCS mips_init_libfuncs | 
|  | + | 
|  | +#undef TARGET_EXPAND_BUILTIN_VA_START | 
|  | +#define TARGET_EXPAND_BUILTIN_VA_START mips_va_start | 
|  | + | 
|  | +#undef  TARGET_PROMOTE_FUNCTION_MODE | 
|  | +#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote | 
|  | +#undef TARGET_PROMOTE_PROTOTYPES | 
|  | +#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true | 
|  | + | 
|  | +#undef TARGET_RETURN_IN_MEMORY | 
|  | +#define TARGET_RETURN_IN_MEMORY mips_return_in_memory | 
|  | +#undef TARGET_RETURN_IN_MSB | 
|  | +#define TARGET_RETURN_IN_MSB mips_return_in_msb | 
|  | + | 
|  | +#undef TARGET_ASM_OUTPUT_MI_THUNK | 
|  | +#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk | 
|  | +#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK | 
|  | +#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true | 
|  | + | 
|  | +#undef TARGET_PRINT_OPERAND | 
|  | +#define TARGET_PRINT_OPERAND mips_print_operand | 
|  | +#undef TARGET_PRINT_OPERAND_ADDRESS | 
|  | +#define TARGET_PRINT_OPERAND_ADDRESS mips_print_operand_address | 
|  | + | 
|  | +#undef TARGET_SETUP_INCOMING_VARARGS | 
|  | +#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs | 
|  | +#undef TARGET_STRICT_ARGUMENT_NAMING | 
|  | +#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true | 
|  | +#undef TARGET_MUST_PASS_IN_STACK | 
|  | +#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size | 
|  | +#undef TARGET_PASS_BY_REFERENCE | 
|  | +#define TARGET_PASS_BY_REFERENCE hook_pass_by_reference_must_pass_in_stack | 
|  | +#undef TARGET_ARG_PARTIAL_BYTES | 
|  | +#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes | 
|  | +#undef TARGET_FUNCTION_ARG | 
|  | +#define TARGET_FUNCTION_ARG mips_function_arg | 
|  | +#undef TARGET_FUNCTION_ARG_ADVANCE | 
|  | +#define TARGET_FUNCTION_ARG_ADVANCE mips_function_arg_advance | 
|  | +#undef TARGET_FUNCTION_ARG_BOUNDARY | 
|  | +#define TARGET_FUNCTION_ARG_BOUNDARY mips_function_arg_boundary | 
|  | + | 
|  | +#undef TARGET_MODE_REP_EXTENDED | 
|  | +#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended | 
|  | + | 
|  | +#undef TARGET_VECTOR_MODE_SUPPORTED_P | 
|  | +#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p | 
|  | + | 
|  | +#undef TARGET_SCALAR_MODE_SUPPORTED_P | 
|  | +#define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p | 
|  | + | 
|  | +#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE | 
|  | +#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE mips_preferred_simd_mode | 
|  | + | 
|  | +#undef TARGET_INIT_BUILTINS | 
|  | +#define TARGET_INIT_BUILTINS mips_init_builtins | 
|  | +#undef TARGET_BUILTIN_DECL | 
|  | +#define TARGET_BUILTIN_DECL mips_builtin_decl | 
|  | +#undef TARGET_EXPAND_BUILTIN | 
|  | +#define TARGET_EXPAND_BUILTIN mips_expand_builtin | 
|  | + | 
|  | +#undef TARGET_HAVE_TLS | 
|  | +#define TARGET_HAVE_TLS HAVE_AS_TLS | 
|  | + | 
|  | +#undef TARGET_CANNOT_FORCE_CONST_MEM | 
|  | +#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem | 
|  | + | 
|  | +#undef TARGET_ENCODE_SECTION_INFO | 
|  | +#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info | 
|  | + | 
|  | +#undef TARGET_ATTRIBUTE_TABLE | 
|  | +#define TARGET_ATTRIBUTE_TABLE mips_attribute_table | 
|  | +/* All our function attributes are related to how out-of-line copies should | 
|  | +   be compiled or called.  They don't in themselves prevent inlining.  */ | 
|  | +#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P | 
|  | +#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true | 
|  | + | 
|  | +#undef TARGET_EXTRA_LIVE_ON_ENTRY | 
|  | +#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry | 
|  | + | 
|  | +#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P | 
|  | +#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true | 
|  | + | 
|  | +#undef  TARGET_COMP_TYPE_ATTRIBUTES | 
|  | +#define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes | 
|  | + | 
|  | +#ifdef HAVE_AS_DTPRELWORD | 
|  | +#undef TARGET_ASM_OUTPUT_DWARF_DTPREL | 
|  | +#define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel | 
|  | +#endif | 
|  | + | 
|  | +#undef TARGET_IRA_COVER_CLASSES | 
|  | +#define TARGET_IRA_COVER_CLASSES mips_ira_cover_classes | 
|  | + | 
|  | +#undef TARGET_LEGITIMATE_ADDRESS_P | 
|  | +#define TARGET_LEGITIMATE_ADDRESS_P	mips_legitimate_address_p | 
|  | + | 
|  | +#undef TARGET_CAN_ELIMINATE | 
|  | +#define TARGET_CAN_ELIMINATE mips_can_eliminate | 
|  | + | 
|  | +#undef TARGET_CONDITIONAL_REGISTER_USAGE | 
|  | +#define TARGET_CONDITIONAL_REGISTER_USAGE mips_conditional_register_usage | 
|  | + | 
|  | +#undef TARGET_TRAMPOLINE_INIT | 
|  | +#define TARGET_TRAMPOLINE_INIT mips_trampoline_init | 
|  | + | 
|  | +#undef TARGET_ASM_OUTPUT_SOURCE_FILENAME | 
|  | +#define TARGET_ASM_OUTPUT_SOURCE_FILENAME mips_output_filename | 
|  | + | 
|  | +#undef TARGET_SHIFT_TRUNCATION_MASK | 
|  | +#define TARGET_SHIFT_TRUNCATION_MASK mips_shift_truncation_mask | 
|  | + | 
|  | +struct gcc_target targetm = TARGET_INITIALIZER; | 
|  | + | 
|  | +#include "gt-riscv.h" | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/riscv-ftypes.def gcc-4.9.2-riscv/gcc/config/riscv/riscv-ftypes.def | 
|  | --- gcc-4.9.2/gcc/config/riscv/riscv-ftypes.def	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/riscv-ftypes.def	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,157 @@ | 
|  | +/* Definitions of prototypes for MIPS built-in functions.  -*- C -*- | 
|  | +   Copyright (C) 2007, 2008 | 
|  | +   Free Software Foundation, Inc. | 
|  | + | 
|  | +This file is part of GCC. | 
|  | + | 
|  | +GCC is free software; you can redistribute it and/or modify | 
|  | +it under the terms of the GNU General Public License as published by | 
|  | +the Free Software Foundation; either version 3, or (at your option) | 
|  | +any later version. | 
|  | + | 
|  | +GCC is distributed in the hope that it will be useful, | 
|  | +but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +GNU General Public License for more details. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License | 
|  | +along with GCC; see the file COPYING3.  If not see | 
|  | +<http://www.gnu.org/licenses/>.  */ | 
|  | + | 
|  | +/* Invoke DEF_MIPS_FTYPE (NARGS, LIST) for each prototype used by | 
|  | +   MIPS built-in functions, where: | 
|  | + | 
|  | +      NARGS is the number of arguments. | 
|  | +      LIST contains the return-type code followed by the codes for each | 
|  | +        argument type. | 
|  | + | 
|  | +   Argument- and return-type codes are either modes or one of the following: | 
|  | + | 
|  | +      VOID for void_type_node | 
|  | +      INT for integer_type_node | 
|  | +      POINTER for ptr_type_node | 
|  | + | 
|  | +   (we don't use PTR because that's an ANSI-compatibility macro). | 
|  | + | 
|  | +   Please keep this list lexicographically sorted by the LIST argument.  */ | 
|  | +DEF_MIPS_FTYPE (1, (DF, DF)) | 
|  | +DEF_MIPS_FTYPE (2, (DF, DF, DF)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (DI, DI, DI)) | 
|  | +DEF_MIPS_FTYPE (2, (DI, DI, SI)) | 
|  | +DEF_MIPS_FTYPE (3, (DI, DI, SI, SI)) | 
|  | +DEF_MIPS_FTYPE (3, (DI, DI, USI, USI)) | 
|  | +DEF_MIPS_FTYPE (3, (DI, DI, V2HI, V2HI)) | 
|  | +DEF_MIPS_FTYPE (3, (DI, DI, V4QI, V4QI)) | 
|  | +DEF_MIPS_FTYPE (2, (DI, SI, SI)) | 
|  | +DEF_MIPS_FTYPE (2, (DI, USI, USI)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (INT, DF, DF)) | 
|  | +DEF_MIPS_FTYPE (2, (INT, SF, SF)) | 
|  | +DEF_MIPS_FTYPE (2, (INT, V2SF, V2SF)) | 
|  | +DEF_MIPS_FTYPE (4, (INT, V2SF, V2SF, V2SF, V2SF)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (SI, DI, SI)) | 
|  | +DEF_MIPS_FTYPE (2, (SI, POINTER, SI)) | 
|  | +DEF_MIPS_FTYPE (1, (SI, SI)) | 
|  | +DEF_MIPS_FTYPE (2, (SI, SI, SI)) | 
|  | +DEF_MIPS_FTYPE (3, (SI, SI, SI, SI)) | 
|  | +DEF_MIPS_FTYPE (1, (SI, V2HI)) | 
|  | +DEF_MIPS_FTYPE (2, (SI, V2HI, V2HI)) | 
|  | +DEF_MIPS_FTYPE (1, (SI, V4QI)) | 
|  | +DEF_MIPS_FTYPE (2, (SI, V4QI, V4QI)) | 
|  | +DEF_MIPS_FTYPE (1, (SI, VOID)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (1, (SF, SF)) | 
|  | +DEF_MIPS_FTYPE (2, (SF, SF, SF)) | 
|  | +DEF_MIPS_FTYPE (1, (SF, V2SF)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (UDI, UDI, UDI)) | 
|  | +DEF_MIPS_FTYPE (2, (UDI, UV2SI, UV2SI)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (UV2SI, UV2SI, UQI)) | 
|  | +DEF_MIPS_FTYPE (2, (UV2SI, UV2SI, UV2SI)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (UV4HI, UV4HI, UQI)) | 
|  | +DEF_MIPS_FTYPE (2, (UV4HI, UV4HI, USI)) | 
|  | +DEF_MIPS_FTYPE (3, (UV4HI, UV4HI, UV4HI, UQI)) | 
|  | +DEF_MIPS_FTYPE (3, (UV4HI, UV4HI, UV4HI, USI)) | 
|  | +DEF_MIPS_FTYPE (2, (UV4HI, UV4HI, UV4HI)) | 
|  | +DEF_MIPS_FTYPE (1, (UV4HI, UV8QI)) | 
|  | +DEF_MIPS_FTYPE (2, (UV4HI, UV8QI, UV8QI)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (UV8QI, UV4HI, UV4HI)) | 
|  | +DEF_MIPS_FTYPE (1, (UV8QI, UV8QI)) | 
|  | +DEF_MIPS_FTYPE (2, (UV8QI, UV8QI, UV8QI)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (1, (V2HI, SI)) | 
|  | +DEF_MIPS_FTYPE (2, (V2HI, SI, SI)) | 
|  | +DEF_MIPS_FTYPE (3, (V2HI, SI, SI, SI)) | 
|  | +DEF_MIPS_FTYPE (1, (V2HI, V2HI)) | 
|  | +DEF_MIPS_FTYPE (2, (V2HI, V2HI, SI)) | 
|  | +DEF_MIPS_FTYPE (2, (V2HI, V2HI, V2HI)) | 
|  | +DEF_MIPS_FTYPE (1, (V2HI, V4QI)) | 
|  | +DEF_MIPS_FTYPE (2, (V2HI, V4QI, V2HI)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (V2SF, SF, SF)) | 
|  | +DEF_MIPS_FTYPE (1, (V2SF, V2SF)) | 
|  | +DEF_MIPS_FTYPE (2, (V2SF, V2SF, V2SF)) | 
|  | +DEF_MIPS_FTYPE (3, (V2SF, V2SF, V2SF, INT)) | 
|  | +DEF_MIPS_FTYPE (4, (V2SF, V2SF, V2SF, V2SF, V2SF)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (V2SI, V2SI, UQI)) | 
|  | +DEF_MIPS_FTYPE (2, (V2SI, V2SI, V2SI)) | 
|  | +DEF_MIPS_FTYPE (2, (V2SI, V4HI, V4HI)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (V4HI, V2SI, V2SI)) | 
|  | +DEF_MIPS_FTYPE (2, (V4HI, V4HI, UQI)) | 
|  | +DEF_MIPS_FTYPE (2, (V4HI, V4HI, USI)) | 
|  | +DEF_MIPS_FTYPE (2, (V4HI, V4HI, V4HI)) | 
|  | +DEF_MIPS_FTYPE (3, (V4HI, V4HI, V4HI, UQI)) | 
|  | +DEF_MIPS_FTYPE (3, (V4HI, V4HI, V4HI, USI)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (1, (V4QI, SI)) | 
|  | +DEF_MIPS_FTYPE (2, (V4QI, V2HI, V2HI)) | 
|  | +DEF_MIPS_FTYPE (1, (V4QI, V4QI)) | 
|  | +DEF_MIPS_FTYPE (2, (V4QI, V4QI, SI)) | 
|  | +DEF_MIPS_FTYPE (2, (V4QI, V4QI, V4QI)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (V8QI, V4HI, V4HI)) | 
|  | +DEF_MIPS_FTYPE (1, (V8QI, V8QI)) | 
|  | +DEF_MIPS_FTYPE (2, (V8QI, V8QI, V8QI)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (VOID, SI, CVPOINTER)) | 
|  | +DEF_MIPS_FTYPE (2, (VOID, SI, SI)) | 
|  | +DEF_MIPS_FTYPE (2, (VOID, V2HI, V2HI)) | 
|  | +DEF_MIPS_FTYPE (2, (VOID, V4QI, V4QI)) | 
|  | + | 
|  | +/* RISC-V builtin function types */ | 
|  | + | 
|  | +DEF_MIPS_FTYPE (1, (VDI, CPOINTER)) | 
|  | +DEF_MIPS_FTYPE (1, (VSI, CPOINTER)) | 
|  | +DEF_MIPS_FTYPE (1, (VHI, CPOINTER)) | 
|  | +DEF_MIPS_FTYPE (1, (VQI, CPOINTER)) | 
|  | +DEF_MIPS_FTYPE (1, (VDF, CPOINTER)) | 
|  | +DEF_MIPS_FTYPE (1, (VSF, CPOINTER)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (VDI, CPOINTER, DI)) | 
|  | +DEF_MIPS_FTYPE (2, (VSI, CPOINTER, DI)) | 
|  | +DEF_MIPS_FTYPE (2, (VHI, CPOINTER, DI)) | 
|  | +DEF_MIPS_FTYPE (2, (VQI, CPOINTER, DI)) | 
|  | +DEF_MIPS_FTYPE (2, (VDF, CPOINTER, DI)) | 
|  | +DEF_MIPS_FTYPE (2, (VSF, CPOINTER, DI)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (2, (VOID, VDI, POINTER)) | 
|  | +DEF_MIPS_FTYPE (2, (VOID, VSI, POINTER)) | 
|  | +DEF_MIPS_FTYPE (2, (VOID, VHI, POINTER)) | 
|  | +DEF_MIPS_FTYPE (2, (VOID, VQI, POINTER)) | 
|  | +DEF_MIPS_FTYPE (2, (VOID, VDF, POINTER)) | 
|  | +DEF_MIPS_FTYPE (2, (VOID, VSF, POINTER)) | 
|  | + | 
|  | +DEF_MIPS_FTYPE (3, (VOID, VDI, POINTER, DI)) | 
|  | +DEF_MIPS_FTYPE (3, (VOID, VSI, POINTER, DI)) | 
|  | +DEF_MIPS_FTYPE (3, (VOID, VHI, POINTER, DI)) | 
|  | +DEF_MIPS_FTYPE (3, (VOID, VQI, POINTER, DI)) | 
|  | +DEF_MIPS_FTYPE (3, (VOID, VDF, POINTER, DI)) | 
|  | +DEF_MIPS_FTYPE (3, (VOID, VSF, POINTER, DI)) | 
|  | + | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/riscv.h gcc-4.9.2-riscv/gcc/config/riscv/riscv.h | 
|  | --- gcc-4.9.2/gcc/config/riscv/riscv.h	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/riscv.h	2014-12-02 18:04:50.111949590 -0800 | 
|  | @@ -0,0 +1,1666 @@ | 
|  | +/* Definitions of target machine for GNU compiler.  MIPS version. | 
|  | +   Copyright (C) 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998 | 
|  | +   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010, 2011 | 
|  | +   Free Software Foundation, Inc. | 
|  | +   Contributed by A. Lichnewsky (lich@inria.inria.fr). | 
|  | +   Changed by Michael Meissner	(meissner@osf.org). | 
|  | +   64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and | 
|  | +   Brendan Eich (brendan@microunity.com). | 
|  | + | 
|  | +This file is part of GCC. | 
|  | + | 
|  | +GCC is free software; you can redistribute it and/or modify | 
|  | +it under the terms of the GNU General Public License as published by | 
|  | +the Free Software Foundation; either version 3, or (at your option) | 
|  | +any later version. | 
|  | + | 
|  | +GCC is distributed in the hope that it will be useful, | 
|  | +but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +GNU General Public License for more details. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License | 
|  | +along with GCC; see the file COPYING3.  If not see | 
|  | +<http://www.gnu.org/licenses/>.  */ | 
|  | + | 
|  | + | 
|  | +#include "config/vxworks-dummy.h" | 
|  | + | 
|  | +#ifdef GENERATOR_FILE | 
|  | +/* This is used in some insn conditions, so needs to be declared, but | 
|  | +   does not need to be defined.  */ | 
|  | +extern int target_flags_explicit; | 
|  | +#endif | 
|  | + | 
|  | +/* MIPS external variables defined in mips.c.  */ | 
|  | + | 
|  | +/* Which ABI to use. */ | 
|  | + | 
|  | +#define ABI_32  1 | 
|  | +#define ABI_64  3 | 
|  | + | 
|  | +/* Information about one recognized processor.  Defined here for the | 
|  | +   benefit of TARGET_CPU_CPP_BUILTINS.  */ | 
|  | +struct mips_cpu_info { | 
|  | +  /* The 'canonical' name of the processor as far as GCC is concerned. | 
|  | +     It's typically a manufacturer's prefix followed by a numerical | 
|  | +     designation.  It should be lowercase.  */ | 
|  | +  const char *name; | 
|  | + | 
|  | +  /* The internal processor number that most closely matches this | 
|  | +     entry.  Several processors can have the same value, if there's no | 
|  | +     difference between them from GCC's point of view.  */ | 
|  | +  enum processor cpu; | 
|  | + | 
|  | +  /* A mask of PTF_* values.  */ | 
|  | +  unsigned int tune_flags; | 
|  | +}; | 
|  | + | 
|  | +/* Macros to silence warnings about numbers being signed in traditional | 
|  | +   C and unsigned in ISO C when compiled on 32-bit hosts.  */ | 
|  | + | 
|  | +#define BITMASK_HIGH	(((unsigned long)1) << 31)	/* 0x80000000 */ | 
|  | +#define BITMASK_UPPER16	((unsigned long)0xffff << 16)	/* 0xffff0000 */ | 
|  | +#define BITMASK_LOWER16	((unsigned long)0xffff)		/* 0x0000ffff */ | 
|  | + | 
|  | +/* True if we need to use a global offset table to access some symbols.  */ | 
|  | +#define TARGET_USE_GOT TARGET_ABICALLS | 
|  | + | 
|  | +/* True if the output must have a writable .eh_frame. | 
|  | +   See ASM_PREFERRED_EH_DATA_FORMAT for details.  */ | 
|  | +#ifdef HAVE_LD_PERSONALITY_RELAXATION | 
|  | +#define TARGET_WRITABLE_EH_FRAME 0 | 
|  | +#else | 
|  | +#define TARGET_WRITABLE_EH_FRAME flag_pic | 
|  | +#endif | 
|  | + | 
|  | +/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is | 
|  | +   directly accessible, while the command-line options select | 
|  | +   TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI | 
|  | +   in use.  */ | 
|  | +#define TARGET_HARD_FLOAT TARGET_HARD_FLOAT_ABI | 
|  | +#define TARGET_SOFT_FLOAT TARGET_SOFT_FLOAT_ABI | 
|  | + | 
|  | +/* Target CPU builtins.  */ | 
|  | +#define TARGET_CPU_CPP_BUILTINS()					\ | 
|  | +  do									\ | 
|  | +    {									\ | 
|  | +      builtin_assert ("machine=riscv");                        	        \ | 
|  | +									\ | 
|  | +      builtin_assert ("cpu=riscv");					\ | 
|  | +      builtin_define ("__riscv__");     				\ | 
|  | +      builtin_define ("__riscv");     					\ | 
|  | +      builtin_define ("_riscv");					\ | 
|  | +									\ | 
|  | +      if (TARGET_64BIT)							\ | 
|  | +	{								\ | 
|  | +	  builtin_define ("__riscv64");					\ | 
|  | +	  builtin_define ("_RISCV_SIM=_ABI64");			        \ | 
|  | +	}								\ | 
|  | +      else						        	\ | 
|  | +	builtin_define ("_RISCV_SIM=_ABI32");			        \ | 
|  | +									\ | 
|  | +      builtin_define ("_ABI32=1");					\ | 
|  | +      builtin_define ("_ABI64=3");					\ | 
|  | +									\ | 
|  | +									\ | 
|  | +      builtin_define_with_int_value ("_RISCV_SZINT", INT_TYPE_SIZE);	\ | 
|  | +      builtin_define_with_int_value ("_RISCV_SZLONG", LONG_TYPE_SIZE);	\ | 
|  | +      builtin_define_with_int_value ("_RISCV_SZPTR", POINTER_SIZE);	\ | 
|  | +      builtin_define_with_int_value ("_RISCV_FPSET", 32);		\ | 
|  | +									\ | 
|  | +      /* These defines reflect the ABI in use, not whether the  	\ | 
|  | +	 FPU is directly accessible.  */				\ | 
|  | +      if (TARGET_HARD_FLOAT_ABI)					\ | 
|  | +	builtin_define ("__riscv_hard_float");				\ | 
|  | +      else								\ | 
|  | +	builtin_define ("__riscv_soft_float");				\ | 
|  | +									\ | 
|  | +      if (TARGET_BIG_ENDIAN)						\ | 
|  | +	{								\ | 
|  | +	  builtin_define_std ("RISCVEB");				\ | 
|  | +	  builtin_define ("_RISCVEB");					\ | 
|  | +	}								\ | 
|  | +      else								\ | 
|  | +	{								\ | 
|  | +	  builtin_define_std ("RISCVEL");				\ | 
|  | +	  builtin_define ("_RISCVEL");					\ | 
|  | +	}								\ | 
|  | +									\ | 
|  | +      /* Macros dependent on the C dialect.  */				\ | 
|  | +      if (preprocessing_asm_p ())					\ | 
|  | +	{								\ | 
|  | +	  builtin_define_std ("LANGUAGE_ASSEMBLY");			\ | 
|  | +	  builtin_define ("_LANGUAGE_ASSEMBLY");			\ | 
|  | +	}								\ | 
|  | +      else if (c_dialect_cxx ())					\ | 
|  | +	{								\ | 
|  | +	  builtin_define ("_LANGUAGE_C_PLUS_PLUS");			\ | 
|  | +	  builtin_define ("__LANGUAGE_C_PLUS_PLUS");			\ | 
|  | +	  builtin_define ("__LANGUAGE_C_PLUS_PLUS__");			\ | 
|  | +	}								\ | 
|  | +      else								\ | 
|  | +	{								\ | 
|  | +	  builtin_define_std ("LANGUAGE_C");				\ | 
|  | +	  builtin_define ("_LANGUAGE_C");				\ | 
|  | +	}								\ | 
|  | +      if (c_dialect_objc ())						\ | 
|  | +	{								\ | 
|  | +	  builtin_define ("_LANGUAGE_OBJECTIVE_C");			\ | 
|  | +	  builtin_define ("__LANGUAGE_OBJECTIVE_C");			\ | 
|  | +	  /* Bizarre, but needed at least for Irix.  */			\ | 
|  | +	  builtin_define_std ("LANGUAGE_C");				\ | 
|  | +	  builtin_define ("_LANGUAGE_C");				\ | 
|  | +	}								\ | 
|  | +    }									\ | 
|  | +  while (0) | 
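|  | A minimal usage sketch of the macros defined above, for a 64-bit, hard-float configuration (HAVE_FPU and target_long are made-up names used only for illustration): | 
|  |  | 
|  |     #if defined(__riscv) && defined(__riscv64) | 
|  |     typedef long long target_long;   /* _RISCV_SZLONG is 64 in this configuration */ | 
|  |     #else | 
|  |     typedef long target_long;        /* 32-bit configuration                      */ | 
|  |     #endif | 
|  |  | 
|  |     #ifdef __riscv_hard_float | 
|  |     #define HAVE_FPU 1               /* hard-float ABI selected                   */ | 
|  |     #else | 
|  |     #define HAVE_FPU 0 | 
|  |     #endif | 
|  |  | 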
|  | + | 
|  | +/* Default target_flags if no switches are specified  */ | 
|  | + | 
|  | +#ifndef TARGET_DEFAULT | 
|  | +#define TARGET_DEFAULT 0 | 
|  | +#endif | 
|  | + | 
|  | +#ifndef TARGET_CPU_DEFAULT | 
|  | +#define TARGET_CPU_DEFAULT 0 | 
|  | +#endif | 
|  | + | 
|  | +#ifndef TARGET_ENDIAN_DEFAULT | 
|  | +#define TARGET_ENDIAN_DEFAULT 0 | 
|  | +#endif | 
|  | + | 
|  | +#ifndef MIPS_CPU_STRING_DEFAULT | 
|  | +#define MIPS_CPU_STRING_DEFAULT "rocket" | 
|  | +#endif | 
|  | + | 
|  | +#ifndef MULTILIB_ENDIAN_DEFAULT | 
|  | +#if TARGET_ENDIAN_DEFAULT == 0 | 
|  | +#define MULTILIB_ENDIAN_DEFAULT "EL" | 
|  | +#else | 
|  | +#define MULTILIB_ENDIAN_DEFAULT "EB" | 
|  | +#endif | 
|  | +#endif | 
|  | + | 
|  | +#ifndef TARGET_64BIT_DEFAULT | 
|  | +#define TARGET_64BIT_DEFAULT 1 | 
|  | +#endif | 
|  | + | 
|  | +#if TARGET_64BIT_DEFAULT | 
|  | +# define MULTILIB_ARCH_DEFAULT "m64" | 
|  | +# define OPT_ARCH64 "!m32" | 
|  | +# define OPT_ARCH32 "m32" | 
|  | +#else | 
|  | +# define MULTILIB_ARCH_DEFAULT "m32" | 
|  | +# define OPT_ARCH64 "m64" | 
|  | +# define OPT_ARCH32 "!m64" | 
|  | +#endif | 
|  | + | 
|  | +#ifndef MULTILIB_DEFAULTS | 
|  | +#define MULTILIB_DEFAULTS \ | 
|  | +    { MULTILIB_ENDIAN_DEFAULT, MULTILIB_ARCH_DEFAULT } | 
|  | +#endif | 
|  | + | 
|  | +/* We must pass -EL to the linker by default for little endian embedded | 
|  | +   targets using linker scripts with an OUTPUT_FORMAT line.  Otherwise, the | 
|  | +   linker will default to using big-endian output files.  The OUTPUT_FORMAT | 
|  | +   line must be in the linker script, otherwise -EB/-EL will not work.  */ | 
|  | + | 
|  | +#ifndef ENDIAN_SPEC | 
|  | +#if TARGET_ENDIAN_DEFAULT == 0 | 
|  | +#define ENDIAN_SPEC "%{!EB:%{!meb:-EL}} %{EB|meb:-EB}" | 
|  | +#else | 
|  | +#define ENDIAN_SPEC "%{!EL:%{!mel:-EB}} %{EL|mel:-EL}" | 
|  | +#endif | 
|  | +#endif | 
|  | + | 
|  | +/* A spec condition that matches all non-mips16 -mips arguments.  */ | 
|  | + | 
|  | +#define MIPS_ISA_LEVEL_OPTION_SPEC \ | 
|  | +  "mips1|mips2|mips3|mips4|mips32*|mips64*" | 
|  | + | 
|  | +/* A spec condition that matches all non-mips16 architecture arguments.  */ | 
|  | + | 
|  | +#define MIPS_ARCH_OPTION_SPEC \ | 
|  | +  MIPS_ISA_LEVEL_OPTION_SPEC "|march=*" | 
|  | + | 
|  | +/* A spec that infers a -mhard-float or -msoft-float setting from an | 
|  | +   -march argument.  Note that soft-float and hard-float code are not | 
|  | +   link-compatible.  */ | 
|  | + | 
|  | +#define MIPS_ARCH_FLOAT_SPEC \ | 
|  | +  "%{mhard-float|msoft-float|march=mips*:; \ | 
|  | +     march=vr41*|march=m4k|march=4k*|march=24kc|march=24kec \ | 
|  | +     |march=34kc|march=74kc|march=1004kc|march=5kc \ | 
|  | +     |march=octeon|march=xlr: -msoft-float;		  \ | 
|  | +     march=*: -mhard-float}" | 
|  | + | 
|  | +/* Support for a compile-time default CPU, et cetera.  The rules are: | 
|  | +   --with-arch is ignored if -march is specified or a -mips is specified | 
|  | +     (other than -mips16); likewise --with-arch-32 and --with-arch-64. | 
|  | +   --with-tune is ignored if -mtune is specified; likewise | 
|  | +     --with-tune-32 and --with-tune-64. | 
|  | +   --with-float is ignored if -mhard-float or -msoft-float are | 
|  | +     specified. | 
|  | +   --with-divide is ignored if -mdivide-traps or -mdivide-breaks are | 
|  | +     specified. */ | 
|  | +#define OPTION_DEFAULT_SPECS \ | 
|  | +  {"arch", "%{" MIPS_ARCH_OPTION_SPEC ":;: -march=%(VALUE)}" }, \ | 
|  | +  {"arch_32", "%{" OPT_ARCH32 ":%{m32}}" }, \ | 
|  | +  {"arch_64", "%{" OPT_ARCH64 ":%{m64}}" }, \ | 
|  | +  {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \ | 
|  | +  {"tune_32", "%{" OPT_ARCH32 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \ | 
|  | +  {"tune_64", "%{" OPT_ARCH64 ":%{!mtune=*:-mtune=%(VALUE)}}" }, \ | 
|  | +  {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \ | 
|  | + | 
|  | +#define DRIVER_SELF_SPECS "" | 
|  | + | 
|  | +#ifdef IN_LIBGCC2 | 
|  | +#undef TARGET_64BIT | 
|  | +/* Make this a compile-time constant for libgcc2.  */ | 
|  | +#ifdef __riscv64 | 
|  | +#define TARGET_64BIT		1 | 
|  | +#else | 
|  | +#define TARGET_64BIT		0 | 
|  | +#endif | 
|  | +#endif /* IN_LIBGCC2 */ | 
|  | + | 
|  | +/* Tell collect what flags to pass to nm.  */ | 
|  | +#ifndef NM_FLAGS | 
|  | +#define NM_FLAGS "-Bn" | 
|  | +#endif | 
|  | + | 
|  | +/* SUBTARGET_ASM_DEBUGGING_SPEC handles passing debugging options to | 
|  | +   the assembler.  It may be overridden by subtargets. | 
|  | + | 
|  | +   Beginning with gas 2.13, -mdebug must be passed to correctly handle | 
|  | +   COFF debugging info.  */ | 
|  | + | 
|  | +#ifndef SUBTARGET_ASM_DEBUGGING_SPEC | 
|  | +#define SUBTARGET_ASM_DEBUGGING_SPEC "\ | 
|  | +%{g} %{g0} %{g1} %{g2} %{g3} \ | 
|  | +%{ggdb:-g} %{ggdb0:-g0} %{ggdb1:-g1} %{ggdb2:-g2} %{ggdb3:-g3} \ | 
|  | +%{gstabs:-g} %{gstabs0:-g0} %{gstabs1:-g1} %{gstabs2:-g2} %{gstabs3:-g3} \ | 
|  | +%{gstabs+:-g} %{gstabs+0:-g0} %{gstabs+1:-g1} %{gstabs+2:-g2} %{gstabs+3:-g3}" | 
|  | +#endif | 
|  | + | 
|  | +/* SUBTARGET_ASM_SPEC is always passed to the assembler.  It may be | 
|  | +   overridden by subtargets.  */ | 
|  | + | 
|  | +#ifndef SUBTARGET_ASM_SPEC | 
|  | +#define SUBTARGET_ASM_SPEC "" | 
|  | +#endif | 
|  | + | 
|  | +#undef ASM_SPEC | 
|  | +#define ASM_SPEC "\ | 
|  | +%{G*} %(endian_spec) \ | 
|  | +%(subtarget_asm_debugging_spec) \ | 
|  | +%{m32} %{m64} %{!m32:%{!m64: %(asm_abi_default_spec)}} \ | 
|  | +%{fPIC|fpic|fPIE|fpie:-fpic} \ | 
|  | +%{march=*} \ | 
|  | +%(subtarget_asm_spec)" | 
|  | + | 
|  | +/* Extra switches sometimes passed to the linker.  */ | 
|  | + | 
|  | +#ifndef LINK_SPEC | 
|  | +#define LINK_SPEC "\ | 
|  | +%{!T:-dT riscv.ld} \ | 
|  | +%(endian_spec) \ | 
|  | +%{m64:-melf64%{EB:b}%{!EB:l}riscv} \ | 
|  | +%{m32:-melf32%{EB:b}%{!EB:l}riscv} \ | 
|  | +%{G*} %{mips1} %{mips2} %{mips3} %{mips4} %{mips32*} %{mips64*} \ | 
|  | +%{shared}" | 
|  | +#endif  /* LINK_SPEC defined */ | 
|  | + | 
|  | + | 
|  | +/* Specs for the compiler proper */ | 
|  | + | 
|  | +/* SUBTARGET_CC1_SPEC is passed to the compiler proper.  It may be | 
|  | +   overridden by subtargets.  */ | 
|  | +#ifndef SUBTARGET_CC1_SPEC | 
|  | +#define SUBTARGET_CC1_SPEC "" | 
|  | +#endif | 
|  | + | 
|  | +/* CC1_SPEC is the set of arguments to pass to the compiler proper.  */ | 
|  | + | 
|  | +#undef CC1_SPEC | 
|  | +#define CC1_SPEC "\ | 
|  | +%{G*} %{EB:-meb} %{EL:-mel} %{EB:%{EL:%emay not use both -EB and -EL}} \ | 
|  | +%(subtarget_cc1_spec)" | 
|  | + | 
|  | +/* Preprocessor specs.  */ | 
|  | + | 
|  | +/* SUBTARGET_CPP_SPEC is passed to the preprocessor.  It may be | 
|  | +   overridden by subtargets.  */ | 
|  | +#ifndef SUBTARGET_CPP_SPEC | 
|  | +#define SUBTARGET_CPP_SPEC "" | 
|  | +#endif | 
|  | + | 
|  | +#define CPP_SPEC "%(subtarget_cpp_spec)" | 
|  | + | 
|  | +/* This macro defines names of additional specifications to put in the specs | 
|  | +   that can be used in various specifications like CC1_SPEC.  Its definition | 
|  | +   is an initializer with a subgrouping for each command option. | 
|  | + | 
|  | +   Each subgrouping contains a string constant that defines the | 
|  | +   specification name, and a string constant that is used by the GCC | 
|  | +   driver program. | 
|  | + | 
|  | +   Do not define this macro if it does not need to do anything.  */ | 
|  | + | 
|  | +#define EXTRA_SPECS							\ | 
|  | +  { "subtarget_cc1_spec", SUBTARGET_CC1_SPEC },				\ | 
|  | +  { "subtarget_cpp_spec", SUBTARGET_CPP_SPEC },				\ | 
|  | +  { "subtarget_asm_debugging_spec", SUBTARGET_ASM_DEBUGGING_SPEC },	\ | 
|  | +  { "subtarget_asm_spec", SUBTARGET_ASM_SPEC },				\ | 
|  | +  { "asm_abi_default_spec", "-" MULTILIB_ARCH_DEFAULT },		\ | 
|  | +  { "endian_spec", ENDIAN_SPEC },					\ | 
|  | +  SUBTARGET_EXTRA_SPECS | 
|  | + | 
|  | +#ifndef SUBTARGET_EXTRA_SPECS | 
|  | +#define SUBTARGET_EXTRA_SPECS | 
|  | +#endif | 
|  | + | 
|  | +#define DBX_DEBUGGING_INFO 1		/* generate stabs (OSF/rose) */ | 
|  | +#define DWARF2_DEBUGGING_INFO 1         /* dwarf2 debugging info */ | 
|  | + | 
|  | +#ifndef PREFERRED_DEBUGGING_TYPE | 
|  | +#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG | 
|  | +#endif | 
|  | + | 
|  | +#define DWARF2_ADDR_SIZE UNITS_PER_WORD | 
|  | + | 
|  | +/* By default, turn on GDB extensions.  */ | 
|  | +#define DEFAULT_GDB_EXTENSIONS 1 | 
|  | + | 
|  | +#define LOCAL_LABEL_PREFIX	"." | 
|  | +#define USER_LABEL_PREFIX	"" | 
|  | + | 
|  | +/* On Sun 4, this limit is 2048.  We use 1500 to be safe, | 
|  | +   since the length can run past this up to a continuation point.  */ | 
|  | +#undef DBX_CONTIN_LENGTH | 
|  | +#define DBX_CONTIN_LENGTH 1500 | 
|  | + | 
|  | +/* How to renumber registers for dbx and gdb.  */ | 
|  | +#define DBX_REGISTER_NUMBER(REGNO) mips_dbx_regno[REGNO] | 
|  | + | 
|  | +/* The mapping from gcc register number to DWARF 2 CFA column number.  */ | 
|  | +#define DWARF_FRAME_REGNUM(REGNO) mips_dwarf_regno[REGNO] | 
|  | + | 
|  | +/* The DWARF 2 CFA column which tracks the return address.  */ | 
|  | +#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM | 
|  | + | 
|  | +/* Don't emit .cfi_sections, as it does not work */ | 
|  | +#undef HAVE_GAS_CFI_SECTIONS_DIRECTIVE | 
|  | +#define HAVE_GAS_CFI_SECTIONS_DIRECTIVE 0 | 
|  | + | 
|  | +/* Before the prologue, RA lives in r31.  */ | 
|  | +#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM) | 
|  | + | 
|  | +/* Describe how we implement __builtin_eh_return.  */ | 
|  | +#define EH_RETURN_DATA_REGNO(N) \ | 
|  | +  ((N) < 4 ? (N) + GP_ARG_FIRST : INVALID_REGNUM) | 
|  | + | 
|  | +#define EH_RETURN_STACKADJ_RTX  gen_rtx_REG (Pmode, GP_ARG_FIRST + 4) | 
|  | + | 
|  | +/* Offsets recorded in opcodes are a multiple of this alignment factor. | 
|  | +   The default for this in 64-bit mode is 8, which causes problems with | 
|  | +   SFmode register saves.  */ | 
|  | +#define DWARF_CIE_DATA_ALIGNMENT -4 | 
|  | + | 
|  | +/* Correct the offset of automatic variables and arguments.  Note that | 
|  | +   the MIPS debug format wants all automatic variables and arguments | 
|  | +   to be in terms of the virtual frame pointer (stack pointer before | 
|  | +   any adjustment in the function), while the MIPS 3.0 linker wants | 
|  | +   the frame pointer to be the stack pointer after the initial | 
|  | +   adjustment.  */ | 
|  | + | 
|  | +#define DEBUGGER_AUTO_OFFSET(X)				\ | 
|  | +  mips_debugger_offset (X, (HOST_WIDE_INT) 0) | 
|  | +#define DEBUGGER_ARG_OFFSET(OFFSET, X)			\ | 
|  | +  mips_debugger_offset (X, (HOST_WIDE_INT) OFFSET) | 
|  | + | 
|  | +/* Target machine storage layout */ | 
|  | + | 
|  | +#define BITS_BIG_ENDIAN 0 | 
|  | +#define BYTES_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0) | 
|  | +#define WORDS_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0) | 
|  | + | 
|  | +#define MAX_BITS_PER_WORD 64 | 
|  | + | 
|  | +/* Width of a word, in units (bytes).  */ | 
|  | +#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4) | 
|  | +#ifndef IN_LIBGCC2 | 
|  | +#define MIN_UNITS_PER_WORD 4 | 
|  | +#endif | 
|  | + | 
|  | +/* For MIPS, width of a floating point register.  */ | 
|  | +#define UNITS_PER_FPREG 8 | 
|  | + | 
|  | +/* The number of consecutive floating-point registers needed to store the | 
|  | +   smallest format supported by the FPU.  */ | 
|  | +#define MIN_FPRS_PER_FMT 1 | 
|  | + | 
|  | +/* The largest size of value that can be held in floating-point | 
|  | +   registers and moved with a single instruction.  */ | 
|  | +#define UNITS_PER_HWFPVALUE \ | 
|  | +  (TARGET_SOFT_FLOAT_ABI ? 0 : UNITS_PER_FPREG) | 
|  | + | 
|  | +/* The largest size of value that can be held in floating-point | 
|  | +   registers.  */ | 
|  | +#define UNITS_PER_FPVALUE			\ | 
|  | +  (TARGET_SOFT_FLOAT_ABI ? 0			\ | 
|  | +   : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT) | 
|  | + | 
|  | +/* The number of bytes in a double.  */ | 
|  | +#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT) | 
|  | + | 
|  | +/* Set the sizes of the core types.  */ | 
|  | +#define SHORT_TYPE_SIZE 16 | 
|  | +#define INT_TYPE_SIZE 32 | 
|  | +#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32) | 
|  | +#define LONG_LONG_TYPE_SIZE 64 | 
|  | + | 
|  | +#define FLOAT_TYPE_SIZE 32 | 
|  | +#define DOUBLE_TYPE_SIZE 64 | 
|  | +#define LONG_DOUBLE_TYPE_SIZE 64 | 
|  | + | 
|  | +/* Define the sizes of fixed-point types.  */ | 
|  | +#define SHORT_FRACT_TYPE_SIZE 8 | 
|  | +#define FRACT_TYPE_SIZE 16 | 
|  | +#define LONG_FRACT_TYPE_SIZE 32 | 
|  | +#define LONG_LONG_FRACT_TYPE_SIZE 64 | 
|  | + | 
|  | +#define SHORT_ACCUM_TYPE_SIZE 16 | 
|  | +#define ACCUM_TYPE_SIZE 32 | 
|  | +#define LONG_ACCUM_TYPE_SIZE 64 | 
|  | +/* FIXME.  LONG_LONG_ACCUM_TYPE_SIZE should be 128 bits, but GCC | 
|  | +   doesn't support 128-bit integers for MIPS32 currently.  */ | 
|  | +#define LONG_LONG_ACCUM_TYPE_SIZE (TARGET_64BIT ? 128 : 64) | 
|  | + | 
|  | +/* long double is not a fixed mode, but the idea is that, if we | 
|  | +   support long double, we also want a 128-bit integer type.  */ | 
|  | +#define MAX_FIXED_MODE_SIZE LONG_DOUBLE_TYPE_SIZE | 
|  | + | 
|  | +#ifdef IN_LIBGCC2 | 
|  | +# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE | 
|  | +#endif | 
|  | + | 
|  | +/* Width in bits of a pointer.  */ | 
|  | +#ifndef POINTER_SIZE | 
|  | +#define POINTER_SIZE (TARGET_64BIT ? 64 : 32) | 
|  | +#endif | 
|  | + | 
|  | +/* Allocation boundary (in *bits*) for storing arguments in argument list.  */ | 
|  | +#define PARM_BOUNDARY BITS_PER_WORD | 
|  | + | 
|  | +/* Allocation boundary (in *bits*) for the code of a function.  */ | 
|  | +#define FUNCTION_BOUNDARY 32 | 
|  | + | 
|  | +/* Alignment of field after `int : 0' in a structure.  */ | 
|  | +#define EMPTY_FIELD_BOUNDARY 32 | 
|  | + | 
|  | +/* Every structure's size must be a multiple of this.  */ | 
|  | +/* 8 is observed right on a DECstation and on riscos 4.02.  */ | 
|  | +#define STRUCTURE_SIZE_BOUNDARY 8 | 
|  | + | 
|  | +/* There is no point aligning anything to a rounder boundary than this.  */ | 
|  | +#define BIGGEST_ALIGNMENT LONG_DOUBLE_TYPE_SIZE | 
|  | + | 
|  | +/* All accesses must be aligned.  */ | 
|  | +#define STRICT_ALIGNMENT 1 | 
|  | + | 
|  | +/* Define this if you wish to imitate the way many other C compilers | 
|  | +   handle alignment of bitfields and the structures that contain | 
|  | +   them. | 
|  | + | 
|  | +   The behavior is that the type written for a bit-field (`int', | 
|  | +   `short', or other integer type) imposes an alignment for the | 
|  | +   entire structure, as if the structure really did contain an | 
|  | +   ordinary field of that type.  In addition, the bit-field is placed | 
|  | +   within the structure so that it would fit within such a field, | 
|  | +   not crossing a boundary for it. | 
|  | + | 
|  | +   Thus, on most machines, a bit-field whose type is written as `int' | 
|  | +   would not cross a four-byte boundary, and would force four-byte | 
|  | +   alignment for the whole structure.  (The alignment used may not | 
|  | +   be four bytes; it is controlled by the other alignment | 
|  | +   parameters.) | 
|  | + | 
|  | +   If the macro is defined, its definition should be a C expression; | 
|  | +   a nonzero value for the expression enables this behavior.  */ | 
|  | + | 
|  | +#define PCC_BITFIELD_TYPE_MATTERS 1 | 
|  | + | 
|  | +/* If defined, a C expression to compute the alignment given to a | 
|  | +   constant that is being placed in memory.  CONSTANT is the constant | 
|  | +   and ALIGN is the alignment that the object would ordinarily have. | 
|  | +   The value of this macro is used instead of that alignment to align | 
|  | +   the object. | 
|  | + | 
|  | +   If this macro is not defined, then ALIGN is used. | 
|  | + | 
|  | +   The typical use of this macro is to increase alignment for string | 
|  | +   constants to be word aligned so that `strcpy' calls that copy | 
|  | +   constants can be done inline.  */ | 
|  | + | 
|  | +#define CONSTANT_ALIGNMENT(EXP, ALIGN)					\ | 
|  | +  ((TREE_CODE (EXP) == STRING_CST  || TREE_CODE (EXP) == CONSTRUCTOR)	\ | 
|  | +   && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN)) | 
|  | + | 
|  | +/* If defined, a C expression to compute the alignment for a static | 
|  | +   variable.  TYPE is the data type, and ALIGN is the alignment that | 
|  | +   the object would ordinarily have.  The value of this macro is used | 
|  | +   instead of that alignment to align the object. | 
|  | + | 
|  | +   If this macro is not defined, then ALIGN is used. | 
|  | + | 
|  | +   One use of this macro is to increase alignment of medium-size | 
|  | +   data to make it all fit in fewer cache lines.  Another is to | 
|  | +   cause character arrays to be word-aligned so that `strcpy' calls | 
|  | +   that copy constants to character arrays can be done inline.  */ | 
|  | + | 
|  | +#undef DATA_ALIGNMENT | 
|  | +#define DATA_ALIGNMENT(TYPE, ALIGN)					\ | 
|  | +  ((((ALIGN) < BITS_PER_WORD)						\ | 
|  | +    && (TREE_CODE (TYPE) == ARRAY_TYPE					\ | 
|  | +	|| TREE_CODE (TYPE) == UNION_TYPE				\ | 
|  | +	|| TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN)) | 
|  | + | 
|  | +/* We need this for the same reason as DATA_ALIGNMENT, namely to cause | 
|  | +   character arrays to be word-aligned so that `strcpy' calls that copy | 
|  | +   constants to character arrays can be done inline, and 'strcmp' can be | 
|  | +   optimised to use word loads. */ | 
|  | +#define LOCAL_ALIGNMENT(TYPE, ALIGN) \ | 
|  | +  DATA_ALIGNMENT (TYPE, ALIGN) | 
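|  | For instance, with the two macros above a small character array picks up word (BITS_PER_WORD) alignment, so string routines touching it can use full-word loads (illustrative declaration only): | 
|  |  | 
|  |     /* Given word alignment by DATA_ALIGNMENT/LOCAL_ALIGNMENT, even though */ | 
|  |     /* its natural alignment is a single byte.                             */ | 
|  |     static char greeting[12] = "hello riscv"; | 
|  |  | 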
|  | + | 
|  | +#define PAD_VARARGS_DOWN \ | 
|  | +  (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward) | 
|  | + | 
|  | +/* Define if operations between registers always perform the operation | 
|  | +   on the full register even if a narrower mode is specified.  */ | 
|  | +#define WORD_REGISTER_OPERATIONS | 
|  | + | 
|  | +/* When in 64-bit mode, move insns will sign extend SImode and CCmode | 
|  | +   moves.  All other references are zero extended.  */ | 
|  | +#define LOAD_EXTEND_OP(MODE) \ | 
|  | +  (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \ | 
|  | +   ? SIGN_EXTEND : ZERO_EXTEND) | 
|  | + | 
|  | +/* Define this macro if it is advisable to hold scalars in registers | 
|  | +   in a wider mode than that declared by the program.  In such cases, | 
|  | +   the value is constrained to be within the bounds of the declared | 
|  | +   type, but kept valid in the wider mode.  The signedness of the | 
|  | +   extension may differ from that of the type.  */ | 
|  | + | 
|  | +#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE)	\ | 
|  | +  if (GET_MODE_CLASS (MODE) == MODE_INT		\ | 
|  | +      && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \ | 
|  | +    {                                           \ | 
|  | +      if ((MODE) == SImode)                     \ | 
|  | +        (UNSIGNEDP) = 0;                        \ | 
|  | +      (MODE) = Pmode;                           \ | 
|  | +    } | 
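|  | A worked reading of the rule above on a 64-bit target, where Pmode is DImode (see the Pmode definition later in this file): | 
|  |  | 
|  |     /* A narrow integer held in a register is widened to the full Pmode.   */ | 
|  |     /* SImode additionally has UNSIGNEDP forced to 0, so 32-bit values are  */ | 
|  |     /* kept sign-extended; in C terms the widening is a sign extension:     */ | 
|  |     long long widen (int x) { return x; } | 
|  |  | 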
|  | + | 
|  | +/* Pmode is always the same as ptr_mode, but not always the same as word_mode. | 
|  | +   Extensions of pointers to word_mode must be signed.  */ | 
|  | +#define POINTERS_EXTEND_UNSIGNED false | 
|  | + | 
|  | +/* RV32 double-precision FP <-> integer moves go through memory */ | 
|  | +#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \ | 
|  | + (!TARGET_64BIT && GET_MODE_SIZE (MODE) == 8 && \ | 
|  | +   (((CLASS1) == FP_REGS && (CLASS2) != FP_REGS) \ | 
|  | +   || ((CLASS2) == FP_REGS && (CLASS1) != FP_REGS))) | 
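|  | A worked instance of the condition above: on RV32 (TARGET_64BIT is 0), an 8-byte DFmode copy between the FP_REGS and GR_REGS classes satisfies every clause, so the move is staged through a stack slot; same-class copies, and all copies on RV64, do not: | 
|  |  | 
|  |     /* SECONDARY_MEMORY_NEEDED (FP_REGS, GR_REGS, DFmode)                  */ | 
|  |     /*   = !TARGET_64BIT && GET_MODE_SIZE (DFmode) == 8                    */ | 
|  |     /*     && (one class is FP_REGS and the other is not)  -> true on RV32 */ | 
|  |     /* SECONDARY_MEMORY_NEEDED (GR_REGS, GR_REGS, DFmode)  -> false        */ | 
|  |  | 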
|  | + | 
|  | +/* Define if loading short immediate values into registers sign extends.  */ | 
|  | +#define SHORT_IMMEDIATES_SIGN_EXTEND | 
|  | + | 
|  | +/* The [d]clz instructions have the natural values at 0.  */ | 
|  | + | 
|  | +#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \ | 
|  | +  ((VALUE) = GET_MODE_BITSIZE (MODE), 2) | 
|  | + | 
|  | +/* Standard register usage.  */ | 
|  | + | 
|  | +/* Number of hardware registers.  We have: | 
|  | + | 
|  | +   - 32 integer registers | 
|  | +   - 32 floating point registers | 
|  | +   - 32 vector integer registers | 
|  | +   - 32 vector floating point registers | 
|  | +   - 3 fake registers: | 
|  | +	- ARG_POINTER_REGNUM | 
|  | +	- FRAME_POINTER_REGNUM | 
|  | +	- GOT_VERSION_REGNUM (see the comment above load_call<mode> for details) | 
|  | +   - 1 dummy entry that was used at various times in the past. */ | 
|  | + | 
|  | +#define FIRST_PSEUDO_REGISTER 132 | 
|  | + | 
|  | +/* By default, fix the zero register (x0), the stack pointer (x14), | 
|  | +   and the thread pointer (x15), matching the FIXED_REGISTERS table below. */ | 
|  | + | 
|  | +#define FIXED_REGISTERS							\ | 
|  | +{ /* General registers.  */                                             \ | 
|  | +  1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,			\ | 
|  | +  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\ | 
|  | +  /* Floating-point registers.  */                                      \ | 
|  | +  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\ | 
|  | +  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\ | 
|  | +  /* Vector General registers.  */                                      \ | 
|  | +  1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\ | 
|  | +  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,			\ | 
|  | +  /* Vector Floating-point registers.  */                               \ | 
|  | +  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\ | 
|  | +  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\ | 
|  | +  /* Others.  */                                                        \ | 
|  | +  1, 1, 1, 1 \ | 
|  | +} | 
|  | + | 
|  | + | 
|  | +/* Set up this array for o32 by default. | 
|  | + | 
|  | +   Note that we don't mark $1/$ra as a call-clobbered register.  The idea is | 
|  | +   that it's really the call instructions themselves which clobber $ra. | 
|  | +   We don't care what the called function does with it afterwards. | 
|  | + | 
|  | +   This approach makes it easier to implement sibcalls.  Unlike normal | 
|  | +   calls, sibcalls don't clobber $ra, so the register reaches the | 
|  | +   called function intact.  EPILOGUE_USES says that $ra is useful | 
|  | +   to the called function.  */ | 
|  | + | 
|  | +#define CALL_USED_REGISTERS						\ | 
|  | +{ /* General registers.  */                                             \ | 
|  | +  1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,			\ | 
|  | +  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,			\ | 
|  | +  /* Floating-point registers.  */                                      \ | 
|  | +  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\ | 
|  | +  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,			\ | 
|  | +  /* Vector General registers.  */                                      \ | 
|  | +  1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,			\ | 
|  | +  1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,			\ | 
|  | +  /* Vector Floating-point registers.  */                               \ | 
|  | +  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,			\ | 
|  | +  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,			\ | 
|  | +  /* Others.  */                                                        \ | 
|  | +  1, 1, 1, 1 \ | 
|  | +} | 
|  | + | 
|  | +#define CALL_REALLY_USED_REGISTERS                                      \ | 
|  | +{ /* General registers.  */                                             \ | 
|  | +  1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,			\ | 
|  | +  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,			\ | 
|  | +  /* Floating-point registers.  */                                      \ | 
|  | +  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,			\ | 
|  | +  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,			\ | 
|  | +  /* Vector General registers.  */                                      \ | 
|  | +  1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,                       \ | 
|  | +  1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,                       \ | 
|  | +  /* Vector Floating-point registers.  */                               \ | 
|  | +  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,			\ | 
|  | +  1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0,			\ | 
|  | +  /* Others.  */                                                        \ | 
|  | +  1, 1, 0, 0 \ | 
|  | +} | 
|  | + | 
|  | +/* Internal macros to classify a register number as to whether it's a | 
|  | +   general purpose register, a floating point register, a | 
|  | +   multiply/divide register, or a status register.  */ | 
|  | + | 
|  | +#define GP_REG_FIRST 0 | 
|  | +#define GP_REG_LAST  31 | 
|  | +#define GP_REG_NUM   (GP_REG_LAST - GP_REG_FIRST + 1) | 
|  | +#define GP_DBX_FIRST 0 | 
|  | + | 
|  | +#define FP_REG_FIRST 32 | 
|  | +#define FP_REG_LAST  63 | 
|  | +#define FP_REG_NUM   (FP_REG_LAST - FP_REG_FIRST + 1) | 
|  | +#define FP_DBX_FIRST ((write_symbols == DBX_DEBUG) ? 38 : 32) | 
|  | + | 
|  | +#define CALLEE_SAVED_GP_REG_FIRST (GP_REG_FIRST + 2) | 
|  | +#define CALLEE_SAVED_GP_REG_LAST (CALLEE_SAVED_GP_REG_FIRST + 12 - 1) | 
|  | + | 
|  | +#define CALLEE_SAVED_FP_REG_FIRST (FP_REG_FIRST + 0) | 
|  | +#define CALLEE_SAVED_FP_REG_LAST (CALLEE_SAVED_FP_REG_FIRST + 16 - 1) | 
|  | + | 
|  | +#define VEC_GP_REG_FIRST 64 | 
|  | +#define VEC_GP_REG_LAST  95 | 
|  | +#define VEC_GP_REG_NUM   (VEC_GP_REG_LAST - VEC_GP_REG_FIRST + 1) | 
|  | + | 
|  | +#define VEC_FP_REG_FIRST 96 | 
|  | +#define VEC_FP_REG_LAST  127 | 
|  | +#define VEC_FP_REG_NUM   (VEC_FP_REG_LAST - VEC_FP_REG_FIRST + 1) | 
|  | + | 
|  | +/* The DWARF 2 CFA column which tracks the return address from a | 
|  | +   signal handler context.  This means that to maintain backwards | 
|  | +   compatibility, no hard register can be assigned this column if it | 
|  | +   would need to be handled by the DWARF unwinder.  */ | 
|  | +#define DWARF_ALT_FRAME_RETURN_COLUMN 66 | 
|  | + | 
|  | +#define GP_REG_P(REGNO)	\ | 
|  | +  ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM) | 
|  | +#define FP_REG_P(REGNO)  \ | 
|  | +  ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM) | 
|  | +#define VEC_GP_REG_P(REGNO)	\ | 
|  | +  ((unsigned int) ((int) (REGNO) - VEC_GP_REG_FIRST) < VEC_GP_REG_NUM) | 
|  | +#define VEC_FP_REG_P(REGNO)  \ | 
|  | +  ((unsigned int) ((int) (REGNO) - VEC_FP_REG_FIRST) < VEC_FP_REG_NUM) | 
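|  | The predicates above use a single-comparison range test that relies on unsigned wraparound; a self-contained sketch of the idiom (register numbers taken from the definitions above, IN_REG_RANGE is a made-up name): | 
|  |  | 
|  |     #include <assert.h> | 
|  |  | 
|  |     /* Subtracting the first register number and comparing unsigned against */ | 
|  |     /* the count rejects values below the range (they wrap around to huge   */ | 
|  |     /* unsigned numbers) as well as values at or above it, in one compare.  */ | 
|  |     #define IN_REG_RANGE(regno, first, num) \ | 
|  |       ((unsigned int) ((int) (regno) - (first)) < (num)) | 
|  |  | 
|  |     int main (void) | 
|  |     { | 
|  |       assert (IN_REG_RANGE (0, 0, 32));    /* x0 is a GPR              */ | 
|  |       assert (!IN_REG_RANGE (32, 0, 32));  /* f0 is not a GPR          */ | 
|  |       assert (IN_REG_RANGE (96, 96, 32));  /* first vector FP register */ | 
|  |       assert (!IN_REG_RANGE (95, 96, 32)); /* below the range: wraps   */ | 
|  |       return 0; | 
|  |     } | 
|  |  | 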
|  | + | 
|  | +#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X))) | 
|  | + | 
|  | +/* Return coprocessor number from register number.  */ | 
|  | + | 
|  | +#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) 				\ | 
|  | +  (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2'			\ | 
|  | +   : COP3_REG_P (REGNO) ? '3' : '?') | 
|  | + | 
|  | + | 
|  | +#define HARD_REGNO_NREGS(REGNO, MODE) mips_hard_regno_nregs (REGNO, MODE) | 
|  | + | 
|  | +#define HARD_REGNO_MODE_OK(REGNO, MODE)					\ | 
|  | +  mips_hard_regno_mode_ok[ (int)(MODE) ][ (REGNO) ] | 
|  | + | 
|  | +#define MODES_TIEABLE_P mips_modes_tieable_p | 
|  | + | 
|  | +/* Register to use for pushing function arguments.  */ | 
|  | +#define STACK_POINTER_REGNUM (GP_REG_FIRST + 14) | 
|  | +#define HARD_FRAME_POINTER_REGNUM (GP_REG_FIRST + 2) | 
|  | + | 
|  | +#define THREAD_POINTER_REGNUM (GP_REG_FIRST + 15) | 
|  | + | 
|  | +/* These two registers don't really exist: they get eliminated to either | 
|  | +   the stack or hard frame pointer.  */ | 
|  | +#define ARG_POINTER_REGNUM 128 | 
|  | +#define FRAME_POINTER_REGNUM 129 | 
|  | + | 
|  | +#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0 | 
|  | +#define HARD_FRAME_POINTER_IS_ARG_POINTER 0 | 
|  | + | 
|  | +/* Register in which static-chain is passed to a function.  */ | 
|  | +#define STATIC_CHAIN_REGNUM GP_RETURN | 
|  | + | 
|  | +/* Registers used as temporaries in prologue/epilogue code. | 
|  | + | 
|  | +   The prologue registers mustn't conflict with any | 
|  | +   incoming arguments, the static chain pointer, or the frame pointer. | 
|  | +   The epilogue temporary mustn't conflict with the return registers, | 
|  | +   the frame pointer, the EH stack adjustment, or the EH data registers. */ | 
|  | + | 
|  | +#define MIPS_PROLOGUE_TEMP_REGNUM (GP_RETURN + 1) | 
|  | +#define MIPS_EPILOGUE_TEMP_REGNUM(SIBCALL_P) \ | 
|  | +  ((SIBCALL_P) ? MIPS_PROLOGUE_TEMP_REGNUM : (GP_ARG_FIRST + 5)) | 
|  | + | 
|  | +#define MIPS_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, MIPS_PROLOGUE_TEMP_REGNUM) | 
|  | +#define MIPS_EPILOGUE_TEMP(MODE, SIBCALL_P) \ | 
|  | +  gen_rtx_REG (MODE, MIPS_EPILOGUE_TEMP_REGNUM (SIBCALL_P)) | 
|  | + | 
|  | +#define FUNCTION_PROFILER(STREAM, LABELNO)	\ | 
|  | +{						\ | 
|  | +    sorry ("profiler support for RISC-V");	\ | 
|  | +} | 
|  | + | 
|  | +/* Define this macro if it is as good or better to call a constant | 
|  | +   function address than to call an address kept in a register.  */ | 
|  | +#define NO_FUNCTION_CSE 1 | 
|  | + | 
|  | +/* Define the classes of registers for register constraints in the | 
|  | +   machine description.  Also define ranges of constants. | 
|  | + | 
|  | +   One of the classes must always be named ALL_REGS and include all hard regs. | 
|  | +   If there is more than one class, another class must be named NO_REGS | 
|  | +   and contain no registers. | 
|  | + | 
|  | +   The name GENERAL_REGS must be the name of a class (or an alias for | 
|  | +   another name such as ALL_REGS).  This is the class of registers | 
|  | +   that is allowed by "g" or "r" in a register constraint. | 
|  | +   Also, registers outside this class are allocated only when | 
|  | +   instructions express preferences for them. | 
|  | + | 
|  | +   The classes must be numbered in nondecreasing order; that is, | 
|  | +   a larger-numbered class must never be contained completely | 
|  | +   in a smaller-numbered class. | 
|  | + | 
|  | +   For any two classes, it is very desirable that there be another | 
|  | +   class that represents their union.  */ | 
|  | + | 
|  | +enum reg_class | 
|  | +{ | 
|  | +  NO_REGS,			/* no registers in set */ | 
|  | +  V1_REG,			/* register used by indirect sibcalls */ | 
|  | +  GR_REGS,			/* integer registers */ | 
|  | +  FP_REGS,			/* floating point registers */ | 
|  | +  VEC_GR_REGS,			/* vector integer registers */ | 
|  | +  VEC_FP_REGS,			/* vector floating point registers */ | 
|  | +  FRAME_REGS,			/* $arg and $frame */ | 
|  | +  ALL_REGS,			/* all registers */ | 
|  | +  LIM_REG_CLASSES		/* max value + 1 */ | 
|  | +}; | 
|  | + | 
|  | +#define N_REG_CLASSES (int) LIM_REG_CLASSES | 
|  | + | 
|  | +#define GENERAL_REGS GR_REGS | 
|  | + | 
|  | +/* An initializer containing the names of the register classes as C | 
|  | +   string constants.  These names are used in writing some of the | 
|  | +   debugging dumps.  */ | 
|  | + | 
|  | +#define REG_CLASS_NAMES							\ | 
|  | +{									\ | 
|  | +  "NO_REGS",								\ | 
|  | +  "V1_REG",								\ | 
|  | +  "GR_REGS",								\ | 
|  | +  "FP_REGS",								\ | 
|  | +  "VEC_GR_REGS",							\ | 
|  | +  "VEC_FP_REGS",							\ | 
|  | +  "FRAME_REGS",								\ | 
|  | +  "ALL_REGS"								\ | 
|  | +} | 
|  | + | 
|  | +/* An initializer containing the contents of the register classes, | 
|  | +   as integers which are bit masks.  The Nth integer specifies the | 
|  | +   contents of class N.  The way the integer MASK is interpreted is | 
|  | +   that register R is in the class if `MASK & (1 << R)' is 1. | 
|  | + | 
|  | +   When the machine has more than 32 registers, an integer does not | 
|  | +   suffice.  Then the integers are replaced by sub-initializers, | 
|  | +   braced groupings containing several integers.  Each | 
|  | +   sub-initializer must be suitable as an initializer for the type | 
|  | +   `HARD_REG_SET' which is defined in `hard-reg-set.h'.  */ | 
|  | + | 
|  | +#define REG_CLASS_CONTENTS									\ | 
|  | +{												\ | 
|  | +  { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },	/* NO_REGS */		\ | 
|  | +  { 0x00020000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },	/* V1_REG */		\ | 
|  | +  { 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },	/* GR_REGS */		\ | 
|  | +  { 0x00000000, 0xffffffff, 0x00000000, 0x00000000, 0x00000000 },	/* FP_REGS */		\ | 
|  | +  { 0x00000000, 0x00000000, 0xffffffff, 0x00000000, 0x00000000 },	/* VEC_GR_REGS */	\ | 
|  | +  { 0x00000000, 0x00000000, 0x00000000, 0xffffffff, 0x00000000 },	/* VEC_FP_REGS */	\ | 
|  | +  { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003 },	/* FRAME_REGS */	\ | 
|  | +  { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x00000003 }	/* ALL_REGS */		\ | 
|  | +} | 
|  | + | 
|  | +/* A C expression whose value is a register class containing hard | 
|  | +   register REGNO.  In general there is more than one such class; | 
|  | +   choose a class which is "minimal", meaning that no smaller class | 
|  | +   also contains the register.  */ | 
|  | + | 
|  | +#define REGNO_REG_CLASS(REGNO) mips_regno_to_class[ (REGNO) ] | 
|  | + | 
|  | +/* A macro whose definition is the name of the class to which a | 
|  | +   valid base register must belong.  A base register is one used in | 
|  | +   an address which is the register value plus a displacement.  */ | 
|  | + | 
|  | +#define BASE_REG_CLASS GR_REGS | 
|  | + | 
|  | +/* A macro whose definition is the name of the class to which a | 
|  | +   valid index register must belong.  An index register is one used | 
|  | +   in an address where its value is either multiplied by a scale | 
|  | +   factor or added to another register (as well as added to a | 
|  | +   displacement).  */ | 
|  | + | 
|  | +#define INDEX_REG_CLASS NO_REGS | 
|  | + | 
|  | +/* We generally want to put call-clobbered registers ahead of | 
|  | +   call-saved ones.  (IRA expects this.)  */ | 
|  | + | 
|  | +#define REG_ALLOC_ORDER							\ | 
|  | +{ \ | 
|  | +  /* Call-clobbered GPRs.  */						\ | 
|  | +  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 1,	\ | 
|  | +  /* Call-saved GPRs.  */						\ | 
|  | +  2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,	       			\ | 
|  | +  /* GPRs that can never be exposed to the register allocator.  */	\ | 
|  | +  0,  14, 15,								\ | 
|  | +  /* Call-clobbered FPRs.  */						\ | 
|  | +  48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,	\ | 
|  | +  /* Call-saved FPRs.  */						\ | 
|  | +  32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,	\ | 
|  | +  /* Vector GPRs  */							\ | 
|  | +  64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,	\ | 
|  | +  80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,	\ | 
|  | +  /* Vector FPRs  */							\ | 
|  | +  96, 97, 98, 99,100,101,102,103,104,105,106,107,108,109,110,111,	\ | 
|  | + 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,	\ | 
|  | +  /* None of the remaining classes have defined call-saved		\ | 
|  | +     registers.  */							\ | 
|  | + 128,129,130,131 \ | 
|  | +} | 
|  | + | 
|  | +/* True if VALUE fits in a signed 12-bit (I-type) immediate.  */ | 
|  | + | 
|  | +#include "opcode-riscv.h" | 
|  | +#define SMALL_OPERAND(VALUE) \ | 
|  | +  ((unsigned HOST_WIDE_INT) (VALUE) + RISCV_IMM_REACH/2 < RISCV_IMM_REACH) | 
|  | + | 
|  | +/* True if VALUE can be loaded into a register using LUI.  */ | 
|  | + | 
|  | +#define LUI_OPERAND(VALUE)					\ | 
|  | +  (((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) == ((1UL<<31) - RISCV_IMM_REACH) \ | 
|  | +   || ((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) + RISCV_IMM_REACH == 0) | 
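|  | A standalone sketch of what these two predicates accept.  RISCV_IMM_REACH is assumed here to be 0x1000 (the 12-bit I-type immediate range suggested by the "I" constraint in constraints.md); the real value comes from opcode-riscv.h, and an LP64 host is assumed so that unsigned long is 64 bits wide, as the macros themselves require: | 
|  |  | 
|  |     #include <assert.h> | 
|  |  | 
|  |     #define RISCV_IMM_REACH 0x1000UL   /* assumption: 12-bit immediates */ | 
|  |  | 
|  |     #define SMALL_OPERAND(VALUE) \ | 
|  |       ((unsigned long) (VALUE) + RISCV_IMM_REACH/2 < RISCV_IMM_REACH) | 
|  |  | 
|  |     #define LUI_OPERAND(VALUE) \ | 
|  |       (((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) == ((1UL<<31) - RISCV_IMM_REACH) \ | 
|  |        || ((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) + RISCV_IMM_REACH == 0) | 
|  |  | 
|  |     int main (void) | 
|  |     { | 
|  |       assert (SMALL_OPERAND (2047L) && SMALL_OPERAND (-2048L)); /* fit in 12 bits */ | 
|  |       assert (!SMALL_OPERAND (2048L));                          /* one too large  */ | 
|  |       assert (LUI_OPERAND (0x12345000L));  /* low 12 bits clear, loadable by LUI  */ | 
|  |       assert (LUI_OPERAND (-4096L));       /* negative but 4 KiB-aligned          */ | 
|  |       assert (!LUI_OPERAND (0x12345678L)); /* needs LUI plus an ADDI              */ | 
|  |       return 0; | 
|  |     } | 
|  |  | 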
|  | + | 
|  | +/* Versions of the above tests that take a CONST_INT rtx and check its INTVAL.  */ | 
|  | + | 
|  | +#define SMALL_INT(X) SMALL_OPERAND (INTVAL (X)) | 
|  | +#define LUI_INT(X) LUI_OPERAND (INTVAL (X)) | 
|  | + | 
|  | +/* The HI and LO registers can only be reloaded via the general | 
|  | +   registers.  Condition code registers can only be loaded to the | 
|  | +   general registers, and from the floating point registers.  */ | 
|  | + | 
|  | +#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X)			\ | 
|  | +  mips_secondary_reload_class (CLASS, MODE, X, true) | 
|  | +#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X)			\ | 
|  | +  mips_secondary_reload_class (CLASS, MODE, X, false) | 
|  | + | 
|  | +/* Return the maximum number of consecutive registers | 
|  | +   needed to represent mode MODE in a register of class CLASS.  */ | 
|  | + | 
|  | +#define CLASS_MAX_NREGS(CLASS, MODE) mips_class_max_nregs (CLASS, MODE) | 
|  | + | 
|  | +#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \ | 
|  | +  mips_cannot_change_mode_class (FROM, TO, CLASS) | 
|  | + | 
|  | +/* Stack layout; function entry, exit and calling.  */ | 
|  | + | 
|  | +#define STACK_GROWS_DOWNWARD | 
|  | + | 
|  | +#define FRAME_GROWS_DOWNWARD 1 | 
|  | + | 
|  | +#define STARTING_FRAME_OFFSET 0 | 
|  | + | 
|  | +#define RETURN_ADDR_RTX mips_return_addr | 
|  | + | 
|  | +/* Don't use the least-significant bit to distinguish pointers to | 
|  | +   code from vtable indices.  */ | 
|  | + | 
|  | +#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta | 
|  | + | 
|  | +/* The eliminations to $17 are only used for mips16 code.  See the | 
|  | +   definition of HARD_FRAME_POINTER_REGNUM.  */ | 
|  | + | 
|  | +#define ELIMINABLE_REGS							\ | 
|  | +{{ ARG_POINTER_REGNUM,   STACK_POINTER_REGNUM},				\ | 
|  | + { ARG_POINTER_REGNUM,   HARD_FRAME_POINTER_REGNUM},			\ | 
|  | + { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM},				\ | 
|  | + { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}				\ | 
|  | + | 
|  | +#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ | 
|  | +  (OFFSET) = mips_initial_elimination_offset (FROM, TO) | 
|  | + | 
|  | +/* Allocate stack space for arguments at the beginning of each function.  */ | 
|  | +#define ACCUMULATE_OUTGOING_ARGS 1 | 
|  | + | 
|  | +/* The argument pointer always points to the first argument.  */ | 
|  | +#define FIRST_PARM_OFFSET(FNDECL) 0 | 
|  | + | 
|  | +#define REG_PARM_STACK_SPACE(FNDECL) 0 | 
|  | + | 
|  | +/* Define this if it is the responsibility of the caller to | 
|  | +   allocate the area reserved for arguments passed in registers. | 
|  | +   If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect | 
|  | +   of this macro is to determine whether the space is included in | 
|  | +   `crtl->outgoing_args_size'.  */ | 
|  | +#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1 | 
|  | + | 
|  | +#define STACK_BOUNDARY 128 | 
|  | + | 
|  | +/* Symbolic macros for the registers used to return integer and floating | 
|  | +   point values.  */ | 
|  | + | 
|  | +#define GP_RETURN (GP_REG_FIRST + 16) | 
|  | +#define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : (FP_REG_FIRST + 16)) | 
|  | + | 
|  | +#define MAX_ARGS_IN_REGISTERS 14 | 
|  | + | 
|  | +/* Symbolic macros for the first/last argument registers.  */ | 
|  | + | 
|  | +#define GP_ARG_FIRST (GP_REG_FIRST + 18) | 
|  | +#define GP_ARG_LAST  (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) | 
|  | +#define FP_ARG_FIRST (FP_REG_FIRST + 18) | 
|  | +#define FP_ARG_LAST  (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1) | 
|  | + | 
|  | +#define LIBCALL_VALUE(MODE) \ | 
|  | +  mips_function_value (NULL_TREE, NULL_TREE, MODE) | 
|  | + | 
|  | +#define FUNCTION_VALUE(VALTYPE, FUNC) \ | 
|  | +  mips_function_value (VALTYPE, FUNC, VOIDmode) | 
|  | + | 
|  | +/* 1 if N is a possible register number for a function value. | 
|  | +   On the MIPS, R2, R3, F0 and F2 are the only registers thus used. | 
|  | +   Currently, only R2 and F0 are implemented here (C has no complex type).  */ | 
|  | + | 
|  | +#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN \ | 
|  | +  || (LONG_DOUBLE_TYPE_SIZE == 128 && FP_RETURN != GP_RETURN \ | 
|  | +      && (N) == FP_RETURN + 1)) | 
|  | + | 
|  | +/* 1 if N is a possible register number for function argument passing. | 
|  | +   We have no FP argument registers when soft-float.  When FP registers | 
|  | +   are 32 bits, we can't directly reference the odd numbered ones.  */ | 
|  | + | 
|  | +#define FUNCTION_ARG_REGNO_P(N)					\ | 
|  | +  ((IN_RANGE((N), GP_ARG_FIRST, GP_ARG_LAST)			\ | 
|  | +    || (IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST)))		\ | 
|  | +   && !fixed_regs[N]) | 
|  | + | 
|  | +/* This structure has to cope with two different argument allocation | 
|  | +   schemes.  Most MIPS ABIs view the arguments as a structure, of which | 
|  | +   the first N words go in registers and the rest go on the stack.  If I | 
|  | +   < N, the Ith word might go in the Ith integer argument register or in a | 
|  | +   floating-point register.  For these ABIs, we only need to remember | 
|  | +   the offset of the current argument into the structure. | 
|  | + | 
|  | +   The EABI instead allocates the integer and floating-point arguments | 
|  | +   separately.  The first N words of FP arguments go in FP registers, | 
|  | +   the rest go on the stack.  Likewise, the first N words of the other | 
|  | +   arguments go in integer registers, and the rest go on the stack.  We | 
|  | +   need to maintain three counts: the number of integer registers used, | 
|  | +   the number of floating-point registers used, and the number of words | 
|  | +   passed on the stack. | 
|  | + | 
|  | +   We could keep separate information for the two ABIs (a word count for | 
|  | +   the standard ABIs, and three separate counts for the EABI).  But it | 
|  | +   seems simpler to view the standard ABIs as forms of EABI that do not | 
|  | +   allocate floating-point registers. | 
|  | + | 
|  | +   So for the standard ABIs, the first N words are allocated to integer | 
|  | +   registers, and mips_function_arg decides on an argument-by-argument | 
|  | +   basis whether that argument should really go in an integer register, | 
|  | +   or in a floating-point one.  */ | 
|  | + | 
|  | +typedef struct mips_args { | 
|  | +  /* The number of integer registers used so far.  For all ABIs except | 
|  | +     EABI, this is the number of words that have been added to the | 
|  | +     argument structure, limited to MAX_ARGS_IN_REGISTERS.  */ | 
|  | +  unsigned int num_gprs; | 
|  | + | 
|  | +  /* The number of words passed on the stack.  */ | 
|  | +  unsigned int stack_words; | 
|  | +} CUMULATIVE_ARGS; | 
|  | + | 
|  | +/* Initialize a variable CUM of type CUMULATIVE_ARGS | 
|  | +   for a call to a function whose data type is FNTYPE. | 
|  | +   For a library call, FNTYPE is 0.  */ | 
|  | + | 
|  | +#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \ | 
|  | +  mips_init_cumulative_args (&CUM, FNTYPE) | 
|  | + | 
|  | +#define FUNCTION_ARG_PADDING(MODE, TYPE) \ | 
|  | +  (mips_pad_arg_upward (MODE, TYPE) ? upward : downward) | 
|  | + | 
|  | +#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \ | 
|  | +  (mips_pad_reg_upward (MODE, TYPE) ? upward : downward) | 
|  | + | 
|  | + | 
|  | +#define EPILOGUE_USES(REGNO)	mips_epilogue_uses (REGNO) | 
|  | + | 
|  | +/* Even on RV32, provide 8-byte alignment for 64b floats. */ | 
|  | +#define MIPS_STACK_ALIGN(LOC) (((LOC) + 7) & -8) | 
|  | + | 
|  | +/* No mips port has ever used the profiler counter word, so don't emit it | 
|  | +   or the label for it.  */ | 
|  | + | 
|  | +#define NO_PROFILE_COUNTERS 1 | 
|  | + | 
|  | +/* Define this macro if the code for function profiling should come | 
|  | +   before the function prologue.  Normally, the profiling code comes | 
|  | +   after.  */ | 
|  | + | 
|  | +/* #define PROFILE_BEFORE_PROLOGUE */ | 
|  | + | 
|  | +/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, | 
|  | +   the stack pointer does not matter.  The value is tested only in | 
|  | +   functions that have frame pointers. | 
|  | +   No definition is equivalent to always zero.  */ | 
|  | + | 
|  | +#define EXIT_IGNORE_STACK 1 | 
|  | + | 
|  | + | 
|  | +/* Trampolines are a block of code followed by two pointers.  */ | 
|  | + | 
|  | +#define TRAMPOLINE_CODE_SIZE 16 | 
|  | +#define TRAMPOLINE_SIZE (TRAMPOLINE_CODE_SIZE + GET_MODE_SIZE (ptr_mode) * 2) | 
|  | + | 
|  | +/* Forcing a 64-bit alignment for 32-bit targets allows us to load two | 
|  | +   pointers from a single LUI base.  */ | 
|  | + | 
|  | +#define TRAMPOLINE_ALIGNMENT 64 | 
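|  | Worked out, the definitions above give a fixed trampoline footprint (simple arithmetic; ptr_mode is 8 bytes on RV64 and 4 bytes on RV32): | 
|  |  | 
|  |     /* RV64: TRAMPOLINE_SIZE = 16 + 8 * 2 = 32 bytes                     */ | 
|  |     /* RV32: TRAMPOLINE_SIZE = 16 + 4 * 2 = 24 bytes                     */ | 
|  |     /* and the block is aligned to TRAMPOLINE_ALIGNMENT (64 bits).       */ | 
|  |  | 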
|  | + | 
|  | +/* Addressing modes, and classification of registers for them.  */ | 
|  | + | 
|  | +#define REGNO_OK_FOR_INDEX_P(REGNO) 0 | 
|  | +#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \ | 
|  | +  mips_regno_mode_ok_for_base_p (REGNO, MODE, 1) | 
|  | + | 
|  | +/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx | 
|  | +   and check its validity for a certain class. | 
|  | +   We have two alternate definitions for each of them. | 
|  | +   The usual definition accepts all pseudo regs; the other rejects them all. | 
|  | +   The symbol REG_OK_STRICT causes the latter definition to be used. | 
|  | + | 
|  | +   Most source files want to accept pseudo regs in the hope that | 
|  | +   they will get allocated to the class that the insn wants them to be in. | 
|  | +   Some source files that are used after register allocation | 
|  | +   need to be strict.  */ | 
|  | + | 
|  | +#ifndef REG_OK_STRICT | 
|  | +#define REG_MODE_OK_FOR_BASE_P(X, MODE) \ | 
|  | +  mips_regno_mode_ok_for_base_p (REGNO (X), MODE, 0) | 
|  | +#else | 
|  | +#define REG_MODE_OK_FOR_BASE_P(X, MODE) \ | 
|  | +  mips_regno_mode_ok_for_base_p (REGNO (X), MODE, 1) | 
|  | +#endif | 
|  | + | 
|  | +#define REG_OK_FOR_INDEX_P(X) 0 | 
|  | + | 
|  | + | 
|  | +/* Maximum number of registers that can appear in a valid memory address.  */ | 
|  | + | 
|  | +#define MAX_REGS_PER_ADDRESS 1 | 
|  | + | 
|  | +/* Check for constness inline but use mips_legitimate_address_p | 
|  | +   to check whether a constant really is an address.  */ | 
|  | + | 
|  | +#define CONSTANT_ADDRESS_P(X) \ | 
|  | +  (CONSTANT_P (X) && memory_address_p (SImode, X)) | 
|  | + | 
|  | +#define LEGITIMATE_CONSTANT_P(X) (mips_const_insns (X) > 0) | 
|  | + | 
|  | +/* This handles the magic '..CURRENT_FUNCTION' symbol, which means | 
|  | +   'the start of the function that this code is output in'.  */ | 
|  | + | 
|  | +#define ASM_OUTPUT_LABELREF(FILE,NAME)  \ | 
|  | +  if (strcmp (NAME, "..CURRENT_FUNCTION") == 0)				\ | 
|  | +    asm_fprintf ((FILE), "%U%s",					\ | 
|  | +		 XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));	\ | 
|  | +  else									\ | 
|  | +    asm_fprintf ((FILE), "%U%s", (NAME)) | 
|  | + | 
|  | +/* Flag to mark a function decl symbol that requires a long call.  */ | 
|  | +#define SYMBOL_FLAG_LONG_CALL	(SYMBOL_FLAG_MACH_DEP << 0) | 
|  | +#define SYMBOL_REF_LONG_CALL_P(X)					\ | 
|  | +  ((SYMBOL_REF_FLAGS (X) & SYMBOL_FLAG_LONG_CALL) != 0) | 
|  | + | 
|  | +/* This flag marks functions that cannot be lazily bound.  */ | 
|  | +#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1) | 
|  | +#define SYMBOL_REF_BIND_NOW_P(RTX) \ | 
|  | +  ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0) | 
|  | + | 
|  | +#define JUMP_TABLES_IN_TEXT_SECTION 0 | 
|  | +#define CASE_VECTOR_MODE ptr_mode | 
|  | +#define CASE_VECTOR_PC_RELATIVE 0 | 
|  | + | 
|  | +/* Define this as 1 if `char' should by default be signed; else as 0.  */ | 
|  | +#ifndef DEFAULT_SIGNED_CHAR | 
|  | +#define DEFAULT_SIGNED_CHAR 1 | 
|  | +#endif | 
|  | + | 
|  | +/* Although LDC1 and SDC1 provide 64-bit moves on 32-bit targets, | 
|  | +   we generally don't want to use them for copying arbitrary data. | 
|  | +   A single N-word move is usually the same cost as N single-word moves.  */ | 
|  | +#define MOVE_MAX UNITS_PER_WORD | 
|  | +#define MAX_MOVE_MAX 8 | 
|  | + | 
|  | +/* Define this macro as a C expression which is nonzero if | 
|  | +   accessing less than a word of memory (i.e. a `char' or a | 
|  | +   `short') is no faster than accessing a word of memory, i.e., if | 
|  | +   such accesses require more than one instruction or if there is no | 
|  | +   difference in cost between byte and (aligned) word loads. | 
|  | + | 
|  | +   On RISC machines, it tends to generate better code to define | 
|  | +   this as 1, since it avoids making a QI or HI mode register. | 
|  | + | 
|  | +   But, generating word accesses for -mips16 is generally bad as shifts | 
|  | +   (often extended) would be needed for byte accesses.  */ | 
|  | +#define SLOW_BYTE_ACCESS 1 | 
|  | + | 
|  | +/* Standard MIPS integer shifts truncate the shift amount to the | 
|  | +   width of the shifted operand.  However, Loongson vector shifts | 
|  | +   do not truncate the shift amount at all.  */ | 
|  | +#define SHIFT_COUNT_TRUNCATED 1 | 
|  | + | 
|  | +/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits | 
|  | +   is done just by pretending it is already truncated.  */ | 
|  | +#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) \ | 
|  | +  (TARGET_64BIT ? ((INPREC) <= 32 || (OUTPREC) > 32) : 1) | 
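|  | + | 
|  | +/* Worked example of the 64-bit case: truncating DImode to SImode | 
|  | +   (INPREC 64, OUTPREC 32) fails both tests, so it is not a no-op; SImode | 
|  | +   values are assumed to be held sign-extended in 64-bit registers, so an | 
|  | +   explicit truncation pattern is needed.  SImode to HImode (INPREC 32) | 
|  | +   passes the first test and is treated as a no-op.  */ | 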
|  | + | 
|  | + | 
|  | +/* Specify the machine mode that pointers have. | 
|  | +   After generation of rtl, the compiler makes no further distinction | 
|  | +   between pointers and any other objects of this machine mode.  */ | 
|  | + | 
|  | +#ifndef Pmode | 
|  | +#define Pmode (TARGET_64BIT ? DImode : SImode) | 
|  | +#endif | 
|  | + | 
|  | +/* Give call MEMs SImode since it is the "most permissive" mode | 
|  | +   for both 32-bit and 64-bit targets.  */ | 
|  | + | 
|  | +#define FUNCTION_MODE SImode | 
|  | + | 
|  | + | 
|  | + | 
|  | +/* Define if copies to/from condition code registers should be avoided. | 
|  | + | 
|  | +   This is needed for the MIPS because reload_outcc is not complete; | 
|  | +   it needs to handle cases where the source is a general or another | 
|  | +   condition code register.  */ | 
|  | +#define AVOID_CCMODE_COPIES | 
|  | + | 
|  | +/* A C expression for the cost of a branch instruction.  A value of | 
|  | +   1 is the default; other values are interpreted relative to that.  */ | 
|  | + | 
|  | +#define BRANCH_COST(speed_p, predictable_p) mips_branch_cost | 
|  | +#define LOGICAL_OP_NON_SHORT_CIRCUIT 0 | 
|  | + | 
|  | +/* Control the assembler format that we output.  */ | 
|  | + | 
|  | +/* Output to assembler file text saying following lines | 
|  | +   may contain character constants, extra white space, comments, etc.  */ | 
|  | + | 
|  | +#ifndef ASM_APP_ON | 
|  | +#define ASM_APP_ON " #APP\n" | 
|  | +#endif | 
|  | + | 
|  | +/* Output to assembler file text saying following lines | 
|  | +   no longer contain unusual constructs.  */ | 
|  | + | 
|  | +#ifndef ASM_APP_OFF | 
|  | +#define ASM_APP_OFF " #NO_APP\n" | 
|  | +#endif | 
|  | + | 
|  | +#define REGISTER_NAMES							  \ | 
|  | +{ "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",	   \ | 
|  | +  "x8",  "x9",  "x10", "x11", "x12", "x13", "x14", "x15",	   \ | 
|  | +  "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",	   \ | 
|  | +  "x24", "x25", "x26", "x27", "x28", "x29", "x30", "x31",	   \ | 
|  | +  "f0",  "f1",  "f2",  "f3",  "f4",  "f5",  "f6",  "f7",	   \ | 
|  | +  "f8",  "f9",  "f10", "f11", "f12", "f13", "f14", "f15",	   \ | 
|  | +  "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",	   \ | 
|  | +  "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",	   \ | 
|  | +  "vx0", "vx1", "vx2", "vx3", "vx4", "vx5", "vx6", "vx7",	   \ | 
|  | +  "vx8", "vx9", "vx10","vx11","vx12","vx13","vx14","vx15",	   \ | 
|  | +  "vx16","vx17","vx18","vx19","vx20","vx21","vx22","vx23",	   \ | 
|  | +  "vx24","vx25","vx26","vx27","vx28","vx29","vx30","vx31",	   \ | 
|  | +  "vf0", "vf1", "vf2", "vf3", "vf4", "vf5", "vf6", "vf7",	   \ | 
|  | +  "vf8", "vf9", "vf10","vf11","vf12","vf13","vf14","vf15",	   \ | 
|  | +  "vf16","vf17","vf18","vf19","vf20","vf21","vf22","vf23",	   \ | 
|  | +  "vf24","vf25","vf26","vf27","vf28","vf29","vf30","vf31",	   \ | 
|  | +  "arg", "frame", "fakec", "", } | 
|  | + | 
|  | +/* List the "software" names for each register.  Also list the numerical | 
|  | +   names for $fp and $sp.  */ | 
|  | + | 
|  | +#define ADDITIONAL_REGISTER_NAMES					\ | 
|  | +{									\ | 
|  | +  { "zero",	 0 + GP_REG_FIRST },					\ | 
|  | +  { "ra",	 1 + GP_REG_FIRST },					\ | 
|  | +  { "s0",	 2 + GP_REG_FIRST },					\ | 
|  | +  { "s1",	 3 + GP_REG_FIRST },					\ | 
|  | +  { "s2",	 4 + GP_REG_FIRST },					\ | 
|  | +  { "s3",	 5 + GP_REG_FIRST },					\ | 
|  | +  { "s4",	 6 + GP_REG_FIRST },					\ | 
|  | +  { "s5",	 7 + GP_REG_FIRST },					\ | 
|  | +  { "s6",	 8 + GP_REG_FIRST },					\ | 
|  | +  { "s7",	 9 + GP_REG_FIRST },					\ | 
|  | +  { "s8",	10 + GP_REG_FIRST },					\ | 
|  | +  { "s9",	11 + GP_REG_FIRST },					\ | 
|  | +  { "s10",	12 + GP_REG_FIRST },					\ | 
|  | +  { "s11",	13 + GP_REG_FIRST },					\ | 
|  | +  { "sp",	14 + GP_REG_FIRST },					\ | 
|  | +  { "tp",	15 + GP_REG_FIRST },					\ | 
|  | +  { "v0",	16 + GP_REG_FIRST },					\ | 
|  | +  { "v1",	17 + GP_REG_FIRST },					\ | 
|  | +  { "a0",	18 + GP_REG_FIRST },					\ | 
|  | +  { "a1",	19 + GP_REG_FIRST },					\ | 
|  | +  { "a2",	20 + GP_REG_FIRST },					\ | 
|  | +  { "a3",	21 + GP_REG_FIRST },					\ | 
|  | +  { "a4",	22 + GP_REG_FIRST },					\ | 
|  | +  { "a5",	23 + GP_REG_FIRST },					\ | 
|  | +  { "a6",	24 + GP_REG_FIRST },					\ | 
|  | +  { "a7",	25 + GP_REG_FIRST },					\ | 
|  | +  { "a8",	26 + GP_REG_FIRST },					\ | 
|  | +  { "a9",	27 + GP_REG_FIRST },					\ | 
|  | +  { "a10",	28 + GP_REG_FIRST },					\ | 
|  | +  { "a11",	29 + GP_REG_FIRST },					\ | 
|  | +  { "a12",	30 + GP_REG_FIRST },					\ | 
|  | +  { "a13",	31 + GP_REG_FIRST },					\ | 
|  | +  { "fs0",	 0 + FP_REG_FIRST },					\ | 
|  | +  { "fs1",	 1 + FP_REG_FIRST },					\ | 
|  | +  { "fs2",	 2 + FP_REG_FIRST },					\ | 
|  | +  { "fs3",	 3 + FP_REG_FIRST },					\ | 
|  | +  { "fs4",	 4 + FP_REG_FIRST },					\ | 
|  | +  { "fs5",	 5 + FP_REG_FIRST },					\ | 
|  | +  { "fs6",	 6 + FP_REG_FIRST },					\ | 
|  | +  { "fs7",	 7 + FP_REG_FIRST },					\ | 
|  | +  { "fs8",	 8 + FP_REG_FIRST },					\ | 
|  | +  { "fs9",	 9 + FP_REG_FIRST },					\ | 
|  | +  { "fs10",	10 + FP_REG_FIRST },					\ | 
|  | +  { "fs11",	11 + FP_REG_FIRST },					\ | 
|  | +  { "fs12",	12 + FP_REG_FIRST },					\ | 
|  | +  { "fs13",	13 + FP_REG_FIRST },					\ | 
|  | +  { "fs14",	14 + FP_REG_FIRST },					\ | 
|  | +  { "fs15",	15 + FP_REG_FIRST },					\ | 
|  | +  { "fv0",	16 + FP_REG_FIRST },					\ | 
|  | +  { "fv1",	17 + FP_REG_FIRST },					\ | 
|  | +  { "fa0",	18 + FP_REG_FIRST },					\ | 
|  | +  { "fa1",	19 + FP_REG_FIRST },					\ | 
|  | +  { "fa2",	20 + FP_REG_FIRST },					\ | 
|  | +  { "fa3",	21 + FP_REG_FIRST },					\ | 
|  | +  { "fa4",	22 + FP_REG_FIRST },					\ | 
|  | +  { "fa5",	23 + FP_REG_FIRST },					\ | 
|  | +  { "fa6",	24 + FP_REG_FIRST },					\ | 
|  | +  { "fa7",	25 + FP_REG_FIRST },					\ | 
|  | +  { "fa8",	26 + FP_REG_FIRST },					\ | 
|  | +  { "fa9",	27 + FP_REG_FIRST },					\ | 
|  | +  { "fa10",	28 + FP_REG_FIRST },					\ | 
|  | +  { "fa11",	29 + FP_REG_FIRST },					\ | 
|  | +  { "fa12",	30 + FP_REG_FIRST },					\ | 
|  | +  { "fa13",	31 + FP_REG_FIRST },					\ | 
|  | +} | 
|  | + | 
|  | +/* This is meant to be redefined in the host dependent files.  It is a | 
|  | +   set of alternative names and regnums for mips coprocessors.  */ | 
|  | + | 
|  | +#define ALL_COP_ADDITIONAL_REGISTER_NAMES | 
|  | + | 
|  | +#define DBR_OUTPUT_SEQEND(STREAM)					\ | 
|  | +do									\ | 
|  | +  {									\ | 
|  | +    /* Emit a blank line after the delay slot for emphasis.  */		\ | 
|  | +    fputs ("\n", STREAM);						\ | 
|  | +  }									\ | 
|  | +while (0) | 
|  | + | 
|  | +/* mips-tfile does not understand .stabd directives.  */ | 
|  | +#define DBX_OUTPUT_SOURCE_LINE(STREAM, LINE, COUNTER) do {	\ | 
|  | +  dbxout_begin_stabn_sline (LINE);				\ | 
|  | +  dbxout_stab_value_internal_label ("LM", &COUNTER);		\ | 
|  | +} while (0) | 
|  | + | 
|  | +/* The MIPS implementation uses some labels for its own purposes.  The | 
|  | +   following lists the labels that are created; they are all formed by the | 
|  | +   pattern $L[a-z].*.  The machine-independent portion of GCC creates | 
|  | +   labels matching:  $L[A-Z][0-9]+ and $L[0-9]+. | 
|  | + | 
|  | +	LM[0-9]+	Silicon Graphics/ECOFF stabs label before each stmt. | 
|  | +	$Lb[0-9]+	Begin blocks for MIPS debug support | 
|  | +	$Lc[0-9]+	Label for use in s<xx> operation. | 
|  | +	$Le[0-9]+	End blocks for MIPS debug support  */ | 
|  | + | 
|  | +#undef ASM_DECLARE_OBJECT_NAME | 
|  | +#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \ | 
|  | +  mips_declare_object (STREAM, NAME, "", ":\n") | 
|  | + | 
|  | +/* Globalizing directive for a label.  */ | 
|  | +#define GLOBAL_ASM_OP "\t.globl\t" | 
|  | + | 
|  | +/* This says how to define a global common symbol.  */ | 
|  | + | 
|  | +#define ASM_OUTPUT_ALIGNED_DECL_COMMON mips_output_aligned_decl_common | 
|  | + | 
|  | +/* This says how to define a local common symbol (i.e., not visible to | 
|  | +   the linker).  */ | 
|  | + | 
|  | +#ifndef ASM_OUTPUT_ALIGNED_LOCAL | 
|  | +#define ASM_OUTPUT_ALIGNED_LOCAL(STREAM, NAME, SIZE, ALIGN) \ | 
|  | +  mips_declare_common_object (STREAM, NAME, "\n\t.lcomm\t", SIZE, ALIGN, false) | 
|  | +#endif | 
|  | + | 
|  | +/* This is how to declare a function name.  The actual work of | 
|  | +   emitting the label is moved to function_prologue, so that we can | 
|  | +   get the line number correctly emitted before the .ent directive, | 
|  | +   and after any .file directives.  Define as empty so that the function | 
|  | +   is not declared before the .ent directive elsewhere.  */ | 
|  | + | 
|  | +#undef ASM_DECLARE_FUNCTION_NAME | 
|  | +#define ASM_DECLARE_FUNCTION_NAME(STREAM,NAME,DECL) | 
|  | + | 
|  | +/* This is how to store into the string LABEL | 
|  | +   the symbol_ref name of an internal numbered label where | 
|  | +   PREFIX is the class of label and NUM is the number within the class. | 
|  | +   This is suitable for output with `assemble_name'.  */ | 
|  | + | 
|  | +#undef ASM_GENERATE_INTERNAL_LABEL | 
|  | +#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM)			\ | 
|  | +  sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM)) | 
|  | + | 
|  | +/* Print debug labels as "foo = ." rather than "foo:" because they should | 
|  | +   represent a byte pointer rather than an ISA-encoded address.  This is | 
|  | +   particularly important for code like: | 
|  | + | 
|  | +	$LFBxxx = . | 
|  | +		.cfi_startproc | 
|  | +		... | 
|  | +		.section .gcc_except_table,... | 
|  | +		... | 
|  | +		.uleb128 foo-$LFBxxx | 
|  | + | 
|  | +   The .uleb128 requires $LFBxxx to match the FDE start address, which is | 
|  | +   likewise a byte pointer rather than an ISA-encoded address. | 
|  | + | 
|  | +   At the time of writing, this hook is not used for the function end | 
|  | +   label: | 
|  | + | 
|  | +   	$LFExxx: | 
|  | +		.end foo | 
|  | + | 
|  | +   But this doesn't matter, because GAS doesn't treat a pre-.end label | 
|  | +   as a MIPS16 one anyway.  */ | 
|  | + | 
|  | +#define ASM_OUTPUT_DEBUG_LABEL(FILE, PREFIX, NUM)			\ | 
|  | +  fprintf (FILE, "%s%s%d = .\n", LOCAL_LABEL_PREFIX, PREFIX, NUM) | 
|  | + | 
|  | +/* This is how to output an element of a case-vector that is absolute.  */ | 
|  | + | 
|  | +#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE)				\ | 
|  | +  fprintf (STREAM, "\t%s\t%sL%d\n",					\ | 
|  | +	   ptr_mode == DImode ? ".dword" : ".word",			\ | 
|  | +	   LOCAL_LABEL_PREFIX,						\ | 
|  | +	   VALUE) | 
|  | + | 
|  | +/* This is how to output an element of a case-vector.  We can make the | 
|  | +   entries PC-relative in MIPS16 code and GP-relative when .gp(d)word | 
|  | +   is supported.  */ | 
|  | + | 
|  | +#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL)		\ | 
|  | +do {									\ | 
|  | +  fprintf (STREAM, "\t%s\t%sL%d\n",					\ | 
|  | +	   ptr_mode == DImode ? ".dword" : ".word",			\ | 
|  | +	   LOCAL_LABEL_PREFIX, VALUE);					\ | 
|  | +} while (0) | 
|  | + | 
|  | +/* This is how to output an assembler line | 
|  | +   that says to advance the location counter | 
|  | +   to a multiple of 2**LOG bytes.  */ | 
|  | + | 
|  | +#define ASM_OUTPUT_ALIGN(STREAM,LOG)					\ | 
|  | +  fprintf (STREAM, "\t.align\t%d\n", (LOG)) | 
|  | + | 
|  | +/* This is how to output an assembler line to advance the location | 
|  | +   counter by SIZE bytes.  */ | 
|  | + | 
|  | +#undef ASM_OUTPUT_SKIP | 
|  | +#define ASM_OUTPUT_SKIP(STREAM,SIZE)					\ | 
|  | +  fprintf (STREAM, "\t.space\t"HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE)) | 
|  | + | 
|  | +/* This is how to output a string.  */ | 
|  | +#undef ASM_OUTPUT_ASCII | 
|  | +#define ASM_OUTPUT_ASCII mips_output_ascii | 
|  | + | 
|  | +/* Output #ident as a string in the read-only data section.  */ | 
|  | +#undef  ASM_OUTPUT_IDENT | 
|  | +#define ASM_OUTPUT_IDENT(FILE, STRING)					\ | 
|  | +{									\ | 
|  | +  const char *p = STRING;						\ | 
|  | +  int size = strlen (p) + 1;						\ | 
|  | +  switch_to_section (readonly_data_section);				\ | 
|  | +  assemble_string (p, size);						\ | 
|  | +} | 
|  | + | 
|  | +/* Define the strings to put out for each section in the object file.  */ | 
|  | +#define TEXT_SECTION_ASM_OP	"\t.text"	/* instructions */ | 
|  | +#define DATA_SECTION_ASM_OP	"\t.data"	/* large data */ | 
|  | + | 
|  | +#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO)				\ | 
|  | +do									\ | 
|  | +  {									\ | 
|  | +    fprintf (STREAM, "\t%s\t%s,%s,-8\n\t%s\t%s,0(%s)\n",		\ | 
|  | +	     TARGET_64BIT ? "daddiu" : "addiu",				\ | 
|  | +	     reg_names[STACK_POINTER_REGNUM],				\ | 
|  | +	     reg_names[STACK_POINTER_REGNUM],				\ | 
|  | +	     TARGET_64BIT ? "sd" : "sw",				\ | 
|  | +	     reg_names[REGNO],						\ | 
|  | +	     reg_names[STACK_POINTER_REGNUM]);				\ | 
|  | +  }									\ | 
|  | +while (0) | 
|  | + | 
|  | +#define ASM_OUTPUT_REG_POP(STREAM,REGNO)				\ | 
|  | +do									\ | 
|  | +  {									\ | 
|  | +    fprintf (STREAM, "\t%s\t%s,0(%s)\n\t%s\t%s,%s,8\n",			\ | 
|  | +	     TARGET_64BIT ? "ld" : "lw",				\ | 
|  | +	     reg_names[REGNO],						\ | 
|  | +	     reg_names[STACK_POINTER_REGNUM],				\ | 
|  | +	     TARGET_64BIT ? "daddu" : "addu",				\ | 
|  | +	     reg_names[STACK_POINTER_REGNUM],				\ | 
|  | +	     reg_names[STACK_POINTER_REGNUM]);				\ | 
|  | +  }									\ | 
|  | +while (0) | 
|  | + | 
|  | +/* How to start an assembler comment. | 
|  | +   The leading space is important (the mips native assembler requires it).  */ | 
|  | +#ifndef ASM_COMMENT_START | 
|  | +#define ASM_COMMENT_START " #" | 
|  | +#endif | 
|  | + | 
|  | +#undef SIZE_TYPE | 
|  | +#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int") | 
|  | + | 
|  | +#undef PTRDIFF_TYPE | 
|  | +#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int") | 
|  | + | 
|  | +/* The maximum number of bytes that can be copied by one iteration of | 
|  | +   a movmemsi loop; see mips_block_move_loop.  */ | 
|  | +#define MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER \ | 
|  | +  (UNITS_PER_WORD * 4) | 
|  | + | 
|  | +/* The maximum number of bytes that can be copied by a straight-line | 
|  | +   implementation of movmemsi; see mips_block_move_straight.  We want | 
|  | +   to make sure that any loop-based implementation will iterate at | 
|  | +   least twice.  */ | 
|  | +#define MIPS_MAX_MOVE_BYTES_STRAIGHT \ | 
|  | +  (MIPS_MAX_MOVE_BYTES_PER_LOOP_ITER * 2) | 
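|  | + | 
|  | +/* As a concrete illustration, assuming UNITS_PER_WORD is 8 on a 64-bit | 
|  | +   target: each loop iteration copies at most 32 bytes and straight-line | 
|  | +   copies are capped at 64 bytes, so any block that falls back to the | 
|  | +   loop runs it at least twice.  */ | 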
|  | + | 
|  | +/* The base cost of a memcpy call, for MOVE_RATIO and friends.  These | 
|  | +   values were determined experimentally by benchmarking with CSiBE. | 
|  | +   In theory, the call overhead is higher for TARGET_ABICALLS (especially | 
|  | +   for o32 where we have to restore $gp afterwards as well as make an | 
|  | +   indirect call), but in practice, bumping this up higher for | 
|  | +   TARGET_ABICALLS doesn't make much difference to code size.  */ | 
|  | + | 
|  | +#define MIPS_CALL_RATIO 8 | 
|  | + | 
|  | +/* Any loop-based implementation of movmemsi will have at least | 
|  | +   MIPS_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory | 
|  | +   moves, so allow individual copies of fewer elements. | 
|  | + | 
|  | +   When movmemsi is not available, use a value approximating | 
|  | +   the length of a memcpy call sequence, so that move_by_pieces | 
|  | +   will generate inline code if it is shorter than a function call. | 
|  | +   Since move_by_pieces_ninsns counts memory-to-memory moves, but | 
|  | +   we'll have to generate a load/store pair for each, halve the | 
|  | +   value of MIPS_CALL_RATIO to take that into account.  */ | 
|  | + | 
|  | +#define MOVE_RATIO(speed)				\ | 
|  | +  (HAVE_movmemsi					\ | 
|  | +   ? MIPS_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX		\ | 
|  | +   : MIPS_CALL_RATIO / 2) | 
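|  | + | 
|  | +/* On a 64-bit target (MOVE_MAX of 8), this works out to 64 / 8 = 8 when | 
|  | +   movmemsi is available and to MIPS_CALL_RATIO / 2 = 4 otherwise.  */ | 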
|  | + | 
|  | +/* movmemsi is meant to generate code that is at least as good as | 
|  | +   move_by_pieces.  However, movmemsi effectively uses a by-pieces | 
|  | +   implementation both for moves smaller than a word and for word-aligned | 
|  | +   moves of no more than MIPS_MAX_MOVE_BYTES_STRAIGHT bytes.  We should | 
|  | +   allow the tree-level optimisers to do such moves by pieces, as it | 
|  | +   often exposes other optimization opportunities.  We might as well | 
|  | +   continue to use movmemsi at the rtl level though, as it produces | 
|  | +   better code when scheduling is disabled (such as at -O).  */ | 
|  | + | 
|  | +#define MOVE_BY_PIECES_P(SIZE, ALIGN)				\ | 
|  | +  (HAVE_movmemsi						\ | 
|  | +   ? (!currently_expanding_to_rtl				\ | 
|  | +      && ((ALIGN) < BITS_PER_WORD				\ | 
|  | +	  ? (SIZE) < UNITS_PER_WORD				\ | 
|  | +	  : (SIZE) <= MIPS_MAX_MOVE_BYTES_STRAIGHT))		\ | 
|  | +   : (move_by_pieces_ninsns (SIZE, ALIGN, MOVE_MAX_PIECES + 1)	\ | 
|  | +      < (unsigned int) MOVE_RATIO (false))) | 
|  | + | 
|  | +/* For CLEAR_RATIO, when optimizing for size, give a better estimate | 
|  | +   of the length of a memset call, but use the default otherwise.  */ | 
|  | + | 
|  | +#define CLEAR_RATIO(speed)\ | 
|  | +  ((speed) ? 15 : MIPS_CALL_RATIO) | 
|  | + | 
|  | +/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when | 
|  | +   optimizing for size adjust the ratio to account for the overhead of | 
|  | +   loading the constant and replicating it across the word.  */ | 
|  | + | 
|  | +#define SET_RATIO(speed) \ | 
|  | +  ((speed) ? 15 : MIPS_CALL_RATIO - 2) | 
|  | + | 
|  | +/* STORE_BY_PIECES_P can be used when copying a constant string, but | 
|  | +   in that case each word takes 3 insns (lui, ori, sw), or more in | 
|  | +   64-bit mode, instead of 2 (lw, sw).  For now we always fail this | 
|  | +   and let the move_by_pieces code copy the string from read-only | 
|  | +   memory.  In the future, this could be tuned further for multi-issue | 
|  | +   CPUs that can issue stores down one pipe and arithmetic instructions | 
|  | +   down another; in that case, the lui/ori/sw combination would be a | 
|  | +   win for long enough strings.  */ | 
|  | + | 
|  | +#define STORE_BY_PIECES_P(SIZE, ALIGN) 0 | 
|  | + | 
|  | +#ifndef HAVE_AS_TLS | 
|  | +#define HAVE_AS_TLS 0 | 
|  | +#endif | 
|  | + | 
|  | +#ifndef USED_FOR_TARGET | 
|  | + | 
|  | +extern const enum reg_class mips_regno_to_class[]; | 
|  | +extern bool mips_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER]; | 
|  | +extern const char *current_function_file; /* filename current function is in */ | 
|  | +extern int num_source_filenames;	/* current .file # */ | 
|  | +extern int mips_dbx_regno[]; | 
|  | +extern int mips_dwarf_regno[]; | 
|  | +extern bool mips_split_p[]; | 
|  | +extern enum processor mips_arch;        /* which cpu to codegen for */ | 
|  | +extern enum processor mips_tune;        /* which cpu to schedule for */ | 
|  | +extern GTY(()) struct target_globals *mips16_globals; | 
|  | +#endif | 
|  | + | 
|  | +/* As on most targets, we want the .eh_frame section to be read-only where | 
|  | +   possible.  And as on most targets, this means two things: | 
|  | + | 
|  | +     (a) Non-locally-binding pointers must have an indirect encoding, | 
|  | +	 so that the addresses in the .eh_frame section itself become | 
|  | +	 locally-binding. | 
|  | + | 
|  | +     (b) A shared library's .eh_frame section must encode locally-binding | 
|  | +	 pointers in a relative (relocation-free) form. | 
|  | + | 
|  | +   However, MIPS has traditionally not allowed directives like: | 
|  | + | 
|  | +	.long	x-. | 
|  | + | 
|  | +   in cases where "x" is in a different section, or is not defined in the | 
|  | +   same assembly file.  We are therefore unable to emit the PC-relative | 
|  | +   form required by (b) at assembly time. | 
|  | + | 
|  | +   Fortunately, the linker is able to convert absolute addresses into | 
|  | +   PC-relative addresses on our behalf.  Unfortunately, only certain | 
|  | +   versions of the linker know how to do this for indirect pointers, | 
|  | +   and for personality data.  We must fall back on using writable | 
|  | +   .eh_frame sections for shared libraries if the linker does not | 
|  | +   support this feature.  */ | 
|  | +#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \ | 
|  | +  (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_absptr) | 
|  | + | 
|  | +/* For switching between MIPS16 and non-MIPS16 modes.  */ | 
|  | +#define SWITCHABLE_TARGET 1 | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/riscv.md gcc-4.9.2-riscv/gcc/config/riscv/riscv.md | 
|  | --- gcc-4.9.2/gcc/config/riscv/riscv.md	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/riscv.md	2014-12-02 18:04:50.111949590 -0800 | 
|  | @@ -0,0 +1,2888 @@ | 
|  | +;;  Mips.md	     Machine Description for MIPS based processors | 
|  | +;;  Copyright (C) 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, | 
|  | +;;  1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 | 
|  | +;;  Free Software Foundation, Inc. | 
|  | +;;  Contributed by   A. Lichnewsky, lich@inria.inria.fr | 
|  | +;;  Changes by       Michael Meissner, meissner@osf.org | 
|  | +;;  64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and | 
|  | +;;  Brendan Eich, brendan@microunity.com. | 
|  | + | 
|  | +;; This file is part of GCC. | 
|  | + | 
|  | +;; GCC is free software; you can redistribute it and/or modify | 
|  | +;; it under the terms of the GNU General Public License as published by | 
|  | +;; the Free Software Foundation; either version 3, or (at your option) | 
|  | +;; any later version. | 
|  | + | 
|  | +;; GCC is distributed in the hope that it will be useful, | 
|  | +;; but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +;; GNU General Public License for more details. | 
|  | + | 
|  | +;; You should have received a copy of the GNU General Public License | 
|  | +;; along with GCC; see the file COPYING3.  If not see | 
|  | +;; <http://www.gnu.org/licenses/>. | 
|  | + | 
|  | +(define_enum "processor" [ | 
|  | +  rocket | 
|  | +]) | 
|  | + | 
|  | +(define_c_enum "unspec" [ | 
|  | +  ;; Unaligned accesses. | 
|  | +  UNSPEC_LOAD_LEFT | 
|  | +  UNSPEC_LOAD_RIGHT | 
|  | +  UNSPEC_STORE_LEFT | 
|  | +  UNSPEC_STORE_RIGHT | 
|  | + | 
|  | +  ;; Floating-point moves. | 
|  | +  UNSPEC_LOAD_LOW | 
|  | +  UNSPEC_LOAD_HIGH | 
|  | +  UNSPEC_STORE_WORD | 
|  | +  UNSPEC_MFHC1 | 
|  | +  UNSPEC_MTHC1 | 
|  | + | 
|  | +  ;; HI/LO moves. | 
|  | +  UNSPEC_MFHI | 
|  | +  UNSPEC_MTHI | 
|  | +  UNSPEC_SET_HILO | 
|  | + | 
|  | +  ;; GP manipulation. | 
|  | +  UNSPEC_EH_RETURN | 
|  | +  UNSPEC_SET_GOT_VERSION | 
|  | +  UNSPEC_UPDATE_GOT_VERSION | 
|  | + | 
|  | +  ;; Symbolic accesses. | 
|  | +  UNSPEC_LOAD_CALL | 
|  | +  UNSPEC_LOAD_GOT | 
|  | +  UNSPEC_TLS_GD | 
|  | +  UNSPEC_TLS_IE | 
|  | + | 
|  | +  ;; MIPS16 constant pools. | 
|  | +  UNSPEC_ALIGN | 
|  | +  UNSPEC_CONSTTABLE_INT | 
|  | +  UNSPEC_CONSTTABLE_FLOAT | 
|  | + | 
|  | +  ;; Blockage and synchronisation. | 
|  | +  UNSPEC_BLOCKAGE | 
|  | +  UNSPEC_CLEAR_HAZARD | 
|  | +  UNSPEC_RDHWR | 
|  | +  UNSPEC_SYNC | 
|  | + | 
|  | +  ;; Interrupt handling. | 
|  | +  UNSPEC_MIPS_CACHE | 
|  | +  UNSPEC_ERET | 
|  | +  UNSPEC_DERET | 
|  | +  UNSPEC_DI | 
|  | +  UNSPEC_EHB | 
|  | +  UNSPEC_RDPGPR | 
|  | +  UNSPEC_COP0 | 
|  | + | 
|  | +  ;; Used in a call expression in place of args_size.  It's present for PIC | 
|  | +  ;; indirect calls where it contains args_size and the function symbol. | 
|  | +  UNSPEC_CALL_ATTR | 
|  | + | 
|  | +  ;; Fences | 
|  | +  UNSPEC_FENCE | 
|  | +  UNSPEC_FENCE_I | 
|  | +]) | 
|  | + | 
|  | +(define_constants | 
|  | +  [(RETURN_ADDR_REGNUM		1) | 
|  | +   (GOT_VERSION_REGNUM		79) | 
|  | + | 
|  | +   (UNSPEC_RISCV_VLOAD          700) | 
|  | +   (UNSPEC_RISCV_VSTORE         701) | 
|  | +   (UNSPEC_RISCV_VLOAD_STRIDED  702) | 
|  | +   (UNSPEC_RISCV_VSTORE_STRIDED 703) | 
|  | +   (UNSPEC_RISCV_STOP           704) | 
|  | +  ] | 
|  | +) | 
|  | + | 
|  | +(include "predicates.md") | 
|  | +(include "constraints.md") | 
|  | + | 
|  | +;; .................... | 
|  | +;; | 
|  | +;;	Attributes | 
|  | +;; | 
|  | +;; .................... | 
|  | + | 
|  | +(define_attr "got" "unset,xgot_high,load" | 
|  | +  (const_string "unset")) | 
|  | + | 
|  | +;; For jal instructions, this attribute is DIRECT when the target address | 
|  | +;; is symbolic and INDIRECT when it is a register. | 
|  | +(define_attr "jal" "unset,direct,indirect" | 
|  | +  (const_string "unset")) | 
|  | + | 
|  | +;; Classification of moves, extensions and truncations.  Most values | 
|  | +;; are as for "type" (see below) but there are also the following | 
|  | +;; move-specific values: | 
|  | +;; | 
|  | +;; constN	move an N-constraint integer into a MIPS16 register | 
|  | +;; sll0		"sll DEST,SRC,0", which on 64-bit targets is guaranteed | 
|  | +;;		to produce a sign-extended DEST, even if SRC is not | 
|  | +;;		properly sign-extended | 
|  | +;; ext_ins	EXT, DEXT, INS or DINS instruction | 
|  | +;; andi		a single ANDI instruction | 
|  | +;; shift_shift	a shift left followed by a shift right | 
|  | +;; | 
|  | +;; This attribute is used to determine the instruction's length and | 
|  | +;; scheduling type.  For doubleword moves, the attribute always describes | 
|  | +;; the split instructions; in some cases, it is more appropriate for the | 
|  | +;; scheduling type to be "multi" instead. | 
|  | +(define_attr "move_type" | 
|  | +  "unknown,load,fpload,store,fpstore,mtc,mfc,mthilo,mfhilo,move,fmove, | 
|  | +   const,constN,signext,ext_ins,logical,arith,sll0,andi,shift_shift" | 
|  | +  (const_string "unknown")) | 
|  | + | 
|  | +(define_attr "alu_type" "unknown,add,sub,not,nor,and,or,xor" | 
|  | +  (const_string "unknown")) | 
|  | + | 
|  | +;; Main data type used by the insn | 
|  | +(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FPSW" | 
|  | +  (const_string "unknown")) | 
|  | + | 
|  | +;; True if the main data type is twice the size of a word. | 
|  | +(define_attr "dword_mode" "no,yes" | 
|  | +  (cond [(and (eq_attr "mode" "DI,DF") | 
|  | +	      (eq (symbol_ref "TARGET_64BIT") (const_int 0))) | 
|  | +	 (const_string "yes") | 
|  | + | 
|  | +	 (and (eq_attr "mode" "TI,TF") | 
|  | +	      (ne (symbol_ref "TARGET_64BIT") (const_int 0))) | 
|  | +	 (const_string "yes")] | 
|  | +	(const_string "no"))) | 
|  | + | 
|  | +;; Classification of each insn. | 
|  | +;; branch	conditional branch | 
|  | +;; jump		unconditional jump | 
|  | +;; call		unconditional call | 
|  | +;; load		load instruction(s) | 
|  | +;; fpload	floating point load | 
|  | +;; fpidxload    floating point indexed load | 
|  | +;; store	store instruction(s) | 
|  | +;; fpstore	floating point store | 
|  | +;; fpidxstore	floating point indexed store | 
|  | +;; prefetch	memory prefetch (register + offset) | 
|  | +;; prefetchx	memory indexed prefetch (register + register) | 
|  | +;; condmove	conditional moves | 
|  | +;; mtc		transfer to coprocessor | 
|  | +;; mfc		transfer from coprocessor | 
|  | +;; mthilo	transfer to hi/lo registers | 
|  | +;; mfhilo	transfer from hi/lo registers | 
|  | +;; const	load constant | 
|  | +;; arith	integer arithmetic instructions | 
|  | +;; logical      integer logical instructions | 
|  | +;; shift	integer shift instructions | 
|  | +;; slt		set less than instructions | 
|  | +;; signext      sign extend instructions | 
|  | +;; clz		the clz and clo instructions | 
|  | +;; pop		the pop instruction | 
|  | +;; trap		trap if instructions | 
|  | +;; imul		integer multiply 2 operands | 
|  | +;; imul3	integer multiply 3 operands | 
|  | +;; imul3nc	integer multiply 3 operands without clobbering HI/LO | 
|  | +;; imadd	integer multiply-add | 
|  | +;; idiv		integer divide 2 operands | 
|  | +;; idiv3	integer divide 3 operands | 
|  | +;; move		integer register move (addi rd, rs1, 0) | 
|  | +;; fmove	floating point register move | 
|  | +;; fadd		floating point add/subtract | 
|  | +;; fmul		floating point multiply | 
|  | +;; fmadd	floating point multiply-add | 
|  | +;; fdiv		floating point divide | 
|  | +;; frdiv	floating point reciprocal divide | 
|  | +;; frdiv1	floating point reciprocal divide step 1 | 
|  | +;; frdiv2	floating point reciprocal divide step 2 | 
|  | +;; fabs		floating point absolute value | 
|  | +;; fneg		floating point negation | 
|  | +;; fcmp		floating point compare | 
|  | +;; fcvt		floating point convert | 
|  | +;; fsqrt	floating point square root | 
|  | +;; frsqrt       floating point reciprocal square root | 
|  | +;; frsqrt1      floating point reciprocal square root step1 | 
|  | +;; frsqrt2      floating point reciprocal square root step2 | 
|  | +;; multi	multiword sequence (or user asm statements) | 
|  | +;; nop		no operation | 
|  | +;; ghost	an instruction that produces no real code | 
|  | +(define_attr "type" | 
|  | +  "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore, | 
|  | +   prefetch,prefetchx,condmove,mtc,mfc,mthilo,mfhilo,const,arith,logical, | 
|  | +   shift,slt,signext,clz,pop,trap,imul,imul3,imul3nc,imadd,idiv,idiv3,move, | 
|  | +   fmove,fadd,fmul,fmadd,fdiv,frdiv,frdiv1,frdiv2,fabs,fneg,fcmp,fcvt,fsqrt, | 
|  | +   frsqrt,frsqrt1,frsqrt2,multi,nop,ghost" | 
|  | +  (cond [(eq_attr "jal" "!unset") (const_string "call") | 
|  | +	 (eq_attr "got" "load") (const_string "load") | 
|  | + | 
|  | +	 (eq_attr "alu_type" "add,sub") (const_string "arith") | 
|  | + | 
|  | +	 (eq_attr "alu_type" "not,nor,and,or,xor") (const_string "logical") | 
|  | + | 
|  | +	 ;; If a doubleword move uses these expensive instructions, | 
|  | +	 ;; it is usually better to schedule them in the same way | 
|  | +	 ;; as the singleword form, rather than as "multi". | 
|  | +	 (eq_attr "move_type" "load") (const_string "load") | 
|  | +	 (eq_attr "move_type" "fpload") (const_string "fpload") | 
|  | +	 (eq_attr "move_type" "store") (const_string "store") | 
|  | +	 (eq_attr "move_type" "fpstore") (const_string "fpstore") | 
|  | +	 (eq_attr "move_type" "mtc") (const_string "mtc") | 
|  | +	 (eq_attr "move_type" "mfc") (const_string "mfc") | 
|  | +	 (eq_attr "move_type" "mthilo") (const_string "mthilo") | 
|  | +	 (eq_attr "move_type" "mfhilo") (const_string "mfhilo") | 
|  | + | 
|  | +	 ;; These types of move are always single insns. | 
|  | +	 (eq_attr "move_type" "fmove") (const_string "fmove") | 
|  | +	 (eq_attr "move_type" "signext") (const_string "signext") | 
|  | +	 (eq_attr "move_type" "ext_ins") (const_string "arith") | 
|  | +	 (eq_attr "move_type" "arith") (const_string "arith") | 
|  | +	 (eq_attr "move_type" "logical") (const_string "logical") | 
|  | +	 (eq_attr "move_type" "sll0") (const_string "shift") | 
|  | +	 (eq_attr "move_type" "andi") (const_string "logical") | 
|  | + | 
|  | +	 ;; These types of move are always split. | 
|  | +	 (eq_attr "move_type" "constN,shift_shift") | 
|  | +	   (const_string "multi") | 
|  | + | 
|  | +	 ;; These types of move are split for doubleword modes only. | 
|  | +	 (and (eq_attr "move_type" "move,const") | 
|  | +	      (eq_attr "dword_mode" "yes")) | 
|  | +	   (const_string "multi") | 
|  | +	 (eq_attr "move_type" "move") (const_string "move") | 
|  | +	 (eq_attr "move_type" "const") (const_string "const")] | 
|  | +	(const_string "unknown"))) | 
|  | + | 
|  | +;; Mode for conversion types (fcvt) | 
|  | +;; I2S          integer to float single (SI/DI to SF) | 
|  | +;; I2D          integer to float double (SI/DI to DF) | 
|  | +;; S2I          float to integer (SF to SI/DI) | 
|  | +;; D2I          float to integer (DF to SI/DI) | 
|  | +;; D2S          double to float single | 
|  | +;; S2D          float single to double | 
|  | + | 
|  | +(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D" | 
|  | +  (const_string "unknown")) | 
|  | + | 
|  | +;; Attributes describing a sync loop.  These loops have the form: | 
|  | +;; | 
|  | +;;       if (RELEASE_BARRIER == YES) sync | 
|  | +;;    1: OLDVAL = *MEM | 
|  | +;;       if ((OLDVAL & INCLUSIVE_MASK) != REQUIRED_OLDVAL) goto 2 | 
|  | +;;       $TMP1 = OLDVAL & EXCLUSIVE_MASK | 
|  | +;;       $TMP2 = INSN1 (OLDVAL, INSN1_OP2) | 
|  | +;;       $TMP3 = INSN2 ($TMP2, INCLUSIVE_MASK) | 
|  | +;;       $AT |= $TMP1 | $TMP3 | 
|  | +;;       if (!commit (*MEM = $AT)) goto 1. | 
|  | +;;         if (INSN1 != MOVE && INSN1 != LI) NEWVAL = $TMP3 [delay slot] | 
|  | +;;       sync | 
|  | +;;    2: | 
|  | +;; | 
|  | +;; where "$" values are temporaries and where the other values are | 
|  | +;; specified by the attributes below.  Values are specified as operand | 
|  | +;; numbers and insns are specified as enums.  If no operand number is | 
|  | +;; specified, the following values are used instead: | 
|  | +;; | 
|  | +;;    - OLDVAL: $AT | 
|  | +;;    - NEWVAL: $AT | 
|  | +;;    - INCLUSIVE_MASK: -1 | 
|  | +;;    - REQUIRED_OLDVAL: OLDVAL & INCLUSIVE_MASK | 
|  | +;;    - EXCLUSIVE_MASK: 0 | 
|  | +;; | 
|  | +;; MEM and INSN1_OP2 are required. | 
|  | +;; | 
|  | +;; Ideally, the operand attributes would be integers, with -1 meaning "none", | 
|  | +;; but the gen* programs don't yet support that. | 
|  | +(define_attr "sync_mem" "none,0,1,2,3,4,5" (const_string "none")) | 
|  | +(define_attr "sync_oldval" "none,0,1,2,3,4,5" (const_string "none")) | 
|  | +(define_attr "sync_newval" "none,0,1,2,3,4,5" (const_string "none")) | 
|  | +(define_attr "sync_inclusive_mask" "none,0,1,2,3,4,5" (const_string "none")) | 
|  | +(define_attr "sync_exclusive_mask" "none,0,1,2,3,4,5" (const_string "none")) | 
|  | +(define_attr "sync_required_oldval" "none,0,1,2,3,4,5" (const_string "none")) | 
|  | +(define_attr "sync_insn1_op2" "none,0,1,2,3,4,5" (const_string "none")) | 
|  | +(define_attr "sync_insn1" "move,li,add,addi,sub,and,andi,or,ori,xor,xori" | 
|  | +  (const_string "move")) | 
|  | +(define_attr "sync_insn2" "nop,and,xor,not" | 
|  | +  (const_string "nop")) | 
|  | +(define_attr "sync_release_barrier" "yes,no" | 
|  | +  (const_string "yes")) | 
|  | + | 
|  | +;; Length of instruction in bytes. | 
|  | +(define_attr "length" "" | 
|  | +   (cond [ | 
|  | +	  ;; Direct branch instructions have a range of [-0x1000,0xffc], | 
|  | +	  ;; relative to the address of the delay slot.  If a branch is | 
|  | +	  ;; outside this range, convert a branch like: | 
|  | +	  ;; | 
|  | +	  ;;	bne	r1,r2,target | 
|  | +	  ;; | 
|  | +	  ;; to: | 
|  | +	  ;; | 
|  | +	  ;;	beq	r1,r2,1f | 
|  | +	  ;;  j target | 
|  | +	  ;; 1: | 
|  | +	  ;; | 
|  | +	  (eq_attr "type" "branch") | 
|  | +	  (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088)) | 
|  | +				  (le (minus (pc) (match_dup 0)) (const_int 4092))) | 
|  | +	  (const_int 4) | 
|  | +	  (const_int 8)) | 
|  | + | 
|  | +	  ;; "Ghost" instructions occupy no space. | 
|  | +	  (eq_attr "type" "ghost") | 
|  | +	  (const_int 0) | 
|  | + | 
|  | +	  (eq_attr "got" "load") (const_int 8) | 
|  | + | 
|  | +	  ;; SHIFT_SHIFTs are decomposed into two separate instructions. | 
|  | +	  ;; They are extended instructions on MIPS16 targets. | 
|  | +	  (eq_attr "move_type" "shift_shift") | 
|  | +		(const_int 8) | 
|  | + | 
|  | +	  ;; Check for doubleword moves that are decomposed into two | 
|  | +	  ;; instructions. | 
|  | +	  (and (eq_attr "move_type" "mtc,mfc,mthilo,mfhilo,move") | 
|  | +	       (eq_attr "dword_mode" "yes")) | 
|  | +	  (const_int 8) | 
|  | + | 
|  | +	  ;; Doubleword CONST{,N} moves are split into two word | 
|  | +	  ;; CONST{,N} moves. | 
|  | +	  (and (eq_attr "move_type" "const,constN") | 
|  | +	       (eq_attr "dword_mode" "yes")) | 
|  | +	  (symbol_ref "mips_split_const_insns (operands[1]) * 4") | 
|  | + | 
|  | +	  ;; Otherwise, constants, loads and stores are handled by external | 
|  | +	  ;; routines. | 
|  | +	  (eq_attr "move_type" "const,constN") | 
|  | +	  (symbol_ref "mips_const_insns (operands[1]) * 4") | 
|  | +	  (eq_attr "move_type" "load,fpload") | 
|  | +	  (symbol_ref "mips_load_store_insns (operands[1], insn) * 4") | 
|  | +	  (eq_attr "move_type" "store,fpstore") | 
|  | +	  (symbol_ref "mips_load_store_insns (operands[0], insn) * 4") | 
|  | +	  ] (const_int 4))) | 
|  | + | 
|  | +;; Attribute describing the processor. | 
|  | +(define_enum_attr "cpu" "processor" | 
|  | +  (const (symbol_ref "mips_tune"))) | 
|  | + | 
|  | +;; Describe a user's asm statement. | 
|  | +(define_asm_attributes | 
|  | +  [(set_attr "type" "multi")]) | 
|  | + | 
|  | +;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated | 
|  | +;; from the same template. | 
|  | +(define_mode_iterator GPR [SI (DI "TARGET_64BIT")]) | 
|  | + | 
|  | +;; A copy of GPR that can be used when a pattern has two independent | 
|  | +;; modes. | 
|  | +(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")]) | 
|  | + | 
|  | +;; This mode iterator allows :P to be used for patterns that operate on | 
|  | +;; pointer-sized quantities.  Exactly one of the two alternatives will match. | 
|  | +(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")]) | 
|  | + | 
|  | +;; 32-bit integer moves for which we provide move patterns. | 
|  | +(define_mode_iterator IMOVE32 [SI]) | 
|  | + | 
|  | +;; 64-bit modes for which we provide move patterns. | 
|  | +(define_mode_iterator MOVE64 [DI DF]) | 
|  | + | 
|  | +;; 128-bit modes for which we provide move patterns on 64-bit targets. | 
|  | +(define_mode_iterator MOVE128 [TI TF]) | 
|  | + | 
|  | +;; This mode iterator allows the QI and HI extension patterns to be | 
|  | +;; defined from the same template. | 
|  | +(define_mode_iterator SHORT [QI HI]) | 
|  | + | 
|  | +;; Likewise the 64-bit truncate-and-shift patterns. | 
|  | +(define_mode_iterator SUBDI [QI HI SI]) | 
|  | +(define_mode_iterator HISI [HI SI]) | 
|  | + | 
|  | +;; This mode iterator allows :ANYF to be used wherever a scalar or vector | 
|  | +;; floating-point mode is allowed. | 
|  | +(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT") | 
|  | +			    (DF "TARGET_HARD_FLOAT")]) | 
|  | + | 
|  | +;; Like ANYF, but only applies to scalar modes. | 
|  | +(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT") | 
|  | +			       (DF "TARGET_HARD_FLOAT")]) | 
|  | + | 
|  | +;; A floating-point mode for which moves involving FPRs may need to be split. | 
|  | +(define_mode_iterator SPLITF | 
|  | +  [(DF "!TARGET_64BIT") | 
|  | +   (DI "!TARGET_64BIT") | 
|  | +   (TF "TARGET_64BIT")]) | 
|  | + | 
|  | +;; This attribute gives the length suffix for a sign- or zero-extension | 
|  | +;; instruction. | 
|  | +(define_mode_attr size [(QI "b") (HI "h")]) | 
|  | + | 
|  | +;; This attribute gives the mode mask of a SHORT. | 
|  | +(define_mode_attr mask [(QI "0x00ff") (HI "0xffff")]) | 
|  | + | 
|  | +;; Mode attributes for GPR loads. | 
|  | +(define_mode_attr load [(SI "lw") (DI "ld")]) | 
|  | +;; Instruction names for stores. | 
|  | +(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd")]) | 
|  | + | 
|  | +;; This attribute gives the best constraint to use for registers of | 
|  | +;; a given mode. | 
|  | +(define_mode_attr reg [(SI "d") (DI "d") (CC "z")]) | 
|  | + | 
|  | +;; This attribute gives the format suffix for floating-point operations. | 
|  | +(define_mode_attr fmt [(SF "s") (DF "d") (V2SF "ps")]) | 
|  | + | 
|  | +;; This attribute gives the format suffix for atomic memory operations. | 
|  | +(define_mode_attr amo [(SI "w") (DI "d")]) | 
|  | + | 
|  | +;; This attribute gives the upper-case mode name for one unit of a | 
|  | +;; floating-point mode. | 
|  | +(define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF")]) | 
|  | + | 
|  | +;; This attribute gives the integer mode that has the same size as a | 
|  | +;; fixed-point mode. | 
|  | +(define_mode_attr IMODE [(QQ "QI") (HQ "HI") (SQ "SI") (DQ "DI") | 
|  | +			 (UQQ "QI") (UHQ "HI") (USQ "SI") (UDQ "DI") | 
|  | +			 (HA "HI") (SA "SI") (DA "DI") | 
|  | +			 (UHA "HI") (USA "SI") (UDA "DI") | 
|  | +			 (V4UQQ "SI") (V2UHQ "SI") (V2UHA "SI") | 
|  | +			 (V2HQ "SI") (V2HA "SI")]) | 
|  | + | 
|  | +;; This attribute gives the integer mode that has half the size of | 
|  | +;; the controlling mode. | 
|  | +(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (V2SF "SI") | 
|  | +			    (V2SI "SI") (V4HI "SI") (V8QI "SI") | 
|  | +			    (TF "DI")]) | 
|  | + | 
|  | +;; This code iterator allows signed and unsigned widening multiplications | 
|  | +;; to use the same template. | 
|  | +(define_code_iterator any_extend [sign_extend zero_extend]) | 
|  | + | 
|  | +;; This code iterator allows the two right shift instructions to be | 
|  | +;; generated from the same template. | 
|  | +(define_code_iterator any_shiftrt [ashiftrt lshiftrt]) | 
|  | + | 
|  | +;; This code iterator allows the three shift instructions to be generated | 
|  | +;; from the same template. | 
|  | +(define_code_iterator any_shift [ashift ashiftrt lshiftrt]) | 
|  | + | 
|  | +;; This code iterator allows unsigned and signed division to be generated | 
|  | +;; from the same template. | 
|  | +(define_code_iterator any_div [div udiv]) | 
|  | + | 
|  | +;; This code iterator allows unsigned and signed modulus to be generated | 
|  | +;; from the same template. | 
|  | +(define_code_iterator any_mod [mod umod]) | 
|  | + | 
|  | +;; Equality operators. | 
|  | +(define_code_iterator equality_op [eq ne]) | 
|  | + | 
|  | +;; These code iterators allow the signed and unsigned scc operations to use | 
|  | +;; the same template. | 
|  | +(define_code_iterator any_gt [gt gtu]) | 
|  | +(define_code_iterator any_ge [ge geu]) | 
|  | +(define_code_iterator any_lt [lt ltu]) | 
|  | +(define_code_iterator any_le [le leu]) | 
|  | + | 
|  | +;; <u> expands to an empty string when doing a signed operation and | 
|  | +;; "u" when doing an unsigned operation. | 
|  | +(define_code_attr u [(sign_extend "") (zero_extend "u") | 
|  | +		     (div "") (udiv "u") | 
|  | +		     (mod "") (umod "u") | 
|  | +		     (gt "") (gtu "u") | 
|  | +		     (ge "") (geu "u") | 
|  | +		     (lt "") (ltu "u") | 
|  | +		     (le "") (leu "u")]) | 
|  | + | 
|  | +;; <su> is like <u>, but the signed form expands to "s" rather than "". | 
|  | +(define_code_attr su [(sign_extend "s") (zero_extend "u")]) | 
|  | + | 
|  | +;; <optab> expands to the name of the optab for a particular code. | 
|  | +(define_code_attr optab [(ashift "ashl") | 
|  | +			 (ashiftrt "ashr") | 
|  | +			 (lshiftrt "lshr") | 
|  | +			 (ior "ior") | 
|  | +			 (xor "xor") | 
|  | +			 (and "and") | 
|  | +			 (plus "add") | 
|  | +			 (minus "sub")]) | 
|  | + | 
|  | +;; <insn> expands to the name of the insn that implements a particular code. | 
|  | +(define_code_attr insn [(ashift "sll") | 
|  | +			(ashiftrt "sra") | 
|  | +			(lshiftrt "srl") | 
|  | +			(ior "or") | 
|  | +			(xor "xor") | 
|  | +			(and "and") | 
|  | +			(plus "add") | 
|  | +			(minus "sub")]) | 
|  | + | 
|  | +;; The value of the bit when the branch is taken for branch_bit patterns. | 
|  | +;; Comparison is always against zero so this depends on the operator. | 
|  | +(define_code_attr bbv [(eq "0") (ne "1")]) | 
|  | + | 
|  | +;; This is the inverse value of bbv. | 
|  | +(define_code_attr bbinv [(eq "1") (ne "0")]) | 
|  | + | 
|  | +;; Pipeline descriptions. | 
|  | +;; | 
|  | +;; generic.md provides a fallback for processors without a specific | 
|  | +;; pipeline description.  It is derived from the old define_function_unit | 
|  | +;; version and uses the "alu" and "imuldiv" units declared below. | 
|  | +;; | 
|  | +;; Some of the processor-specific files are also derived from old | 
|  | +;; define_function_unit descriptions and simply override the parts of | 
|  | +;; generic.md that don't apply.  The other processor-specific files | 
|  | +;; are self-contained. | 
|  | +(define_automaton "alu,imuldiv") | 
|  | + | 
|  | +(define_cpu_unit "alu" "alu") | 
|  | +(define_cpu_unit "imuldiv" "imuldiv") | 
|  | + | 
|  | +;; Ghost instructions produce no real code and introduce no hazards. | 
|  | +;; They exist purely to express an effect on dataflow. | 
|  | +(define_insn_reservation "ghost" 0 | 
|  | +  (eq_attr "type" "ghost") | 
|  | +  "nothing") | 
|  | + | 
|  | +(include "generic.md") | 
|  | + | 
|  | +;;------------------------------------------------------------------------ | 
|  | +;; RISC-V vector mode iterators and attributes | 
|  | +;;------------------------------------------------------------------------ | 
|  | +;; cbatten - Ideally we would define this using syscfg.h so that it is | 
|  | +;; easy to change the maximum vector length. Since this file doesn't go | 
|  | +;; through the preprocessor, we would need to have a separate .md file | 
|  | +;; included from here which we preprocess explicitly in our t-riscv make | 
|  | +;; fragment. For now we just hard code the mode types. | 
|  | + | 
|  | +;; Basic set of vector modes and attributes | 
|  | + | 
|  | +(define_mode_iterator VEC | 
|  | +  [V32DI V32SI V32HI V32QI V32DF V32SF]) | 
|  | + | 
|  | +(define_mode_iterator IVEC | 
|  | +  [V32DI V32SI V32HI V32QI]) | 
|  | + | 
|  | +(define_mode_attr innermode | 
|  | +  [(V32DI "DI") (V32SI "SI") (V32HI "HI") (V32QI "QI") (V32DF "DF") (V32SF "SF")]) | 
|  | + | 
|  | +(define_mode_attr vmode | 
|  | +  [(V32DI "vdi") (V32SI "vsi") (V32HI "vhi") (V32QI "vqi") (V32DF "vdf") (V32SF "vsf")]) | 
|  | + | 
|  | +(define_mode_attr vec_mem_prefix | 
|  | +  [(V32DI "") (V32SI "") (V32HI "") (V32QI "") (V32DF "f") (V32SF "f")]) | 
|  | + | 
|  | +(define_mode_attr vec_mem_suffix | 
|  | +  [(V32DI "d") (V32SI "w") (V32HI "h") (V32QI "b") (V32DF "d") (V32SF "w")]) | 
|  | + | 
|  | +(define_mode_attr vec_umem_suffix | 
|  | +  [(V32DI "d") (V32SI "wu") (V32HI "hu") (V32QI "bu") (V32DF "d") (V32SF "w")]) | 
|  | + | 
|  | +(define_mode_attr vec_uarith_suffix | 
|  | +  [(V32DI "u") (V32SI "u") (V32HI "u") (V32QI "u") (V32DF ".d") (V32SF ".s")]) | 
|  | + | 
|  | +(define_mode_attr vec_arith_suffix | 
|  | +  [(V32DI "") (V32SI "") (V32HI "") (V32QI "") (V32DF ".d") (V32SF ".s")]) | 
|  | + | 
|  | +;;------------------------------------------------------------------------ | 
|  | +;; RISC-V vector move patterns | 
|  | +;;------------------------------------------------------------------------ | 
|  | +;; cbatten - This pattern is required to support vector modes and | 
|  | +;; registers. It basically tells gcc how to "reload" vector modes into | 
|  | +;; and out of vector registers. According to gcc internals we need to be | 
|  | +;; careful in terms of how we write this pattern - specifically I don't | 
|  | +;; think you can use multiple patterns via define_split or mutually | 
|  | +;; exclusive predicates. There needs to be a single pattern for the | 
|  | +;; reload. This complicates things a bit, because what I really want to | 
|  | +;; do is a split to break base-offset addressing into two separate | 
|  | +;; pieces of RTL (the address calculation and the actual load). This | 
|  | +;; would allow gcc more flexibility in how to schedule and register | 
|  | +;; allocate the address calculation. Unfortunately, I couldn't get this | 
|  | +;; to work. I ended up having to hardcode the scheduling and use of the | 
|  | +;; $at register for the address calculation result (see | 
|  | +;; mips_riscv_output_vector_move). Works for now but maybe not the best | 
|  | +;; approach. My attempt at using a split is commented out below as an | 
|  | +;; example for future work. | 
|  | + | 
|  | +(define_expand "mov<mode>" | 
|  | +  [(set (match_operand:VEC 0 "") | 
|  | +        (match_operand:VEC 1 ""))] | 
|  | +  "" | 
|  | +{ | 
|  | +  if (mips_legitimize_vector_move (<MODE>mode, operands[0], operands[1])) | 
|  | +    DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "*mov<mode>_internal" | 
|  | +  [(set (match_operand:VEC 0 "nonimmediate_operand" "=A,A,YR,=B,B,YR") | 
|  | +        (match_operand:VEC 1 "nonimmediate_operand" "A,YR,A,B,YR,B"))] | 
|  | +  "" | 
|  | +{ | 
|  | +  return mips_riscv_output_vector_move( <MODE>mode, | 
|  | +                                        operands[0], operands[1] ); | 
|  | +}) | 
|  | + | 
|  | +;;------------------------------------------------------------------------ | 
|  | +;; RISC-V unit-stride vector load/store builtins | 
|  | +;;------------------------------------------------------------------------ | 
|  | +;; cbatten - The signed/unsigned load variants are actually quite tricky | 
|  | +;; to get right since normally gcc just always uses unsigned loads and | 
|  | +;; then has separate sign extension patterns which can be used when | 
|  | +;; necessary. But with VT we won't know whether sign extension is needed | 
|  | +;; until we are inside the vector-fetched code (and by then it is too | 
|  | +;; late to use a sign-extending vector load), so for now we always use | 
|  | +;; unsigned types. Then in the VP code you can explicitly cast to a | 
|  | +;; signed type if you need to. Not as efficient, but it will do for now. | 
|  | +;; Also note that we include an unspec in these patterns. This is to | 
|  | +;; make sure that these patterns don't match the normal reload mov<mode> | 
|  | +;; pattern above. I'm not positive whether or not this is necessary, but | 
|  | +;; from the gcc mailing lists it seems like it might be important. | 
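|  | +;; | 
|  | +;; Purely as a hypothetical sketch of the cast mentioned above (the | 
|  | +;; variable names are made up), the VP code would do something like: | 
|  | +;; | 
|  | +;;   unsigned int tmp = src[i];  /* element produced by the unsigned load */ | 
|  | +;;   int val = (int) tmp;        /* recover signed semantics explicitly */ | 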
|  | + | 
|  | +(define_insn "mips_riscv_vload_<VEC:vmode>" | 
|  | +  [(set (match_operand:VEC 0 "register_operand" "=A,=B") | 
|  | +        (unspec:VEC [(mem:VEC | 
|  | +                       (match_operand:DI 1 "pmode_register_operand" "b,b"))] | 
|  | +                    UNSPEC_RISCV_VLOAD))] | 
|  | +  "" | 
|  | +  "v<vec_mem_prefix>l<vec_mem_suffix>\t%0,%1") | 
|  | + | 
|  | +(define_insn "mips_riscv_vstore_<VEC:vmode>" | 
|  | +  [(set (mem:VEC (match_operand:DI 1 "pmode_register_operand" "b,b")) | 
|  | +        (unspec:VEC [(match_operand:VEC 0 "register_operand" "A,B")] | 
|  | +                    UNSPEC_RISCV_VSTORE))] | 
|  | +  "" | 
|  | +  "v<vec_mem_prefix>s<vec_mem_suffix>\t%0,%1") | 
|  | + | 
|  | +;;------------------------------------------------------------------------ | 
|  | +;; RISC-V strided vector load/store builtins | 
|  | +;;------------------------------------------------------------------------ | 
|  | +;; cbatten - Since we use a standard scalar register as the base | 
|  | +;; register, gcc will take care of generating any extra address | 
|  | +;; arithmetic itself. | 
|  | + | 
|  | +(define_insn "mips_riscv_vload_strided_<VEC:vmode>" | 
|  | +  [(set (match_operand:VEC 0 "register_operand" "=A,=B") | 
|  | +        (unspec:VEC [(mem:BLK (scratch)) | 
|  | +                     (match_operand:DI 1 "pmode_register_operand" "b,b") | 
|  | +                     (match_operand:DI 2 "register_operand" "r,r")] | 
|  | +                    UNSPEC_RISCV_VLOAD_STRIDED))] | 
|  | +  "" | 
|  | +  "v<vec_mem_prefix>lst<vec_mem_suffix>\t%0,%1,%2") | 
|  | + | 
|  | +(define_insn "mips_riscv_vstore_strided_<VEC:vmode>" | 
|  | +  [(set (mem:BLK (scratch)) | 
|  | +        (unspec:BLK [(match_operand:VEC 0 "register_operand" "=A,=B") | 
|  | +                     (match_operand:DI 1 "pmode_register_operand" "b,b") | 
|  | +                     (match_operand:DI 2 "register_operand" "r,r")] | 
|  | +                     UNSPEC_RISCV_VSTORE_STRIDED))] | 
|  | +  "" | 
|  | +  "v<vec_mem_prefix>sst<vec_mem_suffix>\t%0,%1,%2") | 
|  | + | 
|  | +;;------------------------------------------------------------------------ | 
|  | +;; RISC-V stop instruction | 
|  | +;;------------------------------------------------------------------------ | 
|  | +;; This is explicitly generated for functions with the utfunc attribute. | 
|  | +;; We do this from within the mips_expand_epilogue() function. I | 
|  | +;; needed some operand for this so I just used a constant. | 
|  | + | 
|  | +(define_insn "riscv_stop" | 
|  | +  [(unspec_volatile:VOID [(const_int 0)] UNSPEC_RISCV_STOP)] | 
|  | +  "" | 
|  | +  "stop") | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	ADDITION | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | + | 
|  | +(define_insn "add<mode>3" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +	(plus:ANYF (match_operand:ANYF 1 "register_operand" "f") | 
|  | +		   (match_operand:ANYF 2 "register_operand" "f")))] | 
|  | +  "" | 
|  | +  "fadd.<fmt>\t%0,%1,%2" | 
|  | +  [(set_attr "type" "fadd") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +(define_expand "add<mode>3" | 
|  | +  [(set (match_operand:GPR 0 "register_operand") | 
|  | +	(plus:GPR (match_operand:GPR 1 "register_operand") | 
|  | +		  (match_operand:GPR 2 "arith_operand")))] | 
|  | +  "") | 
|  | + | 
|  | +(define_insn "*addsi3" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d,d") | 
|  | +	(plus:SI (match_operand:GPR 1 "register_operand" "d,d") | 
|  | +		  (match_operand:GPR2 2 "arith_operand" "d,Q")))] | 
|  | +  "" | 
|  | +  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; } | 
|  | +  [(set_attr "type" "arith") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "*adddi3" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d,d") | 
|  | +	(plus:DI (match_operand:DI 1 "register_operand" "d,d") | 
|  | +		  (match_operand:DI 2 "arith_operand" "d,Q")))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "add\t%0,%1,%2" | 
|  | +  [(set_attr "type" "arith") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +(define_insn "*addsi3_extended" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d,d") | 
|  | +	(sign_extend:DI | 
|  | +	     (plus:SI (match_operand:SI 1 "register_operand" "d,d") | 
|  | +		      (match_operand:SI 2 "arith_operand" "d,Q"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "addw\t%0,%1,%2" | 
|  | +  [(set_attr "type" "arith") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "*adddisi3" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d,d") | 
|  | +	     (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "d,d")) | 
|  | +		      (truncate:SI (match_operand:DI 2 "arith_operand" "d,Q"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "addw\t%0,%1,%2" | 
|  | +  [(set_attr "type" "arith") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "*adddi3_truncsi" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d,d") | 
|  | +          (truncate:SI | 
|  | +	     (plus:DI (match_operand:DI 1 "register_operand" "d,d") | 
|  | +		      (match_operand:DI 2 "arith_operand" "d,Q"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "addw\t%0,%1,%2" | 
|  | +  [(set_attr "type" "arith") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +;; HImode constant generation; see mips_move_integer for details. | 
|  | +;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION. | 
|  | + | 
|  | +(define_insn "add<mode>hi3" | 
|  | +  [(set (match_operand:HI 0 "register_operand" "=d,d") | 
|  | +	(plus:HI (match_operand:HISI 1 "register_operand" "d,d") | 
|  | +		  (match_operand:HISI 2 "arith_operand" "d,Q")))] | 
|  | +  "" | 
|  | +  { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; } | 
|  | +  [(set_attr "type" "arith") | 
|  | +   (set_attr "mode" "HI")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	SUBTRACTION | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | + | 
|  | +(define_insn "sub<mode>3" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +	(minus:ANYF (match_operand:ANYF 1 "register_operand" "f") | 
|  | +		    (match_operand:ANYF 2 "register_operand" "f")))] | 
|  | +  "" | 
|  | +  "fsub.<fmt>\t%0,%1,%2" | 
|  | +  [(set_attr "type" "fadd") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +(define_insn "subdi3" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(minus:DI (match_operand:DI 1 "register_operand" "d") | 
|  | +		   (match_operand:DI 2 "register_operand" "d")))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "sub\t%0,%1,%2" | 
|  | +  [(set_attr "type" "arith") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +(define_insn "subsi3" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(minus:SI (match_operand:SI 1 "register_operand" "d") | 
|  | +		   (match_operand:SI 2 "register_operand" "d")))] | 
|  | +  "" | 
|  | +  { return TARGET_64BIT ? "subw\t%0,%1,%2" : "sub\t%0,%1,%2"; } | 
|  | +  [(set_attr "type" "arith") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "*subsi3_extended" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(sign_extend:DI | 
|  | +	    (minus:SI (match_operand:SI 1 "register_operand" "d") | 
|  | +		      (match_operand:SI 2 "register_operand" "d"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "subw\t%0,%1,%2" | 
|  | +  [(set_attr "type" "arith") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +(define_insn "*subdisi3" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d,d") | 
|  | +	     (minus:SI (truncate:SI (match_operand:DI 1 "register_operand" "d,d")) | 
|  | +		      (truncate:SI (match_operand:DI 2 "arith_operand" "d,Q"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "subw\t%0,%1,%2" | 
|  | +  [(set_attr "type" "arith") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "*subdi3_truncsi" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d,d") | 
|  | +          (truncate:SI | 
|  | +	     (minus:DI (match_operand:DI 1 "register_operand" "d,d") | 
|  | +		      (match_operand:DI 2 "arith_operand" "d,Q"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "subw\t%0,%1,%2" | 
|  | +  [(set_attr "type" "arith") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	MULTIPLICATION | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | + | 
|  | +(define_insn "mul<mode>3" | 
|  | +  [(set (match_operand:SCALARF 0 "register_operand" "=f") | 
|  | +	(mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f") | 
|  | +		      (match_operand:SCALARF 2 "register_operand" "f")))] | 
|  | +  "" | 
|  | +  "fmul.<fmt>\t%0,%1,%2" | 
|  | +  [(set_attr "type" "fmul") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +(define_insn "mulsi3" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(mult:SI (match_operand:SI 1 "register_operand" "d") | 
|  | +		  (match_operand:SI 2 "register_operand" "d")))] | 
|  | +  "" | 
|  | +  { return TARGET_64BIT ? "mulw\t%0,%1,%2" : "mul\t%0,%1,%2"; } | 
|  | +  [(set_attr "type" "imul3") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "*muldisi3" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	     (mult:SI (truncate:SI (match_operand:DI 1 "register_operand" "d")) | 
|  | +		      (truncate:SI (match_operand:DI 2 "register_operand" "d"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "mulw\t%0,%1,%2" | 
|  | +  [(set_attr "type" "imul3") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "*muldi3_truncsi" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +          (truncate:SI | 
|  | +	     (mult:DI (match_operand:DI 1 "register_operand" "d") | 
|  | +		      (match_operand:DI 2 "register_operand" "d"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "mulw\t%0,%1,%2" | 
|  | +  [(set_attr "type" "imul3") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "muldi3" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(mult:DI (match_operand:DI 1 "register_operand" "d") | 
|  | +		  (match_operand:DI 2 "register_operand" "d")))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "mul\t%0,%1,%2" | 
|  | +  [(set_attr "type" "imul3") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +;; | 
|  | +;;  ........................ | 
|  | +;; | 
|  | +;;	MULTIPLICATION HIGH-PART | 
|  | +;; | 
|  | +;;  ........................ | 
|  | +;; | 
|  | + | 
|  | + | 
|  | +;; Using a clobber here is crude, but a cleaner scheme is not obvious. | 
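|  | +;; (Editorial sketch, not from the original source: after reload the split | 
|  | +;; below computes the low half of the 128-bit product with mul into the | 
|  | +;; scratch register and the high half with mulh/mulhu; the scratch is then | 
|  | +;; copied into the low word of the destination, roughly: | 
|  | +;;      mul   t0, a0, a1 | 
|  | +;;      mulh  a3, a0, a1   # high word of the TImode result | 
|  | +;;      mv    a2, t0       # low word of the TImode result | 
|  | +;; Register names are illustrative only.) | 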
|  | +(define_insn_and_split "<u>mulditi3" | 
|  | +  [(set (match_operand:TI 0 "register_operand" "=d") | 
|  | +	(mult:TI (any_extend:TI | 
|  | +		   (match_operand:DI 1 "register_operand" "d")) | 
|  | +		 (any_extend:TI | 
|  | +		   (match_operand:DI 2 "register_operand" "d")))) | 
|  | +  (clobber (match_scratch:DI 3 "=d"))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "#" | 
|  | +  "reload_completed" | 
|  | +  [ | 
|  | +   (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2))) | 
|  | +   (set (match_dup 4) (truncate:DI | 
|  | +			(lshiftrt:TI | 
|  | +			  (mult:TI (any_extend:TI (match_dup 1)) | 
|  | +				   (any_extend:TI (match_dup 2))) | 
|  | +			  (const_int 64)))) | 
|  | +   (set (match_dup 5) (match_dup 3)) | 
|  | +  ] | 
|  | +{ | 
|  | +  operands[4] = mips_subword (operands[0], true); | 
|  | +  operands[5] = mips_subword (operands[0], false); | 
|  | +} | 
|  | +  ) | 
|  | + | 
|  | +(define_insn "<u>muldi3_highpart" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(truncate:DI | 
|  | +	  (lshiftrt:TI | 
|  | +	    (mult:TI (any_extend:TI | 
|  | +		       (match_operand:DI 1 "register_operand" "d")) | 
|  | +		     (any_extend:TI | 
|  | +		       (match_operand:DI 2 "register_operand" "d"))) | 
|  | +	    (const_int 64))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "mulh<u>\t%0,%1,%2" | 
|  | +  [(set_attr "type" "imul3") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | + | 
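|  | +;; (Editorial note: mulhsu treats its first source register as signed and | 
|  | +;; its second as unsigned, whereas the RTL below zero-extends operand 1 and | 
|  | +;; sign-extends operand 2, so the asm templates emit the operands in | 
|  | +;; swapped order, "mulhsu\t%0,%2,%1".) | 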
|  | +(define_insn_and_split "usmulditi3" | 
|  | +  [(set (match_operand:TI 0 "register_operand" "=d") | 
|  | +	(mult:TI (zero_extend:TI | 
|  | +		   (match_operand:DI 1 "register_operand" "d")) | 
|  | +		 (sign_extend:TI | 
|  | +		   (match_operand:DI 2 "register_operand" "d")))) | 
|  | +  (clobber (match_scratch:DI 3 "=d"))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "#" | 
|  | +  "reload_completed" | 
|  | +  [ | 
|  | +   (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2))) | 
|  | +   (set (match_dup 4) (truncate:DI | 
|  | +			(lshiftrt:TI | 
|  | +			  (mult:TI (zero_extend:TI (match_dup 1)) | 
|  | +				   (sign_extend:TI (match_dup 2))) | 
|  | +			  (const_int 64)))) | 
|  | +   (set (match_dup 5) (match_dup 3)) | 
|  | +  ] | 
|  | +{ | 
|  | +  operands[4] = mips_subword (operands[0], true); | 
|  | +  operands[5] = mips_subword (operands[0], false); | 
|  | +} | 
|  | +  ) | 
|  | + | 
|  | +(define_insn "usmuldi3_highpart" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(truncate:DI | 
|  | +	  (lshiftrt:TI | 
|  | +	    (mult:TI (zero_extend:TI | 
|  | +		       (match_operand:DI 1 "register_operand" "d")) | 
|  | +		     (sign_extend:TI | 
|  | +		       (match_operand:DI 2 "register_operand" "d"))) | 
|  | +	    (const_int 64))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "mulhsu\t%0,%2,%1" | 
|  | +  [(set_attr "type" "imul3") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +(define_insn_and_split "<u>mulsidi3" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(mult:DI (any_extend:DI | 
|  | +		   (match_operand:SI 1 "register_operand" "d")) | 
|  | +		 (any_extend:DI | 
|  | +		   (match_operand:SI 2 "register_operand" "d")))) | 
|  | +  (clobber (match_scratch:SI 3 "=d"))] | 
|  | +  "!TARGET_64BIT" | 
|  | +  "#" | 
|  | +  "reload_completed" | 
|  | +  [ | 
|  | +   (set (match_dup 3) (mult:SI (match_dup 1) (match_dup 2))) | 
|  | +   (set (match_dup 4) (truncate:SI | 
|  | +			(lshiftrt:DI | 
|  | +			  (mult:DI (any_extend:DI (match_dup 1)) | 
|  | +				   (any_extend:DI (match_dup 2))) | 
|  | +			  (const_int 32)))) | 
|  | +   (set (match_dup 5) (match_dup 3)) | 
|  | +  ] | 
|  | +{ | 
|  | +  operands[4] = mips_subword (operands[0], true); | 
|  | +  operands[5] = mips_subword (operands[0], false); | 
|  | +} | 
|  | +  ) | 
|  | + | 
|  | +(define_insn "<u>mulsi3_highpart" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(truncate:SI | 
|  | +	  (lshiftrt:DI | 
|  | +	    (mult:DI (any_extend:DI | 
|  | +		       (match_operand:SI 1 "register_operand" "d")) | 
|  | +		     (any_extend:DI | 
|  | +		       (match_operand:SI 2 "register_operand" "d"))) | 
|  | +	    (const_int 32))))] | 
|  | +  "!TARGET_64BIT" | 
|  | +  "mulh<u>\t%0,%1,%2" | 
|  | +  [(set_attr "type" "imul3") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | + | 
|  | +(define_insn_and_split "usmulsidi3" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(mult:DI (zero_extend:DI | 
|  | +		   (match_operand:SI 1 "register_operand" "d")) | 
|  | +		 (sign_extend:DI | 
|  | +		   (match_operand:SI 2 "register_operand" "d")))) | 
|  | +  (clobber (match_scratch:SI 3 "=d"))] | 
|  | +  "!TARGET_64BIT" | 
|  | +  "#" | 
|  | +  "reload_completed" | 
|  | +  [ | 
|  | +   (set (match_dup 3) (mult:SI (match_dup 1) (match_dup 2))) | 
|  | +   (set (match_dup 4) (truncate:SI | 
|  | +			(lshiftrt:DI | 
|  | +			  (mult:DI (zero_extend:DI (match_dup 1)) | 
|  | +				   (sign_extend:DI (match_dup 2))) | 
|  | +			  (const_int 32)))) | 
|  | +   (set (match_dup 5) (match_dup 3)) | 
|  | +  ] | 
|  | +{ | 
|  | +  operands[4] = mips_subword (operands[0], true); | 
|  | +  operands[5] = mips_subword (operands[0], false); | 
|  | +} | 
|  | +  ) | 
|  | + | 
|  | +(define_insn "usmulsi3_highpart" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(truncate:SI | 
|  | +	  (lshiftrt:DI | 
|  | +	    (mult:DI (zero_extend:DI | 
|  | +		       (match_operand:SI 1 "register_operand" "d")) | 
|  | +		     (sign_extend:DI | 
|  | +		       (match_operand:SI 2 "register_operand" "d"))) | 
|  | +	    (const_int 32))))] | 
|  | +  "!TARGET_64BIT" | 
|  | +  "mulhsu\t%0,%2,%1" | 
|  | +  [(set_attr "type" "imul3") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	DIVISION and REMAINDER | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | + | 
|  | +(define_insn "<u>divsi3" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(any_div:SI (match_operand:SI 1 "register_operand" "d") | 
|  | +		  (match_operand:SI 2 "register_operand" "d")))] | 
|  | +  "" | 
|  | +  { return TARGET_64BIT ? "div<u>w\t%0,%1,%2" : "div<u>\t%0,%1,%2"; } | 
|  | +  [(set_attr "type" "idiv3") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "<u>divdi3" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(any_div:DI (match_operand:DI 1 "register_operand" "d") | 
|  | +		  (match_operand:DI 2 "register_operand" "d")))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "div<u>\t%0,%1,%2" | 
|  | +  [(set_attr "type" "idiv3") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +(define_insn "<u>modsi3" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(any_mod:SI (match_operand:SI 1 "register_operand" "d") | 
|  | +		  (match_operand:SI 2 "register_operand" "d")))] | 
|  | +  "" | 
|  | +  { return TARGET_64BIT ? "rem<u>w\t%0,%1,%2" : "rem<u>\t%0,%1,%2"; } | 
|  | +  [(set_attr "type" "idiv3") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "<u>moddi3" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(any_mod:DI (match_operand:DI 1 "register_operand" "d") | 
|  | +		  (match_operand:DI 2 "register_operand" "d")))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "rem<u>\t%0,%1,%2" | 
|  | +  [(set_attr "type" "idiv3") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +(define_insn "div<mode>3" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +	(div:ANYF (match_operand:ANYF 1 "register_operand" "f") | 
|  | +		  (match_operand:ANYF 2 "register_operand" "f")))] | 
|  | +  "" | 
|  | +  "fdiv.<fmt>\t%0,%1,%2" | 
|  | +  [(set_attr "type" "fdiv") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	SQUARE ROOT | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +(define_insn "sqrt<mode>2" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +	(sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))] | 
|  | +  "" | 
|  | +{ | 
|  | +    return "fsqrt.<fmt>\t%0,%1"; | 
|  | +} | 
|  | +  [(set_attr "type" "fsqrt") | 
|  | +   (set_attr "mode" "<UNITMODE>") | 
|  | +   (set (attr "length") (const_int 4))]) | 
|  | + | 
|  | +;; Floating point multiply accumulate instructions. | 
|  | + | 
|  | +(define_insn "fma<mode>4" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +    (fma:ANYF | 
|  | +      (match_operand:ANYF 1 "register_operand" "f") | 
|  | +      (match_operand:ANYF 2 "register_operand" "f") | 
|  | +      (match_operand:ANYF 3 "register_operand" "f")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fmadd.<fmt>\t%0,%1,%2,%3" | 
|  | +  [(set_attr "type" "fmadd") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +(define_insn "fms<mode>4" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +    (fma:ANYF | 
|  | +      (match_operand:ANYF 1 "register_operand" "f") | 
|  | +      (match_operand:ANYF 2 "register_operand" "f") | 
|  | +      (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fmsub.<fmt>\t%0,%1,%2,%3" | 
|  | +  [(set_attr "type" "fmadd") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +(define_insn "nfma<mode>4" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +    (neg:ANYF | 
|  | +      (fma:ANYF | 
|  | +        (match_operand:ANYF 1 "register_operand" "f") | 
|  | +        (match_operand:ANYF 2 "register_operand" "f") | 
|  | +        (match_operand:ANYF 3 "register_operand" "f"))))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fnmadd.<fmt>\t%0,%1,%2,%3" | 
|  | +  [(set_attr "type" "fmadd") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +(define_insn "nfms<mode>4" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +    (neg:ANYF | 
|  | +      (fma:ANYF | 
|  | +        (match_operand:ANYF 1 "register_operand" "f") | 
|  | +        (match_operand:ANYF 2 "register_operand" "f") | 
|  | +        (neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fnmsub.<fmt>\t%0,%1,%2,%3" | 
|  | +  [(set_attr "type" "fmadd") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +;; modulo signed zeros, -(a*b+c) == -c-a*b | 
|  | +(define_insn "*nfma<mode>4_fastmath" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +    (minus:ANYF | 
|  | +      (match_operand:ANYF 3 "register_operand" "f") | 
|  | +      (mult:ANYF | 
|  | +        (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")) | 
|  | +        (match_operand:ANYF 2 "register_operand" "f"))))] | 
|  | +  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)" | 
|  | +  "fnmadd.<fmt>\t%0,%1,%2,%3" | 
|  | +  [(set_attr "type" "fmadd") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +;; modulo signed zeros, -(a*b-c) == c-a*b | 
|  | +(define_insn "*nfms<mode>4_fastmath" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +    (minus:ANYF | 
|  | +      (match_operand:ANYF 3 "register_operand" "f") | 
|  | +      (mult:ANYF | 
|  | +        (match_operand:ANYF 1 "register_operand" "f") | 
|  | +        (match_operand:ANYF 2 "register_operand" "f"))))] | 
|  | +  "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)" | 
|  | +  "fnmsub.<fmt>\t%0,%1,%2,%3" | 
|  | +  [(set_attr "type" "fmadd") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	ABSOLUTE VALUE | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +(define_insn "abs<mode>2" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +	(abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))] | 
|  | +  "" | 
|  | +  "fsgnjx.<fmt>\t%0,%1,%1" | 
|  | +  [(set_attr "type" "fabs") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	MIN/MAX | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +(define_insn "smin<mode>3" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +		   (smin:ANYF (match_operand:ANYF 1 "register_operand" "f") | 
|  | +			    (match_operand:ANYF 2 "register_operand" "f")))] | 
|  | +  "" | 
|  | +  "fmin.<fmt>\t%0,%1,%2" | 
|  | +  [(set_attr "type" "fabs") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +(define_insn "smax<mode>3" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +		   (smax:ANYF (match_operand:ANYF 1 "register_operand" "f") | 
|  | +			    (match_operand:ANYF 2 "register_operand" "f")))] | 
|  | +  "" | 
|  | +  "fmax.<fmt>\t%0,%1,%2" | 
|  | +  [(set_attr "type" "fabs") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	NEGATION and ONE'S COMPLEMENT | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +(define_insn "negsi2" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(neg:SI (match_operand:SI 1 "register_operand" "d")))] | 
|  | +  "" | 
|  | +  { return TARGET_64BIT ? "subw\t%0,zero,%1" : "sub\t%0,zero,%1"; } | 
|  | +  [(set_attr "type"	"arith") | 
|  | +   (set_attr "mode"	"SI")]) | 
|  | + | 
|  | +(define_insn "negdi2" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(neg:DI (match_operand:DI 1 "register_operand" "d")))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "sub\t%0,zero,%1" | 
|  | +  [(set_attr "type"	"arith") | 
|  | +   (set_attr "mode"	"DI")]) | 
|  | + | 
|  | +(define_insn "neg<mode>2" | 
|  | +  [(set (match_operand:ANYF 0 "register_operand" "=f") | 
|  | +	(neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))] | 
|  | +  "" | 
|  | +  "fsgnjn.<fmt>\t%0,%1,%1" | 
|  | +  [(set_attr "type" "fneg") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +(define_insn "one_cmpl<mode>2" | 
|  | +  [(set (match_operand:GPR 0 "register_operand" "=d") | 
|  | +	(not:GPR (match_operand:GPR 1 "register_operand" "d")))] | 
|  | +  "" | 
|  | +  "xor\t%0,%1,-1" | 
|  | +  [(set_attr "type" "logical") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	LOGICAL | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | + | 
|  | +(define_insn "and<mode>3" | 
|  | +  [(set (match_operand:GPR 0 "register_operand" "=d,d") | 
|  | +	(and:GPR (match_operand:GPR 1 "register_operand" "%d,d") | 
|  | +		 (match_operand:GPR 2 "arith_operand" "d,Q")))] | 
|  | +  "" | 
|  | +  "and\t%0,%1,%2" | 
|  | +  [(set_attr "type" "logical") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +(define_insn "ior<mode>3" | 
|  | +  [(set (match_operand:GPR 0 "register_operand" "=d,d") | 
|  | +	(ior:GPR (match_operand:GPR 1 "register_operand" "%d,d") | 
|  | +		 (match_operand:GPR 2 "arith_operand" "d,Q")))] | 
|  | +  "" | 
|  | +  "or\t%0,%1,%2" | 
|  | +  [(set_attr "type" "logical") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +;; This is used for generating HImode constants. | 
|  | +(define_insn "iorhi3" | 
|  | +  [(set (match_operand:HI 0 "register_operand" "=d,d") | 
|  | +	(ior:HI (match_operand:HI 1 "register_operand" "%d,d") | 
|  | +		 (match_operand:HI 2 "arith_operand" "d,Q")))] | 
|  | +  "" | 
|  | +  "or\t%0,%1,%2" | 
|  | +  [(set_attr "type" "logical") | 
|  | +   (set_attr "mode" "HI")]) | 
|  | + | 
|  | +(define_insn "xor<mode>3" | 
|  | +  [(set (match_operand:GPR 0 "register_operand" "=d,d") | 
|  | +	(xor:GPR (match_operand:GPR 1 "register_operand" "%d,d") | 
|  | +		 (match_operand:GPR 2 "arith_operand" "d,Q")))] | 
|  | +  "" | 
|  | +  "xor\t%0,%1,%2" | 
|  | +  [(set_attr "type" "logical") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	TRUNCATION | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | + | 
|  | + | 
|  | +(define_insn "truncdfsf2" | 
|  | +  [(set (match_operand:SF 0 "register_operand" "=f") | 
|  | +	(float_truncate:SF (match_operand:DF 1 "register_operand" "f")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.s.d\t%0,%1" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "cnv_mode"	"D2S") | 
|  | +   (set_attr "mode"	"SF")]) | 
|  | + | 
|  | +;; Integer truncation patterns.  Truncating SImode values to smaller | 
|  | +;; modes is a no-op, as it is for most other GCC ports.  Truncating | 
|  | +;; DImode values to SImode is not a no-op for TARGET_64BIT since we | 
|  | +;; need to make sure that the lower 32 bits are properly sign-extended | 
|  | +;; (see TRULY_NOOP_TRUNCATION).  Truncating DImode values into modes | 
|  | +;; smaller than SImode is equivalent to two separate truncations: | 
|  | +;; | 
|  | +;;                        A       B | 
|  | +;;    DI ---> HI  ==  DI ---> SI ---> HI | 
|  | +;;    DI ---> QI  ==  DI ---> SI ---> QI | 
|  | +;; | 
|  | +;; Step A needs a real instruction but step B does not. | 
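|  | +;; (Editorial example, not in the original: for TARGET_64BIT, | 
|  | +;;      (set (reg:HI x) (truncate:HI (reg:DI y))) | 
|  | +;; only needs the DI->SI step, so truncdihi2 below emits a single | 
|  | +;;      sllw  x, y, 0 | 
|  | +;; and the SI->HI narrowing needs no instruction at all.) | 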
|  | + | 
|  | +(define_insn "truncdisi2" | 
|  | +  [(set (match_operand:SI 0 "nonimmediate_operand" "=d,m") | 
|  | +        (truncate:SI (match_operand:DI 1 "register_operand" "d,d")))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "@ | 
|  | +    sllw\t%0,%1,0 | 
|  | +    sw\t%1,%0" | 
|  | +  [(set_attr "move_type" "sll0,store") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "truncdihi2" | 
|  | +  [(set (match_operand:HI 0 "nonimmediate_operand" "=d,m") | 
|  | +        (truncate:HI (match_operand:DI 1 "register_operand" "d,d")))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "@ | 
|  | +    sllw\t%0,%1,0 | 
|  | +    sh\t%1,%0" | 
|  | +  [(set_attr "move_type" "sll0,store") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "truncdiqi2" | 
|  | +  [(set (match_operand:QI 0 "nonimmediate_operand" "=d,m") | 
|  | +        (truncate:QI (match_operand:DI 1 "register_operand" "d,d")))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "@ | 
|  | +    sllw\t%0,%1,0 | 
|  | +    sb\t%1,%0" | 
|  | +  [(set_attr "move_type" "sll0,store") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +;; Combiner patterns to optimize shift/truncate combinations. | 
|  | + | 
|  | +(define_insn "*ashr_trunc<mode>" | 
|  | +  [(set (match_operand:SUBDI 0 "register_operand" "=d") | 
|  | +        (truncate:SUBDI | 
|  | +	  (ashiftrt:DI (match_operand:DI 1 "register_operand" "d") | 
|  | +		       (match_operand:DI 2 "const_arith_operand" ""))))] | 
|  | +  "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)" | 
|  | +  "sra\t%0,%1,%2" | 
|  | +  [(set_attr "type" "shift") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +(define_insn "*lshr32_trunc<mode>" | 
|  | +  [(set (match_operand:SUBDI 0 "register_operand" "=d") | 
|  | +        (truncate:SUBDI | 
|  | +	  (lshiftrt:DI (match_operand:DI 1 "register_operand" "d") | 
|  | +		       (const_int 32))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "sra\t%0,%1,32" | 
|  | +  [(set_attr "type" "shift") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +;; Combiner patterns for truncate/sign_extend combinations.  The SI versions | 
|  | +;; use the shift/truncate patterns above. | 
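|  | +;; (Editorial example: with <MODE> = HI on RV64, the split below rewrites | 
|  | +;;      (sign_extend:DI (truncate:HI (reg:DI))) | 
|  | +;; as a left shift by 48 followed by an arithmetic right shift by 48, | 
|  | +;; since BITS_PER_WORD - GET_MODE_BITSIZE (HImode) = 64 - 16 = 48.) | 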
|  | + | 
|  | +(define_insn_and_split "*extenddi_truncate<mode>" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(sign_extend:DI | 
|  | +	    (truncate:SHORT (match_operand:DI 1 "register_operand" "d"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "#" | 
|  | +  "&& reload_completed" | 
|  | +  [(set (match_dup 2) | 
|  | +	(ashift:DI (match_dup 1) | 
|  | +		   (match_dup 3))) | 
|  | +   (set (match_dup 0) | 
|  | +	(ashiftrt:DI (match_dup 2) | 
|  | +		     (match_dup 3)))] | 
|  | +{ | 
|  | +  operands[2] = gen_lowpart (DImode, operands[0]); | 
|  | +  operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (<MODE>mode)); | 
|  | +}) | 
|  | + | 
|  | +(define_insn_and_split "*extendsi_truncate<mode>" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(sign_extend:SI | 
|  | +	    (truncate:SHORT (match_operand:DI 1 "register_operand" "d"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "#" | 
|  | +  "&& reload_completed" | 
|  | +  [(set (match_dup 2) | 
|  | +	(ashift:DI (match_dup 1) | 
|  | +		   (match_dup 3))) | 
|  | +   (set (match_dup 0) | 
|  | +	(truncate:SI (ashiftrt:DI (match_dup 2) | 
|  | +				  (match_dup 3))))] | 
|  | +{ | 
|  | +  operands[2] = gen_lowpart (DImode, operands[0]); | 
|  | +  operands[3] = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (<MODE>mode)); | 
|  | +}) | 
|  | + | 
|  | +;; Combiner patterns to optimize truncate/zero_extend combinations. | 
|  | + | 
|  | +(define_insn "*zero_extend<mode>_truncqi" | 
|  | +  [(set (match_operand:GPR 0 "register_operand" "=d") | 
|  | +        (zero_extend:GPR | 
|  | +	    (truncate:QI (match_operand:DI 1 "register_operand" "d"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "andi\t%0,%1,0xff" | 
|  | +  [(set_attr "type" "logical") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +(define_insn "" | 
|  | +  [(set (match_operand:HI 0 "register_operand" "=d") | 
|  | +        (zero_extend:HI | 
|  | +	    (truncate:QI (match_operand:DI 1 "register_operand" "d"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "andi\t%0,%1,0xff" | 
|  | +  [(set_attr "type" "logical") | 
|  | +   (set_attr "mode" "HI")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	ZERO EXTENSION | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +;; Extension insns. | 
|  | + | 
|  | +(define_insn_and_split "zero_extendsidi2" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d,d") | 
|  | +        (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "d,W")))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "@ | 
|  | +   # | 
|  | +   lwu\t%0,%1" | 
|  | +  "&& reload_completed && REG_P (operands[1])" | 
|  | +  [(set (match_dup 0) | 
|  | +        (ashift:DI (match_dup 1) (const_int 32))) | 
|  | +   (set (match_dup 0) | 
|  | +        (lshiftrt:DI (match_dup 0) (const_int 32)))] | 
|  | +  { operands[1] = gen_lowpart (DImode, operands[1]); } | 
|  | +  [(set_attr "move_type" "shift_shift,load") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +;; Combine is not allowed to convert this insn into a zero_extendsidi2 | 
|  | +;; because of TRULY_NOOP_TRUNCATION. | 
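|  | +;; (Editorial note: the 0xffffffff mask does not fit in a 12-bit immediate, | 
|  | +;; so the register alternative is split after reload into a shift left by 32 | 
|  | +;; followed by a logical shift right by 32; the memory alternative simply | 
|  | +;; uses lwu, which zero-extends by definition.) | 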
|  | + | 
|  | +(define_insn_and_split "*clear_upper32" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d,d") | 
|  | +        (and:DI (match_operand:DI 1 "nonimmediate_operand" "d,W") | 
|  | +		(const_int 4294967295)))] | 
|  | +  "TARGET_64BIT" | 
|  | +{ | 
|  | +  if (which_alternative == 0) | 
|  | +    return "#"; | 
|  | + | 
|  | +  operands[1] = gen_lowpart (SImode, operands[1]); | 
|  | +  return "lwu\t%0,%1"; | 
|  | +} | 
|  | +  "&& reload_completed && REG_P (operands[1])" | 
|  | +  [(set (match_dup 0) | 
|  | +        (ashift:DI (match_dup 1) (const_int 32))) | 
|  | +   (set (match_dup 0) | 
|  | +        (lshiftrt:DI (match_dup 0) (const_int 32)))] | 
|  | +  "" | 
|  | +  [(set_attr "move_type" "shift_shift,load") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +(define_insn_and_split "zero_extendhi<GPR:mode>2" | 
|  | +  [(set (match_operand:GPR 0 "register_operand" "=d,d") | 
|  | +        (zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "d,m")))] | 
|  | +  "" | 
|  | +  "@ | 
|  | +   # | 
|  | +   lhu\t%0,%1" | 
|  | +  "&& reload_completed && REG_P (operands[1])" | 
|  | +  [(set (match_dup 0) | 
|  | +        (ashift:GPR (match_dup 1) (match_dup 2))) | 
|  | +   (set (match_dup 0) | 
|  | +        (lshiftrt:GPR (match_dup 0) (match_dup 2)))] | 
|  | +  { | 
|  | +    operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]); | 
|  | +    operands[2] = GEN_INT (GET_MODE_BITSIZE (<GPR:MODE>mode) - 16); | 
|  | +  } | 
|  | +  [(set_attr "move_type" "shift_shift,load") | 
|  | +   (set_attr "mode" "<GPR:MODE>")]) | 
|  | + | 
|  | +(define_insn "zero_extendqi<GPR:mode>2" | 
|  | +  [(set (match_operand:GPR 0 "register_operand" "=d,d") | 
|  | +        (zero_extend:GPR | 
|  | +	     (match_operand:QI 1 "nonimmediate_operand" "d,m")))] | 
|  | +  "" | 
|  | +  "@ | 
|  | +   and\t%0,%1,0xff | 
|  | +   lbu\t%0,%1" | 
|  | +  [(set_attr "move_type" "andi,load") | 
|  | +   (set_attr "mode" "<GPR:MODE>")]) | 
|  | + | 
|  | +(define_insn "zero_extendqihi2" | 
|  | +  [(set (match_operand:HI 0 "register_operand" "=d,d") | 
|  | +        (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "d,m")))] | 
|  | +  "" | 
|  | +  "@ | 
|  | +   andi\t%0,%1,0x00ff | 
|  | +   lbu\t%0,%1" | 
|  | +  [(set_attr "move_type" "andi,load") | 
|  | +   (set_attr "mode" "HI")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	SIGN EXTENSION | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +;; Extension insns. | 
|  | +;; Those with an integer source operand are ordered widest source type first. | 
|  | + | 
|  | +;; When TARGET_64BIT, all SImode integer registers should already be in | 
|  | +;; sign-extended form (see TRULY_NOOP_TRUNCATION and truncdisi2).  We can | 
|  | +;; therefore get rid of register->register instructions if we constrain | 
|  | +;; the source to be in the same register as the destination. | 
|  | +;; | 
|  | +;; The register alternative has type "arith" so that the pre-reload | 
|  | +;; scheduler will treat it as a move.  This reflects what happens if | 
|  | +;; the register alternative needs a reload. | 
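|  | +;; (Editorial note: this is why the register alternative of extendsidi2 | 
|  | +;; below ties operand 1 to operand 0 with the "0" constraint, and why its | 
|  | +;; split simply deletes the insn once reload has put both operands in the | 
|  | +;; same register.) | 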
|  | +(define_insn_and_split "extendsidi2" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d,d") | 
|  | +        (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "0,m")))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "@ | 
|  | +   # | 
|  | +   lw\t%0,%1" | 
|  | +  "&& reload_completed && register_operand (operands[1], VOIDmode)" | 
|  | +  [(const_int 0)] | 
|  | +{ | 
|  | +  emit_note (NOTE_INSN_DELETED); | 
|  | +  DONE; | 
|  | +} | 
|  | +  [(set_attr "move_type" "move,load") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +(define_insn_and_split "extend<SHORT:mode><GPR:mode>2" | 
|  | +  [(set (match_operand:GPR 0 "register_operand" "=d,d") | 
|  | +        (sign_extend:GPR | 
|  | +	     (match_operand:SHORT 1 "nonimmediate_operand" "d,m")))] | 
|  | +  "" | 
|  | +  "@ | 
|  | +   # | 
|  | +   l<SHORT:size>\t%0,%1" | 
|  | +  "&& reload_completed && REG_P (operands[1])" | 
|  | +  [(set (match_dup 0) (ashift:GPR (match_dup 1) (match_dup 2))) | 
|  | +   (set (match_dup 0) (ashiftrt:GPR (match_dup 0) (match_dup 2)))] | 
|  | +{ | 
|  | +  operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]); | 
|  | +  operands[2] = GEN_INT (GET_MODE_BITSIZE (<GPR:MODE>mode) | 
|  | +			 - GET_MODE_BITSIZE (<SHORT:MODE>mode)); | 
|  | +} | 
|  | +  [(set_attr "move_type" "shift_shift,load") | 
|  | +   (set_attr "mode" "<GPR:MODE>")]) | 
|  | + | 
|  | +(define_insn_and_split "extendqihi2" | 
|  | +  [(set (match_operand:HI 0 "register_operand" "=d,d") | 
|  | +        (sign_extend:HI | 
|  | +	     (match_operand:QI 1 "nonimmediate_operand" "d,m")))] | 
|  | +  "" | 
|  | +  "@ | 
|  | +   # | 
|  | +   lb\t%0,%1" | 
|  | +  "&& reload_completed && REG_P (operands[1])" | 
|  | +  [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2))) | 
|  | +   (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))] | 
|  | +{ | 
|  | +  operands[0] = gen_lowpart (SImode, operands[0]); | 
|  | +  operands[1] = gen_lowpart (SImode, operands[1]); | 
|  | +  operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode) | 
|  | +			 - GET_MODE_BITSIZE (QImode)); | 
|  | +} | 
|  | +  [(set_attr "move_type" "shift_shift,load") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "extendsfdf2" | 
|  | +  [(set (match_operand:DF 0 "register_operand" "=f") | 
|  | +	(float_extend:DF (match_operand:SF 1 "register_operand" "f")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.d.s\t%0,%1" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "cnv_mode"	"S2D") | 
|  | +   (set_attr "mode"	"DF")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	CONVERSIONS | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +(define_insn "fix_truncdfsi2" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(fix:SI (match_operand:DF 1 "register_operand" "f")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.w.d %0,%1,rtz" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"DF") | 
|  | +   (set_attr "cnv_mode"	"D2I")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "fix_truncsfsi2" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(fix:SI (match_operand:SF 1 "register_operand" "f")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.w.s %0,%1,rtz" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"SF") | 
|  | +   (set_attr "cnv_mode"	"S2I")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "fix_truncdfdi2" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(fix:DI (match_operand:DF 1 "register_operand" "f")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.l.d %0,%1,rtz" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"DF") | 
|  | +   (set_attr "cnv_mode"	"D2I")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "fix_truncsfdi2" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(fix:DI (match_operand:SF 1 "register_operand" "f")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.l.s %0,%1,rtz" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"SF") | 
|  | +   (set_attr "cnv_mode"	"S2I")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "floatsidf2" | 
|  | +  [(set (match_operand:DF 0 "register_operand" "=f") | 
|  | +	(float:DF (match_operand:SI 1 "register_operand" "d")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.d.w\t%0,%1" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"DF") | 
|  | +   (set_attr "cnv_mode"	"I2D")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "floatdidf2" | 
|  | +  [(set (match_operand:DF 0 "register_operand" "=f") | 
|  | +	(float:DF (match_operand:DI 1 "register_operand" "d")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.d.l\t%0,%1" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"DF") | 
|  | +   (set_attr "cnv_mode"	"I2D")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "floatsisf2" | 
|  | +  [(set (match_operand:SF 0 "register_operand" "=f") | 
|  | +	(float:SF (match_operand:SI 1 "register_operand" "d")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.s.w\t%0,%1" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"SF") | 
|  | +   (set_attr "cnv_mode"	"I2S")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "floatdisf2" | 
|  | +  [(set (match_operand:SF 0 "register_operand" "=f") | 
|  | +	(float:SF (match_operand:DI 1 "register_operand" "d")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.s.l\t%0,%1" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"SF") | 
|  | +   (set_attr "cnv_mode"	"I2S")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "floatunssidf2" | 
|  | +  [(set (match_operand:DF 0 "register_operand" "=f") | 
|  | +	(unsigned_float:DF (match_operand:SI 1 "register_operand" "d")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.d.wu\t%0,%1" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"DF") | 
|  | +   (set_attr "cnv_mode"	"I2D")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "floatunsdidf2" | 
|  | +  [(set (match_operand:DF 0 "register_operand" "=f") | 
|  | +	(unsigned_float:DF (match_operand:DI 1 "register_operand" "d")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.d.lu\t%0,%1" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"DF") | 
|  | +   (set_attr "cnv_mode"	"I2D")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "floatunssisf2" | 
|  | +  [(set (match_operand:SF 0 "register_operand" "=f") | 
|  | +	(unsigned_float:SF (match_operand:SI 1 "register_operand" "d")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.s.wu\t%0,%1" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"SF") | 
|  | +   (set_attr "cnv_mode"	"I2S")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "floatunsdisf2" | 
|  | +  [(set (match_operand:SF 0 "register_operand" "=f") | 
|  | +	(unsigned_float:SF (match_operand:DI 1 "register_operand" "d")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.s.lu\t%0,%1" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"SF") | 
|  | +   (set_attr "cnv_mode"	"I2S")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "fixuns_truncdfsi2" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(unsigned_fix:SI (match_operand:DF 1 "register_operand" "f")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.wu.d %0,%1,rtz" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"DF") | 
|  | +   (set_attr "cnv_mode"	"D2I")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "fixuns_truncsfsi2" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(unsigned_fix:SI (match_operand:SF 1 "register_operand" "f")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.wu.s %0,%1,rtz" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"SF") | 
|  | +   (set_attr "cnv_mode"	"S2I")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "fixuns_truncdfdi2" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(unsigned_fix:DI (match_operand:DF 1 "register_operand" "f")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.lu.d %0,%1,rtz" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"DF") | 
|  | +   (set_attr "cnv_mode"	"D2I")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "fixuns_truncsfdi2" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(unsigned_fix:DI (match_operand:SF 1 "register_operand" "f")))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +  "fcvt.lu.s %0,%1,rtz" | 
|  | +  [(set_attr "type"	"fcvt") | 
|  | +   (set_attr "mode"	"SF") | 
|  | +   (set_attr "cnv_mode"	"S2I")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	DATA MOVEMENT | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +(define_insn "got_load<mode>" | 
|  | +  [(set (match_operand:P 0 "register_operand" "=d") | 
|  | +       (unspec:P [(match_operand:P 1 "symbolic_operand" "")] | 
|  | +                 UNSPEC_LOAD_GOT))] | 
|  | +  "TARGET_USE_GOT" | 
|  | +  "la\t%0,%1" | 
|  | +  [(set_attr "got" "load") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +(define_insn "got_load_tls_gd<mode>" | 
|  | +  [(set (match_operand:P 0 "register_operand" "=d") | 
|  | +       (unspec:P [(match_operand:P 1 "symbolic_operand" "")] | 
|  | +                 UNSPEC_TLS_GD))] | 
|  | +  "TARGET_USE_GOT" | 
|  | +  "la.tls.gd\t%0,%1" | 
|  | +  [(set_attr "got" "load") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +(define_insn "got_load_tls_ie<mode>" | 
|  | +  [(set (match_operand:P 0 "register_operand" "=d") | 
|  | +       (unspec:P [(match_operand:P 1 "symbolic_operand" "")] | 
|  | +                 UNSPEC_TLS_IE))] | 
|  | +  "TARGET_USE_GOT" | 
|  | +  "la.tls.ie\t%0,%1" | 
|  | +  [(set_attr "got" "load") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +;; Instructions for adding the low part of an address to a register. | 
|  | +;; Operand 2 is the address: mips_print_operand works out which relocation | 
|  | +;; should be applied. | 
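|  | +;; (Hypothetical illustration, not from the original source: a symbolic | 
|  | +;; address is typically materialized as a high-part load followed by this | 
|  | +;; pattern, roughly | 
|  | +;;      lui   a5, %hi(sym) | 
|  | +;;      add   a5, a5, %lo(sym) | 
|  | +;; where the second instruction comes from *low<mode> and %R2 prints the | 
|  | +;; low-part relocation.  Exact relocation syntax is assembler-specific.) | 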
|  | + | 
|  | +(define_insn "*low<mode>" | 
|  | +  [(set (match_operand:P 0 "register_operand" "=d") | 
|  | +	(lo_sum:P (match_operand:P 1 "register_operand" "d") | 
|  | +		  (match_operand:P 2 "immediate_operand" "")))] | 
|  | +  "" | 
|  | +  { return Pmode == SImode && TARGET_64BIT ? "addw\t%0,%1,%R2" : "add\t%0,%1,%R2"; } | 
|  | +  [(set_attr "alu_type" "add") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +;; Allow combine to split complex const_int load sequences, using operand 2 | 
|  | +;; to store the intermediate results.  See move_operand for details. | 
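|  | +;; (Editorial example, hedged: a constant such as 0x12345678 is typically | 
|  | +;; synthesized by mips_move_integer as an upper-immediate load plus a | 
|  | +;; 12-bit addition, roughly | 
|  | +;;      lui   t0, 0x12345 | 
|  | +;;      addi  t0, t0, 0x678 | 
|  | +;; The exact sequence depends on the constant.) | 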
|  | +(define_split | 
|  | +  [(set (match_operand:GPR 0 "register_operand") | 
|  | +	(match_operand:GPR 1 "splittable_const_int_operand")) | 
|  | +   (clobber (match_operand:GPR 2 "register_operand"))] | 
|  | +  "" | 
|  | +  [(const_int 0)] | 
|  | +{ | 
|  | +  mips_move_integer (operands[2], operands[0], INTVAL (operands[1])); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +;; Likewise, for symbolic operands. | 
|  | +(define_split | 
|  | +  [(set (match_operand:P 0 "register_operand") | 
|  | +	(match_operand:P 1)) | 
|  | +   (clobber (match_operand:P 2 "register_operand"))] | 
|  | +  "mips_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)" | 
|  | +  [(set (match_dup 0) (match_dup 3))] | 
|  | +{ | 
|  | +  mips_split_symbol (operands[2], operands[1], | 
|  | +		     MAX_MACHINE_MODE, &operands[3]); | 
|  | +}) | 
|  | + | 
|  | +;; 64-bit integer moves | 
|  | + | 
|  | +;; Unlike most other insns, the move insns can't be split with | 
|  | +;; different predicates, because register spilling and other parts of | 
|  | +;; the compiler have memoized the insn number already. | 
|  | + | 
|  | +(define_expand "movdi" | 
|  | +  [(set (match_operand:DI 0 "") | 
|  | +	(match_operand:DI 1 ""))] | 
|  | +  "" | 
|  | +{ | 
|  | +  if (mips_legitimize_move (DImode, operands[0], operands[1])) | 
|  | +    DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "*movdi_32bit" | 
|  | +  [(set (match_operand:DI 0 "nonimmediate_operand" "=d,d,d,m,*f,*f,*d,*m") | 
|  | +	(match_operand:DI 1 "move_operand" "d,i,m,d,*J*d,*m,*f,*f"))] | 
|  | +  "!TARGET_64BIT | 
|  | +   && (register_operand (operands[0], DImode) | 
|  | +       || reg_or_0_operand (operands[1], DImode))" | 
|  | +  { return mips_output_move (operands[0], operands[1]); } | 
|  | +  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +(define_insn "*movdi_64bit" | 
|  | +  [(set (match_operand:DI 0 "nonimmediate_operand" "=d,d,d,m,*f,*f,*d,*m") | 
|  | +	(match_operand:DI 1 "move_operand" "d,T,m,dJ,*d*J,*m,*f,*f"))] | 
|  | +  "TARGET_64BIT | 
|  | +   && (register_operand (operands[0], DImode) | 
|  | +       || reg_or_0_operand (operands[1], DImode))" | 
|  | +  { return mips_output_move (operands[0], operands[1]); } | 
|  | +  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +;; 32-bit Integer moves | 
|  | + | 
|  | +;; Unlike most other insns, the move insns can't be split with | 
|  | +;; different predicates, because register spilling and other parts of | 
|  | +;; the compiler have memoized the insn number already. | 
|  | + | 
|  | +(define_expand "mov<mode>" | 
|  | +  [(set (match_operand:IMOVE32 0 "") | 
|  | +	(match_operand:IMOVE32 1 ""))] | 
|  | +  "" | 
|  | +{ | 
|  | +  if (mips_legitimize_move (<MODE>mode, operands[0], operands[1])) | 
|  | +    DONE; | 
|  | +}) | 
|  | + | 
|  | +;; The starred alternatives below allow 32-bit integers to be held in | 
|  | +;; FP registers as well as in GPRs. | 
|  | + | 
|  | +(define_insn "*mov<mode>_internal" | 
|  | +  [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=d,d,d,m,*f,*f,*d,*m,*d,*z") | 
|  | +	(match_operand:IMOVE32 1 "move_operand" "d,T,m,dJ,*d*J,*m,*f,*f,*z,*d"))] | 
|  | +  "(register_operand (operands[0], <MODE>mode) | 
|  | +    || reg_or_0_operand (operands[1], <MODE>mode))" | 
|  | +  { return mips_output_move (operands[0], operands[1]); } | 
|  | +  [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore,mfc,mtc") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +;; 16-bit Integer moves | 
|  | + | 
|  | +;; Unlike most other insns, the move insns can't be split with | 
|  | +;; different predicates, because register spilling and other parts of | 
|  | +;; the compiler have memoized the insn number already. | 
|  | +;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND. | 
|  | + | 
|  | +(define_expand "movhi" | 
|  | +  [(set (match_operand:HI 0 "") | 
|  | +	(match_operand:HI 1 ""))] | 
|  | +  "" | 
|  | +{ | 
|  | +  if (mips_legitimize_move (HImode, operands[0], operands[1])) | 
|  | +    DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "*movhi_internal" | 
|  | +  [(set (match_operand:HI 0 "nonimmediate_operand" "=d,d,d,m") | 
|  | +	(match_operand:HI 1 "move_operand"         "d,T,m,dJ"))] | 
|  | +  "(register_operand (operands[0], HImode) | 
|  | +    || reg_or_0_operand (operands[1], HImode))" | 
|  | +  { return mips_output_move (operands[0], operands[1]); } | 
|  | +  [(set_attr "move_type" "move,const,load,store") | 
|  | +   (set_attr "mode" "HI")]) | 
|  | + | 
|  | +;; 8-bit Integer moves | 
|  | + | 
|  | +(define_expand "movqi" | 
|  | +  [(set (match_operand:QI 0 "") | 
|  | +	(match_operand:QI 1 ""))] | 
|  | +  "" | 
|  | +{ | 
|  | +  if (mips_legitimize_move (QImode, operands[0], operands[1])) | 
|  | +    DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "*movqi_internal" | 
|  | +  [(set (match_operand:QI 0 "nonimmediate_operand" "=d,d,d,m") | 
|  | +	(match_operand:QI 1 "move_operand"         "d,I,m,dJ"))] | 
|  | +  "(register_operand (operands[0], QImode) | 
|  | +    || reg_or_0_operand (operands[1], QImode))" | 
|  | +  { return mips_output_move (operands[0], operands[1]); } | 
|  | +  [(set_attr "move_type" "move,const,load,store") | 
|  | +   (set_attr "mode" "QI")]) | 
|  | + | 
|  | +;; 32-bit floating point moves | 
|  | + | 
|  | +(define_expand "movsf" | 
|  | +  [(set (match_operand:SF 0 "") | 
|  | +	(match_operand:SF 1 ""))] | 
|  | +  "" | 
|  | +{ | 
|  | +  if (mips_legitimize_move (SFmode, operands[0], operands[1])) | 
|  | +    DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "*movsf_hardfloat" | 
|  | +  [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*d,*d,*d,*m") | 
|  | +	(match_operand:SF 1 "move_operand" "f,G,m,f,G,*d,*f,*G*d,*m,*d"))] | 
|  | +  "TARGET_HARD_FLOAT | 
|  | +   && (register_operand (operands[0], SFmode) | 
|  | +       || reg_or_0_operand (operands[1], SFmode))" | 
|  | +  { return mips_output_move (operands[0], operands[1]); } | 
|  | +  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store") | 
|  | +   (set_attr "mode" "SF")]) | 
|  | + | 
|  | +(define_insn "*movsf_softfloat" | 
|  | +  [(set (match_operand:SF 0 "nonimmediate_operand" "=d,d,m") | 
|  | +	(match_operand:SF 1 "move_operand" "Gd,m,d"))] | 
|  | +  "TARGET_SOFT_FLOAT | 
|  | +   && (register_operand (operands[0], SFmode) | 
|  | +       || reg_or_0_operand (operands[1], SFmode))" | 
|  | +  { return mips_output_move (operands[0], operands[1]); } | 
|  | +  [(set_attr "move_type" "move,load,store") | 
|  | +   (set_attr "mode" "SF")]) | 
|  | + | 
|  | +;; 64-bit floating point moves | 
|  | + | 
|  | +(define_expand "movdf" | 
|  | +  [(set (match_operand:DF 0 "") | 
|  | +	(match_operand:DF 1 ""))] | 
|  | +  "" | 
|  | +{ | 
|  | +  if (mips_legitimize_move (DFmode, operands[0], operands[1])) | 
|  | +    DONE; | 
|  | +}) | 
|  | + | 
|  | +;; In RV32, we lack mtf.d/mff.d, so DFmode moves between GPRs and FPRs go | 
|  | +;; through memory instead (except for moving constant 0 to an FPR, for | 
|  | +;; which we use fcvt.d.w). | 
|  | +(define_insn "*movdf_hardfloat_rv32" | 
|  | +  [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*d,*d,*m") | 
|  | +	(match_operand:DF 1 "move_operand" "f,G,m,f,G,*d*G,*m,*d"))] | 
|  | +  "!TARGET_64BIT && TARGET_HARD_FLOAT | 
|  | +   && (register_operand (operands[0], DFmode) | 
|  | +       || reg_or_0_operand (operands[1], DFmode))" | 
|  | +  { return mips_output_move (operands[0], operands[1]); } | 
|  | +  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,move,load,store") | 
|  | +   (set_attr "mode" "DF")]) | 
|  | + | 
|  | +(define_insn "*movdf_hardfloat_rv64" | 
|  | +  [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*d,*d,*d,*m") | 
|  | +	(match_operand:DF 1 "move_operand" "f,G,m,f,G,*d,*f,*d*G,*m,*d"))] | 
|  | +  "TARGET_64BIT && TARGET_HARD_FLOAT | 
|  | +   && (register_operand (operands[0], DFmode) | 
|  | +       || reg_or_0_operand (operands[1], DFmode))" | 
|  | +  { return mips_output_move (operands[0], operands[1]); } | 
|  | +  [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store") | 
|  | +   (set_attr "mode" "DF")]) | 
|  | + | 
|  | +(define_insn "*movdf_softfloat" | 
|  | +  [(set (match_operand:DF 0 "nonimmediate_operand" "=d,d,m") | 
|  | +	(match_operand:DF 1 "move_operand" "dG,m,dG"))] | 
|  | +  "TARGET_SOFT_FLOAT | 
|  | +   && (register_operand (operands[0], DFmode) | 
|  | +       || reg_or_0_operand (operands[1], DFmode))" | 
|  | +  { return mips_output_move (operands[0], operands[1]); } | 
|  | +  [(set_attr "move_type" "move,load,store") | 
|  | +   (set_attr "mode" "DF")]) | 
|  | + | 
|  | +;; 128-bit integer moves | 
|  | + | 
|  | +(define_expand "movti" | 
|  | +  [(set (match_operand:TI 0) | 
|  | +	(match_operand:TI 1))] | 
|  | +  "TARGET_64BIT" | 
|  | +{ | 
|  | +  if (mips_legitimize_move (TImode, operands[0], operands[1])) | 
|  | +    DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "*movti" | 
|  | +  [(set (match_operand:TI 0 "nonimmediate_operand" "=d,d,d,m") | 
|  | +	(match_operand:TI 1 "move_operand" "d,i,m,dJ"))] | 
|  | +  "TARGET_64BIT | 
|  | +   && (register_operand (operands[0], TImode) | 
|  | +       || reg_or_0_operand (operands[1], TImode))" | 
|  | +  "#" | 
|  | +  [(set_attr "move_type" "move,const,load,store") | 
|  | +   (set_attr "mode" "TI")]) | 
|  | + | 
|  | +(define_split | 
|  | +  [(set (match_operand:MOVE64 0 "nonimmediate_operand") | 
|  | +	(match_operand:MOVE64 1 "move_operand"))] | 
|  | +  "reload_completed && !TARGET_64BIT | 
|  | +   && mips_split_64bit_move_p (operands[0], operands[1])" | 
|  | +  [(const_int 0)] | 
|  | +{ | 
|  | +  mips_split_doubleword_move (operands[0], operands[1]); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_split | 
|  | +  [(set (match_operand:MOVE128 0 "nonimmediate_operand") | 
|  | +	(match_operand:MOVE128 1 "move_operand"))] | 
|  | +  "TARGET_64BIT && reload_completed" | 
|  | +  [(const_int 0)] | 
|  | +{ | 
|  | +  mips_split_doubleword_move (operands[0], operands[1]); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +;; 64-bit paired-single floating point moves | 
|  | + | 
|  | +;; Load the low word of operand 0 with operand 1. | 
|  | +(define_insn "load_low<mode>" | 
|  | +  [(set (match_operand:SPLITF 0 "register_operand" "=f,f") | 
|  | +	(unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "dJ,m")] | 
|  | +		       UNSPEC_LOAD_LOW))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +{ | 
|  | +  operands[0] = mips_subword (operands[0], 0); | 
|  | +  return mips_output_move (operands[0], operands[1]); | 
|  | +} | 
|  | +  [(set_attr "move_type" "mtc,fpload") | 
|  | +   (set_attr "mode" "<HALFMODE>")]) | 
|  | + | 
|  | +;; Load the high word of operand 0 from operand 1, preserving the value | 
|  | +;; in the low word. | 
|  | +(define_insn "load_high<mode>" | 
|  | +  [(set (match_operand:SPLITF 0 "register_operand" "=f,f") | 
|  | +	(unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "dJ,m") | 
|  | +			(match_operand:SPLITF 2 "register_operand" "0,0")] | 
|  | +		       UNSPEC_LOAD_HIGH))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +{ | 
|  | +  operands[0] = mips_subword (operands[0], 1); | 
|  | +  return mips_output_move (operands[0], operands[1]); | 
|  | +} | 
|  | +  [(set_attr "move_type" "mtc,fpload") | 
|  | +   (set_attr "mode" "<HALFMODE>")]) | 
|  | + | 
|  | +;; Store one word of operand 1 in operand 0.  Operand 2 is 1 to store the | 
|  | +;; high word and 0 to store the low word. | 
|  | +(define_insn "store_word<mode>" | 
|  | +  [(set (match_operand:<HALFMODE> 0 "nonimmediate_operand" "=d,m") | 
|  | +	(unspec:<HALFMODE> [(match_operand:SPLITF 1 "register_operand" "f,f") | 
|  | +			    (match_operand 2 "const_int_operand")] | 
|  | +			   UNSPEC_STORE_WORD))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +{ | 
|  | +  operands[1] = mips_subword (operands[1], INTVAL (operands[2])); | 
|  | +  return mips_output_move (operands[0], operands[1]); | 
|  | +} | 
|  | +  [(set_attr "move_type" "mfc,fpstore") | 
|  | +   (set_attr "mode" "<HALFMODE>")]) | 
|  | + | 
|  | +;; Expand in-line code to clear the instruction cache between operand[0] and | 
|  | +;; operand[1]. | 
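|  | +;; (Editorial note: fence.i synchronizes the hart's instruction fetches with | 
|  | +;; all prior stores, so the address range in operands 0 and 1 is ignored.) | 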
|  | +(define_expand "clear_cache" | 
|  | +  [(match_operand 0 "pmode_register_operand") | 
|  | +   (match_operand 1 "pmode_register_operand")] | 
|  | +  "" | 
|  | +  " | 
|  | +{ | 
|  | +  emit_insn(gen_fence_i()); | 
|  | +  DONE; | 
|  | +}") | 
|  | + | 
|  | +(define_insn "fence" | 
|  | +  [(unspec_volatile [(const_int 0)] UNSPEC_FENCE)] | 
|  | +  "" | 
|  | +  "%|fence%-") | 
|  | + | 
|  | +(define_insn "fence_i" | 
|  | +  [(unspec_volatile [(const_int 0)] UNSPEC_FENCE_I)] | 
|  | +  "" | 
|  | +  "fence.i") | 
|  | + | 
|  | +;; Block moves, see mips.c for more details. | 
|  | +;; Argument 0 is the destination | 
|  | +;; Argument 1 is the source | 
|  | +;; Argument 2 is the length | 
|  | +;; Argument 3 is the alignment | 
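|  | +;; (Editorial note: when mips_expand_block_move cannot open-code the copy, | 
|  | +;; FAIL makes the middle end fall back to a memcpy call; the whole expander | 
|  | +;; is disabled when TARGET_MEMCPY is set, which forces memcpy calls anyway.) | 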
|  | + | 
|  | +(define_expand "movmemsi" | 
|  | +  [(parallel [(set (match_operand:BLK 0 "general_operand") | 
|  | +		   (match_operand:BLK 1 "general_operand")) | 
|  | +	      (use (match_operand:SI 2 "")) | 
|  | +	      (use (match_operand:SI 3 "const_int_operand"))])] | 
|  | +  "!TARGET_MEMCPY" | 
|  | +{ | 
|  | +  if (mips_expand_block_move (operands[0], operands[1], operands[2])) | 
|  | +    DONE; | 
|  | +  else | 
|  | +    FAIL; | 
|  | +}) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	SHIFTS | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
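|  | +;; (Editorial note: constant shift amounts are reduced modulo the mode width | 
|  | +;; below, matching the hardware, which uses only the low bits of the shift | 
|  | +;; count; e.g. a CONST_INT of 33 in an SImode shift is printed as 1.) | 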
|  | +(define_insn "<optab>si3" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	(any_shift:SI (match_operand:SI 1 "register_operand" "d") | 
|  | +		       (match_operand:SI 2 "arith_operand" "dI")))] | 
|  | +  "" | 
|  | +{ | 
|  | +  if (GET_CODE (operands[2]) == CONST_INT) | 
|  | +    operands[2] = GEN_INT (INTVAL (operands[2]) | 
|  | +			   & (GET_MODE_BITSIZE (SImode) - 1)); | 
|  | + | 
|  | +  return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2"; | 
|  | +} | 
|  | +  [(set_attr "type" "shift") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "*<optab>disi3" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +	     (any_shift:SI (truncate:SI (match_operand:DI 1 "register_operand" "d")) | 
|  | +		      (truncate:SI (match_operand:DI 2 "arith_operand" "dI"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "<insn>w\t%0,%1,%2" | 
|  | +  [(set_attr "type" "shift") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "*ashldi3_truncsi" | 
|  | +  [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +          (truncate:SI | 
|  | +	     (ashift:DI (match_operand:DI 1 "register_operand" "d") | 
|  | +		      (match_operand:DI 2 "arith_operand" "dI"))))] | 
|  | +  "TARGET_64BIT && (GET_CODE (operands[2]) == CONST_INT ? INTVAL (operands[2]) < 32 : 1)" | 
|  | +  "sllw\t%0,%1,%2" | 
|  | +  [(set_attr "type" "shift") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +(define_insn "<optab>di3" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(any_shift:DI (match_operand:DI 1 "register_operand" "d") | 
|  | +		       (match_operand:DI 2 "arith_operand" "dI")))] | 
|  | +  "TARGET_64BIT" | 
|  | +{ | 
|  | +  if (GET_CODE (operands[2]) == CONST_INT) | 
|  | +    operands[2] = GEN_INT (INTVAL (operands[2]) | 
|  | +			   & (GET_MODE_BITSIZE (DImode) - 1)); | 
|  | + | 
|  | +  return "<insn>\t%0,%1,%2"; | 
|  | +} | 
|  | +  [(set_attr "type" "shift") | 
|  | +   (set_attr "mode" "DI")]) | 
|  | + | 
|  | +(define_insn "<optab>si3_extend" | 
|  | +  [(set (match_operand:DI 0 "register_operand" "=d") | 
|  | +	(sign_extend:DI | 
|  | +	   (any_shift:SI (match_operand:SI 1 "register_operand" "d") | 
|  | +			 (match_operand:SI 2 "arith_operand" "dI"))))] | 
|  | +  "TARGET_64BIT" | 
|  | +{ | 
|  | +  if (GET_CODE (operands[2]) == CONST_INT) | 
|  | +    operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f); | 
|  | + | 
|  | +  return "<insn>w\t%0,%1,%2"; | 
|  | +} | 
|  | +  [(set_attr "type" "shift") | 
|  | +   (set_attr "mode" "SI")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	CONDITIONAL BRANCHES | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +;; Conditional branches | 
|  | + | 
|  | +(define_insn "*branch_order<mode>" | 
|  | +  [(set (pc) | 
|  | +	(if_then_else | 
|  | +	 (match_operator 1 "order_operator" | 
|  | +			 [(match_operand:GPR 2 "register_operand" "d") | 
|  | +			  (match_operand:GPR 3 "reg_or_0_operand" "dJ")]) | 
|  | +	 (label_ref (match_operand 0 "" "")) | 
|  | +	 (pc)))] | 
|  | +  "" | 
|  | +{ | 
|  | +  return mips_output_conditional_branch (insn, operands, | 
|  | +					 "b%C1\t%2,%z3,%0", | 
|  | +					 "b%N1\t%2,%z3,%0"); | 
|  | +} | 
|  | +  [(set_attr "type" "branch") | 
|  | +   (set_attr "mode" "none")]) | 
|  | + | 
|  | +(define_insn "*branch_order<mode>_inverted" | 
|  | +  [(set (pc) | 
|  | +	(if_then_else | 
|  | +	 (match_operator 1 "order_operator" | 
|  | +			 [(match_operand:GPR 2 "register_operand" "d") | 
|  | +			  (match_operand:GPR 3 "reg_or_0_operand" "dJ")]) | 
|  | +	 (pc) | 
|  | +	 (label_ref (match_operand 0 "" ""))))] | 
|  | +  "" | 
|  | +{ | 
|  | +  return mips_output_conditional_branch (insn, operands, | 
|  | +					 "b%N1\t%2,%z3,%0", | 
|  | +					 "b%C1\t%2,%z3,%0"); | 
|  | +} | 
|  | +  [(set_attr "type" "branch") | 
|  | +   (set_attr "mode" "none")]) | 
|  | + | 
|  | +;; Used to implement built-in functions. | 
|  | +(define_expand "condjump" | 
|  | +  [(set (pc) | 
|  | +	(if_then_else (match_operand 0) | 
|  | +		      (label_ref (match_operand 1)) | 
|  | +		      (pc)))]) | 
|  | + | 
|  | +(define_expand "cbranch<mode>4" | 
|  | +  [(set (pc) | 
|  | +	(if_then_else (match_operator 0 "comparison_operator" | 
|  | +		       [(match_operand:GPR 1 "register_operand") | 
|  | +		        (match_operand:GPR 2 "nonmemory_operand")]) | 
|  | +		      (label_ref (match_operand 3 "")) | 
|  | +		      (pc)))] | 
|  | +  "" | 
|  | +{ | 
|  | +  mips_expand_conditional_branch (operands); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_expand "cbranch<mode>4" | 
|  | +  [(set (pc) | 
|  | +	(if_then_else (match_operator 0 "comparison_operator" | 
|  | +		       [(match_operand:SCALARF 1 "register_operand") | 
|  | +		        (match_operand:SCALARF 2 "register_operand")]) | 
|  | +		      (label_ref (match_operand 3 "")) | 
|  | +		      (pc)))] | 
|  | +  "" | 
|  | +{ | 
|  | +  mips_expand_conditional_branch (operands); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	SETTING A REGISTER FROM A COMPARISON | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +;; Destination is always set in SI mode. | 
|  | + | 
|  | +(define_expand "cstore<mode>4" | 
|  | +  [(set (match_operand:SI 0 "register_operand") | 
|  | +	(match_operator:SI 1 "order_operator" | 
|  | +	 [(match_operand:GPR 2 "register_operand") | 
|  | +	  (match_operand:GPR 3 "nonmemory_operand")]))] | 
|  | +  "" | 
|  | +{ | 
|  | +  mips_expand_scc (operands); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "cstore<mode>4" | 
|  | +   [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +        (match_operator:SI 1 "fp_order_operator" | 
|  | +	      [(match_operand:SCALARF 2 "register_operand" "f") | 
|  | +	       (match_operand:SCALARF 3 "register_operand" "f")]))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +{ | 
|  | +  if (GET_CODE (operands[1]) == GT || GET_CODE (operands[1]) == GE) | 
|  | +    return "f%S1.<fmt>\t%0,%3,%2"; | 
|  | +  else | 
|  | +    return "f%C1.<fmt>\t%0,%2,%3"; | 
|  | +} | 
|  | +  [(set_attr "type" "fcmp") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +;; Ordered/unordered comparison of an operand with itself: feq.<fmt> x,x | 
|  | +;; yields 1 unless x is NaN, so UNORDERED needs an extra sltu to invert it. | 
|  | +(define_insn "cstore<mode>4_ord" | 
|  | +   [(set (match_operand:SI 0 "register_operand" "=d") | 
|  | +        (match_operator:SI 1 "fp_unorder_operator" | 
|  | +	      [(match_operand:SCALARF 2 "register_operand" "f") | 
|  | +	       (match_dup 2)]))] | 
|  | +  "TARGET_HARD_FLOAT" | 
|  | +{ | 
|  | +  if (GET_CODE (operands[1]) == ORDERED) | 
|  | +    return "feq.<fmt>\t%0,%2,%2"; | 
|  | +  else /* UNORDERED */ | 
|  | +    return "feq.<fmt>\t%0,%2,%2; sltu\t%0,%0,1"; | 
|  | +} | 
|  | +  [(set_attr "type" "fcmp") | 
|  | +   (set_attr "mode" "<UNITMODE>")]) | 
|  | + | 
|  | +(define_insn "*seq_zero_<GPR:mode><GPR2:mode>" | 
|  | +  [(set (match_operand:GPR2 0 "register_operand" "=d") | 
|  | +	(eq:GPR2 (match_operand:GPR 1 "register_operand" "d") | 
|  | +		 (const_int 0)))] | 
|  | +  "" | 
|  | +  "sltu\t%0,%1,1" | 
|  | +  [(set_attr "type" "slt") | 
|  | +   (set_attr "mode" "<GPR:MODE>")]) | 
|  | + | 
|  | +(define_insn "*sne_zero_<GPR:mode><GPR2:mode>" | 
|  | +  [(set (match_operand:GPR2 0 "register_operand" "=d") | 
|  | +	(ne:GPR2 (match_operand:GPR 1 "register_operand" "d") | 
|  | +		 (const_int 0)))] | 
|  | +  "" | 
|  | +  "sltu\t%0,zero,%1" | 
|  | +  [(set_attr "type" "slt") | 
|  | +   (set_attr "mode" "<GPR:MODE>")]) | 
|  | + | 
|  | +(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>" | 
|  | +  [(set (match_operand:GPR2 0 "register_operand" "=d") | 
|  | +	(any_gt:GPR2 (match_operand:GPR 1 "register_operand" "d") | 
|  | +		     (match_operand:GPR 2 "reg_or_0_operand" "dJ")))] | 
|  | +  "" | 
|  | +  "slt<u>\t%0,%z2,%1" | 
|  | +  [(set_attr "type" "slt") | 
|  | +   (set_attr "mode" "<GPR:MODE>")]) | 
|  | + | 
|  | +(define_insn "*sge<u>_<GPR:mode><GPR2:mode>" | 
|  | +  [(set (match_operand:GPR2 0 "register_operand" "=d") | 
|  | +	(any_ge:GPR2 (match_operand:GPR 1 "register_operand" "d") | 
|  | +		     (const_int 1)))] | 
|  | +  "" | 
|  | +  "slt<u>\t%0,zero,%1" | 
|  | +  [(set_attr "type" "slt") | 
|  | +   (set_attr "mode" "<GPR:MODE>")]) | 
|  | + | 
|  | +(define_insn "*slt<u>_<GPR:mode><GPR2:mode>" | 
|  | +  [(set (match_operand:GPR2 0 "register_operand" "=d") | 
|  | +	(any_lt:GPR2 (match_operand:GPR 1 "register_operand" "d") | 
|  | +		     (match_operand:GPR 2 "arith_operand" "dI")))] | 
|  | +  "" | 
|  | +  "slt<u>\t%0,%1,%2" | 
|  | +  [(set_attr "type" "slt") | 
|  | +   (set_attr "mode" "<GPR:MODE>")]) | 
|  | + | 
|  | +(define_insn "*sle<u>_<GPR:mode><GPR2:mode>" | 
|  | +  [(set (match_operand:GPR2 0 "register_operand" "=d") | 
|  | +	(any_le:GPR2 (match_operand:GPR 1 "register_operand" "d") | 
|  | +		     (match_operand:GPR 2 "sle_operand" "")))] | 
|  | +  "" | 
|  | +{ | 
|  | +  operands[2] = GEN_INT (INTVAL (operands[2]) + 1); | 
|  | +  return "slt<u>\t%0,%1,%2"; | 
|  | +} | 
|  | +  [(set_attr "type" "slt") | 
|  | +   (set_attr "mode" "<GPR:MODE>")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	UNCONDITIONAL BRANCHES | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +;; Unconditional branches. | 
|  | + | 
|  | +(define_insn "jump" | 
|  | +  [(set (pc) | 
|  | +	(label_ref (match_operand 0 "" "")))] | 
|  | +  "" | 
|  | +  "j\t%l0" | 
|  | +  [(set_attr "type"	"jump") | 
|  | +   (set_attr "mode"	"none")]) | 
|  | + | 
|  | +(define_expand "indirect_jump" | 
|  | +  [(set (pc) (match_operand 0 "register_operand"))] | 
|  | +  "" | 
|  | +{ | 
|  | +  operands[0] = force_reg (Pmode, operands[0]); | 
|  | +  if (Pmode == SImode) | 
|  | +    emit_jump_insn (gen_indirect_jumpsi (operands[0])); | 
|  | +  else | 
|  | +    emit_jump_insn (gen_indirect_jumpdi (operands[0])); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "indirect_jump<mode>" | 
|  | +  [(set (pc) (match_operand:P 0 "register_operand" "d"))] | 
|  | +  "" | 
|  | +  "j\t%0" | 
|  | +  [(set_attr "type" "jump") | 
|  | +   (set_attr "mode" "none")]) | 
|  | + | 
|  | +(define_expand "tablejump" | 
|  | +  [(set (pc) | 
|  | +	(match_operand 0 "register_operand")) | 
|  | +   (use (label_ref (match_operand 1 "")))] | 
|  | +  "" | 
|  | +{ | 
|  | +  if (Pmode == SImode) | 
|  | +    emit_jump_insn (gen_tablejumpsi (operands[0], operands[1])); | 
|  | +  else | 
|  | +    emit_jump_insn (gen_tablejumpdi (operands[0], operands[1])); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "tablejump<mode>" | 
|  | +  [(set (pc) | 
|  | +	(match_operand:P 0 "register_operand" "d")) | 
|  | +   (use (label_ref (match_operand 1 "" "")))] | 
|  | +  "" | 
|  | +  "j\t%0" | 
|  | +  [(set_attr "type" "jump") | 
|  | +   (set_attr "mode" "none")]) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	Function prologue/epilogue | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | + | 
|  | +(define_expand "prologue" | 
|  | +  [(const_int 1)] | 
|  | +  "" | 
|  | +{ | 
|  | +  mips_expand_prologue (); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +;; Block any insns from being moved before this point, since the | 
|  | +;; profiling call to mcount can use various registers that aren't | 
|  | +;; saved or used to pass arguments. | 
|  | + | 
|  | +(define_insn "blockage" | 
|  | +  [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)] | 
|  | +  "" | 
|  | +  "" | 
|  | +  [(set_attr "type" "ghost") | 
|  | +   (set_attr "mode" "none")]) | 
|  | + | 
|  | +(define_expand "epilogue" | 
|  | +  [(const_int 2)] | 
|  | +  "" | 
|  | +{ | 
|  | +  mips_expand_epilogue (false); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_expand "sibcall_epilogue" | 
|  | +  [(const_int 2)] | 
|  | +  "" | 
|  | +{ | 
|  | +  mips_expand_epilogue (true); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +;; Trivial return.  Make it look like a normal return insn as that | 
|  | +;; allows jump optimizations to work better. | 
|  | + | 
|  | +(define_insn "return" | 
|  | +  [(return)] | 
|  | +  "mips_can_use_return_insn ()" | 
|  | +  "ret" | 
|  | +  [(set_attr "type"	"jump") | 
|  | +   (set_attr "mode"	"none")]) | 
|  | + | 
|  | +;; Normal return. | 
|  | + | 
|  | +(define_insn "return_internal" | 
|  | +  [(return) | 
|  | +   (use (match_operand 0 "pmode_register_operand" ""))] | 
|  | +  "" | 
|  | +  "j\t%0" | 
|  | +  [(set_attr "type"	"jump") | 
|  | +   (set_attr "mode"	"none")]) | 
|  | + | 
|  | +;; This is used in compiling the unwind routines. | 
|  | +(define_expand "eh_return" | 
|  | +  [(use (match_operand 0 "general_operand"))] | 
|  | +  "" | 
|  | +{ | 
|  | +  if (GET_MODE (operands[0]) != word_mode) | 
|  | +    operands[0] = convert_to_mode (word_mode, operands[0], 0); | 
|  | +  if (TARGET_64BIT) | 
|  | +    emit_insn (gen_eh_set_lr_di (operands[0])); | 
|  | +  else | 
|  | +    emit_insn (gen_eh_set_lr_si (operands[0])); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +;; Clobber the return address on the stack.  We can't expand this | 
|  | +;; until we know where it will be put in the stack frame. | 
|  | + | 
|  | +(define_insn "eh_set_lr_si" | 
|  | +  [(unspec [(match_operand:SI 0 "register_operand" "d")] UNSPEC_EH_RETURN) | 
|  | +   (clobber (match_scratch:SI 1 "=&d"))] | 
|  | +  "! TARGET_64BIT" | 
|  | +  "#") | 
|  | + | 
|  | +(define_insn "eh_set_lr_di" | 
|  | +  [(unspec [(match_operand:DI 0 "register_operand" "d")] UNSPEC_EH_RETURN) | 
|  | +   (clobber (match_scratch:DI 1 "=&d"))] | 
|  | +  "TARGET_64BIT" | 
|  | +  "#") | 
|  | + | 
|  | +(define_split | 
|  | +  [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN) | 
|  | +   (clobber (match_scratch 1))] | 
|  | +  "reload_completed" | 
|  | +  [(const_int 0)] | 
|  | +{ | 
|  | +  mips_set_return_address (operands[0], operands[1]); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_expand "exception_receiver" | 
|  | +  [(const_int 0)] | 
|  | +  "TARGET_USE_GOT" | 
|  | +{ | 
|  | +  /* See the comment above load_call<mode> for details.  */ | 
|  | +  emit_insn (gen_set_got_version ()); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_expand "nonlocal_goto_receiver" | 
|  | +  [(const_int 0)] | 
|  | +  "TARGET_USE_GOT" | 
|  | +{ | 
|  | +  /* See the comment above load_call<mode> for details.  */ | 
|  | +  emit_insn (gen_set_got_version ()); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +;; | 
|  | +;;  .................... | 
|  | +;; | 
|  | +;;	FUNCTION CALLS | 
|  | +;; | 
|  | +;;  .................... | 
|  | + | 
|  | +;; Instructions to load a call address from the GOT.  The address might | 
|  | +;; point to a function or to a lazy binding stub.  In the latter case, | 
|  | +;; the stub will use the dynamic linker to resolve the function, which | 
|  | +;; in turn will change the GOT entry to point to the function's real | 
|  | +;; address. | 
|  | +;; | 
|  | +;; This means that every call, even pure and constant ones, can | 
|  | +;; potentially modify the GOT entry.  And once a stub has been called, | 
|  | +;; we must not call it again. | 
|  | +;; | 
|  | +;; We represent this restriction using an imaginary, fixed, call-saved | 
|  | +;; register called GOT_VERSION_REGNUM.  The idea is to make the register | 
|  | +;; live throughout the function and to change its value after every | 
|  | +;; potential call site.  This stops any rtx value that uses the register | 
|  | +;; from being computed before an earlier call.  To do this, we: | 
|  | +;; | 
|  | +;;    - Ensure that the register is live on entry to the function, | 
|  | +;;	so that it is never thought to be used uninitialized. | 
|  | +;; | 
|  | +;;    - Ensure that the register is live on exit from the function, | 
|  | +;;	so that it is live throughout. | 
|  | +;; | 
|  | +;;    - Make each call (lazily-bound or not) use the current value | 
|  | +;;	of GOT_VERSION_REGNUM, so that updates of the register are | 
|  | +;;	not moved across call boundaries. | 
|  | +;; | 
|  | +;;    - Add "ghost" definitions of the register to the beginning of | 
|  | +;;	blocks reached by EH and ABNORMAL_CALL edges, because those | 
|  | +;;	edges may involve calls that normal paths don't.  (E.g. the | 
|  | +;;	unwinding code that handles a non-call exception may change | 
|  | +;;	lazily-bound GOT entries.)  We do this by making the | 
|  | +;;	exception_receiver and nonlocal_goto_receiver expanders emit | 
|  | +;;	a set_got_version instruction. | 
|  | +;; | 
|  | +;;    - After each call (lazily-bound or not), use a "ghost" | 
|  | +;;	update_got_version instruction to change the register's value. | 
|  | +;;	This instruction mimics the _possible_ effect of the dynamic | 
|  | +;;	resolver during the call and it remains live even if the call | 
|  | +;;	itself becomes dead. | 
|  | +;; | 
|  | +;;    - Leave GOT_VERSION_REGNUM out of all register classes. | 
|  | +;;	The register is therefore not a valid register_operand | 
|  | +;;	and cannot be moved to or from other registers. | 
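The mechanism described above exists because, with lazy binding, the first call through a GOT entry runs a resolver stub that overwrites that entry, so a GOT value computed before an earlier call may be stale afterwards. A minimal standalone C toy model of that hazard (not part of the patch; the GOT is modeled as a single function pointer and the dynamic linker as an ordinary function):

#include <stdio.h>

typedef void (*fn_t)(void);

static void real_fn(void) { puts("real_fn"); }

static fn_t got_entry;           /* one GOT slot */

/* Stands in for the lazy-binding stub / dynamic resolver: the call itself
   patches the GOT entry to point at the real function. */
static void lazy_stub(void) {
  puts("resolving");
  got_entry = real_fn;
  real_fn();
}

int main(void) {
  got_entry = lazy_stub;

  fn_t cached = got_entry;       /* address loaded before the first call */
  got_entry();                   /* first call: runs the stub, patches the GOT */
  got_entry();                   /* later calls go straight to real_fn */

  /* The pre-call load is now stale; reusing it would re-enter the stub.
     The GOT-version register dependency keeps GCC from performing exactly
     this kind of reuse across call sites. */
  printf("cached still points at the stub: %d\n", cached == lazy_stub);
  return 0;
}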
|  | + | 
|  | +(define_insn "load_call<mode>" | 
|  | +  [(set (match_operand:P 0 "register_operand" "=d") | 
|  | +	(unspec:P [(match_operand:P 1 "register_operand" "d") | 
|  | +		   (match_operand:P 2 "immediate_operand" "") | 
|  | +		   (reg:SI GOT_VERSION_REGNUM)] UNSPEC_LOAD_CALL))] | 
|  | +  "TARGET_USE_GOT" | 
|  | +  "<load>\t%0,%R2(%1)" | 
|  | +  [(set_attr "got" "load") | 
|  | +   (set_attr "mode" "<MODE>")]) | 
|  | + | 
|  | +(define_insn "set_got_version" | 
|  | +  [(set (reg:SI GOT_VERSION_REGNUM) | 
|  | +	(unspec_volatile:SI [(const_int 0)] UNSPEC_SET_GOT_VERSION))] | 
|  | +  "TARGET_USE_GOT" | 
|  | +  "" | 
|  | +  [(set_attr "type" "ghost")]) | 
|  | + | 
|  | +(define_insn "update_got_version" | 
|  | +  [(set (reg:SI GOT_VERSION_REGNUM) | 
|  | +	(unspec:SI [(reg:SI GOT_VERSION_REGNUM)] UNSPEC_UPDATE_GOT_VERSION))] | 
|  | +  "TARGET_USE_GOT" | 
|  | +  "" | 
|  | +  [(set_attr "type" "ghost")]) | 
|  | + | 
|  | +;; Sibling calls.  All these patterns use jump instructions. | 
|  | + | 
|  | +;; call_insn_operand will only accept constant | 
|  | +;; addresses if a direct jump is acceptable.  Since the 'S' constraint | 
|  | +;; is defined in terms of call_insn_operand, the same is true of the | 
|  | +;; constraints. | 
|  | + | 
|  | +;; When we use an indirect jump, we need a register that will be | 
|  | +;; preserved by the epilogue.  Since TARGET_ABICALLS forces us to use | 
|  | +;; t7 for this purpose, which the epilogue never clobbers, we may | 
|  | +;; as well use it in general. | 
|  | + | 
|  | +(define_expand "sibcall" | 
|  | +  [(parallel [(call (match_operand 0 "") | 
|  | +		    (match_operand 1 "")) | 
|  | +	      (use (match_operand 2 ""))	;; next_arg_reg | 
|  | +	      (use (match_operand 3 ""))])]	;; struct_value_size_rtx | 
|  | +  "" | 
|  | +{ | 
|  | +  mips_expand_call (true, NULL_RTX, XEXP (operands[0], 0), operands[1]); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "sibcall_internal" | 
|  | +  [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S")) | 
|  | +	 (match_operand 1 "" ""))] | 
|  | +  "SIBLING_CALL_P (insn)" | 
|  | +  { return REG_P (operands[0]) ? "jr\t%0" : "j\t%0"; } | 
|  | +  [(set_attr "type" "call")]) | 
|  | + | 
|  | +(define_expand "sibcall_value" | 
|  | +  [(parallel [(set (match_operand 0 "") | 
|  | +		   (call (match_operand 1 "") | 
|  | +			 (match_operand 2 ""))) | 
|  | +	      (use (match_operand 3 ""))])]		;; next_arg_reg | 
|  | +  "" | 
|  | +{ | 
|  | +  mips_expand_call (true, operands[0], XEXP (operands[1], 0), operands[2]); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "sibcall_value_internal" | 
|  | +  [(set (match_operand 0 "register_operand" "") | 
|  | +        (call (mem:SI (match_operand 1 "call_insn_operand" "j,S")) | 
|  | +              (match_operand 2 "" "")))] | 
|  | +  "SIBLING_CALL_P (insn)" | 
|  | +  { return REG_P (operands[1]) ? "jr\t%1" : "j\t%1"; } | 
|  | +  [(set_attr "type" "call")]) | 
|  | + | 
|  | +(define_insn "sibcall_value_multiple_internal" | 
|  | +  [(set (match_operand 0 "register_operand" "") | 
|  | +        (call (mem:SI (match_operand 1 "call_insn_operand" "j,S")) | 
|  | +              (match_operand 2 "" ""))) | 
|  | +   (set (match_operand 3 "register_operand" "") | 
|  | +	(call (mem:SI (match_dup 1)) | 
|  | +	      (match_dup 2)))] | 
|  | +  "SIBLING_CALL_P (insn)" | 
|  | +  { return REG_P (operands[1]) ? "jr\t%1" : "j\t%1"; } | 
|  | +  [(set_attr "type" "call")]) | 
|  | + | 
|  | +(define_expand "call" | 
|  | +  [(parallel [(call (match_operand 0 "") | 
|  | +		    (match_operand 1 "")) | 
|  | +	      (use (match_operand 2 ""))	;; next_arg_reg | 
|  | +	      (use (match_operand 3 ""))])]	;; struct_value_size_rtx | 
|  | +  "" | 
|  | +{ | 
|  | +  mips_expand_call (false, NULL_RTX, XEXP (operands[0], 0), operands[1]); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "call_internal" | 
|  | +  [(call (mem:SI (match_operand 0 "call_insn_operand" "r,S")) | 
|  | +	 (match_operand 1 "" "")) | 
|  | +   (clobber (reg:SI RETURN_ADDR_REGNUM))] | 
|  | +  "" | 
|  | +  { return REG_P (operands[0]) ? "jalr\t%0" : "jal\t%0"; } | 
|  | +  [(set_attr "jal" "indirect,direct")]) | 
|  | + | 
|  | +;; A pattern for calls that must be made directly.  It is used for | 
|  | +;; MIPS16 calls that the linker may need to redirect to a hard-float | 
|  | +;; stub; the linker relies on the call relocation type to detect when | 
|  | +;; such redirection is needed. | 
|  | +(define_insn "call_internal_direct" | 
|  | +  [(call (mem:SI (match_operand 0 "const_call_insn_operand")) | 
|  | +	 (match_operand 1)) | 
|  | +   (const_int 1) | 
|  | +   (clobber (reg:SI RETURN_ADDR_REGNUM))] | 
|  | +  "" | 
|  | +  "jal\t%0" | 
|  | +  [(set_attr "type" "call")]) | 
|  | + | 
|  | +(define_expand "call_value" | 
|  | +  [(parallel [(set (match_operand 0 "") | 
|  | +		   (call (match_operand 1 "") | 
|  | +			 (match_operand 2 ""))) | 
|  | +	      (use (match_operand 3 ""))])]		;; next_arg_reg | 
|  | +  "" | 
|  | +{ | 
|  | +  mips_expand_call (false, operands[0], XEXP (operands[1], 0), operands[2]); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +;; See comment for call_internal. | 
|  | +(define_insn "call_value_internal" | 
|  | +  [(set (match_operand 0 "register_operand" "") | 
|  | +        (call (mem:SI (match_operand 1 "call_insn_operand" "r,S")) | 
|  | +              (match_operand 2 "" ""))) | 
|  | +   (clobber (reg:SI RETURN_ADDR_REGNUM))] | 
|  | +  "" | 
|  | +  { return REG_P (operands[1]) ? "jalr\t%1" : "jal\t%1"; } | 
|  | +  [(set_attr "jal" "indirect,direct")]) | 
|  | + | 
|  | +;; See call_internal_direct. | 
|  | +(define_insn "call_value_internal_direct" | 
|  | +  [(set (match_operand 0 "register_operand") | 
|  | +        (call (mem:SI (match_operand 1 "const_call_insn_operand")) | 
|  | +              (match_operand 2))) | 
|  | +   (const_int 1) | 
|  | +   (clobber (reg:SI RETURN_ADDR_REGNUM))] | 
|  | +  "" | 
|  | +  "jal\t%1" | 
|  | +  [(set_attr "type" "call")]) | 
|  | + | 
|  | +;; See comment for call_internal. | 
|  | +(define_insn "call_value_multiple_internal" | 
|  | +  [(set (match_operand 0 "register_operand" "") | 
|  | +        (call (mem:SI (match_operand 1 "call_insn_operand" "r,S")) | 
|  | +              (match_operand 2 "" ""))) | 
|  | +   (set (match_operand 3 "register_operand" "") | 
|  | +	(call (mem:SI (match_dup 1)) | 
|  | +	      (match_dup 2))) | 
|  | +   (clobber (reg:SI RETURN_ADDR_REGNUM))] | 
|  | +  "" | 
|  | +  { return REG_P (operands[1]) ? "jalr\t%1" : "jal\t%1"; } | 
|  | +  [(set_attr "jal" "indirect,direct")]) | 
|  | + | 
|  | +;; Call subroutine returning any type. | 
|  | + | 
|  | +(define_expand "untyped_call" | 
|  | +  [(parallel [(call (match_operand 0 "") | 
|  | +		    (const_int 0)) | 
|  | +	      (match_operand 1 "") | 
|  | +	      (match_operand 2 "")])] | 
|  | +  "" | 
|  | +{ | 
|  | +  int i; | 
|  | + | 
|  | +  emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx)); | 
|  | + | 
|  | +  for (i = 0; i < XVECLEN (operands[2], 0); i++) | 
|  | +    { | 
|  | +      rtx set = XVECEXP (operands[2], 0, i); | 
|  | +      mips_emit_move (SET_DEST (set), SET_SRC (set)); | 
|  | +    } | 
|  | + | 
|  | +  emit_insn (gen_blockage ()); | 
|  | +  DONE; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "mips_cache" | 
|  | +  [(set (mem:BLK (scratch)) | 
|  | +	(unspec:BLK [(match_operand:SI 0 "const_int_operand") | 
|  | +		     (match_operand:QI 1 "address_operand" "p")] | 
|  | +		    UNSPEC_MIPS_CACHE))] | 
|  | +  "0" | 
|  | +  "cache\t%X0,%a1") | 
|  | + | 
|  | +(define_insn "nop" | 
|  | +  [(const_int 0)] | 
|  | +  "" | 
|  | +  "nop" | 
|  | +  [(set_attr "type"	"nop") | 
|  | +   (set_attr "mode"	"none")]) | 
|  | + | 
|  | + | 
|  | +(define_insn "align" | 
|  | +  [(unspec_volatile [(match_operand 0 "const_int_operand" "")] UNSPEC_ALIGN)] | 
|  | +  "" | 
|  | +  ".align\t%0" | 
|  | +  [(set (attr "length") (symbol_ref "(1 << INTVAL (operands[0])) - 1"))]) | 
|  | + | 
|  | +;; Synchronization instructions. | 
|  | + | 
|  | +(include "sync.md") | 
|  | + | 
|  | +(define_c_enum "unspec" [ | 
|  | +  UNSPEC_ADDRESS_FIRST | 
|  | +]) | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/riscv-modes.def gcc-4.9.2-riscv/gcc/config/riscv/riscv-modes.def | 
|  | --- gcc-4.9.2/gcc/config/riscv/riscv-modes.def	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/riscv-modes.def	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,101 @@ | 
|  | +/* MIPS extra machine modes. | 
|  | +   Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc. | 
|  | + | 
|  | +This file is part of GCC. | 
|  | + | 
|  | +GCC is free software; you can redistribute it and/or modify | 
|  | +it under the terms of the GNU General Public License as published by | 
|  | +the Free Software Foundation; either version 3, or (at your option) | 
|  | +any later version. | 
|  | + | 
|  | +GCC is distributed in the hope that it will be useful, | 
|  | +but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +GNU General Public License for more details. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License | 
|  | +along with GCC; see the file COPYING3.  If not see | 
|  | +<http://www.gnu.org/licenses/>.  */ | 
|  | + | 
|  | +/* MIPS has a quirky almost-IEEE format for all its | 
|  | +   floating point.  */ | 
|  | +RESET_FLOAT_FORMAT (SF, mips_single_format); | 
|  | +RESET_FLOAT_FORMAT (DF, mips_double_format); | 
|  | + | 
|  | +/* Irix6 will override this via MIPS_TFMODE_FORMAT.  */ | 
|  | +FLOAT_MODE (TF, 16, mips_quad_format); | 
|  | + | 
|  | +/* Vector modes.  */ | 
|  | +VECTOR_MODES (INT, 8);        /*       V8QI V4HI V2SI */ | 
|  | +VECTOR_MODES (FLOAT, 8);      /*            V4HF V2SF */ | 
|  | +VECTOR_MODES (INT, 4);        /*            V4QI V2HI */ | 
|  | + | 
|  | +VECTOR_MODES (FRACT, 4);	/* V4QQ  V2HQ */ | 
|  | +VECTOR_MODES (UFRACT, 4);	/* V4UQQ V2UHQ */ | 
|  | +VECTOR_MODES (ACCUM, 4);	/*       V2HA */ | 
|  | +VECTOR_MODES (UACCUM, 4);	/*       V2UHA */ | 
|  | + | 
|  | +/* Paired single comparison instructions use 2 or 4 CC.  */ | 
|  | +CC_MODE (CCV2); | 
|  | +ADJUST_BYTESIZE (CCV2, 8); | 
|  | +ADJUST_ALIGNMENT (CCV2, 8); | 
|  | + | 
|  | +CC_MODE (CCV4); | 
|  | +ADJUST_BYTESIZE (CCV4, 16); | 
|  | +ADJUST_ALIGNMENT (CCV4, 16); | 
|  | + | 
|  | +/* Eventually we want to include syscfg.h here so that we can use the | 
|  | +   common definition of RISCV_SYSCFG_VLEN_MAX, but for now it is not | 
|  | +   clear how to do this. syscfg.h is in libgloss, which is not used when | 
|  | +   building the actual cross-compiler. We would rather use the | 
|  | +   "version" in sims - the one for native programs instead of riscv | 
|  | +   programs. Even if we could include syscfg.h, we would still | 
|  | +   need to figure out a way to include it in mips-riscv.md, since the | 
|  | +   machine description file also refers to these modes. */ | 
|  | + | 
|  | +#define RISCV_SYSCFG_VLEN_MAX 32 | 
|  | + | 
|  | +/*----------------------------------------------------------------------*/ | 
|  | +/* RISCV_VECTOR_MODE_NAME                                               */ | 
|  | +/*----------------------------------------------------------------------*/ | 
|  | +/* This is a helper macro which creates a riscv vector mode name from | 
|  | +   the given inner_mode. It does this by concatenating a 'V' prefix, the | 
|  | +   maximum riscv vector length, and the inner mode together. For | 
|  | +   example, RISCV_VECTOR_MODE_NAME(SI) should expand to V32SI if the | 
|  | +   riscv maximum vector length is 32. We need to use the nested macros | 
|  | +   to make sure RISCV_SYSCFG_VLEN_MAX is expanded _before_ | 
|  | +   concatenation. */ | 
|  | + | 
|  | +#define RISCV_VECTOR_MODE_NAME_H2( res_ ) res_ | 
|  | + | 
|  | +#define RISCV_VECTOR_MODE_NAME_H1( arg0_, arg1_, arg2_ ) \ | 
|  | +  RISCV_VECTOR_MODE_NAME_H2( arg0_ ## arg1_ ## arg2_ ) | 
|  | + | 
|  | +#define RISCV_VECTOR_MODE_NAME_H0( arg0_, arg1_, arg2_ ) \ | 
|  | +  RISCV_VECTOR_MODE_NAME_H1( arg0_, arg1_, arg2_ ) | 
|  | + | 
|  | +#define RISCV_VECTOR_MODE_NAME( inner_mode_ ) \ | 
|  | +  RISCV_VECTOR_MODE_NAME_H0( V, RISCV_SYSCFG_VLEN_MAX, inner_mode_ ) | 
|  | + | 
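The two helper levels are the usual token-pasting idiom: ## pastes its operands before macro arguments are expanded, so pasting directly onto RISCV_SYSCFG_VLEN_MAX would produce the literal token name rather than 32. A small self-contained C sketch of the difference (names here are illustrative, not taken from the patch):

#include <stdio.h>

#define VLEN_MAX 32

#define PASTE3_(a, b, c) a ## b ## c          /* pastes immediately */
#define PASTE3(a, b, c)  PASTE3_(a, b, c)     /* expands args first, then pastes */

#define NAME_WRONG(m) PASTE3_(V, VLEN_MAX, m) /* -> VVLEN_MAXSI */
#define NAME_RIGHT(m) PASTE3(V, VLEN_MAX, m)  /* -> V32SI */

#define STR_(x) #x
#define STR(x)  STR_(x)

int main(void) {
  printf("%s\n", STR(NAME_WRONG(SI)));  /* prints VVLEN_MAXSI */
  printf("%s\n", STR(NAME_RIGHT(SI)));  /* prints V32SI */
  return 0;
}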
|  | +/*----------------------------------------------------------------------*/ | 
|  | +/* RISC-V Vector Modes                                                   */ | 
|  | +/*----------------------------------------------------------------------*/ | 
|  | + | 
|  | +VECTOR_MODE( INT,   DI, RISCV_SYSCFG_VLEN_MAX ); | 
|  | +VECTOR_MODE( INT,   SI, RISCV_SYSCFG_VLEN_MAX ); | 
|  | +VECTOR_MODE( INT,   HI, RISCV_SYSCFG_VLEN_MAX ); | 
|  | +VECTOR_MODE( INT,   QI, RISCV_SYSCFG_VLEN_MAX ); | 
|  | +VECTOR_MODE( FLOAT, DF, RISCV_SYSCFG_VLEN_MAX ); | 
|  | +VECTOR_MODE( FLOAT, SF, RISCV_SYSCFG_VLEN_MAX ); | 
|  | + | 
|  | +/* By default, vector types are forced to be aligned to the full vector | 
|  | +   size, but in riscv we just need them to be aligned to the element | 
|  | +   size. */ | 
|  | + | 
|  | +ADJUST_ALIGNMENT( RISCV_VECTOR_MODE_NAME(DI), 8 ); | 
|  | +ADJUST_ALIGNMENT( RISCV_VECTOR_MODE_NAME(SI), 4 ); | 
|  | +ADJUST_ALIGNMENT( RISCV_VECTOR_MODE_NAME(HI), 2 ); | 
|  | +ADJUST_ALIGNMENT( RISCV_VECTOR_MODE_NAME(QI), 1 ); | 
|  | +ADJUST_ALIGNMENT( RISCV_VECTOR_MODE_NAME(DF), 8 ); | 
|  | +ADJUST_ALIGNMENT( RISCV_VECTOR_MODE_NAME(SF), 4 ); | 
|  | + | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/riscv-opc.h gcc-4.9.2-riscv/gcc/config/riscv/riscv-opc.h | 
|  | --- gcc-4.9.2/gcc/config/riscv/riscv-opc.h	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/riscv-opc.h	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,571 @@ | 
|  | +/* Automatically generated by parse-opcodes */ | 
|  | +#define MATCH_MOVN 0x6f7 | 
|  | +#define  MASK_MOVN 0x1ffff | 
|  | +#define MATCH_VFSSTW 0x150f | 
|  | +#define  MASK_VFSSTW 0x1ffff | 
|  | +#define MATCH_REMUW 0x7bb | 
|  | +#define  MASK_REMUW 0x1ffff | 
|  | +#define MATCH_FMIN_D 0x180d3 | 
|  | +#define  MASK_FMIN_D 0x1ffff | 
|  | +#define MATCH_LR_W 0x1012b | 
|  | +#define  MASK_LR_W 0x3fffff | 
|  | +#define MATCH_VLSTHU 0x128b | 
|  | +#define  MASK_VLSTHU 0x1ffff | 
|  | +#define MATCH_C_SWSP 0x8 | 
|  | +#define  MASK_C_SWSP 0x1f | 
|  | +#define MATCH_BLTU 0x363 | 
|  | +#define  MASK_BLTU 0x3ff | 
|  | +#define MATCH_VLSEGSTWU 0xb0b | 
|  | +#define  MASK_VLSEGSTWU 0xfff | 
|  | +#define MATCH_VVCFG 0x473 | 
|  | +#define  MASK_VVCFG 0xf801ffff | 
|  | +#define MATCH_MOVZ 0x2f7 | 
|  | +#define  MASK_MOVZ 0x1ffff | 
|  | +#define MATCH_C_LD 0x9 | 
|  | +#define  MASK_C_LD 0x1f | 
|  | +#define MATCH_C_SRLI32 0xc19 | 
|  | +#define  MASK_C_SRLI32 0x1c1f | 
|  | +#define MATCH_FMIN_S 0x18053 | 
|  | +#define  MASK_FMIN_S 0x1ffff | 
|  | +#define MATCH_C_LW0 0x12 | 
|  | +#define  MASK_C_LW0 0x801f | 
|  | +#define MATCH_SLLIW 0x9b | 
|  | +#define  MASK_SLLIW 0x3f83ff | 
|  | +#define MATCH_LB 0x3 | 
|  | +#define  MASK_LB 0x3ff | 
|  | +#define MATCH_VLWU 0x30b | 
|  | +#define  MASK_VLWU 0x3fffff | 
|  | +#define MATCH_FCVT_S_WU 0xf053 | 
|  | +#define  MASK_FCVT_S_WU 0x3ff1ff | 
|  | +#define MATCH_FCVT_D_L 0xc0d3 | 
|  | +#define  MASK_FCVT_D_L 0x3ff1ff | 
|  | +#define MATCH_LH 0x83 | 
|  | +#define  MASK_LH 0x3ff | 
|  | +#define MATCH_FCVT_D_W 0xe0d3 | 
|  | +#define  MASK_FCVT_D_W 0x3ff1ff | 
|  | +#define MATCH_LW 0x103 | 
|  | +#define  MASK_LW 0x3ff | 
|  | +#define MATCH_ADD 0x33 | 
|  | +#define  MASK_ADD 0x1ffff | 
|  | +#define MATCH_FCVT_D_S 0x100d3 | 
|  | +#define  MASK_FCVT_D_S 0x3ff1ff | 
|  | +#define MATCH_MFPCR 0x17b | 
|  | +#define  MASK_MFPCR 0x3fffff | 
|  | +#define MATCH_C_FSD 0x18 | 
|  | +#define  MASK_C_FSD 0x1f | 
|  | +#define MATCH_FMAX_D 0x190d3 | 
|  | +#define  MASK_FMAX_D 0x1ffff | 
|  | +#define MATCH_BNE 0xe3 | 
|  | +#define  MASK_BNE 0x3ff | 
|  | +#define MATCH_RDCYCLE 0x277 | 
|  | +#define  MASK_RDCYCLE 0x7ffffff | 
|  | +#define MATCH_FCVT_S_D 0x11053 | 
|  | +#define  MASK_FCVT_S_D 0x3ff1ff | 
|  | +#define MATCH_VLH 0x8b | 
|  | +#define  MASK_VLH 0x3fffff | 
|  | +#define MATCH_BGEU 0x3e3 | 
|  | +#define  MASK_BGEU 0x3ff | 
|  | +#define MATCH_VFLSTD 0x158b | 
|  | +#define  MASK_VFLSTD 0x1ffff | 
|  | +#define MATCH_C_LI 0x0 | 
|  | +#define  MASK_C_LI 0x1f | 
|  | +#define MATCH_FADD_D 0xd3 | 
|  | +#define  MASK_FADD_D 0x1f1ff | 
|  | +#define MATCH_SLTIU 0x193 | 
|  | +#define  MASK_SLTIU 0x3ff | 
|  | +#define MATCH_MTPCR 0x1fb | 
|  | +#define  MASK_MTPCR 0x1ffff | 
|  | +#define MATCH_VLB 0xb | 
|  | +#define  MASK_VLB 0x3fffff | 
|  | +#define MATCH_STOP 0x177 | 
|  | +#define  MASK_STOP 0xffffffff | 
|  | +#define MATCH_VLD 0x18b | 
|  | +#define  MASK_VLD 0x3fffff | 
|  | +#define MATCH_C_SLLI 0x19 | 
|  | +#define  MASK_C_SLLI 0x1c1f | 
|  | +#define MATCH_BREAK 0xf7 | 
|  | +#define  MASK_BREAK 0xffffffff | 
|  | +#define MATCH_CFLUSH 0x2fb | 
|  | +#define  MASK_CFLUSH 0xffffffff | 
|  | +#define MATCH_FCVT_S_W 0xe053 | 
|  | +#define  MASK_FCVT_S_W 0x3ff1ff | 
|  | +#define MATCH_VFLSTW 0x150b | 
|  | +#define  MASK_VFLSTW 0x1ffff | 
|  | +#define MATCH_MUL 0x433 | 
|  | +#define  MASK_MUL 0x1ffff | 
|  | +#define MATCH_C_LW 0xa | 
|  | +#define  MASK_C_LW 0x1f | 
|  | +#define MATCH_VXCPTEVAC 0x237b | 
|  | +#define  MASK_VXCPTEVAC 0xf83fffff | 
|  | +#define MATCH_VLW 0x10b | 
|  | +#define  MASK_VLW 0x3fffff | 
|  | +#define MATCH_VSSEGSTW 0x90f | 
|  | +#define  MASK_VSSEGSTW 0xfff | 
|  | +#define MATCH_AMOMINU_D 0x19ab | 
|  | +#define  MASK_AMOMINU_D 0x1ffff | 
|  | +#define MATCH_C_SDSP 0x6 | 
|  | +#define  MASK_C_SDSP 0x1f | 
|  | +#define MATCH_UTIDX 0x1f7 | 
|  | +#define  MASK_UTIDX 0x7ffffff | 
|  | +#define MATCH_SRLI 0x293 | 
|  | +#define  MASK_SRLI 0x3f03ff | 
|  | +#define MATCH_C_SRLI 0x819 | 
|  | +#define  MASK_C_SRLI 0x1c1f | 
|  | +#define MATCH_C_LDSP 0x4 | 
|  | +#define  MASK_C_LDSP 0x1f | 
|  | +#define MATCH_C_FLW 0x14 | 
|  | +#define  MASK_C_FLW 0x1f | 
|  | +#define MATCH_C_SRAI32 0x1419 | 
|  | +#define  MASK_C_SRAI32 0x1c1f | 
|  | +#define MATCH_AMOMINU_W 0x192b | 
|  | +#define  MASK_AMOMINU_W 0x1ffff | 
|  | +#define MATCH_DIVUW 0x6bb | 
|  | +#define  MASK_DIVUW 0x1ffff | 
|  | +#define MATCH_MULW 0x43b | 
|  | +#define  MASK_MULW 0x1ffff | 
|  | +#define MATCH_VSSEGSTD 0x98f | 
|  | +#define  MASK_VSSEGSTD 0xfff | 
|  | +#define MATCH_SRLW 0x2bb | 
|  | +#define  MASK_SRLW 0x1ffff | 
|  | +#define MATCH_VSSEGSTB 0x80f | 
|  | +#define  MASK_VSSEGSTB 0xfff | 
|  | +#define MATCH_MFTX_D 0x1c0d3 | 
|  | +#define  MASK_MFTX_D 0x3fffff | 
|  | +#define MATCH_DIV 0x633 | 
|  | +#define  MASK_DIV 0x1ffff | 
|  | +#define MATCH_VTCFG 0xc73 | 
|  | +#define  MASK_VTCFG 0xf801ffff | 
|  | +#define MATCH_MFTX_S 0x1c053 | 
|  | +#define  MASK_MFTX_S 0x3fffff | 
|  | +#define MATCH_VSSEGSTH 0x88f | 
|  | +#define  MASK_VSSEGSTH 0xfff | 
|  | +#define MATCH_VVCFGIVL 0xf3 | 
|  | +#define  MASK_VVCFGIVL 0x3ff | 
|  | +#define MATCH_J 0x67 | 
|  | +#define  MASK_J 0x7f | 
|  | +#define MATCH_FENCE 0x12f | 
|  | +#define  MASK_FENCE 0x3ff | 
|  | +#define MATCH_VSW 0x10f | 
|  | +#define  MASK_VSW 0x3fffff | 
|  | +#define MATCH_FNMSUB_S 0x4b | 
|  | +#define  MASK_FNMSUB_S 0x1ff | 
|  | +#define MATCH_VFSSEGSTD 0xd8f | 
|  | +#define  MASK_VFSSEGSTD 0xfff | 
|  | +#define MATCH_FCVT_L_S 0x8053 | 
|  | +#define  MASK_FCVT_L_S 0x3ff1ff | 
|  | +#define MATCH_FLE_S 0x17053 | 
|  | +#define  MASK_FLE_S 0x1ffff | 
|  | +#define MATCH_FENCE_V_L 0x22f | 
|  | +#define  MASK_FENCE_V_L 0x3ff | 
|  | +#define MATCH_VSB 0xf | 
|  | +#define  MASK_VSB 0x3fffff | 
|  | +#define MATCH_MFFSR 0x1d053 | 
|  | +#define  MASK_MFFSR 0x7ffffff | 
|  | +#define MATCH_FDIV_S 0x3053 | 
|  | +#define  MASK_FDIV_S 0x1f1ff | 
|  | +#define MATCH_VLSTBU 0x120b | 
|  | +#define  MASK_VLSTBU 0x1ffff | 
|  | +#define MATCH_VSETVL 0x2f3 | 
|  | +#define  MASK_VSETVL 0x3fffff | 
|  | +#define MATCH_FLE_D 0x170d3 | 
|  | +#define  MASK_FLE_D 0x1ffff | 
|  | +#define MATCH_FENCE_I 0xaf | 
|  | +#define  MASK_FENCE_I 0x3ff | 
|  | +#define MATCH_VLSEGBU 0x220b | 
|  | +#define  MASK_VLSEGBU 0x1ffff | 
|  | +#define MATCH_FNMSUB_D 0xcb | 
|  | +#define  MASK_FNMSUB_D 0x1ff | 
|  | +#define MATCH_ADDW 0x3b | 
|  | +#define  MASK_ADDW 0x1ffff | 
|  | +#define MATCH_SLL 0xb3 | 
|  | +#define  MASK_SLL 0x1ffff | 
|  | +#define MATCH_XOR 0x233 | 
|  | +#define  MASK_XOR 0x1ffff | 
|  | +#define MATCH_SUB 0x10033 | 
|  | +#define  MASK_SUB 0x1ffff | 
|  | +#define MATCH_ERET 0x27b | 
|  | +#define  MASK_ERET 0xffffffff | 
|  | +#define MATCH_BLT 0x263 | 
|  | +#define  MASK_BLT 0x3ff | 
|  | +#define MATCH_VSSTW 0x110f | 
|  | +#define  MASK_VSSTW 0x1ffff | 
|  | +#define MATCH_MTFSR 0x1f053 | 
|  | +#define  MASK_MTFSR 0x3fffff | 
|  | +#define MATCH_VSSTH 0x108f | 
|  | +#define  MASK_VSSTH 0x1ffff | 
|  | +#define MATCH_SC_W 0x1052b | 
|  | +#define  MASK_SC_W 0x1ffff | 
|  | +#define MATCH_REM 0x733 | 
|  | +#define  MASK_REM 0x1ffff | 
|  | +#define MATCH_SRLIW 0x29b | 
|  | +#define  MASK_SRLIW 0x3f83ff | 
|  | +#define MATCH_LUI 0x37 | 
|  | +#define  MASK_LUI 0x7f | 
|  | +#define MATCH_VSSTB 0x100f | 
|  | +#define  MASK_VSSTB 0x1ffff | 
|  | +#define MATCH_FCVT_S_LU 0xd053 | 
|  | +#define  MASK_FCVT_S_LU 0x3ff1ff | 
|  | +#define MATCH_VSSTD 0x118f | 
|  | +#define  MASK_VSSTD 0x1ffff | 
|  | +#define MATCH_ADDI 0x13 | 
|  | +#define  MASK_ADDI 0x3ff | 
|  | +#define MATCH_VFMST 0x1173 | 
|  | +#define  MASK_VFMST 0x1ffff | 
|  | +#define MATCH_MULH 0x4b3 | 
|  | +#define  MASK_MULH 0x1ffff | 
|  | +#define MATCH_FMUL_S 0x2053 | 
|  | +#define  MASK_FMUL_S 0x1f1ff | 
|  | +#define MATCH_VLSEGSTHU 0xa8b | 
|  | +#define  MASK_VLSEGSTHU 0xfff | 
|  | +#define MATCH_SRAI 0x10293 | 
|  | +#define  MASK_SRAI 0x3f03ff | 
|  | +#define MATCH_AMOAND_D 0x9ab | 
|  | +#define  MASK_AMOAND_D 0x1ffff | 
|  | +#define MATCH_FLT_D 0x160d3 | 
|  | +#define  MASK_FLT_D 0x1ffff | 
|  | +#define MATCH_SRAW 0x102bb | 
|  | +#define  MASK_SRAW 0x1ffff | 
|  | +#define MATCH_FMUL_D 0x20d3 | 
|  | +#define  MASK_FMUL_D 0x1f1ff | 
|  | +#define MATCH_LD 0x183 | 
|  | +#define  MASK_LD 0x3ff | 
|  | +#define MATCH_ORI 0x313 | 
|  | +#define  MASK_ORI 0x3ff | 
|  | +#define MATCH_FLT_S 0x16053 | 
|  | +#define  MASK_FLT_S 0x1ffff | 
|  | +#define MATCH_ADDIW 0x1b | 
|  | +#define  MASK_ADDIW 0x3ff | 
|  | +#define MATCH_AMOAND_W 0x92b | 
|  | +#define  MASK_AMOAND_W 0x1ffff | 
|  | +#define MATCH_FEQ_S 0x15053 | 
|  | +#define  MASK_FEQ_S 0x1ffff | 
|  | +#define MATCH_FSGNJX_D 0x70d3 | 
|  | +#define  MASK_FSGNJX_D 0x1ffff | 
|  | +#define MATCH_SRA 0x102b3 | 
|  | +#define  MASK_SRA 0x1ffff | 
|  | +#define MATCH_C_LWSP 0x5 | 
|  | +#define  MASK_C_LWSP 0x1f | 
|  | +#define MATCH_BGE 0x2e3 | 
|  | +#define  MASK_BGE 0x3ff | 
|  | +#define MATCH_C_ADD3 0x1c | 
|  | +#define  MASK_C_ADD3 0x31f | 
|  | +#define MATCH_SRAIW 0x1029b | 
|  | +#define  MASK_SRAIW 0x3f83ff | 
|  | +#define MATCH_VSSEGD 0x218f | 
|  | +#define  MASK_VSSEGD 0x1ffff | 
|  | +#define MATCH_SRL 0x2b3 | 
|  | +#define  MASK_SRL 0x1ffff | 
|  | +#define MATCH_VENQCMD 0x2b7b | 
|  | +#define  MASK_VENQCMD 0xf801ffff | 
|  | +#define MATCH_FSUB_D 0x10d3 | 
|  | +#define  MASK_FSUB_D 0x1f1ff | 
|  | +#define MATCH_VFMTS 0x1973 | 
|  | +#define  MASK_VFMTS 0x1ffff | 
|  | +#define MATCH_VENQIMM1 0x2f7b | 
|  | +#define  MASK_VENQIMM1 0xf801ffff | 
|  | +#define MATCH_FSGNJX_S 0x7053 | 
|  | +#define  MASK_FSGNJX_S 0x1ffff | 
|  | +#define MATCH_VFMSV 0x973 | 
|  | +#define  MASK_VFMSV 0x3fffff | 
|  | +#define MATCH_VENQIMM2 0x337b | 
|  | +#define  MASK_VENQIMM2 0xf801ffff | 
|  | +#define MATCH_FCVT_D_WU 0xf0d3 | 
|  | +#define  MASK_FCVT_D_WU 0x3ff1ff | 
|  | +#define MATCH_VXCPTRESTORE 0x77b | 
|  | +#define  MASK_VXCPTRESTORE 0xf83fffff | 
|  | +#define MATCH_VMTS 0x1873 | 
|  | +#define  MASK_VMTS 0x1ffff | 
|  | +#define MATCH_OR 0x333 | 
|  | +#define  MASK_OR 0x1ffff | 
|  | +#define MATCH_RDINSTRET 0xa77 | 
|  | +#define  MASK_RDINSTRET 0x7ffffff | 
|  | +#define MATCH_FCVT_WU_D 0xb0d3 | 
|  | +#define  MASK_FCVT_WU_D 0x3ff1ff | 
|  | +#define MATCH_SUBW 0x1003b | 
|  | +#define  MASK_SUBW 0x1ffff | 
|  | +#define MATCH_JALR_C 0x6b | 
|  | +#define  MASK_JALR_C 0x3ff | 
|  | +#define MATCH_FMAX_S 0x19053 | 
|  | +#define  MASK_FMAX_S 0x1ffff | 
|  | +#define MATCH_AMOMAXU_D 0x1dab | 
|  | +#define  MASK_AMOMAXU_D 0x1ffff | 
|  | +#define MATCH_C_SLLIW 0x1819 | 
|  | +#define  MASK_C_SLLIW 0x1c1f | 
|  | +#define MATCH_JALR_J 0x16b | 
|  | +#define  MASK_JALR_J 0x3ff | 
|  | +#define MATCH_C_FLD 0x15 | 
|  | +#define  MASK_C_FLD 0x1f | 
|  | +#define MATCH_VLSTW 0x110b | 
|  | +#define  MASK_VLSTW 0x1ffff | 
|  | +#define MATCH_VLSTH 0x108b | 
|  | +#define  MASK_VLSTH 0x1ffff | 
|  | +#define MATCH_XORI 0x213 | 
|  | +#define  MASK_XORI 0x3ff | 
|  | +#define MATCH_JALR_R 0xeb | 
|  | +#define  MASK_JALR_R 0x3ff | 
|  | +#define MATCH_AMOMAXU_W 0x1d2b | 
|  | +#define  MASK_AMOMAXU_W 0x1ffff | 
|  | +#define MATCH_FCVT_WU_S 0xb053 | 
|  | +#define  MASK_FCVT_WU_S 0x3ff1ff | 
|  | +#define MATCH_VLSTB 0x100b | 
|  | +#define  MASK_VLSTB 0x1ffff | 
|  | +#define MATCH_VLSTD 0x118b | 
|  | +#define  MASK_VLSTD 0x1ffff | 
|  | +#define MATCH_C_LD0 0x8012 | 
|  | +#define  MASK_C_LD0 0x801f | 
|  | +#define MATCH_RDTIME 0x677 | 
|  | +#define  MASK_RDTIME 0x7ffffff | 
|  | +#define MATCH_ANDI 0x393 | 
|  | +#define  MASK_ANDI 0x3ff | 
|  | +#define MATCH_CLEARPCR 0x7b | 
|  | +#define  MASK_CLEARPCR 0x3ff | 
|  | +#define MATCH_VENQCNT 0x377b | 
|  | +#define  MASK_VENQCNT 0xf801ffff | 
|  | +#define MATCH_FSGNJN_D 0x60d3 | 
|  | +#define  MASK_FSGNJN_D 0x1ffff | 
|  | +#define MATCH_FNMADD_S 0x4f | 
|  | +#define  MASK_FNMADD_S 0x1ff | 
|  | +#define MATCH_JAL 0x6f | 
|  | +#define  MASK_JAL 0x7f | 
|  | +#define MATCH_LWU 0x303 | 
|  | +#define  MASK_LWU 0x3ff | 
|  | +#define MATCH_VLSEGSTBU 0xa0b | 
|  | +#define  MASK_VLSEGSTBU 0xfff | 
|  | +#define MATCH_C_BEQ 0x10 | 
|  | +#define  MASK_C_BEQ 0x1f | 
|  | +#define MATCH_VLHU 0x28b | 
|  | +#define  MASK_VLHU 0x3fffff | 
|  | +#define MATCH_VFSSTD 0x158f | 
|  | +#define  MASK_VFSSTD 0x1ffff | 
|  | +#define MATCH_C_BNE 0x11 | 
|  | +#define  MASK_C_BNE 0x1f | 
|  | +#define MATCH_FNMADD_D 0xcf | 
|  | +#define  MASK_FNMADD_D 0x1ff | 
|  | +#define MATCH_AMOADD_D 0x1ab | 
|  | +#define  MASK_AMOADD_D 0x1ffff | 
|  | +#define MATCH_C_SW 0xd | 
|  | +#define  MASK_C_SW 0x1f | 
|  | +#define MATCH_LR_D 0x101ab | 
|  | +#define  MASK_LR_D 0x3fffff | 
|  | +#define MATCH_C_MOVE 0x2 | 
|  | +#define  MASK_C_MOVE 0x801f | 
|  | +#define MATCH_FMOVN 0xef7 | 
|  | +#define  MASK_FMOVN 0x1ffff | 
|  | +#define MATCH_C_FSW 0x16 | 
|  | +#define  MASK_C_FSW 0x1f | 
|  | +#define MATCH_C_J 0x8002 | 
|  | +#define  MASK_C_J 0x801f | 
|  | +#define MATCH_MULHSU 0x533 | 
|  | +#define  MASK_MULHSU 0x1ffff | 
|  | +#define MATCH_C_SD 0xc | 
|  | +#define  MASK_C_SD 0x1f | 
|  | +#define MATCH_AMOADD_W 0x12b | 
|  | +#define  MASK_AMOADD_W 0x1ffff | 
|  | +#define MATCH_FCVT_D_LU 0xd0d3 | 
|  | +#define  MASK_FCVT_D_LU 0x3ff1ff | 
|  | +#define MATCH_AMOMAX_D 0x15ab | 
|  | +#define  MASK_AMOMAX_D 0x1ffff | 
|  | +#define MATCH_FSD 0x1a7 | 
|  | +#define  MASK_FSD 0x3ff | 
|  | +#define MATCH_FCVT_W_D 0xa0d3 | 
|  | +#define  MASK_FCVT_W_D 0x3ff1ff | 
|  | +#define MATCH_FMOVZ 0xaf7 | 
|  | +#define  MASK_FMOVZ 0x1ffff | 
|  | +#define MATCH_FEQ_D 0x150d3 | 
|  | +#define  MASK_FEQ_D 0x1ffff | 
|  | +#define MATCH_C_OR3 0x21c | 
|  | +#define  MASK_C_OR3 0x31f | 
|  | +#define MATCH_VMVV 0x73 | 
|  | +#define  MASK_VMVV 0x3fffff | 
|  | +#define MATCH_VFSSEGSTW 0xd0f | 
|  | +#define  MASK_VFSSEGSTW 0xfff | 
|  | +#define MATCH_SLT 0x133 | 
|  | +#define  MASK_SLT 0x1ffff | 
|  | +#define MATCH_MXTF_D 0x1e0d3 | 
|  | +#define  MASK_MXTF_D 0x3fffff | 
|  | +#define MATCH_SLLW 0xbb | 
|  | +#define  MASK_SLLW 0x1ffff | 
|  | +#define MATCH_AMOOR_D 0xdab | 
|  | +#define  MASK_AMOOR_D 0x1ffff | 
|  | +#define MATCH_SLTI 0x113 | 
|  | +#define  MASK_SLTI 0x3ff | 
|  | +#define MATCH_REMU 0x7b3 | 
|  | +#define  MASK_REMU 0x1ffff | 
|  | +#define MATCH_FLW 0x107 | 
|  | +#define  MASK_FLW 0x3ff | 
|  | +#define MATCH_REMW 0x73b | 
|  | +#define  MASK_REMW 0x1ffff | 
|  | +#define MATCH_SLTU 0x1b3 | 
|  | +#define  MASK_SLTU 0x1ffff | 
|  | +#define MATCH_SLLI 0x93 | 
|  | +#define  MASK_SLLI 0x3f03ff | 
|  | +#define MATCH_C_AND3 0x31c | 
|  | +#define  MASK_C_AND3 0x31f | 
|  | +#define MATCH_VSSEGW 0x210f | 
|  | +#define  MASK_VSSEGW 0x1ffff | 
|  | +#define MATCH_AMOOR_W 0xd2b | 
|  | +#define  MASK_AMOOR_W 0x1ffff | 
|  | +#define MATCH_VSD 0x18f | 
|  | +#define  MASK_VSD 0x3fffff | 
|  | +#define MATCH_BEQ 0x63 | 
|  | +#define  MASK_BEQ 0x3ff | 
|  | +#define MATCH_FLD 0x187 | 
|  | +#define  MASK_FLD 0x3ff | 
|  | +#define MATCH_MXTF_S 0x1e053 | 
|  | +#define  MASK_MXTF_S 0x3fffff | 
|  | +#define MATCH_FSUB_S 0x1053 | 
|  | +#define  MASK_FSUB_S 0x1f1ff | 
|  | +#define MATCH_AND 0x3b3 | 
|  | +#define  MASK_AND 0x1ffff | 
|  | +#define MATCH_VTCFGIVL 0x1f3 | 
|  | +#define  MASK_VTCFGIVL 0x3ff | 
|  | +#define MATCH_LBU 0x203 | 
|  | +#define  MASK_LBU 0x3ff | 
|  | +#define MATCH_VF 0x3f3 | 
|  | +#define  MASK_VF 0xf80003ff | 
|  | +#define MATCH_VLSEGSTW 0x90b | 
|  | +#define  MASK_VLSEGSTW 0xfff | 
|  | +#define MATCH_SYSCALL 0x77 | 
|  | +#define  MASK_SYSCALL 0xffffffff | 
|  | +#define MATCH_FSGNJ_S 0x5053 | 
|  | +#define  MASK_FSGNJ_S 0x1ffff | 
|  | +#define MATCH_C_ADDI 0x1 | 
|  | +#define  MASK_C_ADDI 0x1f | 
|  | +#define MATCH_VFMVV 0x173 | 
|  | +#define  MASK_VFMVV 0x3fffff | 
|  | +#define MATCH_VLSTWU 0x130b | 
|  | +#define  MASK_VLSTWU 0x1ffff | 
|  | +#define MATCH_C_SUB3 0x11c | 
|  | +#define  MASK_C_SUB3 0x31f | 
|  | +#define MATCH_VSH 0x8f | 
|  | +#define  MASK_VSH 0x3fffff | 
|  | +#define MATCH_VLSEGSTB 0x80b | 
|  | +#define  MASK_VLSEGSTB 0xfff | 
|  | +#define MATCH_VXCPTSAVE 0x37b | 
|  | +#define  MASK_VXCPTSAVE 0xf83fffff | 
|  | +#define MATCH_VLSEGSTD 0x98b | 
|  | +#define  MASK_VLSEGSTD 0xfff | 
|  | +#define MATCH_VFLSEGD 0x258b | 
|  | +#define  MASK_VFLSEGD 0x1ffff | 
|  | +#define MATCH_VFLSEGW 0x250b | 
|  | +#define  MASK_VFLSEGW 0x1ffff | 
|  | +#define MATCH_VLSEGSTH 0x88b | 
|  | +#define  MASK_VLSEGSTH 0xfff | 
|  | +#define MATCH_AMOMAX_W 0x152b | 
|  | +#define  MASK_AMOMAX_W 0x1ffff | 
|  | +#define MATCH_FSGNJ_D 0x50d3 | 
|  | +#define  MASK_FSGNJ_D 0x1ffff | 
|  | +#define MATCH_VFLSEGSTW 0xd0b | 
|  | +#define  MASK_VFLSEGSTW 0xfff | 
|  | +#define MATCH_C_SUB 0x801a | 
|  | +#define  MASK_C_SUB 0x801f | 
|  | +#define MATCH_MULHU 0x5b3 | 
|  | +#define  MASK_MULHU 0x1ffff | 
|  | +#define MATCH_FENCE_V_G 0x2af | 
|  | +#define  MASK_FENCE_V_G 0x3ff | 
|  | +#define MATCH_VMSV 0x873 | 
|  | +#define  MASK_VMSV 0x3fffff | 
|  | +#define MATCH_VMST 0x1073 | 
|  | +#define  MASK_VMST 0x1ffff | 
|  | +#define MATCH_SETPCR 0xfb | 
|  | +#define  MASK_SETPCR 0x3ff | 
|  | +#define MATCH_FCVT_LU_S 0x9053 | 
|  | +#define  MASK_FCVT_LU_S 0x3ff1ff | 
|  | +#define MATCH_VXCPTHOLD 0x277b | 
|  | +#define  MASK_VXCPTHOLD 0xffffffff | 
|  | +#define MATCH_FCVT_S_L 0xc053 | 
|  | +#define  MASK_FCVT_S_L 0x3ff1ff | 
|  | +#define MATCH_VFLSEGSTD 0xd8b | 
|  | +#define  MASK_VFLSEGSTD 0xfff | 
|  | +#define MATCH_AUIPC 0x17 | 
|  | +#define  MASK_AUIPC 0x7f | 
|  | +#define MATCH_C_ADD 0x1a | 
|  | +#define  MASK_C_ADD 0x801f | 
|  | +#define MATCH_FCVT_LU_D 0x90d3 | 
|  | +#define  MASK_FCVT_LU_D 0x3ff1ff | 
|  | +#define MATCH_VFLD 0x58b | 
|  | +#define  MASK_VFLD 0x3fffff | 
|  | +#define MATCH_SC_D 0x105ab | 
|  | +#define  MASK_SC_D 0x1ffff | 
|  | +#define MATCH_FMADD_S 0x43 | 
|  | +#define  MASK_FMADD_S 0x1ff | 
|  | +#define MATCH_FCVT_W_S 0xa053 | 
|  | +#define  MASK_FCVT_W_S 0x3ff1ff | 
|  | +#define MATCH_VSSEGH 0x208f | 
|  | +#define  MASK_VSSEGH 0x1ffff | 
|  | +#define MATCH_FSQRT_S 0x4053 | 
|  | +#define  MASK_FSQRT_S 0x3ff1ff | 
|  | +#define MATCH_VXCPTKILL 0xb7b | 
|  | +#define  MASK_VXCPTKILL 0xffffffff | 
|  | +#define MATCH_C_SRAI 0x1019 | 
|  | +#define  MASK_C_SRAI 0x1c1f | 
|  | +#define MATCH_AMOMIN_W 0x112b | 
|  | +#define  MASK_AMOMIN_W 0x1ffff | 
|  | +#define MATCH_FSGNJN_S 0x6053 | 
|  | +#define  MASK_FSGNJN_S 0x1ffff | 
|  | +#define MATCH_C_SLLI32 0x419 | 
|  | +#define  MASK_C_SLLI32 0x1c1f | 
|  | +#define MATCH_VLSEGWU 0x230b | 
|  | +#define  MASK_VLSEGWU 0x1ffff | 
|  | +#define MATCH_VFSW 0x50f | 
|  | +#define  MASK_VFSW 0x3fffff | 
|  | +#define MATCH_AMOSWAP_D 0x5ab | 
|  | +#define  MASK_AMOSWAP_D 0x1ffff | 
|  | +#define MATCH_FSQRT_D 0x40d3 | 
|  | +#define  MASK_FSQRT_D 0x3ff1ff | 
|  | +#define MATCH_VFLW 0x50b | 
|  | +#define  MASK_VFLW 0x3fffff | 
|  | +#define MATCH_FDIV_D 0x30d3 | 
|  | +#define  MASK_FDIV_D 0x1f1ff | 
|  | +#define MATCH_FMADD_D 0xc3 | 
|  | +#define  MASK_FMADD_D 0x1ff | 
|  | +#define MATCH_DIVW 0x63b | 
|  | +#define  MASK_DIVW 0x1ffff | 
|  | +#define MATCH_AMOMIN_D 0x11ab | 
|  | +#define  MASK_AMOMIN_D 0x1ffff | 
|  | +#define MATCH_DIVU 0x6b3 | 
|  | +#define  MASK_DIVU 0x1ffff | 
|  | +#define MATCH_AMOSWAP_W 0x52b | 
|  | +#define  MASK_AMOSWAP_W 0x1ffff | 
|  | +#define MATCH_VFSD 0x58f | 
|  | +#define  MASK_VFSD 0x3fffff | 
|  | +#define MATCH_FADD_S 0x53 | 
|  | +#define  MASK_FADD_S 0x1f1ff | 
|  | +#define MATCH_VLSEGB 0x200b | 
|  | +#define  MASK_VLSEGB 0x1ffff | 
|  | +#define MATCH_FCVT_L_D 0x80d3 | 
|  | +#define  MASK_FCVT_L_D 0x3ff1ff | 
|  | +#define MATCH_VLSEGD 0x218b | 
|  | +#define  MASK_VLSEGD 0x1ffff | 
|  | +#define MATCH_VLSEGH 0x208b | 
|  | +#define  MASK_VLSEGH 0x1ffff | 
|  | +#define MATCH_SW 0x123 | 
|  | +#define  MASK_SW 0x3ff | 
|  | +#define MATCH_FMSUB_S 0x47 | 
|  | +#define  MASK_FMSUB_S 0x1ff | 
|  | +#define MATCH_VFSSEGW 0x250f | 
|  | +#define  MASK_VFSSEGW 0x1ffff | 
|  | +#define MATCH_C_ADDIW 0x1d | 
|  | +#define  MASK_C_ADDIW 0x1f | 
|  | +#define MATCH_LHU 0x283 | 
|  | +#define  MASK_LHU 0x3ff | 
|  | +#define MATCH_SH 0xa3 | 
|  | +#define  MASK_SH 0x3ff | 
|  | +#define MATCH_VLSEGW 0x210b | 
|  | +#define  MASK_VLSEGW 0x1ffff | 
|  | +#define MATCH_FSW 0x127 | 
|  | +#define  MASK_FSW 0x3ff | 
|  | +#define MATCH_VLBU 0x20b | 
|  | +#define  MASK_VLBU 0x3fffff | 
|  | +#define MATCH_SB 0x23 | 
|  | +#define  MASK_SB 0x3ff | 
|  | +#define MATCH_FMSUB_D 0xc7 | 
|  | +#define  MASK_FMSUB_D 0x1ff | 
|  | +#define MATCH_VLSEGHU 0x228b | 
|  | +#define  MASK_VLSEGHU 0x1ffff | 
|  | +#define MATCH_VSSEGB 0x200f | 
|  | +#define  MASK_VSSEGB 0x1ffff | 
|  | +#define MATCH_VFSSEGD 0x258f | 
|  | +#define  MASK_VFSSEGD 0x1ffff | 
|  | +#define MATCH_SD 0x1a3 | 
|  | +#define  MASK_SD 0x3ff | 
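Each MATCH_*/MASK_* pair gives the fixed opcode bits of an instruction and the mask of bits that must equal them; bits outside the mask hold the operand fields. A hedged sketch of how such a table is typically consumed by a decoder (standalone C with values copied from the definitions above; this usage is assumed, not shown in the patch):

#include <stdint.h>
#include <stdio.h>

/* Values copied from the table above (this pre-release RISC-V encoding,
   not the later ratified one). */
#define MATCH_ADD 0x33
#define MASK_ADD  0x1ffff
#define MATCH_SUB 0x10033
#define MASK_SUB  0x1ffff

/* An instruction word matches an entry when its bits, restricted to the
   entry's mask, equal the entry's match value. */
static int matches(uint32_t insn, uint32_t match, uint32_t mask) {
  return (insn & mask) == match;
}

int main(void) {
  uint32_t insn = MATCH_SUB | (7u << 27);  /* hypothetical operand bits outside the mask */
  printf("add=%d sub=%d\n",
         matches(insn, MATCH_ADD, MASK_ADD),
         matches(insn, MATCH_SUB, MASK_SUB));
  return 0;
}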
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/riscv.opt gcc-4.9.2-riscv/gcc/config/riscv/riscv.opt | 
|  | --- gcc-4.9.2/gcc/config/riscv/riscv.opt	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/riscv.opt	2014-12-02 18:04:50.111949590 -0800 | 
|  | @@ -0,0 +1,73 @@ | 
|  | +; Options for the MIPS port of the compiler | 
|  | +; | 
|  | +; Copyright (C) 2005, 2007, 2008, 2010, 2011 Free Software Foundation, Inc. | 
|  | +; | 
|  | +; This file is part of GCC. | 
|  | +; | 
|  | +; GCC is free software; you can redistribute it and/or modify it under | 
|  | +; the terms of the GNU General Public License as published by the Free | 
|  | +; Software Foundation; either version 3, or (at your option) any later | 
|  | +; version. | 
|  | +; | 
|  | +; GCC is distributed in the hope that it will be useful, but WITHOUT | 
|  | +; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | 
|  | +; or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public | 
|  | +; License for more details. | 
|  | +; | 
|  | +; You should have received a copy of the GNU General Public License | 
|  | +; along with GCC; see the file COPYING3.  If not see | 
|  | +; <http://www.gnu.org/licenses/>. | 
|  | + | 
|  | +EB | 
|  | +Driver | 
|  | + | 
|  | +EL | 
|  | +Driver | 
|  | + | 
|  | +m32 | 
|  | +Target RejectNegative Mask(32BIT) | 
|  | +Generate RV32 code | 
|  | + | 
|  | +m64 | 
|  | +Target RejectNegative InverseMask(32BIT, 64BIT) | 
|  | +Generate RV64 code | 
|  | + | 
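Mask(32BIT) and InverseMask(32BIT, 64BIT) tell GCC's option machinery to carve a bit out of target_flags and to generate the corresponding TARGET_* tests. Roughly, the generated accessors behave like the sketch below (simplified and assumed from the usual .opt conventions, not taken from this patch; the real bit position is chosen by the generator):

#include <stdio.h>

/* Simplified model of what the option generator derives from the
   Mask/InverseMask directives above. */
static int target_flags;

#define MASK_32BIT   (1 << 0)
#define TARGET_32BIT ((target_flags & MASK_32BIT) != 0)   /* -m32 sets the bit   */
#define TARGET_64BIT ((target_flags & MASK_32BIT) == 0)   /* -m64 clears the bit */

int main(void) {
  target_flags |= MASK_32BIT;                  /* as if -m32 were given */
  printf("rv32=%d rv64=%d\n", TARGET_32BIT, TARGET_64BIT);
  return 0;
}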
|  | +mabicalls | 
|  | +Target Report Mask(ABICALLS) | 
|  | +Generate code that can be used in SVR4-style dynamic objects | 
|  | + | 
|  | +mbranch-cost= | 
|  | +Target RejectNegative Joined UInteger Var(mips_branch_cost) | 
|  | +-mbranch-cost=COST	Set the cost of branches to roughly COST instructions | 
|  | + | 
|  | +meb | 
|  | +Target Report RejectNegative Mask(BIG_ENDIAN) | 
|  | +Use big-endian byte order | 
|  | + | 
|  | +mel | 
|  | +Target Report RejectNegative InverseMask(BIG_ENDIAN, LITTLE_ENDIAN) | 
|  | +Use little-endian byte order | 
|  | + | 
|  | +mhard-float | 
|  | +Target Report RejectNegative InverseMask(SOFT_FLOAT_ABI, HARD_FLOAT_ABI) | 
|  | +Allow the use of hardware floating-point ABI and instructions | 
|  | + | 
|  | +mips | 
|  | +Target RejectNegative Joined | 
|  | +-mipsN	Generate code for ISA level N | 
|  | + | 
|  | +mlong-calls | 
|  | +Target Report Var(TARGET_LONG_CALLS) | 
|  | +Use indirect calls | 
|  | + | 
|  | +mmemcpy | 
|  | +Target Report Mask(MEMCPY) | 
|  | +Don't optimize block moves | 
|  | + | 
|  | +msoft-float | 
|  | +Target Report RejectNegative Mask(SOFT_FLOAT_ABI) | 
|  | +Prevent the use of all hardware floating-point instructions | 
|  | + | 
|  | +mtune= | 
|  | +Target RejectNegative Joined Var(mips_tune_string) | 
|  | +-mtune=PROCESSOR	Optimize the output for PROCESSOR | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/riscv-protos.h gcc-4.9.2-riscv/gcc/config/riscv/riscv-protos.h | 
|  | --- gcc-4.9.2/gcc/config/riscv/riscv-protos.h	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/riscv-protos.h	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -0,0 +1,135 @@ | 
|  | +/* Prototypes of target machine for GNU compiler.  MIPS version. | 
|  | +   Copyright (C) 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, | 
|  | +   1999, 2001, 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010, 2011 | 
|  | +   Free Software Foundation, Inc. | 
|  | +   Contributed by A. Lichnewsky (lich@inria.inria.fr). | 
|  | +   Changed by Michael Meissner	(meissner@osf.org). | 
|  | +   64-bit r4000 support by Ian Lance Taylor (ian@cygnus.com) and | 
|  | +   Brendan Eich (brendan@microunity.com). | 
|  | + | 
|  | +This file is part of GCC. | 
|  | + | 
|  | +GCC is free software; you can redistribute it and/or modify | 
|  | +it under the terms of the GNU General Public License as published by | 
|  | +the Free Software Foundation; either version 3, or (at your option) | 
|  | +any later version. | 
|  | + | 
|  | +GCC is distributed in the hope that it will be useful, | 
|  | +but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +GNU General Public License for more details. | 
|  | + | 
|  | +You should have received a copy of the GNU General Public License | 
|  | +along with GCC; see the file COPYING3.  If not see | 
|  | +<http://www.gnu.org/licenses/>.  */ | 
|  | + | 
|  | +#ifndef GCC_MIPS_PROTOS_H | 
|  | +#define GCC_MIPS_PROTOS_H | 
|  | + | 
|  | +/* Classifies a SYMBOL_REF, LABEL_REF or UNSPEC address. | 
|  | + | 
|  | +   SYMBOL_ABSOLUTE | 
|  | +       The symbol's value will be calculated using absolute relocations, | 
|  | +       such as %hi and %lo. | 
|  | + | 
|  | +   SYMBOL_TLS | 
|  | +       A thread-local symbol. | 
|  | + | 
|  | +   SYMBOL_TPREL | 
|  | +       UNSPEC wrappers around SYMBOL_TLS, corresponding to the | 
|  | +       thread-local storage relocation operators. | 
|  | + | 
|  | +   SYMBOL_32_HIGH | 
|  | +       For a 32-bit symbolic address X, this is the value of %hi(X). */ | 
|  | +enum mips_symbol_type { | 
|  | +  SYMBOL_ABSOLUTE, | 
|  | +  SYMBOL_TLS, | 
|  | +  SYMBOL_TPREL, | 
|  | +}; | 
|  | +#define NUM_SYMBOL_TYPES (SYMBOL_TPREL + 1) | 
|  | + | 
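For SYMBOL_ABSOLUTE, the address is materialized with a %hi/%lo pair (a lui-style upper part plus a 12-bit immediate); because %lo is sign-extended, %hi must round up by 0x800 for the sum to reconstruct the address. A small standalone C check of that arithmetic (the address value is made up for illustration):

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* %hi(x): upper 20 bits, rounded so that adding the sign-extended
   %lo(x) (low 12 bits) reconstructs x exactly. */
static uint32_t hi_part(uint32_t addr) { return (addr + 0x800u) >> 12; }
static int32_t  lo_part(uint32_t addr) { return (int32_t)((addr & 0xfffu) ^ 0x800u) - 0x800; }

int main(void) {
  uint32_t addr = 0x12345ffcu;   /* hypothetical symbol address */
  uint32_t rebuilt = (hi_part(addr) << 12) + (uint32_t)lo_part(addr);
  assert(rebuilt == addr);
  printf("%%hi=0x%x %%lo=%d\n", hi_part(addr), (int)lo_part(addr));
  return 0;
}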
|  | +extern bool mips_symbolic_constant_p (rtx, enum mips_symbol_type *); | 
|  | +extern int mips_regno_mode_ok_for_base_p (int, enum machine_mode, bool); | 
|  | +extern int mips_address_insns (rtx, enum machine_mode, bool); | 
|  | +extern int mips_const_insns (rtx); | 
|  | +extern int mips_split_const_insns (rtx); | 
|  | +extern int mips_load_store_insns (rtx, rtx); | 
|  | +extern rtx mips_emit_move (rtx, rtx); | 
|  | +extern bool mips_split_symbol (rtx, rtx, enum machine_mode, rtx *); | 
|  | +extern rtx mips_unspec_address (rtx, enum mips_symbol_type); | 
|  | +extern void mips_move_integer (rtx, rtx, HOST_WIDE_INT); | 
|  | +extern bool mips_legitimize_move (enum machine_mode, rtx, rtx); | 
|  | +extern bool mips_legitimize_vector_move (enum machine_mode, rtx, rtx); | 
|  | + | 
|  | +extern rtx mips_subword (rtx, bool); | 
|  | +extern bool mips_split_64bit_move_p (rtx, rtx); | 
|  | +extern void mips_split_doubleword_move (rtx, rtx); | 
|  | +extern const char *mips_output_move (rtx, rtx); | 
|  | +extern const char *mips_riscv_output_vector_move (enum machine_mode, rtx, rtx); | 
|  | +#ifdef RTX_CODE | 
|  | +extern void mips_expand_scc (rtx *); | 
|  | +extern void mips_expand_conditional_branch (rtx *); | 
|  | +extern void mips_expand_vcondv2sf (rtx, rtx, rtx, enum rtx_code, rtx, rtx); | 
|  | +extern void mips_expand_conditional_move (rtx *); | 
|  | +#endif | 
|  | +extern rtx mips_expand_call (bool, rtx, rtx, rtx); | 
|  | +extern void mips_expand_fcc_reload (rtx, rtx, rtx); | 
|  | +extern void mips_set_return_address (rtx, rtx); | 
|  | +extern bool mips_expand_block_move (rtx, rtx, rtx); | 
|  | +extern void mips_expand_synci_loop (rtx, rtx); | 
|  | + | 
|  | +extern void mips_init_cumulative_args (CUMULATIVE_ARGS *, tree); | 
|  | +extern bool mips_pad_arg_upward (enum machine_mode, const_tree); | 
|  | +extern bool mips_pad_reg_upward (enum machine_mode, tree); | 
|  | + | 
|  | +extern bool mips_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT, | 
|  | +					       HOST_WIDE_INT); | 
|  | +extern bool mips_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT, | 
|  | +						HOST_WIDE_INT); | 
|  | +extern bool mips_mem_fits_mode_p (enum machine_mode mode, rtx x); | 
|  | +extern void mips_order_regs_for_local_alloc (void); | 
|  | +extern HOST_WIDE_INT mips_debugger_offset (rtx, HOST_WIDE_INT); | 
|  | + | 
|  | +extern void mips_output_external (FILE *, tree, const char *); | 
|  | +extern void mips_output_ascii (FILE *, const char *, size_t); | 
|  | +extern void mips_output_aligned_decl_common (FILE *, tree, const char *, | 
|  | +					     unsigned HOST_WIDE_INT, | 
|  | +					     unsigned int); | 
|  | +extern void mips_declare_common_object (FILE *, const char *, | 
|  | +					const char *, unsigned HOST_WIDE_INT, | 
|  | +					unsigned int, bool); | 
|  | +extern void mips_declare_object (FILE *, const char *, const char *, | 
|  | +				 const char *, ...) ATTRIBUTE_PRINTF_4; | 
|  | +extern void mips_declare_object_name (FILE *, const char *, tree); | 
|  | +extern void mips_finish_declare_object (FILE *, tree, int, int); | 
|  | + | 
|  | +extern HOST_WIDE_INT mips_initial_elimination_offset (int, int); | 
|  | +extern rtx mips_return_addr (int, rtx); | 
|  | +extern void mips_emit_save_slot_move (rtx, rtx, rtx); | 
|  | +extern void mips_expand_prologue (void); | 
|  | +extern void mips_expand_epilogue (bool); | 
|  | +extern bool mips_can_use_return_insn (void); | 
|  | +extern rtx mips_function_value (const_tree, const_tree, enum machine_mode); | 
|  | + | 
|  | +extern bool mips_cannot_change_mode_class (enum machine_mode, | 
|  | +					   enum machine_mode, enum reg_class); | 
|  | +extern bool mips_modes_tieable_p (enum machine_mode, enum machine_mode); | 
|  | +extern enum reg_class mips_secondary_reload_class (enum reg_class, | 
|  | +						   enum machine_mode, | 
|  | +						   rtx, bool); | 
|  | +extern int mips_class_max_nregs (enum reg_class, enum machine_mode); | 
|  | + | 
|  | +extern const char *mips_output_conditional_branch (rtx, rtx *, const char *, | 
|  | +						   const char *); | 
|  | +extern unsigned int mips_hard_regno_nregs (int, enum machine_mode); | 
|  | + | 
|  | +extern void irix_asm_output_align (FILE *, unsigned); | 
|  | +extern const char *current_section_name (void); | 
|  | +extern unsigned int current_section_flags (void); | 
|  | + | 
|  | +extern void mips_expand_vector_init (rtx, rtx); | 
|  | + | 
|  | +extern bool mips_epilogue_uses (unsigned int); | 
|  | +extern bool riscv_symbol_binds_local_p (const_rtx x); | 
|  | + | 
|  | +#endif /* ! GCC_MIPS_PROTOS_H */ | 
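(Aside, not part of the patch.) The symbol classification above feeds the address splitting that mips_split_symbol and mips_unspec_address (declared above) perform for SYMBOL_ABSOLUTE references. A minimal sketch of what that looks like from the C side, assuming an arbitrary global named "counter" and the default code model; the exact registers are chosen by the compiler:

    /* Illustrative only: a reference to an ordinary global is a
       SYMBOL_ABSOLUTE address and is split into %hi/%lo relocations,
       roughly:
           lui   a5,%hi(counter)
           lw    a0,%lo(counter)(a5)  */
    extern int counter;

    int
    read_counter (void)
    {
      return counter;
    }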
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/ros64.h gcc-4.9.2-riscv/gcc/config/riscv/ros64.h | 
|  | --- gcc-4.9.2/gcc/config/riscv/ros64.h	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/ros64.h	2014-12-02 18:04:50.111949590 -0800 | 
|  | @@ -0,0 +1,7 @@ | 
|  | +#include "linux64.h" | 
|  | + | 
|  | +#undef LINUX_DYNAMIC_LINKER32 | 
|  | +#define LINUX_DYNAMIC_LINKER32 GLIBC_DYNAMIC_LINKER32 | 
|  | + | 
|  | +#undef LINUX_DYNAMIC_LINKER64 | 
|  | +#define LINUX_DYNAMIC_LINKER64 GLIBC_DYNAMIC_LINKER64 | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/ros.h gcc-4.9.2-riscv/gcc/config/riscv/ros.h | 
|  | --- gcc-4.9.2/gcc/config/riscv/ros.h	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/ros.h	2014-12-02 18:04:50.111949590 -0800 | 
|  | @@ -0,0 +1,9 @@ | 
|  | +#include "linux.h" | 
|  | +#define LINUX_TARGET_OS_CPP_BUILTINS ROS_TARGET_OS_CPP_BUILTINS | 
|  | + | 
|  | +#undef LINUX_DYNAMIC_LINKER | 
|  | +#define LINUX_DYNAMIC_LINKER GLIBC_DYNAMIC_LINKER | 
|  | + | 
|  | +#undef TARGET_VERSION | 
|  | +#define TARGET_VERSION fprintf (stderr, " (RISC-V ROS/ELF)"); | 
|  | + | 
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/sync.md gcc-4.9.2-riscv/gcc/config/riscv/sync.md | 
|  | --- gcc-4.9.2/gcc/config/riscv/sync.md	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/sync.md	2014-12-02 18:04:50.111949590 -0800 | 
|  | @@ -0,0 +1,92 @@ | 
|  | +;;  Machine Description for MIPS based processor synchronization | 
|  | +;;  instructions. | 
|  | +;;  Copyright (C) 2007, 2008, 2009, 2010 | 
|  | +;;  Free Software Foundation, Inc. | 
|  | + | 
|  | +;; This file is part of GCC. | 
|  | + | 
|  | +;; GCC is free software; you can redistribute it and/or modify | 
|  | +;; it under the terms of the GNU General Public License as published by | 
|  | +;; the Free Software Foundation; either version 3, or (at your option) | 
|  | +;; any later version. | 
|  | + | 
|  | +;; GCC is distributed in the hope that it will be useful, | 
|  | +;; but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +;; GNU General Public License for more details. | 
|  | + | 
|  | +;; You should have received a copy of the GNU General Public License | 
|  | +;; along with GCC; see the file COPYING3.  If not see | 
|  | +;; <http://www.gnu.org/licenses/>. | 
|  | + | 
|  | +(define_c_enum "unspec" [ | 
|  | +  UNSPEC_COMPARE_AND_SWAP | 
|  | +  UNSPEC_COMPARE_AND_SWAP_12 | 
|  | +  UNSPEC_SYNC_OLD_OP | 
|  | +  UNSPEC_SYNC_NEW_OP | 
|  | +  UNSPEC_SYNC_NEW_OP_12 | 
|  | +  UNSPEC_SYNC_OLD_OP_12 | 
|  | +  UNSPEC_SYNC_EXCHANGE | 
|  | +  UNSPEC_SYNC_EXCHANGE_12 | 
|  | +  UNSPEC_MEMORY_BARRIER | 
|  | +]) | 
|  | + | 
|  | +(define_code_iterator any_atomic [plus ior xor and]) | 
|  | + | 
|  | +;; Atomic memory operations. | 
|  | + | 
|  | +(define_expand "memory_barrier" | 
|  | +  [(set (match_dup 0) | 
|  | +	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))] | 
|  | +  "" | 
|  | +{ | 
|  | +  operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode)); | 
|  | +  MEM_VOLATILE_P (operands[0]) = 1; | 
|  | +}) | 
|  | + | 
|  | +(define_insn "*memory_barrier" | 
|  | +  [(set (match_operand:BLK 0 "" "") | 
|  | +	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))] | 
|  | +  "" | 
|  | +  "fence") | 
|  | + | 
|  | +(define_insn "sync_<optab><mode>" | 
|  | +  [(set (match_operand:GPR 0 "memory_operand" "+YR") | 
|  | +	(unspec_volatile:GPR | 
|  | +          [(any_atomic:GPR (match_dup 0) | 
|  | +		     (match_operand:GPR 1 "register_operand" "d"))] | 
|  | +	 UNSPEC_SYNC_OLD_OP))] | 
|  | +  "" | 
|  | +  "amo<insn>.<amo> zero,%1,%0") | 
|  | + | 
|  | +(define_insn "sync_old_<optab><mode>" | 
|  | +  [(set (match_operand:GPR 0 "register_operand" "=&d") | 
|  | +	(match_operand:GPR 1 "memory_operand" "+YR")) | 
|  | +   (set (match_dup 1) | 
|  | +	(unspec_volatile:GPR | 
|  | +          [(any_atomic:GPR (match_dup 1) | 
|  | +		     (match_operand:GPR 2 "register_operand" "d"))] | 
|  | +	 UNSPEC_SYNC_OLD_OP))] | 
|  | +  "" | 
|  | +  "amo<insn>.<amo> %0,%2,%1") | 
|  | + | 
|  | +(define_insn "sync_lock_test_and_set<mode>" | 
|  | +  [(set (match_operand:GPR 0 "register_operand" "=&d") | 
|  | +	(match_operand:GPR 1 "memory_operand" "+YR")) | 
|  | +   (set (match_dup 1) | 
|  | +	(unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "d")] | 
|  | +	 UNSPEC_SYNC_EXCHANGE))] | 
|  | +  "" | 
|  | +  "amoswap.<amo> %0,%2,%1") | 
|  | + | 
|  | +(define_insn "sync_compare_and_swap<mode>" | 
|  | +  [(set (match_operand:GPR 0 "register_operand" "=&d") | 
|  | +	(match_operand:GPR 1 "memory_operand" "+YR")) | 
|  | +   (set (match_dup 1) | 
|  | +	(unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "d") | 
|  | +			      (match_operand:GPR 3 "reg_or_0_operand" "d")] | 
|  | +	 UNSPEC_COMPARE_AND_SWAP)) | 
|  | +   (clobber (match_scratch:GPR 4 "=&d"))] | 
|  | +  "" | 
|  | +  "1: lr.<amo> %0,%1; bne %0,%2,1f; sc.<amo> %4,%3,%1; bnez %4,1b; 1:" | 
|  | +  [(set (attr "length") (const_int 16))]) | 
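(Aside, not part of the patch.) Roughly speaking, these sync.md patterns are what the legacy __sync builtins expand to on this port: sync_<optab><mode> and sync_old_<optab><mode> become single amo instructions, sync_compare_and_swap<mode> becomes the lr/sc retry loop, and memory_barrier becomes a fence. A minimal C sketch, with arbitrary function names:

    /* Illustrative only: builtins served by the patterns above.  */
    int
    fetch_add (int *p, int v)
    {
      return __sync_fetch_and_add (p, v);       /* sync_old_addsi: amoadd.w */
    }

    int
    val_cas (int *p, int expected, int desired)
    {
      /* sync_compare_and_swapsi: lr.w/sc.w retry loop */
      return __sync_val_compare_and_swap (p, expected, desired);
    }

    void
    full_barrier (void)
    {
      __sync_synchronize ();                     /* memory_barrier: fence */
    }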
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/t-elf gcc-4.9.2-riscv/gcc/config/riscv/t-elf | 
|  | --- gcc-4.9.2/gcc/config/riscv/t-elf	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/t-elf	2014-12-02 18:04:50.111949590 -0800 | 
|  | @@ -0,0 +1,36 @@ | 
|  | +# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2006, | 
|  | +# 2007 Free Software Foundation, Inc. | 
|  | +# | 
|  | +# This file is part of GCC. | 
|  | +# | 
|  | +# GCC is free software; you can redistribute it and/or modify | 
|  | +# it under the terms of the GNU General Public License as published by | 
|  | +# the Free Software Foundation; either version 3, or (at your option) | 
|  | +# any later version. | 
|  | +# | 
|  | +# GCC is distributed in the hope that it will be useful, | 
|  | +# but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +# GNU General Public License for more details. | 
|  | +# | 
|  | +# You should have received a copy of the GNU General Public License | 
|  | +# along with GCC; see the file COPYING3.  If not see | 
|  | +# <http://www.gnu.org/licenses/>. | 
|  | + | 
|  | +# Assemble startup files. | 
|  | +$(T)crti.o: $(srcdir)/config/riscv/crti.asm $(GCC_PASSES) | 
|  | +	$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \ | 
|  | +	-c -o $(T)crti.o -x assembler-with-cpp $(srcdir)/config/riscv/crti.asm | 
|  | + | 
|  | +$(T)crtn.o: $(srcdir)/config/riscv/crtn.asm $(GCC_PASSES) | 
|  | +	$(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \ | 
|  | +	-c -o $(T)crtn.o -x assembler-with-cpp $(srcdir)/config/riscv/crtn.asm | 
|  | + | 
|  | +# Build the libraries for both the 64-bit and 32-bit multilibs | 
|  | + | 
|  | +MULTILIB_OPTIONS = m64/m32 | 
|  | +MULTILIB_DIRNAMES = 64 32 | 
|  | +EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crti.o crtn.o | 
|  | + | 
|  | +LIBGCC = stmp-multilib | 
|  | +INSTALL_LIBGCC = install-multilib | 
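(Aside, not part of the patch.) crti.o/crtn.o assembled above, together with the crtbegin.o/crtend.o listed in EXTRA_MULTILIB_PARTS, provide the .init/.fini scaffolding and the constructor/destructor tables for each multilib. A small C sketch of code that relies on them; the names are arbitrary:

    /* Illustrative only: a constructor registered through the
       startup objects built by this fragment.  */
    static int initialized;

    __attribute__ ((constructor))
    static void
    setup (void)
    {
      initialized = 1;
    }

    int
    is_initialized (void)
    {
      return initialized;
    }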
|  | diff -ruN gcc-4.9.2/gcc/config/riscv/t-linux64 gcc-4.9.2-riscv/gcc/config/riscv/t-linux64 | 
|  | --- gcc-4.9.2/gcc/config/riscv/t-linux64	1969-12-31 16:00:00.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/config/riscv/t-linux64	2014-12-02 18:04:50.111949590 -0800 | 
|  | @@ -0,0 +1,35 @@ | 
|  | +# Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc. | 
|  | +# | 
|  | +# This file is part of GCC. | 
|  | +# | 
|  | +# GCC is free software; you can redistribute it and/or modify | 
|  | +# it under the terms of the GNU General Public License as published by | 
|  | +# the Free Software Foundation; either version 3, or (at your option) | 
|  | +# any later version. | 
|  | +# | 
|  | +# GCC is distributed in the hope that it will be useful, | 
|  | +# but WITHOUT ANY WARRANTY; without even the implied warranty of | 
|  | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | 
|  | +# GNU General Public License for more details. | 
|  | +# | 
|  | +# You should have received a copy of the GNU General Public License | 
|  | +# along with GCC; see the file COPYING3.  If not see | 
|  | +# <http://www.gnu.org/licenses/>. | 
|  | + | 
|  | +MULTILIB_OPTIONS = m64/m32 | 
|  | +MULTILIB_DIRNAMES = 64 32 | 
|  | +MULTILIB_OSDIRNAMES = ../lib ../lib32 | 
|  | + | 
|  | +EXTRA_MULTILIB_PARTS=crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o | 
|  | + | 
|  | +TPBIT = tp-bit.c | 
|  | + | 
|  | +tp-bit.c: $(srcdir)/config/fp-bit.c | 
|  | +	echo '#ifdef __MIPSEL__' > tp-bit.c | 
|  | +	echo '# define FLOAT_BIT_ORDER_MISMATCH' >> tp-bit.c | 
|  | +	echo '#endif' >> tp-bit.c | 
|  | +	echo '#if __LDBL_MANT_DIG__ == 113' >> tp-bit.c | 
|  | +	echo '#define QUIET_NAN_NEGATED' >> tp-bit.c | 
|  | +	echo '# define TFLOAT' >> tp-bit.c | 
|  | +	cat $(srcdir)/config/fp-bit.c >> tp-bit.c | 
|  | +	echo '#endif' >> tp-bit.c | 
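(Aside, not part of the patch.) The recipe above simply wraps fp-bit.c in a guard so the TFmode soft-float routines (IEEE binary128, hence the 113-bit mantissa test) are built only when long double is quad precision. The generated tp-bit.c therefore begins and ends like this, with the ellipsis standing for the full contents of fp-bit.c:

    #ifdef __MIPSEL__
    # define FLOAT_BIT_ORDER_MISMATCH
    #endif
    #if __LDBL_MANT_DIG__ == 113
    #define QUIET_NAN_NEGATED
    # define TFLOAT
    /* ... contents of $(srcdir)/config/fp-bit.c ... */
    #endif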
|  | diff -ruN gcc-4.9.2/gcc/config.gcc gcc-4.9.2-riscv/gcc/config.gcc | 
|  | --- gcc-4.9.2/gcc/config.gcc	2014-09-17 07:16:02.000000000 -0700 | 
|  | +++ gcc-4.9.2-riscv/gcc/config.gcc	2014-12-02 18:04:50.107949563 -0800 | 
|  | @@ -1962,6 +1962,18 @@ | 
|  | gnu_ld=yes | 
|  | gas=yes | 
|  | ;; | 
|  | +riscv*-*-linux*)				# Linux RISC-V | 
|  | +	tm_file="dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} riscv/linux.h riscv/linux64.h" | 
|  | +	tmake_file="${tmake_file} riscv/t-linux64" | 
|  | +	gnu_ld=yes | 
|  | +	gas=yes | 
|  | +	;; | 
|  | +riscv*-*-elf*)				# RISC-V ELF (newlib) | 
|  | +	tm_file="dbxelf.h elfos.h newlib-stdint.h ${tm_file} riscv/elf.h" | 
|  | +	tmake_file="${tmake_file} riscv/t-elf" | 
|  | +	gnu_ld=yes | 
|  | +	gas=yes | 
|  | +	;; | 
|  | mips64*-*-linux* | mipsisa64*-*-linux*) | 
|  | tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/gnu-user64.h mips/linux64.h mips/linux-common.h" | 
|  | extra_options="${extra_options} linux-android.opt" | 
|  | @@ -3756,6 +3768,31 @@ | 
|  | done | 
|  | ;; | 
|  |  | 
|  | +	riscv*-*-*) | 
|  | +		supported_defaults="abi arch arch_32 arch_64 float tune tune_32 tune_64" | 
|  | + | 
|  | +		case ${with_float} in | 
|  | +		"" | soft | hard) | 
|  | +			# OK | 
|  | +			;; | 
|  | +		*) | 
|  | +			echo "Unknown floating point type used in --with-float=$with_float" 1>&2 | 
|  | +			exit 1 | 
|  | +			;; | 
|  | +		esac | 
|  | + | 
|  | +		case ${with_abi} in | 
|  | +		"" | 32 | 64) | 
|  | +			# OK | 
|  | +			;; | 
|  | +		*) | 
|  | +			echo "Unknown ABI used in --with-abi=$with_abi" 1>&2 | 
|  | +			exit 1 | 
|  | +			;; | 
|  | +		esac | 
|  | + | 
|  | +		;; | 
|  | + | 
|  | mips*-*-*) | 
|  | supported_defaults="abi arch arch_32 arch_64 float fpu nan tune tune_32 tune_64 divide llsc mips-plt synci" | 
|  |  | 
|  | diff -ruN gcc-4.9.2/gcc/expr.c gcc-4.9.2-riscv/gcc/expr.c | 
|  | --- gcc-4.9.2/gcc/expr.c	2014-09-01 03:14:22.000000000 -0700 | 
|  | +++ gcc-4.9.2-riscv/gcc/expr.c	2014-12-02 18:04:50.115949618 -0800 | 
|  | @@ -358,7 +358,12 @@ | 
|  |  | 
|  | if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode)) | 
|  | { | 
|  | +      /* cbatten - On maven the sizes of the two vectors need not be the | 
|  | +         same; I am not yet sure whether it is safe to comment out | 
|  | +         this assertion. | 
|  | + | 
|  | gcc_assert (GET_MODE_BITSIZE (from_mode) == GET_MODE_BITSIZE (to_mode)); | 
|  | +      */ | 
|  |  | 
|  | if (VECTOR_MODE_P (to_mode)) | 
|  | from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0); | 
|  | diff -ruN gcc-4.9.2/gcc/genmodes.c gcc-4.9.2-riscv/gcc/genmodes.c | 
|  | --- gcc-4.9.2/gcc/genmodes.c	2014-01-02 14:23:26.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/genmodes.c	2014-12-02 18:04:53.943976152 -0800 | 
|  | @@ -1058,7 +1058,7 @@ | 
|  | int c; | 
|  | struct mode_data *m; | 
|  |  | 
|  | -  print_maybe_const_decl ("%sunsigned char", "mode_size", | 
|  | +  print_maybe_const_decl ("%sunsigned short", "mode_size", | 
|  | "NUM_MACHINE_MODES", bytesize); | 
|  |  | 
|  | for_all_modes (c, m) | 
|  | @@ -1192,7 +1192,7 @@ | 
|  | int c; | 
|  | struct mode_data *m; | 
|  |  | 
|  | -  print_maybe_const_decl ("%sunsigned char", | 
|  | +  print_maybe_const_decl ("%sunsigned short", | 
|  | "mode_base_align", "NUM_MACHINE_MODES", | 
|  | alignment); | 
|  |  | 
|  | diff -ruN gcc-4.9.2/gcc/machmode.h gcc-4.9.2-riscv/gcc/machmode.h | 
|  | --- gcc-4.9.2/gcc/machmode.h	2014-01-02 14:23:26.000000000 -0800 | 
|  | +++ gcc-4.9.2-riscv/gcc/machmode.h	2014-12-02 18:04:53.943976152 -0800 | 
|  | @@ -176,7 +176,7 @@ | 
|  |  | 
|  | /* Get the size in bytes and bits of an object of mode MODE.  */ | 
|  |  | 
|  | -extern CONST_MODE_SIZE unsigned char mode_size[NUM_MACHINE_MODES]; | 
|  | +extern CONST_MODE_SIZE unsigned short mode_size[NUM_MACHINE_MODES]; | 
|  | #define GET_MODE_SIZE(MODE)    ((unsigned short) mode_size[MODE]) | 
|  | #define GET_MODE_BITSIZE(MODE) \ | 
|  | ((unsigned short) (GET_MODE_SIZE (MODE) * BITS_PER_UNIT)) | 
|  | @@ -291,7 +291,7 @@ | 
|  |  | 
|  | /* Determine alignment, 1<=result<=BIGGEST_ALIGNMENT.  */ | 
|  |  | 
|  | -extern CONST_MODE_BASE_ALIGN unsigned char mode_base_align[NUM_MACHINE_MODES]; | 
|  | +extern CONST_MODE_BASE_ALIGN unsigned short mode_base_align[NUM_MACHINE_MODES]; | 
|  |  | 
|  | extern unsigned get_mode_alignment (enum machine_mode); | 
|  |  | 
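(Aside, not part of the patch.) The mode_size and mode_base_align tables are widened from unsigned char to unsigned short in genmodes.c and machmode.h above, presumably because the port's vector modes can be wider than 255 bytes, the largest size an 8-bit table entry can record. A self-contained C sketch of the overflow, using a hypothetical 512-byte mode as the example value:

    #include <stdio.h>

    int
    main (void)
    {
      /* Illustrative only: a hypothetical 512-byte vector mode.  */
      unsigned char  narrow = (unsigned char) 512;   /* wraps to 0 */
      unsigned short wide   = (unsigned short) 512;  /* holds 512  */
      printf ("unsigned char: %u, unsigned short: %u\n", narrow, wide);
      return 0;
    }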
|  | diff -ruN gcc-4.9.2/libgcc/config.host gcc-4.9.2-riscv/libgcc/config.host | 
|  | --- gcc-4.9.2/libgcc/config.host	2014-03-27 08:40:31.000000000 -0700 | 
|  | +++ gcc-4.9.2-riscv/libgcc/config.host	2014-12-02 18:04:53.947976181 -0800 | 
|  | @@ -1002,6 +1002,8 @@ | 
|  | tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic t-fdpbit" | 
|  | extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o" | 
|  | ;; | 
|  | +riscv*-*-*) | 
|  | +	;; | 
|  | rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*) | 
|  | md_unwind_header=rs6000/aix-unwind.h | 
|  | tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-slibgcc-aix rs6000/t-ibm-ldouble" |