summaryrefslogtreecommitdiff
path: root/debian
diff options
context:
space:
mode:
authordoko <doko@6ca36cf4-e1d1-0310-8c6f-e303bb2178ca>2013-10-20 22:51:47 +0000
committerdoko <doko@6ca36cf4-e1d1-0310-8c6f-e303bb2178ca>2013-10-20 22:51:47 +0000
commit206767a6609a13221853b18ca7a66173838c9304 (patch)
tree10ce79e2c6f96c6e26c67b036eeeab94a346eb87 /debian
parentc083cb4b48d50ca7247ac56c459f51f9ffb608b7 (diff)
downloadgcc-47-206767a6609a13221853b18ca7a66173838c9304.tar.gz
* Update the Linaro support to the 4.7-2013.10 release.
git-svn-id: svn://svn.debian.org/svn/gcccvs/branches/sid/gcc-4.7@6987 6ca36cf4-e1d1-0310-8c6f-e303bb2178ca
Diffstat (limited to 'debian')
-rw-r--r--debian/changelog1
-rw-r--r--debian/patches/aarch64-bootstrap.diff24
-rw-r--r--debian/patches/gcc-linaro-doc.diff93
-rw-r--r--debian/patches/gcc-linaro.diff15848
-rw-r--r--debian/patches/svn-updates-linaro.diff594
-rw-r--r--debian/rules.patch1
6 files changed, 9098 insertions, 7463 deletions
diff --git a/debian/changelog b/debian/changelog
index b444176..4cbf987 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,6 +1,7 @@
gcc-4.7 (4.7.3-7ubuntu1) UNRELEASED; urgency=low
* Update to SVN 20131020 (r203880) from the gcc-4_7-branch.
+ * Update the Linaro support to the 4.7-2013.10 release.
* Fix bootstrap of native aarch64 build.
-- Matthias Klose <doko@debian.org> Sun, 20 Oct 2013 23:08:56 +0200
diff --git a/debian/patches/aarch64-bootstrap.diff b/debian/patches/aarch64-bootstrap.diff
deleted file mode 100644
index 0918c92..0000000
--- a/debian/patches/aarch64-bootstrap.diff
+++ /dev/null
@@ -1,24 +0,0 @@
-# DP: Fix bootstrap of native aarch64 build.
-
---- a/src/gcc/config/aarch64/aarch64-protos.h
-+++ b/src/gcc/config/aarch64/aarch64-protos.h
-@@ -177,7 +177,7 @@
- rtx aarch64_simd_vect_par_cnst_half (enum machine_mode, bool);
- rtx aarch64_tls_get_addr (void);
- unsigned aarch64_dbx_register_number (unsigned);
--unsigned aarch64_regno_regclass (unsigned);
-+enum reg_class aarch64_regno_regclass (unsigned);
- unsigned aarch64_trampoline_size (void);
- void aarch64_asm_output_labelref (FILE *, const char *);
- void aarch64_elf_asm_named_section (const char *, unsigned, tree);
---- a/src/gcc/config/aarch64/aarch64.c
-+++ b/src/gcc/config/aarch64/aarch64.c
-@@ -3672,7 +3672,7 @@
-
- /* Implement REGNO_REG_CLASS. */
-
--unsigned
-+enum reg_class
- aarch64_regno_regclass (unsigned regno)
- {
- if (GP_REGNUM_P (regno))
diff --git a/debian/patches/gcc-linaro-doc.diff b/debian/patches/gcc-linaro-doc.diff
index 10234bf..a86532c 100644
--- a/debian/patches/gcc-linaro-doc.diff
+++ b/debian/patches/gcc-linaro-doc.diff
@@ -1,4 +1,4 @@
-# DP: Changes for the Linaro 4.7-2013.08 release (documentation).
+# DP: Changes for the Linaro 4.7-2013.10 release (documentation).
--- a/src/gcc/doc/extend.texi
+++ b/src/gcc/doc/extend.texi
@@ -95,7 +95,7 @@
.\"
.IX Title "FSF-FUNDING 7"
-.TH FSF-FUNDING 7 "2013-04-11" "gcc-4.7.3" "GNU"
-+.TH FSF-FUNDING 7 "2013-08-12" "gcc-4.7.4" "GNU"
++.TH FSF-FUNDING 7 "2013-10-14" "gcc-4.7.4" "GNU"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -141,7 +141,7 @@
.\"
.IX Title "GFDL 7"
-.TH GFDL 7 "2013-04-11" "gcc-4.7.3" "GNU"
-+.TH GFDL 7 "2013-08-12" "gcc-4.7.4" "GNU"
++.TH GFDL 7 "2013-10-14" "gcc-4.7.4" "GNU"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -199,7 +199,7 @@
.\"
.IX Title "GPL 7"
-.TH GPL 7 "2013-04-11" "gcc-4.7.3" "GNU"
-+.TH GPL 7 "2013-08-12" "gcc-4.7.4" "GNU"
++.TH GPL 7 "2013-10-14" "gcc-4.7.4" "GNU"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.if n .ad l
@@ -245,6 +245,19 @@
.IX Subsection "How to Apply These Terms to Your New Programs"
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
+--- a/src/gcc/doc/implement-cxx.texi
++++ b/src/gcc/doc/implement-cxx.texi
+@@ -10,8 +10,8 @@
+ A conforming implementation of ISO C++ is required to document its
+ choice of behavior in each of the areas that are designated
+ ``implementation defined''. The following lists all such areas,
+-along with the section numbers from the ISO/IEC 14822:1998 and ISO/IEC
+-14822:2003 standards. Some areas are only implementation-defined in
++along with the section numbers from the ISO/IEC 14882:1998 and ISO/IEC
++14882:2003 standards. Some areas are only implementation-defined in
+ one version of the standard.
+
+ Some choices depend on the externally determined ABI for the platform
--- a/src/gcc/doc/install.texi
+++ b/src/gcc/doc/install.texi
@@ -1047,6 +1047,15 @@
@@ -693,7 +706,43 @@
ARM Target supports @code{-mfpu=neon-fp16 -mfloat-abi=softfp} or compatible
--- a/src/gcc/doc/tm.texi
+++ b/src/gcc/doc/tm.texi
-@@ -9495,6 +9495,10 @@
+@@ -700,6 +700,14 @@
+ Targets may provide a string object type that can be used within and between C, C++ and their respective Objective-C dialects. A string object might, for example, embed encoding and length information. These objects are considered opaque to the compiler and handled as references. An ideal implementation makes the composition of the string object match that of the Objective-C @code{NSString} (@code{NXString} for GNUStep), allowing efficient interworking between C-only and Objective-C code. If a target implements string objects then this hook should return a reference to such an object constructed from the normal `C' string representation provided in @var{string}. At present, the hook is used by Objective-C only, to obtain a common-format string object when the target provides one.
+ @end deftypefn
+
++@deftypefn {C Target Hook} void TARGET_OBJC_DECLARE_UNRESOLVED_CLASS_REFERENCE (const char *@var{classname})
++Declare that Objective C class @var{classname} is referenced by the current TU.
++@end deftypefn
++
++@deftypefn {C Target Hook} void TARGET_OBJC_DECLARE_CLASS_DEFINITION (const char *@var{classname})
++Declare that Objective C class @var{classname} is defined by the current TU.
++@end deftypefn
++
+ @deftypefn {C Target Hook} bool TARGET_STRING_OBJECT_REF_TYPE_P (const_tree @var{stringref})
+ If a target implements string objects then this hook should return @code{true} if @var{stringref} is a valid reference to such an object.
+ @end deftypefn
+@@ -8258,20 +8266,6 @@
+ macro to provide more human-readable names.
+ @end defmac
+
+-@defmac ASM_DECLARE_CLASS_REFERENCE (@var{stream}, @var{name})
+-A C statement (sans semicolon) to output to the stdio stream
+-@var{stream} commands to declare that the label @var{name} is an
+-Objective-C class reference. This is only needed for targets whose
+-linkers have special support for NeXT-style runtimes.
+-@end defmac
+-
+-@defmac ASM_DECLARE_UNRESOLVED_REFERENCE (@var{stream}, @var{name})
+-A C statement (sans semicolon) to output to the stdio stream
+-@var{stream} commands to declare that the label @var{name} is an
+-unresolved Objective-C class reference. This is only needed for targets
+-whose linkers have special support for NeXT-style runtimes.
+-@end defmac
+-
+ @node Initialization
+ @subsection How Initialization Functions Are Handled
+ @cindex initialization routines
+@@ -9495,6 +9489,10 @@
True if the @code{.debug_pubtypes} and @code{.debug_pubnames} sections should be emitted. These sections are not used on most platforms, and in particular GDB does not use them.
@end deftypevr
@@ -706,7 +755,39 @@
@end deftypevr
--- a/src/gcc/doc/tm.texi.in
+++ b/src/gcc/doc/tm.texi.in
-@@ -9388,6 +9388,8 @@
+@@ -696,6 +696,10 @@
+
+ @hook TARGET_OBJC_CONSTRUCT_STRING_OBJECT
+
++@hook TARGET_OBJC_DECLARE_UNRESOLVED_CLASS_REFERENCE
++
++@hook TARGET_OBJC_DECLARE_CLASS_DEFINITION
++
+ @hook TARGET_STRING_OBJECT_REF_TYPE_P
+
+ @hook TARGET_CHECK_STRING_OBJECT_FORMAT_ARG
+@@ -8157,20 +8161,6 @@
+ macro to provide more human-readable names.
+ @end defmac
+
+-@defmac ASM_DECLARE_CLASS_REFERENCE (@var{stream}, @var{name})
+-A C statement (sans semicolon) to output to the stdio stream
+-@var{stream} commands to declare that the label @var{name} is an
+-Objective-C class reference. This is only needed for targets whose
+-linkers have special support for NeXT-style runtimes.
+-@end defmac
+-
+-@defmac ASM_DECLARE_UNRESOLVED_REFERENCE (@var{stream}, @var{name})
+-A C statement (sans semicolon) to output to the stdio stream
+-@var{stream} commands to declare that the label @var{name} is an
+-unresolved Objective-C class reference. This is only needed for targets
+-whose linkers have special support for NeXT-style runtimes.
+-@end defmac
+-
+ @node Initialization
+ @subsection How Initialization Functions Are Handled
+ @cindex initialization routines
+@@ -9388,6 +9378,8 @@
@hook TARGET_WANT_DEBUG_PUB_SECTIONS
diff --git a/debian/patches/gcc-linaro.diff b/debian/patches/gcc-linaro.diff
index 5c9d27f..9871bb0 100644
--- a/debian/patches/gcc-linaro.diff
+++ b/debian/patches/gcc-linaro.diff
@@ -1,8 +1,47 @@
-# DP: Changes for the Linaro 4.7-2013.08 release.
+# DP: Changes for the Linaro 4.7-2013.10 release.
--- a/src/ChangeLog.linaro
+++ b/src/ChangeLog.linaro
-@@ -0,0 +1,2578 @@
+@@ -0,0 +1,2617 @@
++2013-10-15 Christophe Lyon <christophe.lyon@linaro.org>
++
++ GCC Linaro 4.7-2013.10 released.
++
++ gcc/
++ * LINARO-VERSION: Update.
++
++2013-09-14 Christophe lyon <christophe.lyon@linaro.org>
++
++ Merge from FSF GCC 4.7.4 (svn branches/gcc-4_7-branch 203509).
++
++2013-10-08 Matthias Klose <doko@ubuntu.com>
++
++ gcc/
++ * config/aarch64/aarch64-protos.h (aarch64_regno_regclass): Fix
++ prototype.
++ * config/aarch64/aarch64.c (aarch64_regno_regclass): Likewise.
++
++2013-09-10 Christophe Lyon <christophe.lyon@linaro.org>
++
++ gcc/
++ * LINARO-VERSION: Bump version.
++
++2013-09-10 Christophe Lyon <christophe.lyon@linaro.org>
++
++ GCC Linaro 4.7-2013.09 released.
++
++ gcc/
++ * LINARO-VERSION: Update.
++
++2013-09-03 Christophe lyon <christophe.lyon@linaro.org>
++
++ Merge from FSF GCC 4.7.4 (svn branches/gcc-4_7-branch 202210).
++
++2013-08-15 Yvan Roux <yvan.roux@linaro.org>
++
++ gcc/
++ * LINARO-VERSION: Bump version.
++
+2013-08-15 Yvan Roux <yvan.roux@linaro.org>
+
+ GCC Linaro 4.7-2013.08 released.
@@ -2581,6 +2620,146 @@
+ Merge from FSF trunk SVN revision 184223.
+
+Imported GCC from FSF trunk SVN revision 183796.
+--- a/src/INSTALL/binaries.html
++++ b/src/INSTALL/binaries.html
+@@ -3,7 +3,7 @@
+ <title>Installing GCC: Binaries</title>
+ <meta http-equiv="Content-Type" content="text/html">
+ <meta name="description" content="Installing GCC: Binaries">
+-<meta name="generator" content="makeinfo 4.12">
++<meta name="generator" content="makeinfo 4.13">
+ <link title="Top" rel="top" href="#Top">
+ <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+ <!--
+--- a/src/INSTALL/build.html
++++ b/src/INSTALL/build.html
+@@ -3,7 +3,7 @@
+ <title>Installing GCC: Building</title>
+ <meta http-equiv="Content-Type" content="text/html">
+ <meta name="description" content="Installing GCC: Building">
+-<meta name="generator" content="makeinfo 4.12">
++<meta name="generator" content="makeinfo 4.13">
+ <link title="Top" rel="top" href="#Top">
+ <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+ <!--
+--- a/src/INSTALL/configure.html
++++ b/src/INSTALL/configure.html
+@@ -3,7 +3,7 @@
+ <title>Installing GCC: Configuration</title>
+ <meta http-equiv="Content-Type" content="text/html">
+ <meta name="description" content="Installing GCC: Configuration">
+-<meta name="generator" content="makeinfo 4.12">
++<meta name="generator" content="makeinfo 4.13">
+ <link title="Top" rel="top" href="#Top">
+ <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+ <!--
+@@ -446,6 +446,14 @@
+ conventions, etc. should not be built. The default is to build a
+ predefined set of them.
+
++ <br><dt><code>--enable-multiarch</code><dd>Specify whether to enable or disable multiarch support. The default is
++to check for glibc start files in a multiarch location, and enable it
++if the files are found. The auto detection is enabled for native builds,
++and for cross builds configured with <samp><span class="option">--with-sysroot</span></samp>, and without
++<samp><span class="option">--with-native-system-header-dir</span></samp>.
++More documentation about multiarch can be found at
++<a href="http://wiki.debian.org/Multiarch">http://wiki.debian.org/Multiarch</a>.
++
+ <p>Some targets provide finer-grained control over which multilibs are built
+ (e.g., <samp><span class="option">--disable-softfloat</span></samp>):
+ <dl>
+--- a/src/INSTALL/download.html
++++ b/src/INSTALL/download.html
+@@ -3,7 +3,7 @@
+ <title>Downloading GCC</title>
+ <meta http-equiv="Content-Type" content="text/html">
+ <meta name="description" content="Downloading GCC">
+-<meta name="generator" content="makeinfo 4.12">
++<meta name="generator" content="makeinfo 4.13">
+ <link title="Top" rel="top" href="#Top">
+ <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+ <!--
+--- a/src/INSTALL/finalinstall.html
++++ b/src/INSTALL/finalinstall.html
+@@ -3,7 +3,7 @@
+ <title>Installing GCC: Final installation</title>
+ <meta http-equiv="Content-Type" content="text/html">
+ <meta name="description" content="Installing GCC: Final installation">
+-<meta name="generator" content="makeinfo 4.12">
++<meta name="generator" content="makeinfo 4.13">
+ <link title="Top" rel="top" href="#Top">
+ <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+ <!--
+--- a/src/INSTALL/gfdl.html
++++ b/src/INSTALL/gfdl.html
+@@ -3,7 +3,7 @@
+ <title>Installing GCC: GNU Free Documentation License</title>
+ <meta http-equiv="Content-Type" content="text/html">
+ <meta name="description" content="Installing GCC: GNU Free Documentation License">
+-<meta name="generator" content="makeinfo 4.12">
++<meta name="generator" content="makeinfo 4.13">
+ <link title="Top" rel="top" href="#Top">
+ <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+ <!--
+--- a/src/INSTALL/index.html
++++ b/src/INSTALL/index.html
+@@ -3,7 +3,7 @@
+ <title>Installing GCC</title>
+ <meta http-equiv="Content-Type" content="text/html">
+ <meta name="description" content="Installing GCC">
+-<meta name="generator" content="makeinfo 4.12">
++<meta name="generator" content="makeinfo 4.13">
+ <link title="Top" rel="top" href="#Top">
+ <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+ <!--
+--- a/src/INSTALL/old.html
++++ b/src/INSTALL/old.html
+@@ -3,7 +3,7 @@
+ <title>Installing GCC: Old documentation</title>
+ <meta http-equiv="Content-Type" content="text/html">
+ <meta name="description" content="Installing GCC: Old documentation">
+-<meta name="generator" content="makeinfo 4.12">
++<meta name="generator" content="makeinfo 4.13">
+ <link title="Top" rel="top" href="#Top">
+ <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+ <!--
+--- a/src/INSTALL/prerequisites.html
++++ b/src/INSTALL/prerequisites.html
+@@ -3,7 +3,7 @@
+ <title>Prerequisites for GCC</title>
+ <meta http-equiv="Content-Type" content="text/html">
+ <meta name="description" content="Prerequisites for GCC">
+-<meta name="generator" content="makeinfo 4.12">
++<meta name="generator" content="makeinfo 4.13">
+ <link title="Top" rel="top" href="#Top">
+ <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+ <!--
+--- a/src/INSTALL/specific.html
++++ b/src/INSTALL/specific.html
+@@ -3,7 +3,7 @@
+ <title>Host/Target specific installation notes for GCC</title>
+ <meta http-equiv="Content-Type" content="text/html">
+ <meta name="description" content="Host/Target specific installation notes for GCC">
+-<meta name="generator" content="makeinfo 4.12">
++<meta name="generator" content="makeinfo 4.13">
+ <link title="Top" rel="top" href="#Top">
+ <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+ <!--
+--- a/src/INSTALL/test.html
++++ b/src/INSTALL/test.html
+@@ -3,7 +3,7 @@
+ <title>Installing GCC: Testing</title>
+ <meta http-equiv="Content-Type" content="text/html">
+ <meta name="description" content="Installing GCC: Testing">
+-<meta name="generator" content="makeinfo 4.12">
++<meta name="generator" content="makeinfo 4.13">
+ <link title="Top" rel="top" href="#Top">
+ <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
+ <!--
+--- a/src/LAST_UPDATED
++++ b/src/LAST_UPDATED
+@@ -1 +0,0 @@
+-Obtained from SVN: tags/gcc_4_7_3_release revision 197739
--- a/src/config.guess
+++ b/src/config.guess
@@ -2,9 +2,9 @@
@@ -2930,260 +3109,150 @@
;;
m68*-cisco)
os=-aout
---- a/src/gcc/ada/ChangeLog
-+++ b/src/gcc/ada/ChangeLog
-@@ -1,3 +1,8 @@
-+2013-05-26 Eric Botcazou <ebotcazou@adacore.com>
-+
-+ * gcc-interface/trans.c (Attribute_to_gnu) <Attr_Last_Bit>: Add kludge
-+ to avoid generating an overflow for -1.
-+
- 2013-04-11 Release Manager
-
- * GCC 4.7.3 released.
---- a/src/gcc/ada/gcc-interface/trans.c
-+++ b/src/gcc/ada/gcc-interface/trans.c
-@@ -1901,14 +1901,19 @@
- gnu_result = bitsize_int (bitpos % BITS_PER_UNIT);
- gnu_result = size_binop (PLUS_EXPR, gnu_result,
- TYPE_SIZE (TREE_TYPE (gnu_prefix)));
-- gnu_result = size_binop (MINUS_EXPR, gnu_result,
-- bitsize_one_node);
-+ /* ??? Avoid a large unsigned result that will overflow when
-+ converted to the signed universal_integer. */
-+ if (integer_zerop (gnu_result))
-+ gnu_result = integer_minus_one_node;
-+ else
-+ gnu_result
-+ = size_binop (MINUS_EXPR, gnu_result, bitsize_one_node);
- break;
-
- case Attr_Bit_Position:
- gnu_result = gnu_field_bitpos;
- break;
-- }
-+ }
-
- /* If this has a PLACEHOLDER_EXPR, qualify it by the object we are
- handling. */
---- a/src/gcc/builtins.c
-+++ b/src/gcc/builtins.c
-@@ -4626,13 +4626,15 @@
- return result;
- }
-
--/* Expand a call to a bswap builtin with argument ARG0. MODE
-- is the mode to expand with. */
-+/* Expand a call to bswap builtin in EXP.
-+ Return NULL_RTX if a normal call should be emitted rather than expanding the
-+ function in-line. If convenient, the result should be placed in TARGET.
-+ SUBTARGET may be used as the target for computing one of EXP's operands. */
-
- static rtx
--expand_builtin_bswap (tree exp, rtx target, rtx subtarget)
-+expand_builtin_bswap (enum machine_mode target_mode, tree exp, rtx target,
-+ rtx subtarget)
- {
-- enum machine_mode mode;
- tree arg;
- rtx op0;
-
-@@ -4640,14 +4642,18 @@
- return NULL_RTX;
-
- arg = CALL_EXPR_ARG (exp, 0);
-- mode = TYPE_MODE (TREE_TYPE (arg));
-- op0 = expand_expr (arg, subtarget, VOIDmode, EXPAND_NORMAL);
-+ op0 = expand_expr (arg,
-+ subtarget && GET_MODE (subtarget) == target_mode
-+ ? subtarget : NULL_RTX,
-+ target_mode, EXPAND_NORMAL);
-+ if (GET_MODE (op0) != target_mode)
-+ op0 = convert_to_mode (target_mode, op0, 1);
-
-- target = expand_unop (mode, bswap_optab, op0, target, 1);
-+ target = expand_unop (target_mode, bswap_optab, op0, target, 1);
-
- gcc_assert (target);
-
-- return convert_to_mode (mode, target, 0);
-+ return convert_to_mode (target_mode, target, 1);
- }
-
- /* Expand a call to a unary builtin in EXP.
-@@ -6084,10 +6090,10 @@
- expand_stack_restore (CALL_EXPR_ARG (exp, 0));
- return const0_rtx;
-
-+ case BUILT_IN_BSWAP16:
- case BUILT_IN_BSWAP32:
- case BUILT_IN_BSWAP64:
-- target = expand_builtin_bswap (exp, target, subtarget);
--
-+ target = expand_builtin_bswap (target_mode, exp, target, subtarget);
- if (target)
- return target;
- break;
-@@ -8176,7 +8182,7 @@
- return NULL_TREE;
- }
-
--/* Fold function call to builtin_bswap and the long and long long
-+/* Fold function call to builtin_bswap and the short, long and long long
- variants. Return NULL_TREE if no simplification can be made. */
- static tree
- fold_builtin_bswap (tree fndecl, tree arg)
-@@ -8189,15 +8195,15 @@
- {
- HOST_WIDE_INT hi, width, r_hi = 0;
- unsigned HOST_WIDE_INT lo, r_lo = 0;
-- tree type;
-+ tree type = TREE_TYPE (TREE_TYPE (fndecl));
-
-- type = TREE_TYPE (arg);
- width = TYPE_PRECISION (type);
- lo = TREE_INT_CST_LOW (arg);
- hi = TREE_INT_CST_HIGH (arg);
-
- switch (DECL_FUNCTION_CODE (fndecl))
- {
-+ case BUILT_IN_BSWAP16:
- case BUILT_IN_BSWAP32:
- case BUILT_IN_BSWAP64:
- {
-@@ -8227,9 +8233,9 @@
- }
-
- if (width < HOST_BITS_PER_WIDE_INT)
-- return build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), r_lo);
-+ return build_int_cst (type, r_lo);
- else
-- return build_int_cst_wide (TREE_TYPE (TREE_TYPE (fndecl)), r_lo, r_hi);
-+ return build_int_cst_wide (type, r_lo, r_hi);
- }
-
- return NULL_TREE;
-@@ -9692,7 +9698,16 @@
- case rvc_inf:
- /* If arg is Inf or NaN and we're logb, return it. */
- if (TREE_CODE (rettype) == REAL_TYPE)
-- return fold_convert_loc (loc, rettype, arg);
-+ {
-+ /* For logb(-Inf) we have to return +Inf. */
-+ if (real_isinf (value) && real_isneg (value))
-+ {
-+ REAL_VALUE_TYPE tem;
-+ real_inf (&tem);
-+ return build_real (rettype, tem);
-+ }
-+ return fold_convert_loc (loc, rettype, arg);
-+ }
- /* Fall through... */
- case rvc_zero:
- /* Zero may set errno and/or raise an exception for logb, also
-@@ -10582,6 +10597,7 @@
- CASE_FLT_FN (BUILT_IN_LLRINT):
- return fold_fixed_mathfn (loc, fndecl, arg0);
-
-+ case BUILT_IN_BSWAP16:
- case BUILT_IN_BSWAP32:
- case BUILT_IN_BSWAP64:
- return fold_builtin_bswap (fndecl, arg0);
-@@ -14346,6 +14362,7 @@
- case BUILT_IN_ABS:
- case BUILT_IN_ALLOCA:
- case BUILT_IN_ALLOCA_WITH_ALIGN:
-+ case BUILT_IN_BSWAP16:
- case BUILT_IN_BSWAP32:
- case BUILT_IN_BSWAP64:
- case BUILT_IN_CLZ:
---- a/src/gcc/builtins.def
-+++ b/src/gcc/builtins.def
-@@ -628,6 +628,7 @@
- DEF_EXT_LIB_BUILTIN (BUILT_IN_ALLOCA, "alloca", BT_FN_PTR_SIZE, ATTR_MALLOC_NOTHROW_LEAF_LIST)
- DEF_GCC_BUILTIN (BUILT_IN_APPLY, "apply", BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE, ATTR_NULL)
- DEF_GCC_BUILTIN (BUILT_IN_APPLY_ARGS, "apply_args", BT_FN_PTR_VAR, ATTR_LEAF_LIST)
-+DEF_GCC_BUILTIN (BUILT_IN_BSWAP16, "bswap16", BT_FN_UINT16_UINT16, ATTR_CONST_NOTHROW_LEAF_LIST)
- DEF_GCC_BUILTIN (BUILT_IN_BSWAP32, "bswap32", BT_FN_UINT32_UINT32, ATTR_CONST_NOTHROW_LEAF_LIST)
- DEF_GCC_BUILTIN (BUILT_IN_BSWAP64, "bswap64", BT_FN_UINT64_UINT64, ATTR_CONST_NOTHROW_LEAF_LIST)
- DEF_EXT_LIB_BUILTIN (BUILT_IN_CLEAR_CACHE, "__clear_cache", BT_FN_VOID_PTR_PTR, ATTR_NOTHROW_LEAF_LIST)
---- a/src/gcc/builtin-types.def
-+++ b/src/gcc/builtin-types.def
-@@ -76,6 +76,7 @@
- DEF_PRIMITIVE_TYPE (BT_UINT128, int128_unsigned_type_node)
- DEF_PRIMITIVE_TYPE (BT_INTMAX, intmax_type_node)
- DEF_PRIMITIVE_TYPE (BT_UINTMAX, uintmax_type_node)
-+DEF_PRIMITIVE_TYPE (BT_UINT16, uint16_type_node)
- DEF_PRIMITIVE_TYPE (BT_UINT32, uint32_type_node)
- DEF_PRIMITIVE_TYPE (BT_UINT64, uint64_type_node)
- DEF_PRIMITIVE_TYPE (BT_WORD, (*lang_hooks.types.type_for_mode) (word_mode, 1))
-@@ -226,6 +227,7 @@
- DEF_FUNCTION_TYPE_1 (BT_FN_UINT_UINT, BT_UINT, BT_UINT)
- DEF_FUNCTION_TYPE_1 (BT_FN_ULONG_ULONG, BT_ULONG, BT_ULONG)
- DEF_FUNCTION_TYPE_1 (BT_FN_ULONGLONG_ULONGLONG, BT_ULONGLONG, BT_ULONGLONG)
-+DEF_FUNCTION_TYPE_1 (BT_FN_UINT16_UINT16, BT_UINT16, BT_UINT16)
- DEF_FUNCTION_TYPE_1 (BT_FN_UINT32_UINT32, BT_UINT32, BT_UINT32)
- DEF_FUNCTION_TYPE_1 (BT_FN_UINT64_UINT64, BT_UINT64, BT_UINT64)
-
---- a/src/gcc/c-family/c-common.c
-+++ b/src/gcc/c-family/c-common.c
-@@ -4992,7 +4992,7 @@
- uint8_type_node =
- TREE_TYPE (identifier_global_value (c_get_ident (UINT8_TYPE)));
- if (UINT16_TYPE)
-- uint16_type_node =
-+ c_uint16_type_node =
- TREE_TYPE (identifier_global_value (c_get_ident (UINT16_TYPE)));
- if (UINT32_TYPE)
- c_uint32_type_node =
---- a/src/gcc/c-family/c-common.h
-+++ b/src/gcc/c-family/c-common.h
-@@ -390,7 +390,7 @@
- #define int32_type_node c_global_trees[CTI_INT32_TYPE]
- #define int64_type_node c_global_trees[CTI_INT64_TYPE]
- #define uint8_type_node c_global_trees[CTI_UINT8_TYPE]
--#define uint16_type_node c_global_trees[CTI_UINT16_TYPE]
-+#define c_uint16_type_node c_global_trees[CTI_UINT16_TYPE]
- #define c_uint32_type_node c_global_trees[CTI_UINT32_TYPE]
- #define c_uint64_type_node c_global_trees[CTI_UINT64_TYPE]
- #define int_least8_type_node c_global_trees[CTI_INT_LEAST8_TYPE]
---- a/src/gcc/c-family/c-cppbuiltin.c
-+++ b/src/gcc/c-family/c-cppbuiltin.c
-@@ -448,8 +448,8 @@
- builtin_define_type_max ("__INT64_MAX__", int64_type_node);
- if (uint8_type_node)
- builtin_define_type_max ("__UINT8_MAX__", uint8_type_node);
-- if (uint16_type_node)
-- builtin_define_type_max ("__UINT16_MAX__", uint16_type_node);
-+ if (c_uint16_type_node)
-+ builtin_define_type_max ("__UINT16_MAX__", c_uint16_type_node);
- if (c_uint32_type_node)
- builtin_define_type_max ("__UINT32_MAX__", c_uint32_type_node);
- if (c_uint64_type_node)
---- a/src/gcc/cfgexpand.c
-+++ b/src/gcc/cfgexpand.c
-@@ -3646,6 +3646,8 @@
- avoid_complex_debug_insns (rtx insn, rtx *exp_p, int depth)
- {
- rtx exp = *exp_p;
-+ const char *format_ptr;
-+ int i, j;
-
- if (exp == NULL_RTX)
- return;
-@@ -3668,8 +3670,7 @@
- return;
- }
-
-- const char *format_ptr = GET_RTX_FORMAT (GET_CODE (exp));
-- int i, j;
-+ format_ptr = GET_RTX_FORMAT (GET_CODE (exp));
- for (i = 0; i < GET_RTX_LENGTH (GET_CODE (exp)); i++)
- switch (*format_ptr++)
- {
--- a/src/gcc/ChangeLog
+++ b/src/gcc/ChangeLog
-@@ -1,3 +1,315 @@
+@@ -1,3 +1,461 @@
++2013-10-02 John David Anglin <danglin@gcc.gnu.org>
++
++ * config.gcc (hppa*64*-*-linux*): Don't add pa/t-linux to tmake_file.
++
++2013-09-23 Eric Botcazou <ebotcazou@adacore.com>
++
++ * tree-ssa-ccp.c (insert_clobber_before_stack_restore): Recurse on copy
++ assignment statements.
++
++2013-09-20 John David Anglin <danglin@gcc.gnu.org>
++
++ * config/pa/pa.md: In "scc" insn patterns, change output template to
++ handle const0_rtx in reg_or_0_operand operands.
++
++2013-09-18 Daniel Morris <danielm@ecoscentric.com>
++ Paolo Carlini <paolo.carlini@oracle.com>
++
++ PR c++/58458
++ * doc/implement-cxx.texi: Fix references to the C++ standards.
++
++2013-09-14 John David Anglin <danglin@gcc.gnu.org>
++
++ PR target/58382
++ * config/pa/pa.c (pa_expand_prologue): Change mode in gen_rtx_POST_INC
++ calls to word_mode.
++
++2013-09-12 Terry Guo <terry.guo@arm.com>
++
++ Backport from mainline
++ 2012-09-17 Richard Guenther <rguenther@suse.de>
++
++ PR lto/54598
++ * tree-streamer-in.c (unpack_ts_real_cst_value_fields): Use ggc'ed
++ FIXED_VALUE_TYPE instead of struct fixed_value.
++
++2013-09-10 Richard Earnshaw <rearnsha@arm.com>
++
++ PR target/58361
++ * arm/vfp.md (combine_vcvt_f32_<FCVTI32typename>): Fix pattern to
++ support conditional execution.
++ (combine_vcvt_f64_<FCVTI32typename>): Likewise.
++
++2013-09-01 Uros Bizjak <ubizjak@gmail.com>
++
++ Backport from mainline
++ 2013-08-31 Uros Bizjak <ubizjak@gmail.com>
++
++ * config/alpha/alpha.c (alpha_emit_conditional_move): Update
++ "cmp" RTX before signed_comparison_operator check to account
++ for "code" changes.
++
++2013-09-01 John David Anglin <danglin@gcc.gnu.org>
++
++ * config/pa/pa.md: Allow "const 0" operand 1 in "scc" insns.
++
++2013-09-01 Iain Sandoe <iain@codesourcery.com>
++
++ Backported from 4.8
++ 2012-06-19 Steven Bosscher <steven@gcc.gnu.org>
++
++ * doc/tm.texi.in (TARGET_OBJC_DECLARE_UNRESOLVED_CLASS_REFERENCE,
++ TARGET_OBJC_DECLARE_CLASS_DEFINITION): Add @hooks.
++ (ASM_DECLARE_CLASS_REFERENCE, ASM_DECLARE_UNRESOLVED_REFERENCE):
++ Remove.
++ * doc/tm.texi: Regenerate.
++ * config/darwin.h (ASM_OUTPUT_LABELREF): Remove special case for
++ .objc_class_name_*.
++ * config/darwin-c.c: Include target.h.
++ (darwin_objc_declare_unresolved_class_reference): New function.
++ (darwin_objc_declare_class_definition): New function.
++ (TARGET_OBJC_DECLARE_UNRESOLVED_CLASS_REFERENCE): Define.
++ (TARGET_OBJC_DECLARE_CLASS_DEFINITION): Define.
++
++2013-09-01 Iain Sandoe <iain@codesourcery.com>
++
++ Backport from mainline:
++ 2013-07-22 Uros Bizjak <ubizjak@gmail.com>
++
++ * config/i386/i386.md (nonlocal_goto_receiver): Delete insn if
++ it is not needed after split.
++
++ 2013-07-20 Iain Sandoe <iain@codesourcery.com>
++
++ PR target/51784
++ * config/i386/i386.c (output_set_got) [TARGET_MACHO]: Adjust to emit a
++ second label for nonlocal goto receivers. Don't output pic base labels
++ unless we're producing PIC; mark that action unreachable().
++ (ix86_save_reg): If the function contains a nonlocal label, save the
++ PIC base reg.
++ * config/darwin-protos.h (machopic_should_output_picbase_label): New.
++ * gcc/config/darwin.c (emitted_pic_label_num): New GTY.
++ (update_pic_label_number_if_needed): New.
++ (machopic_output_function_base_name): Adjust for nonlocal receiver
++ case.
++ (machopic_should_output_picbase_label): New.
++ * config/i386/i386.md (enum unspecv): UNSPECV_NLGR: New.
++ (nonlocal_goto_receiver): New insn and split.
++
++2013-08-28 Uros Bizjak <ubizjak@gmail.com>
++
++ Backport from mainline
++ 2013-08-27 Christian Widmer <shadow@umbrox.de>
++
++ PR target/57927
++ * config/i386/driver-i386.c (host_detect_local_cpu): Add detection
++ of Ivy Bridge processors.
++
++2013-08-21 Richard Earnshaw <rearnsha@arm.com>
++
++ PR target/56979
++ * arm.c (aapcs_vfp_allocate): Decompose the argument if the
++ suggested mode for the assignment isn't compatible with the
++ registers required.
++
++2013-08-17 Uros Bizjak <ubizjak@gmail.com>
++
++ Backport from mainline
++ 2013-08-12 Perez Read <netfirewall@gmail.com>
++
++ PR target/58132
++ * config/i386/i386.md (*movabs<mode>_1): Add <ptrsize> PTR before
++ operand 0 for intel asm alternative.
++ (*movabs<mode>_2): Ditto for operand 1.
++
++2013-08-13 Marek Polacek <polacek@redhat.com>
++
++ Backport from 4.8:
++ 2013-0813 Marek Polacek <polacek@redhat.com>
++ Jakub Jelinek <jakub@redhat.com>
++
++ PR tree-optimization/57980
++ * tree-tailcall.c (process_assignment): Return false
++ when not dealing with integers or floats.
++
++2013-08-12 David Edelsohn <dje.gcc@gmail.com>
++
++ Backport from mainline
++ 2013-02-14 Steven Bosscher <steven@gcc.gnu.org>
++
++ * collect2-aix.h: Define F_LOADONLY.
++
+2013-08-02 Eric Botcazou <ebotcazou@adacore.com>
+
+ * config/sparc/sparc.c (sparc_emit_membar_for_model) <SMM_TSO>: Add
@@ -3496,6 +3565,11 @@
+ * config/pa/pa.c (legitimize_pic_address): Before incrementing label
+ nuses, make sure we have a label.
+
++2013-04-11 Richard Biener <rguenther@suse.de>
++
++ * BASE-VER: Set to 4.7.4.
++ * DEV-PHASE: Set to prerelease.
++
2013-04-11 Release Manager
* GCC 4.7.3 released.
@@ -4597,6 +4671,424 @@
+ (read_rtx): Parse and read int iterators mapping and attributes.
+ Initialize int iterators group's hash-table. Memory management.
+ (read_rtx_code): Handle case for rtl field specifier 'i'.
+--- a/src/gcc/DATESTAMP
++++ b/src/gcc/DATESTAMP
+@@ -1 +1 @@
+-20130411
++20131014
+--- a/src/gcc/LINARO-VERSION
++++ b/src/gcc/LINARO-VERSION
+@@ -0,0 +1 @@
++4.7-2013.10
+--- a/src/gcc/Makefile.in
++++ b/src/gcc/Makefile.in
+@@ -1848,11 +1848,12 @@
+ "$(MULTILIB_EXTRA_OPTS)" \
+ "$(MULTILIB_EXCLUSIONS)" \
+ "$(MULTILIB_OSDIRNAMES)" \
++ "$(MULTILIB_REQUIRED)" \
+ "$(MULTIARCH_DIRNAME)" \
+ "@enable_multilib@" \
+ > tmp-mlib.h; \
+ else \
+- $(SHELL) $(srcdir)/genmultilib '' '' '' '' '' '' '' "$(MULTIARCH_DIRNAME)" no \
++ $(SHELL) $(srcdir)/genmultilib '' '' '' '' '' '' '' '' "$(MULTIARCH_DIRNAME)" no \
+ > tmp-mlib.h; \
+ fi
+ $(SHELL) $(srcdir)/../move-if-change tmp-mlib.h multilib.h
+@@ -2570,7 +2571,7 @@
+ $(TM_H) coretypes.h $(TREE_DUMP_H) $(TREE_PASS_H) $(FLAGS_H) \
+ tree-iterator.h $(BASIC_BLOCK_H) $(GIMPLE_H) $(TREE_INLINE_H) \
+ $(VEC_H) langhooks.h alloc-pool.h pointer-set.h $(CFGLOOP_H) \
+- tree-pretty-print.h gimple-pretty-print.h $(DIAGNOSTIC_CORE_H)
++ tree-pretty-print.h gimple-pretty-print.h $(DIAGNOSTIC_CORE_H) $(PARAMS_H)
+ tree-optimize.o : tree-optimize.c $(TREE_FLOW_H) $(CONFIG_H) $(SYSTEM_H) \
+ $(TREE_H) $(TM_P_H) $(GGC_H) output.h \
+ $(DIAGNOSTIC_H) $(BASIC_BLOCK_H) $(FLAGS_H) $(TIMEVAR_H) $(TM_H) \
+@@ -3904,7 +3905,7 @@
+ $(SYSTEM_H) coretypes.h $(GTM_H) errors.h $(READ_MD_H) gensupport.h
+ build/gengenrtl.o : gengenrtl.c $(BCONFIG_H) $(SYSTEM_H) rtl.def
+ gengtype-lex.o build/gengtype-lex.o : gengtype-lex.c gengtype.h $(SYSTEM_H)
+-gengtype-lex.o: $(CONFIG_H)
++gengtype-lex.o: $(CONFIG_H) $(BCONFIG_H)
+ build/gengtype-lex.o: $(BCONFIG_H)
+ gengtype-parse.o build/gengtype-parse.o : gengtype-parse.c gengtype.h \
+ $(SYSTEM_H)
+--- a/src/gcc/ada/ChangeLog
++++ b/src/gcc/ada/ChangeLog
+@@ -1,3 +1,18 @@
++2013-09-18 Eric Botcazou <ebotcazou@adacore.com>
++
++ * gcc-interface/trans.c (Subprogram_Body_to_gnu): Pop the stack of
++ return variables for subprograms using the CICO mechanism.
++
++2013-08-13 Eric Botcazou <ebotcazou@adacore.com>
++
++ * gcc-interface/trans.c (can_equal_min_or_max_val_p): Be prepared for
++ values outside of the range of the type.
++
++2013-05-26 Eric Botcazou <ebotcazou@adacore.com>
++
++ * gcc-interface/trans.c (Attribute_to_gnu) <Attr_Last_Bit>: Add kludge
++ to avoid generating an overflow for -1.
++
+ 2013-04-11 Release Manager
+
+ * GCC 4.7.3 released.
+--- a/src/gcc/ada/gcc-interface/trans.c
++++ b/src/gcc/ada/gcc-interface/trans.c
+@@ -1901,14 +1901,19 @@
+ gnu_result = bitsize_int (bitpos % BITS_PER_UNIT);
+ gnu_result = size_binop (PLUS_EXPR, gnu_result,
+ TYPE_SIZE (TREE_TYPE (gnu_prefix)));
+- gnu_result = size_binop (MINUS_EXPR, gnu_result,
+- bitsize_one_node);
++ /* ??? Avoid a large unsigned result that will overflow when
++ converted to the signed universal_integer. */
++ if (integer_zerop (gnu_result))
++ gnu_result = integer_minus_one_node;
++ else
++ gnu_result
++ = size_binop (MINUS_EXPR, gnu_result, bitsize_one_node);
+ break;
+
+ case Attr_Bit_Position:
+ gnu_result = gnu_field_bitpos;
+ break;
+- }
++ }
+
+ /* If this has a PLACEHOLDER_EXPR, qualify it by the object we are
+ handling. */
+@@ -2227,7 +2232,10 @@
+ if (TREE_CODE (val) != INTEGER_CST)
+ return true;
+
+- return tree_int_cst_equal (val, min_or_max_val) == 1;
++ if (max)
++ return tree_int_cst_lt (val, min_or_max_val) == 0;
++ else
++ return tree_int_cst_lt (min_or_max_val, val) == 0;
+ }
+
+ /* Return true if VAL (of type TYPE) can equal the minimum value of TYPE.
+@@ -3430,6 +3438,8 @@
+ {
+ tree gnu_retval;
+
++ VEC_pop (tree, gnu_return_var_stack);
++
+ add_stmt (gnu_result);
+ add_stmt (build1 (LABEL_EXPR, void_type_node,
+ VEC_last (tree, gnu_return_label_stack)));
+--- a/src/gcc/builtin-types.def
++++ b/src/gcc/builtin-types.def
+@@ -76,6 +76,7 @@
+ DEF_PRIMITIVE_TYPE (BT_UINT128, int128_unsigned_type_node)
+ DEF_PRIMITIVE_TYPE (BT_INTMAX, intmax_type_node)
+ DEF_PRIMITIVE_TYPE (BT_UINTMAX, uintmax_type_node)
++DEF_PRIMITIVE_TYPE (BT_UINT16, uint16_type_node)
+ DEF_PRIMITIVE_TYPE (BT_UINT32, uint32_type_node)
+ DEF_PRIMITIVE_TYPE (BT_UINT64, uint64_type_node)
+ DEF_PRIMITIVE_TYPE (BT_WORD, (*lang_hooks.types.type_for_mode) (word_mode, 1))
+@@ -226,6 +227,7 @@
+ DEF_FUNCTION_TYPE_1 (BT_FN_UINT_UINT, BT_UINT, BT_UINT)
+ DEF_FUNCTION_TYPE_1 (BT_FN_ULONG_ULONG, BT_ULONG, BT_ULONG)
+ DEF_FUNCTION_TYPE_1 (BT_FN_ULONGLONG_ULONGLONG, BT_ULONGLONG, BT_ULONGLONG)
++DEF_FUNCTION_TYPE_1 (BT_FN_UINT16_UINT16, BT_UINT16, BT_UINT16)
+ DEF_FUNCTION_TYPE_1 (BT_FN_UINT32_UINT32, BT_UINT32, BT_UINT32)
+ DEF_FUNCTION_TYPE_1 (BT_FN_UINT64_UINT64, BT_UINT64, BT_UINT64)
+
+--- a/src/gcc/builtins.c
++++ b/src/gcc/builtins.c
+@@ -4626,13 +4626,15 @@
+ return result;
+ }
+
+-/* Expand a call to a bswap builtin with argument ARG0. MODE
+- is the mode to expand with. */
++/* Expand a call to bswap builtin in EXP.
++ Return NULL_RTX if a normal call should be emitted rather than expanding the
++ function in-line. If convenient, the result should be placed in TARGET.
++ SUBTARGET may be used as the target for computing one of EXP's operands. */
+
+ static rtx
+-expand_builtin_bswap (tree exp, rtx target, rtx subtarget)
++expand_builtin_bswap (enum machine_mode target_mode, tree exp, rtx target,
++ rtx subtarget)
+ {
+- enum machine_mode mode;
+ tree arg;
+ rtx op0;
+
+@@ -4640,14 +4642,18 @@
+ return NULL_RTX;
+
+ arg = CALL_EXPR_ARG (exp, 0);
+- mode = TYPE_MODE (TREE_TYPE (arg));
+- op0 = expand_expr (arg, subtarget, VOIDmode, EXPAND_NORMAL);
++ op0 = expand_expr (arg,
++ subtarget && GET_MODE (subtarget) == target_mode
++ ? subtarget : NULL_RTX,
++ target_mode, EXPAND_NORMAL);
++ if (GET_MODE (op0) != target_mode)
++ op0 = convert_to_mode (target_mode, op0, 1);
+
+- target = expand_unop (mode, bswap_optab, op0, target, 1);
++ target = expand_unop (target_mode, bswap_optab, op0, target, 1);
+
+ gcc_assert (target);
+
+- return convert_to_mode (mode, target, 0);
++ return convert_to_mode (target_mode, target, 1);
+ }
+
+ /* Expand a call to a unary builtin in EXP.
+@@ -6084,10 +6090,10 @@
+ expand_stack_restore (CALL_EXPR_ARG (exp, 0));
+ return const0_rtx;
+
++ case BUILT_IN_BSWAP16:
+ case BUILT_IN_BSWAP32:
+ case BUILT_IN_BSWAP64:
+- target = expand_builtin_bswap (exp, target, subtarget);
+-
++ target = expand_builtin_bswap (target_mode, exp, target, subtarget);
+ if (target)
+ return target;
+ break;
+@@ -8176,7 +8182,7 @@
+ return NULL_TREE;
+ }
+
+-/* Fold function call to builtin_bswap and the long and long long
++/* Fold function call to builtin_bswap and the short, long and long long
+ variants. Return NULL_TREE if no simplification can be made. */
+ static tree
+ fold_builtin_bswap (tree fndecl, tree arg)
+@@ -8189,15 +8195,15 @@
+ {
+ HOST_WIDE_INT hi, width, r_hi = 0;
+ unsigned HOST_WIDE_INT lo, r_lo = 0;
+- tree type;
++ tree type = TREE_TYPE (TREE_TYPE (fndecl));
+
+- type = TREE_TYPE (arg);
+ width = TYPE_PRECISION (type);
+ lo = TREE_INT_CST_LOW (arg);
+ hi = TREE_INT_CST_HIGH (arg);
+
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
++ case BUILT_IN_BSWAP16:
+ case BUILT_IN_BSWAP32:
+ case BUILT_IN_BSWAP64:
+ {
+@@ -8227,9 +8233,9 @@
+ }
+
+ if (width < HOST_BITS_PER_WIDE_INT)
+- return build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), r_lo);
++ return build_int_cst (type, r_lo);
+ else
+- return build_int_cst_wide (TREE_TYPE (TREE_TYPE (fndecl)), r_lo, r_hi);
++ return build_int_cst_wide (type, r_lo, r_hi);
+ }
+
+ return NULL_TREE;
+@@ -9692,7 +9698,16 @@
+ case rvc_inf:
+ /* If arg is Inf or NaN and we're logb, return it. */
+ if (TREE_CODE (rettype) == REAL_TYPE)
+- return fold_convert_loc (loc, rettype, arg);
++ {
++ /* For logb(-Inf) we have to return +Inf. */
++ if (real_isinf (value) && real_isneg (value))
++ {
++ REAL_VALUE_TYPE tem;
++ real_inf (&tem);
++ return build_real (rettype, tem);
++ }
++ return fold_convert_loc (loc, rettype, arg);
++ }
+ /* Fall through... */
+ case rvc_zero:
+ /* Zero may set errno and/or raise an exception for logb, also
+@@ -10582,6 +10597,7 @@
+ CASE_FLT_FN (BUILT_IN_LLRINT):
+ return fold_fixed_mathfn (loc, fndecl, arg0);
+
++ case BUILT_IN_BSWAP16:
+ case BUILT_IN_BSWAP32:
+ case BUILT_IN_BSWAP64:
+ return fold_builtin_bswap (fndecl, arg0);
+@@ -14346,6 +14362,7 @@
+ case BUILT_IN_ABS:
+ case BUILT_IN_ALLOCA:
+ case BUILT_IN_ALLOCA_WITH_ALIGN:
++ case BUILT_IN_BSWAP16:
+ case BUILT_IN_BSWAP32:
+ case BUILT_IN_BSWAP64:
+ case BUILT_IN_CLZ:
+--- a/src/gcc/builtins.def
++++ b/src/gcc/builtins.def
+@@ -628,6 +628,7 @@
+ DEF_EXT_LIB_BUILTIN (BUILT_IN_ALLOCA, "alloca", BT_FN_PTR_SIZE, ATTR_MALLOC_NOTHROW_LEAF_LIST)
+ DEF_GCC_BUILTIN (BUILT_IN_APPLY, "apply", BT_FN_PTR_PTR_FN_VOID_VAR_PTR_SIZE, ATTR_NULL)
+ DEF_GCC_BUILTIN (BUILT_IN_APPLY_ARGS, "apply_args", BT_FN_PTR_VAR, ATTR_LEAF_LIST)
++DEF_GCC_BUILTIN (BUILT_IN_BSWAP16, "bswap16", BT_FN_UINT16_UINT16, ATTR_CONST_NOTHROW_LEAF_LIST)
+ DEF_GCC_BUILTIN (BUILT_IN_BSWAP32, "bswap32", BT_FN_UINT32_UINT32, ATTR_CONST_NOTHROW_LEAF_LIST)
+ DEF_GCC_BUILTIN (BUILT_IN_BSWAP64, "bswap64", BT_FN_UINT64_UINT64, ATTR_CONST_NOTHROW_LEAF_LIST)
+ DEF_EXT_LIB_BUILTIN (BUILT_IN_CLEAR_CACHE, "__clear_cache", BT_FN_VOID_PTR_PTR, ATTR_NOTHROW_LEAF_LIST)
+--- a/src/gcc/c-family/ChangeLog
++++ b/src/gcc/c-family/ChangeLog
+@@ -1,3 +1,11 @@
++2013-09-01 Iain Sandoe <iain@codesourcery.com>
++
++ Backported from 4.8
++ 2012-06-19 Steven Bosscher <steven@gcc.gnu.org>
++
++ * c-target.def (objc_declare_unresolved_class_reference,
++ objc_declare_class_definition): Add new hooks.
++
+ 2013-04-11 Release Manager
+
+ * GCC 4.7.3 released.
+--- a/src/gcc/c-family/c-common.c
++++ b/src/gcc/c-family/c-common.c
+@@ -4992,7 +4992,7 @@
+ uint8_type_node =
+ TREE_TYPE (identifier_global_value (c_get_ident (UINT8_TYPE)));
+ if (UINT16_TYPE)
+- uint16_type_node =
++ c_uint16_type_node =
+ TREE_TYPE (identifier_global_value (c_get_ident (UINT16_TYPE)));
+ if (UINT32_TYPE)
+ c_uint32_type_node =
+--- a/src/gcc/c-family/c-common.h
++++ b/src/gcc/c-family/c-common.h
+@@ -390,7 +390,7 @@
+ #define int32_type_node c_global_trees[CTI_INT32_TYPE]
+ #define int64_type_node c_global_trees[CTI_INT64_TYPE]
+ #define uint8_type_node c_global_trees[CTI_UINT8_TYPE]
+-#define uint16_type_node c_global_trees[CTI_UINT16_TYPE]
++#define c_uint16_type_node c_global_trees[CTI_UINT16_TYPE]
+ #define c_uint32_type_node c_global_trees[CTI_UINT32_TYPE]
+ #define c_uint64_type_node c_global_trees[CTI_UINT64_TYPE]
+ #define int_least8_type_node c_global_trees[CTI_INT_LEAST8_TYPE]
+--- a/src/gcc/c-family/c-cppbuiltin.c
++++ b/src/gcc/c-family/c-cppbuiltin.c
+@@ -448,8 +448,8 @@
+ builtin_define_type_max ("__INT64_MAX__", int64_type_node);
+ if (uint8_type_node)
+ builtin_define_type_max ("__UINT8_MAX__", uint8_type_node);
+- if (uint16_type_node)
+- builtin_define_type_max ("__UINT16_MAX__", uint16_type_node);
++ if (c_uint16_type_node)
++ builtin_define_type_max ("__UINT16_MAX__", c_uint16_type_node);
+ if (c_uint32_type_node)
+ builtin_define_type_max ("__UINT32_MAX__", c_uint32_type_node);
+ if (c_uint64_type_node)
+--- a/src/gcc/c-family/c-target.def
++++ b/src/gcc/c-family/c-target.def
+@@ -59,7 +59,21 @@
+ common-format string object when the target provides one.",
+ tree, (tree string),
+ NULL)
+-
++
++DEFHOOK
++(objc_declare_unresolved_class_reference,
++ "Declare that Objective C class @var{classname} is referenced\
++ by the current TU.",
++ void, (const char *classname),
++ NULL)
++
++DEFHOOK
++(objc_declare_class_definition,
++ "Declare that Objective C class @var{classname} is defined\
++ by the current TU.",
++ void, (const char *classname),
++ NULL)
++
+ DEFHOOK
+ (string_object_ref_type_p,
+ "If a target implements string objects then this hook should return\
+--- a/src/gcc/cfgexpand.c
++++ b/src/gcc/cfgexpand.c
+@@ -3646,6 +3646,8 @@
+ avoid_complex_debug_insns (rtx insn, rtx *exp_p, int depth)
+ {
+ rtx exp = *exp_p;
++ const char *format_ptr;
++ int i, j;
+
+ if (exp == NULL_RTX)
+ return;
+@@ -3668,8 +3670,7 @@
+ return;
+ }
+
+- const char *format_ptr = GET_RTX_FORMAT (GET_CODE (exp));
+- int i, j;
++ format_ptr = GET_RTX_FORMAT (GET_CODE (exp));
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (exp)); i++)
+ switch (*format_ptr++)
+ {
+--- a/src/gcc/collect2-aix.h
++++ b/src/gcc/collect2-aix.h
+@@ -1,5 +1,5 @@
+ /* AIX cross support for collect2.
+- Copyright (C) 2009 Free Software Foundation, Inc.
++ Copyright (C) 2009-2013 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+@@ -29,7 +29,7 @@
+ Definitions adapted from bfd. (Fairly heavily adapted in some cases.)
+ ------------------------------------------------------------------------- */
+
+-/* Compatiblity types for bfd. */
++/* Compatibility types for bfd. */
+ typedef unsigned HOST_WIDE_INT bfd_vma;
+
+ /* The size of an archive's fl_magic field. */
+@@ -135,7 +135,7 @@
+ /* The number of entries in the symbol table. */
+ char f_nsyms[4];
+
+- /* The size of the auxillary header. */
++ /* The size of the auxiliary header. */
+ char f_opthdr[2];
+
+ /* Flags. */
+@@ -157,7 +157,7 @@
+ /* The offset of the symbol table from the start of the file. */
+ char f_symptr[8];
+
+- /* The size of the auxillary header. */
++ /* The size of the auxiliary header. */
+ char f_opthdr[2];
+
+ /* Flags. */
+@@ -222,14 +222,15 @@
+ /* The class of symbol (a C_* value). */
+ char n_sclass[1];
+
+- /* The number of auxillary symbols attached to this entry. */
++ /* The number of auxiliary symbols attached to this entry. */
+ char n_numaux[1];
+ };
+
+ /* Definitions required by collect2. */
+ #define C_EXT 2
+
+-#define F_SHROBJ 0x2000
++#define F_SHROBJ 0x2000
++#define F_LOADONLY 0x4000
+
+ #define N_UNDEF ((short) 0)
+ #define N_TMASK 060
--- a/src/gcc/collect2.c
+++ b/src/gcc/collect2.c
@@ -1,7 +1,7 @@
@@ -6227,6 +6719,4714 @@
+}
+#undef AARCH64_CHECK_BUILTIN_MODE
+#undef AARCH64_FIND_FRINT_VARIANT
+--- a/src/gcc/config/aarch64/aarch64-cores.def
++++ b/src/gcc/config/aarch64/aarch64-cores.def
+@@ -0,0 +1,38 @@
++/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
++ Contributed by ARM Ltd.
++
++ This file is part of GCC.
++
++ GCC is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ GCC is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GCC; see the file COPYING3. If not see
++ <http://www.gnu.org/licenses/>. */
++
++/* This is a list of cores that implement AArch64.
++
++ Before using #include to read this file, define a macro:
++
++ AARCH64_CORE(CORE_NAME, CORE_IDENT, ARCH, FLAGS, COSTS)
++
++ The CORE_NAME is the name of the core, represented as a string constant.
++ The CORE_IDENT is the name of the core, represented as an identifier.
++ ARCH is the architecture revision implemented by the chip.
++ FLAGS are the bitwise-or of the traits that apply to that core.
++ This need not include flags implied by the architecture.
++ COSTS is the name of the rtx_costs routine to use. */
++
++/* V8 Architecture Processors.
++ This list currently contains example CPUs that implement AArch64, and
++ therefore serves as a template for adding more CPUs in the future. */
++
++AARCH64_CORE("example-1", large, 8, AARCH64_FL_FPSIMD, generic)
++AARCH64_CORE("example-2", small, 8, AARCH64_FL_FPSIMD, generic)
+--- a/src/gcc/config/aarch64/aarch64-elf-raw.h
++++ b/src/gcc/config/aarch64/aarch64-elf-raw.h
+@@ -0,0 +1,32 @@
++/* Machine description for AArch64 architecture.
++ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
++ Contributed by ARM Ltd.
++
++ This file is part of GCC.
++
++ GCC is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ GCC is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GCC; see the file COPYING3. If not see
++ <http://www.gnu.org/licenses/>. */
++
++/* Support for bare-metal builds. */
++#ifndef GCC_AARCH64_ELF_RAW_H
++#define GCC_AARCH64_ELF_RAW_H
++
++#define STARTFILE_SPEC " crti%O%s crtbegin%O%s crt0%O%s"
++#define ENDFILE_SPEC " crtend%O%s crtn%O%s"
++
++#ifndef LINK_SPEC
++#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X"
++#endif
++
++#endif /* GCC_AARCH64_ELF_RAW_H */
+--- a/src/gcc/config/aarch64/aarch64-elf.h
++++ b/src/gcc/config/aarch64/aarch64-elf.h
+@@ -0,0 +1,132 @@
++/* Machine description for AArch64 architecture.
++ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
++ Contributed by ARM Ltd.
++
++ This file is part of GCC.
++
++ GCC is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ GCC is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GCC; see the file COPYING3. If not see
++ <http://www.gnu.org/licenses/>. */
++
++#ifndef GCC_AARCH64_ELF_H
++#define GCC_AARCH64_ELF_H
++
++
++#define ASM_OUTPUT_LABELREF(FILE, NAME) \
++ aarch64_asm_output_labelref (FILE, NAME)
++
++#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
++ do \
++ { \
++ assemble_name (FILE, NAME1); \
++ fputs (" = ", FILE); \
++ assemble_name (FILE, NAME2); \
++ fputc ('\n', FILE); \
++ } while (0)
++
++#define TEXT_SECTION_ASM_OP "\t.text"
++#define DATA_SECTION_ASM_OP "\t.data"
++#define BSS_SECTION_ASM_OP "\t.bss"
++
++#define CTORS_SECTION_ASM_OP "\t.section\t.init_array,\"aw\",%init_array"
++#define DTORS_SECTION_ASM_OP "\t.section\t.fini_array,\"aw\",%fini_array"
++
++#undef INIT_SECTION_ASM_OP
++#undef FINI_SECTION_ASM_OP
++#define INIT_ARRAY_SECTION_ASM_OP CTORS_SECTION_ASM_OP
++#define FINI_ARRAY_SECTION_ASM_OP DTORS_SECTION_ASM_OP
++
++/* Since we use .init_array/.fini_array we don't need the markers at
++ the start and end of the ctors/dtors arrays. */
++#define CTOR_LIST_BEGIN asm (CTORS_SECTION_ASM_OP)
++#define CTOR_LIST_END /* empty */
++#define DTOR_LIST_BEGIN asm (DTORS_SECTION_ASM_OP)
++#define DTOR_LIST_END /* empty */
++
++#undef TARGET_ASM_CONSTRUCTOR
++#define TARGET_ASM_CONSTRUCTOR aarch64_elf_asm_constructor
++
++#undef TARGET_ASM_DESTRUCTOR
++#define TARGET_ASM_DESTRUCTOR aarch64_elf_asm_destructor
++
++#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
++/* Support for -falign-* switches. Use .p2align to ensure that code
++ sections are padded with NOP instructions, rather than zeros. */
++#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE, LOG, MAX_SKIP) \
++ do \
++ { \
++ if ((LOG) != 0) \
++ { \
++ if ((MAX_SKIP) == 0) \
++ fprintf ((FILE), "\t.p2align %d\n", (int) (LOG)); \
++ else \
++ fprintf ((FILE), "\t.p2align %d,,%d\n", \
++ (int) (LOG), (int) (MAX_SKIP)); \
++ } \
++ } while (0)
++
++#endif /* HAVE_GAS_MAX_SKIP_P2ALIGN */
++
++#define JUMP_TABLES_IN_TEXT_SECTION 0
++
++#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
++ do { \
++ switch (GET_MODE (BODY)) \
++ { \
++ case QImode: \
++ asm_fprintf (STREAM, "\t.byte\t(%LL%d - %LLrtx%d) / 4\n", \
++ VALUE, REL); \
++ break; \
++ case HImode: \
++ asm_fprintf (STREAM, "\t.2byte\t(%LL%d - %LLrtx%d) / 4\n", \
++ VALUE, REL); \
++ break; \
++ case SImode: \
++ case DImode: /* See comment in aarch64_output_casesi. */ \
++ asm_fprintf (STREAM, "\t.word\t(%LL%d - %LLrtx%d) / 4\n", \
++ VALUE, REL); \
++ break; \
++ default: \
++ gcc_unreachable (); \
++ } \
++ } while (0)
++
++#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
++ fprintf(STREAM, "\t.align\t%d\n", (int)POWER)
++
++#define ASM_COMMENT_START "//"
++
++#define REGISTER_PREFIX ""
++#define LOCAL_LABEL_PREFIX "."
++#define USER_LABEL_PREFIX ""
++
++#define GLOBAL_ASM_OP "\t.global\t"
++
++#ifndef ASM_SPEC
++#define ASM_SPEC "\
++%{mbig-endian:-EB} \
++%{mlittle-endian:-EL} \
++%{mcpu=*:-mcpu=%*} \
++%{march=*:-march=%*}"
++#endif
++
++#undef TYPE_OPERAND_FMT
++#define TYPE_OPERAND_FMT "%%%s"
++
++#undef TARGET_ASM_NAMED_SECTION
++#define TARGET_ASM_NAMED_SECTION aarch64_elf_asm_named_section
++
++/* Stabs debug not required. */
++#undef DBX_DEBUGGING_INFO
++
++#endif /* GCC_AARCH64_ELF_H */
+--- a/src/gcc/config/aarch64/aarch64-generic.md
++++ b/src/gcc/config/aarch64/aarch64-generic.md
+@@ -0,0 +1,38 @@
++;; Machine description for AArch64 architecture.
++;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
++;; Contributed by ARM Ltd.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful, but
++;; WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++;; General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++;; Generic scheduler
++
++(define_automaton "aarch64")
++
++(define_cpu_unit "core" "aarch64")
++
++(define_attr "is_load" "yes,no"
++ (if_then_else (eq_attr "v8type" "fpsimd_load,fpsimd_load2,load1,load2")
++ (const_string "yes")
++ (const_string "no")))
++
++(define_insn_reservation "load" 2
++ (eq_attr "is_load" "yes")
++ "core")
++
++(define_insn_reservation "nonload" 1
++ (eq_attr "is_load" "no")
++ "core")
+--- a/src/gcc/config/aarch64/aarch64-linux.h
++++ b/src/gcc/config/aarch64/aarch64-linux.h
+@@ -0,0 +1,44 @@
++/* Machine description for AArch64 architecture.
++ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
++ Contributed by ARM Ltd.
++
++ This file is part of GCC.
++
++ GCC is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ GCC is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GCC; see the file COPYING3. If not see
++ <http://www.gnu.org/licenses/>. */
++
++#ifndef GCC_AARCH64_LINUX_H
++#define GCC_AARCH64_LINUX_H
++
++#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-aarch64.so.1"
++
++#define LINUX_TARGET_LINK_SPEC "%{h*} \
++ %{static:-Bstatic} \
++ %{shared:-shared} \
++ %{symbolic:-Bsymbolic} \
++ %{rdynamic:-export-dynamic} \
++ -dynamic-linker " GNU_USER_DYNAMIC_LINKER " \
++ -X \
++ %{mbig-endian:-EB} %{mlittle-endian:-EL}"
++
++#define LINK_SPEC LINUX_TARGET_LINK_SPEC
++
++#define TARGET_OS_CPP_BUILTINS() \
++ do \
++ { \
++ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
++ } \
++ while (0)
++
++#endif /* GCC_AARCH64_LINUX_H */
+--- a/src/gcc/config/aarch64/aarch64-modes.def
++++ b/src/gcc/config/aarch64/aarch64-modes.def
+@@ -0,0 +1,54 @@
++/* Machine description for AArch64 architecture.
++ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
++ Contributed by ARM Ltd.
++
++ This file is part of GCC.
++
++ GCC is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ GCC is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GCC; see the file COPYING3. If not see
++ <http://www.gnu.org/licenses/>. */
++
++CC_MODE (CCFP);
++CC_MODE (CCFPE);
++CC_MODE (CC_SWP);
++CC_MODE (CC_ZESWP); /* zero-extend LHS (but swap to make it RHS). */
++CC_MODE (CC_SESWP); /* sign-extend LHS (but swap to make it RHS). */
++CC_MODE (CC_NZ); /* Only N and Z bits of condition flags are valid. */
++
++/* Vector modes. */
++VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI. */
++VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI. */
++VECTOR_MODES (FLOAT, 8); /* V2SF. */
++VECTOR_MODES (FLOAT, 16); /* V4SF V2DF. */
++
++/* Oct Int: 256-bit integer mode needed for 32-byte vector arguments. */
++INT_MODE (OI, 32);
++
++/* Opaque integer modes for 3, 6 or 8 Neon double registers (2 is
++ TImode). */
++INT_MODE (EI, 24);
++INT_MODE (CI, 48);
++INT_MODE (XI, 64);
++
++/* Vector modes for register lists. */
++VECTOR_MODES (INT, 32); /* V32QI V16HI V8SI V4DI. */
++VECTOR_MODES (FLOAT, 32); /* V8SF V4DF. */
++
++VECTOR_MODES (INT, 48); /* V32QI V16HI V8SI V4DI. */
++VECTOR_MODES (FLOAT, 48); /* V8SF V4DF. */
++
++VECTOR_MODES (INT, 64); /* V32QI V16HI V8SI V4DI. */
++VECTOR_MODES (FLOAT, 64); /* V8SF V4DF. */
++
++/* Quad float: 128-bit floating mode for long doubles. */
++FLOAT_MODE (TF, 16, ieee_quad_format);
+--- a/src/gcc/config/aarch64/aarch64-option-extensions.def
++++ b/src/gcc/config/aarch64/aarch64-option-extensions.def
+@@ -0,0 +1,37 @@
++/* Copyright (C) 2012 Free Software Foundation, Inc.
++ Contributed by ARM Ltd.
++
++ This file is part of GCC.
++
++ GCC is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ GCC is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GCC; see the file COPYING3. If not see
++ <http://www.gnu.org/licenses/>. */
++
++/* This is a list of ISA extensions in AArch64.
++
++ Before using #include to read this file, define a macro:
++
++ AARCH64_OPT_EXTENSION(EXT_NAME, FLAGS_ON, FLAGS_OFF)
++
++ EXT_NAME is the name of the extension, represented as a string constant.
++ FLAGS_ON are the bitwise-or of the features that the extension adds.
++ FLAGS_OFF are the bitwise-or of the features that the extension removes. */
++
++/* V8 Architecture Extensions.
++ This list currently contains example extensions for CPUs that implement
++ AArch64, and therefore serves as a template for adding more CPUs in the
++ future. */
++
++AARCH64_OPT_EXTENSION("fp", AARCH64_FL_FP, AARCH64_FL_FPSIMD | AARCH64_FL_CRYPTO)
++AARCH64_OPT_EXTENSION("simd", AARCH64_FL_FPSIMD, AARCH64_FL_SIMD | AARCH64_FL_CRYPTO)
++AARCH64_OPT_EXTENSION("crypto", AARCH64_FL_CRYPTO | AARCH64_FL_FPSIMD, AARCH64_FL_CRYPTO)
+--- a/src/gcc/config/aarch64/aarch64-opts.h
++++ b/src/gcc/config/aarch64/aarch64-opts.h
+@@ -0,0 +1,64 @@
++/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
++ Contributed by ARM Ltd.
++
++ This file is part of GCC.
++
++ GCC is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published
++ by the Free Software Foundation; either version 3, or (at your
++ option) any later version.
++
++ GCC is distributed in the hope that it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++ License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GCC; see the file COPYING3. If not see
++ <http://www.gnu.org/licenses/>. */
++
++/* Definitions for option handling for AArch64. */
++
++#ifndef GCC_AARCH64_OPTS_H
++#define GCC_AARCH64_OPTS_H
++
++/* The various cores that implement AArch64. */
++enum aarch64_processor
++{
++#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
++ IDENT,
++#include "aarch64-cores.def"
++#undef AARCH64_CORE
++ /* Used to indicate that no processor has been specified. */
++ generic,
++ /* Used to mark the end of the processor table. */
++ aarch64_none
++};
++
++/* TLS types. */
++enum aarch64_tls_type {
++ TLS_TRADITIONAL,
++ TLS_DESCRIPTORS
++};
++
++/* The code model defines the address generation strategy.
++ Most have a PIC and non-PIC variant. */
++enum aarch64_code_model {
++ /* Static code and data fit within a 1MB region.
++ Not fully implemented, mostly treated as SMALL. */
++ AARCH64_CMODEL_TINY,
++ /* Static code, data and GOT/PLT fit within a 1MB region.
++ Not fully implemented, mostly treated as SMALL_PIC. */
++ AARCH64_CMODEL_TINY_PIC,
++ /* Static code and data fit within a 4GB region.
++ The default non-PIC code model. */
++ AARCH64_CMODEL_SMALL,
++ /* Static code, data and GOT/PLT fit within a 4GB region.
++ The default PIC code model. */
++ AARCH64_CMODEL_SMALL_PIC,
++ /* No assumptions about addresses of code and data.
++ The PIC variant is not yet implemented. */
++ AARCH64_CMODEL_LARGE
++};
++
++#endif
+--- a/src/gcc/config/aarch64/aarch64-protos.h
++++ b/src/gcc/config/aarch64/aarch64-protos.h
+@@ -0,0 +1,254 @@
++/* Machine description for AArch64 architecture.
++ Copyright (C) 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
++ Contributed by ARM Ltd.
++
++ This file is part of GCC.
++
++ GCC is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ GCC is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GCC; see the file COPYING3. If not see
++ <http://www.gnu.org/licenses/>. */
++
++
++#ifndef GCC_AARCH64_PROTOS_H
++#define GCC_AARCH64_PROTOS_H
++
++/*
++ SYMBOL_CONTEXT_ADR
++ The symbol is used in a load-address operation.
++ SYMBOL_CONTEXT_MEM
++ The symbol is used as the address in a MEM.
++ */
++enum aarch64_symbol_context
++{
++ SYMBOL_CONTEXT_MEM,
++ SYMBOL_CONTEXT_ADR
++};
++
++/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
++ high and lo relocs that calculate the base address using a PC
++ relative reloc.
++ So to get the address of foo, we generate
++ adrp x0, foo
++ add x0, x0, :lo12:foo
++
++ To load or store something to foo, we could use the corresponding
++ load store variants that generate an
++ ldr x0, [x0,:lo12:foo]
++ or
++ str x1, [x0, :lo12:foo]
++
++ This corresponds to the small code model of the compiler.
++
++ SYMBOL_SMALL_GOT: Similar to the one above but this
++ gives us the GOT entry of the symbol being referred to :
++ Thus calculating the GOT entry for foo is done using the
++ following sequence of instructions. The ADRP instruction
++ gets us to the page containing the GOT entry of the symbol
++ and the got_lo12 gets us the actual offset in it.
++
++ adrp x0, :got:foo
++ ldr x0, [x0, :gotoff_lo12:foo]
++
++ This corresponds to the small PIC model of the compiler.
++
++ SYMBOL_SMALL_TLSGD
++ SYMBOL_SMALL_TLSDESC
++ SYMBOL_SMALL_GOTTPREL
++ SYMBOL_SMALL_TPREL
++ Each of these represents a thread-local symbol, and corresponds to the
++ thread local storage relocation operator for the symbol being referred to.
++
++ SYMBOL_FORCE_TO_MEM : Global variables are addressed using
++ constant pool. All variable addresses are spilled into constant
++ pools. The constant pools themselves are addressed using PC
++ relative accesses. This only works for the large code model.
++ */
++enum aarch64_symbol_type
++{
++ SYMBOL_SMALL_ABSOLUTE,
++ SYMBOL_SMALL_GOT,
++ SYMBOL_SMALL_TLSGD,
++ SYMBOL_SMALL_TLSDESC,
++ SYMBOL_SMALL_GOTTPREL,
++ SYMBOL_SMALL_TPREL,
++ SYMBOL_FORCE_TO_MEM
++};
++
++/* A set of tuning parameters contains references to size and time
++ cost models and vectors for address cost calculations, register
++ move costs and memory move costs. */
++
++/* Extra costs for specific insns. Only records the cost above a
++ single insn. */
++
++struct cpu_rtx_cost_table
++{
++ const int memory_load;
++ const int memory_store;
++ const int register_shift;
++ const int int_divide;
++ const int float_divide;
++ const int double_divide;
++ const int int_multiply;
++ const int int_multiply_extend;
++ const int int_multiply_add;
++ const int int_multiply_extend_add;
++ const int float_multiply;
++ const int double_multiply;
++};
++
++/* Additional cost for addresses. */
++struct cpu_addrcost_table
++{
++ const int pre_modify;
++ const int post_modify;
++ const int register_offset;
++ const int register_extend;
++ const int imm_offset;
++};
++
++/* Additional costs for register copies. Cost is for one register. */
++struct cpu_regmove_cost
++{
++ const int GP2GP;
++ const int GP2FP;
++ const int FP2GP;
++ const int FP2FP;
++};
++
++struct tune_params
++{
++ const struct cpu_rtx_cost_table *const insn_extra_cost;
++ const struct cpu_addrcost_table *const addr_cost;
++ const struct cpu_regmove_cost *const regmove_cost;
++ const int memmov_cost;
++};
++
++HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
++bool aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode);
++bool aarch64_constant_address_p (rtx);
++bool aarch64_float_const_zero_rtx_p (rtx);
++bool aarch64_function_arg_regno_p (unsigned);
++bool aarch64_gen_movmemqi (rtx *);
++bool aarch64_is_extend_from_extract (enum machine_mode, rtx, rtx);
++bool aarch64_is_long_call_p (rtx);
++bool aarch64_label_mentioned_p (rtx);
++bool aarch64_legitimate_pic_operand_p (rtx);
++bool aarch64_move_imm (HOST_WIDE_INT, enum machine_mode);
++bool aarch64_pad_arg_upward (enum machine_mode, const_tree);
++bool aarch64_pad_reg_upward (enum machine_mode, const_tree, bool);
++bool aarch64_regno_ok_for_base_p (int, bool);
++bool aarch64_regno_ok_for_index_p (int, bool);
++bool aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode);
++bool aarch64_simd_imm_zero_p (rtx, enum machine_mode);
++bool aarch64_simd_shift_imm_p (rtx, enum machine_mode, bool);
++bool aarch64_symbolic_address_p (rtx);
++bool aarch64_symbolic_constant_p (rtx, enum aarch64_symbol_context,
++ enum aarch64_symbol_type *);
++bool aarch64_uimm12_shift (HOST_WIDE_INT);
++const char *aarch64_output_casesi (rtx *);
++enum aarch64_symbol_type aarch64_classify_symbol (rtx,
++ enum aarch64_symbol_context);
++enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
++int aarch64_asm_preferred_eh_data_format (int, int);
++int aarch64_hard_regno_mode_ok (unsigned, enum machine_mode);
++int aarch64_hard_regno_nregs (unsigned, enum machine_mode);
++int aarch64_simd_attr_length_move (rtx);
++int aarch64_simd_immediate_valid_for_move (rtx, enum machine_mode, rtx *,
++ int *, unsigned char *, int *,
++ int *);
++int aarch64_uxt_size (int, HOST_WIDE_INT);
++rtx aarch64_final_eh_return_addr (void);
++rtx aarch64_legitimize_reload_address (rtx *, enum machine_mode, int, int, int);
++const char *aarch64_output_move_struct (rtx *operands);
++rtx aarch64_return_addr (int, rtx);
++rtx aarch64_simd_gen_const_vector_dup (enum machine_mode, int);
++bool aarch64_simd_mem_operand_p (rtx);
++rtx aarch64_simd_vect_par_cnst_half (enum machine_mode, bool);
++rtx aarch64_tls_get_addr (void);
++unsigned aarch64_dbx_register_number (unsigned);
++enum reg_class aarch64_regno_regclass (unsigned);
++unsigned aarch64_trampoline_size (void);
++void aarch64_asm_output_labelref (FILE *, const char *);
++void aarch64_elf_asm_named_section (const char *, unsigned, tree);
++void aarch64_expand_epilogue (bool);
++void aarch64_expand_mov_immediate (rtx, rtx);
++void aarch64_expand_prologue (void);
++void aarch64_expand_vector_init (rtx, rtx);
++void aarch64_function_profiler (FILE *, int);
++void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
++ const_tree, unsigned);
++void aarch64_init_expanders (void);
++void aarch64_print_operand (FILE *, rtx, char);
++void aarch64_print_operand_address (FILE *, rtx);
++
++/* Initialize builtins for SIMD intrinsics. */
++void init_aarch64_simd_builtins (void);
++
++void aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
++void aarch64_simd_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);
++
++/* Emit code to place a AdvSIMD pair result in memory locations (with equal
++ registers). */
++void aarch64_simd_emit_pair_result_insn (enum machine_mode,
++ rtx (*intfn) (rtx, rtx, rtx), rtx,
++ rtx);
++
++/* Expand builtins for SIMD intrinsics. */
++rtx aarch64_simd_expand_builtin (int, tree, rtx);
++
++void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
++
++/* Emit code for reinterprets. */
++void aarch64_simd_reinterpret (rtx, rtx);
++
++void aarch64_split_128bit_move (rtx, rtx);
++
++bool aarch64_split_128bit_move_p (rtx, rtx);
++
++/* Check for a legitimate floating point constant for FMOV. */
++bool aarch64_float_const_representable_p (rtx);
++
++#if defined (RTX_CODE)
++
++bool aarch64_legitimate_address_p (enum machine_mode, rtx, RTX_CODE, bool);
++enum machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx);
++rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx);
++
++void aarch64_expand_compare_and_swap (rtx op[]);
++void aarch64_split_compare_and_swap (rtx op[]);
++void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);
++
++#endif /* RTX_CODE */
++
++rtx aarch64_load_tp (rtx target);
++void aarch64_init_builtins (void);
++rtx aarch64_expand_builtin (tree exp,
++ rtx target,
++ rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED);
++tree aarch64_builtin_decl (unsigned, bool ATTRIBUTE_UNUSED);
++
++tree
++aarch64_builtin_vectorized_function (tree fndecl,
++ tree type_out,
++ tree type_in);
++
++extern void aarch64_split_combinev16qi (rtx operands[3]);
++extern void aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
++extern bool
++aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);
++
++char* aarch64_output_simd_mov_immediate (rtx *, enum machine_mode, unsigned);
++#endif /* GCC_AARCH64_PROTOS_H */
+--- a/src/gcc/config/aarch64/aarch64-simd-builtins.def
++++ b/src/gcc/config/aarch64/aarch64-simd-builtins.def
+@@ -0,0 +1,258 @@
++/* Machine description for AArch64 architecture.
++ Copyright (C) 2012-2013 Free Software Foundation, Inc.
++ Contributed by ARM Ltd.
++
++ This file is part of GCC.
++
++ GCC is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ GCC is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GCC; see the file COPYING3. If not see
++ <http://www.gnu.org/licenses/>. */
++
++/* In the list below, the BUILTIN_<ITERATOR> macros should
++ correspond to the iterator used to construct the instruction's
++ patterns in aarch64-simd.md. A helpful idiom to follow when
++ adding new builtins is to add a line for each pattern in the md
++ file. Thus, ADDP, which has one pattern defined for the VD_BHSI
++ iterator, and one for DImode, has two entries below. */
++
++ BUILTIN_VD_RE (CREATE, create)
++ BUILTIN_VQ_S (GETLANE, get_lane_signed)
++ BUILTIN_VDQ (GETLANE, get_lane_unsigned)
++ BUILTIN_VDQF (GETLANE, get_lane)
++ VAR1 (GETLANE, get_lane, di)
++ BUILTIN_VDC (COMBINE, combine)
++ BUILTIN_VB (BINOP, pmul)
++ BUILTIN_VDQF (UNOP, sqrt)
++ BUILTIN_VD_BHSI (BINOP, addp)
++ VAR1 (UNOP, addp, di)
++
++ BUILTIN_VD_RE (REINTERP, reinterpretdi)
++ BUILTIN_VDC (REINTERP, reinterpretv8qi)
++ BUILTIN_VDC (REINTERP, reinterpretv4hi)
++ BUILTIN_VDC (REINTERP, reinterpretv2si)
++ BUILTIN_VDC (REINTERP, reinterpretv2sf)
++ BUILTIN_VQ (REINTERP, reinterpretv16qi)
++ BUILTIN_VQ (REINTERP, reinterpretv8hi)
++ BUILTIN_VQ (REINTERP, reinterpretv4si)
++ BUILTIN_VQ (REINTERP, reinterpretv4sf)
++ BUILTIN_VQ (REINTERP, reinterpretv2di)
++ BUILTIN_VQ (REINTERP, reinterpretv2df)
++
++ BUILTIN_VDQ_I (BINOP, dup_lane)
++ BUILTIN_SDQ_I (BINOP, dup_lane)
++ /* Implemented by aarch64_<sur>q<r>shl<mode>. */
++ BUILTIN_VSDQ_I (BINOP, sqshl)
++ BUILTIN_VSDQ_I (BINOP, uqshl)
++ BUILTIN_VSDQ_I (BINOP, sqrshl)
++ BUILTIN_VSDQ_I (BINOP, uqrshl)
++ /* Implemented by aarch64_<su_optab><optab><mode>. */
++ BUILTIN_VSDQ_I (BINOP, sqadd)
++ BUILTIN_VSDQ_I (BINOP, uqadd)
++ BUILTIN_VSDQ_I (BINOP, sqsub)
++ BUILTIN_VSDQ_I (BINOP, uqsub)
++ /* Implemented by aarch64_<sur>qadd<mode>. */
++ BUILTIN_VSDQ_I (BINOP, suqadd)
++ BUILTIN_VSDQ_I (BINOP, usqadd)
++
++ /* Implemented by aarch64_get_dreg<VSTRUCT:mode><VDC:mode>. */
++ BUILTIN_VDC (GETLANE, get_dregoi)
++ BUILTIN_VDC (GETLANE, get_dregci)
++ BUILTIN_VDC (GETLANE, get_dregxi)
++ /* Implemented by aarch64_get_qreg<VSTRUCT:mode><VQ:mode>. */
++ BUILTIN_VQ (GETLANE, get_qregoi)
++ BUILTIN_VQ (GETLANE, get_qregci)
++ BUILTIN_VQ (GETLANE, get_qregxi)
++ /* Implemented by aarch64_set_qreg<VSTRUCT:mode><VQ:mode>. */
++ BUILTIN_VQ (SETLANE, set_qregoi)
++ BUILTIN_VQ (SETLANE, set_qregci)
++ BUILTIN_VQ (SETLANE, set_qregxi)
++ /* Implemented by aarch64_ld<VSTRUCT:nregs><VDC:mode>. */
++ BUILTIN_VDC (LOADSTRUCT, ld2)
++ BUILTIN_VDC (LOADSTRUCT, ld3)
++ BUILTIN_VDC (LOADSTRUCT, ld4)
++ /* Implemented by aarch64_ld<VSTRUCT:nregs><VQ:mode>. */
++ BUILTIN_VQ (LOADSTRUCT, ld2)
++ BUILTIN_VQ (LOADSTRUCT, ld3)
++ BUILTIN_VQ (LOADSTRUCT, ld4)
++ /* Implemented by aarch64_st<VSTRUCT:nregs><VDC:mode>. */
++ BUILTIN_VDC (STORESTRUCT, st2)
++ BUILTIN_VDC (STORESTRUCT, st3)
++ BUILTIN_VDC (STORESTRUCT, st4)
++ /* Implemented by aarch64_st<VSTRUCT:nregs><VQ:mode>. */
++ BUILTIN_VQ (STORESTRUCT, st2)
++ BUILTIN_VQ (STORESTRUCT, st3)
++ BUILTIN_VQ (STORESTRUCT, st4)
++
++ BUILTIN_VQW (BINOP, saddl2)
++ BUILTIN_VQW (BINOP, uaddl2)
++ BUILTIN_VQW (BINOP, ssubl2)
++ BUILTIN_VQW (BINOP, usubl2)
++ BUILTIN_VQW (BINOP, saddw2)
++ BUILTIN_VQW (BINOP, uaddw2)
++ BUILTIN_VQW (BINOP, ssubw2)
++ BUILTIN_VQW (BINOP, usubw2)
++ /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>. */
++ BUILTIN_VDW (BINOP, saddl)
++ BUILTIN_VDW (BINOP, uaddl)
++ BUILTIN_VDW (BINOP, ssubl)
++ BUILTIN_VDW (BINOP, usubl)
++ /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>. */
++ BUILTIN_VDW (BINOP, saddw)
++ BUILTIN_VDW (BINOP, uaddw)
++ BUILTIN_VDW (BINOP, ssubw)
++ BUILTIN_VDW (BINOP, usubw)
++ /* Implemented by aarch64_<sur>h<addsub><mode>. */
++ BUILTIN_VQ_S (BINOP, shadd)
++ BUILTIN_VQ_S (BINOP, uhadd)
++ BUILTIN_VQ_S (BINOP, srhadd)
++ BUILTIN_VQ_S (BINOP, urhadd)
++ /* Implemented by aarch64_<sur><addsub>hn<mode>. */
++ BUILTIN_VQN (BINOP, addhn)
++ BUILTIN_VQN (BINOP, raddhn)
++ /* Implemented by aarch64_<sur><addsub>hn2<mode>. */
++ BUILTIN_VQN (TERNOP, addhn2)
++ BUILTIN_VQN (TERNOP, raddhn2)
++
++ BUILTIN_VSQN_HSDI (UNOP, sqmovun)
++ /* Implemented by aarch64_<sur>qmovn<mode>. */
++ BUILTIN_VSQN_HSDI (UNOP, sqmovn)
++ BUILTIN_VSQN_HSDI (UNOP, uqmovn)
++ /* Implemented by aarch64_s<optab><mode>. */
++ BUILTIN_VSDQ_I_BHSI (UNOP, sqabs)
++ BUILTIN_VSDQ_I_BHSI (UNOP, sqneg)
++
++ BUILTIN_VSD_HSI (QUADOP, sqdmlal_lane)
++ BUILTIN_VSD_HSI (QUADOP, sqdmlsl_lane)
++ BUILTIN_VSD_HSI (QUADOP, sqdmlal_laneq)
++ BUILTIN_VSD_HSI (QUADOP, sqdmlsl_laneq)
++ BUILTIN_VQ_HSI (TERNOP, sqdmlal2)
++ BUILTIN_VQ_HSI (TERNOP, sqdmlsl2)
++ BUILTIN_VQ_HSI (QUADOP, sqdmlal2_lane)
++ BUILTIN_VQ_HSI (QUADOP, sqdmlsl2_lane)
++ BUILTIN_VQ_HSI (QUADOP, sqdmlal2_laneq)
++ BUILTIN_VQ_HSI (QUADOP, sqdmlsl2_laneq)
++ BUILTIN_VQ_HSI (TERNOP, sqdmlal2_n)
++ BUILTIN_VQ_HSI (TERNOP, sqdmlsl2_n)
++ /* Implemented by aarch64_sqdml<SBINQOPS:as>l<mode>. */
++ BUILTIN_VSD_HSI (TERNOP, sqdmlal)
++ BUILTIN_VSD_HSI (TERNOP, sqdmlsl)
++ /* Implemented by aarch64_sqdml<SBINQOPS:as>l_n<mode>. */
++ BUILTIN_VD_HSI (TERNOP, sqdmlal_n)
++ BUILTIN_VD_HSI (TERNOP, sqdmlsl_n)
++
++ BUILTIN_VSD_HSI (BINOP, sqdmull)
++ BUILTIN_VSD_HSI (TERNOP, sqdmull_lane)
++ BUILTIN_VD_HSI (TERNOP, sqdmull_laneq)
++ BUILTIN_VD_HSI (BINOP, sqdmull_n)
++ BUILTIN_VQ_HSI (BINOP, sqdmull2)
++ BUILTIN_VQ_HSI (TERNOP, sqdmull2_lane)
++ BUILTIN_VQ_HSI (TERNOP, sqdmull2_laneq)
++ BUILTIN_VQ_HSI (BINOP, sqdmull2_n)
++ /* Implemented by aarch64_sq<r>dmulh<mode>. */
++ BUILTIN_VSDQ_HSI (BINOP, sqdmulh)
++ BUILTIN_VSDQ_HSI (BINOP, sqrdmulh)
++ /* Implemented by aarch64_sq<r>dmulh_lane<q><mode>. */
++ BUILTIN_VDQHS (TERNOP, sqdmulh_lane)
++ BUILTIN_VDQHS (TERNOP, sqdmulh_laneq)
++ BUILTIN_VDQHS (TERNOP, sqrdmulh_lane)
++ BUILTIN_VDQHS (TERNOP, sqrdmulh_laneq)
++ BUILTIN_SD_HSI (TERNOP, sqdmulh_lane)
++ BUILTIN_SD_HSI (TERNOP, sqrdmulh_lane)
++
++ BUILTIN_VSDQ_I_DI (BINOP, sshl_n)
++ BUILTIN_VSDQ_I_DI (BINOP, ushl_n)
++ /* Implemented by aarch64_<sur>shl<mode>. */
++ BUILTIN_VSDQ_I_DI (BINOP, sshl)
++ BUILTIN_VSDQ_I_DI (BINOP, ushl)
++ BUILTIN_VSDQ_I_DI (BINOP, srshl)
++ BUILTIN_VSDQ_I_DI (BINOP, urshl)
++
++ BUILTIN_VSDQ_I_DI (SHIFTIMM, sshr_n)
++ BUILTIN_VSDQ_I_DI (SHIFTIMM, ushr_n)
++ /* Implemented by aarch64_<sur>shr_n<mode>. */
++ BUILTIN_VSDQ_I_DI (SHIFTIMM, srshr_n)
++ BUILTIN_VSDQ_I_DI (SHIFTIMM, urshr_n)
++ /* Implemented by aarch64_<sur>sra_n<mode>. */
++ BUILTIN_VSDQ_I_DI (SHIFTACC, ssra_n)
++ BUILTIN_VSDQ_I_DI (SHIFTACC, usra_n)
++ BUILTIN_VSDQ_I_DI (SHIFTACC, srsra_n)
++ BUILTIN_VSDQ_I_DI (SHIFTACC, ursra_n)
++ /* Implemented by aarch64_<sur>shll_n<mode>. */
++ BUILTIN_VDW (SHIFTIMM, sshll_n)
++ BUILTIN_VDW (SHIFTIMM, ushll_n)
++ /* Implemented by aarch64_<sur>shll2_n<mode>. */
++ BUILTIN_VQW (SHIFTIMM, sshll2_n)
++ BUILTIN_VQW (SHIFTIMM, ushll2_n)
++ /* Implemented by aarch64_<sur>q<r>shr<u>n_n<mode>. */
++ BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrun_n)
++ BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrun_n)
++ BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrn_n)
++ BUILTIN_VSQN_HSDI (SHIFTIMM, uqshrn_n)
++ BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrn_n)
++ BUILTIN_VSQN_HSDI (SHIFTIMM, uqrshrn_n)
++ /* Implemented by aarch64_<sur>s<lr>i_n<mode>. */
++ BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssri_n)
++ BUILTIN_VSDQ_I_DI (SHIFTINSERT, usri_n)
++ BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssli_n)
++ BUILTIN_VSDQ_I_DI (SHIFTINSERT, usli_n)
++ /* Implemented by aarch64_<sur>qshl<u>_n<mode>. */
++ BUILTIN_VSDQ_I (SHIFTIMM, sqshlu_n)
++ BUILTIN_VSDQ_I (SHIFTIMM, sqshl_n)
++ BUILTIN_VSDQ_I (SHIFTIMM, uqshl_n)
++
++ /* Implemented by aarch64_cm<cmp><mode>. */
++ BUILTIN_VSDQ_I_DI (BINOP, cmeq)
++ BUILTIN_VSDQ_I_DI (BINOP, cmge)
++ BUILTIN_VSDQ_I_DI (BINOP, cmgt)
++ BUILTIN_VSDQ_I_DI (BINOP, cmle)
++ BUILTIN_VSDQ_I_DI (BINOP, cmlt)
++ /* Implemented by aarch64_cm<cmp><mode>. */
++ BUILTIN_VSDQ_I_DI (BINOP, cmhs)
++ BUILTIN_VSDQ_I_DI (BINOP, cmhi)
++ BUILTIN_VSDQ_I_DI (BINOP, cmtst)
++
++ /* Implemented by aarch64_<fmaxmin><mode>. */
++ BUILTIN_VDQF (BINOP, fmax)
++ BUILTIN_VDQF (BINOP, fmin)
++ /* Implemented by aarch64_<maxmin><mode>. */
++ BUILTIN_VDQ_BHSI (BINOP, smax)
++ BUILTIN_VDQ_BHSI (BINOP, smin)
++ BUILTIN_VDQ_BHSI (BINOP, umax)
++ BUILTIN_VDQ_BHSI (BINOP, umin)
++
++ /* Implemented by aarch64_frint<frint_suffix><mode>. */
++ BUILTIN_VDQF (UNOP, frintz)
++ BUILTIN_VDQF (UNOP, frintp)
++ BUILTIN_VDQF (UNOP, frintm)
++ BUILTIN_VDQF (UNOP, frinti)
++ BUILTIN_VDQF (UNOP, frintx)
++ BUILTIN_VDQF (UNOP, frinta)
++
++ /* Implemented by aarch64_fcvt<frint_suffix><su><mode>. */
++ BUILTIN_VDQF (UNOP, fcvtzs)
++ BUILTIN_VDQF (UNOP, fcvtzu)
++ BUILTIN_VDQF (UNOP, fcvtas)
++ BUILTIN_VDQF (UNOP, fcvtau)
++ BUILTIN_VDQF (UNOP, fcvtps)
++ BUILTIN_VDQF (UNOP, fcvtpu)
++ BUILTIN_VDQF (UNOP, fcvtms)
++ BUILTIN_VDQF (UNOP, fcvtmu)
++
++ /* Implemented by
++ aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>. */
++ BUILTIN_VALL (BINOP, zip1)
++ BUILTIN_VALL (BINOP, zip2)
++ BUILTIN_VALL (BINOP, uzp1)
++ BUILTIN_VALL (BINOP, uzp2)
++ BUILTIN_VALL (BINOP, trn1)
++ BUILTIN_VALL (BINOP, trn2)
+--- a/src/gcc/config/aarch64/aarch64-simd.md
++++ b/src/gcc/config/aarch64/aarch64-simd.md
+@@ -0,0 +1,3716 @@
++;; Machine description for AArch64 AdvSIMD architecture.
++;; Copyright (C) 2011, 2012, 2013 Free Software Foundation, Inc.
++;; Contributed by ARM Ltd.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful, but
++;; WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++;; General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++
+; Main data types used by the instructions
++
++(define_attr "simd_mode" "unknown,none,V8QI,V16QI,V4HI,V8HI,V2SI,V4SI,V2DI,V2SF,V4SF,V2DF,OI,CI,XI,DI,DF,SI,HI,QI"
++ (const_string "unknown"))
++
++
++; Classification of AdvSIMD instructions for scheduling purposes.
++; Do not set this attribute and the "v8type" attribute together in
++; any instruction pattern.
++
++; simd_abd integer absolute difference and accumulate.
++; simd_abdl integer absolute difference and accumulate (long).
++; simd_adal integer add and accumulate (long).
++; simd_add integer addition/subtraction.
++; simd_addl integer addition/subtraction (long).
++; simd_addlv across lanes integer sum (long).
++; simd_addn integer addition/subtraction (narrow).
++; simd_addn2 integer addition/subtraction (narrow, high).
++; simd_addv across lanes integer sum.
++; simd_cls count leading sign/zero bits.
++; simd_cmp compare / create mask.
++; simd_cnt population count.
++; simd_dup duplicate element.
++; simd_dupgp duplicate general purpose register.
++; simd_ext bitwise extract from pair.
++; simd_fadd floating point add/sub.
++; simd_fcmp floating point compare.
++; simd_fcvti floating point convert to integer.
++; simd_fcvtl floating-point convert upsize.
++; simd_fcvtn floating-point convert downsize (narrow).
++; simd_fcvtn2 floating-point convert downsize (narrow, high).
++; simd_fdiv floating point division.
++; simd_fminmax floating point min/max.
++; simd_fminmaxv across lanes floating point min/max.
++; simd_fmla floating point multiply-add.
++; simd_fmla_elt floating point multiply-add (by element).
++; simd_fmul floating point multiply.
++; simd_fmul_elt floating point multiply (by element).
++; simd_fnegabs floating point neg/abs.
++; simd_frcpe floating point reciprocal estimate.
++; simd_frcps floating point reciprocal step.
++; simd_frecx floating point reciprocal exponent.
++; simd_frint floating point round to integer.
++; simd_fsqrt floating point square root.
++; simd_icvtf integer convert to floating point.
++; simd_ins insert element.
++; simd_insgp insert general purpose register.
++; simd_load1 load multiple structures to one register (LD1).
++; simd_load1r load single structure to all lanes of one register (LD1R).
++; simd_load1s load single structure to one lane of one register (LD1 [index]).
++; simd_load2 load multiple structures to two registers (LD1, LD2).
++; simd_load2r load single structure to all lanes of two registers (LD1R, LD2R).
++; simd_load2s load single structure to one lane of two registers (LD2 [index]).
++; simd_load3 load multiple structures to three registers (LD1, LD3).
++; simd_load3r load single structure to all lanes of three registers (LD3R).
++; simd_load3s load single structure to one lane of three registers (LD3 [index]).
++; simd_load4 load multiple structures to four registers (LD1, LD2, LD4).
++; simd_load4r load single structure to all lanes of four registers (LD4R).
++; simd_load4s load single structure to one lane of four registers (LD4 [index]).
++; simd_logic logical operation.
++; simd_logic_imm logical operation (immediate).
++; simd_minmax integer min/max.
++; simd_minmaxv across lanes integer min/max,
++; simd_mla integer multiply-accumulate.
++; simd_mla_elt integer multiply-accumulate (by element).
++; simd_mlal integer multiply-accumulate (long).
++; simd_mlal_elt integer multiply-accumulate (by element, long).
++; simd_move move register.
++; simd_move_imm move immediate.
++; simd_movgp move element to general purpose register.
++; simd_mul integer multiply.
++; simd_mul_elt integer multiply (by element).
++; simd_mull integer multiply (long).
++; simd_mull_elt integer multiply (by element, long).
++; simd_negabs integer negate/absolute.
++; simd_rbit bitwise reverse.
++; simd_rcpe integer reciprocal estimate.
++; simd_rcps integer reciprocal square root.
++; simd_rev element reverse.
++; simd_sat_add integer saturating addition/subtraction.
++; simd_sat_mlal integer saturating multiply-accumulate (long).
++; simd_sat_mlal_elt integer saturating multiply-accumulate (by element, long).
++; simd_sat_mul integer saturating multiply.
++; simd_sat_mul_elt integer saturating multiply (by element).
++; simd_sat_mull integer saturating multiply (long).
++; simd_sat_mull_elt integer saturating multiply (by element, long).
++; simd_sat_negabs integer saturating negate/absolute.
++; simd_sat_shift integer saturating shift.
++; simd_sat_shift_imm integer saturating shift (immediate).
++; simd_sat_shiftn_imm integer saturating shift (narrow, immediate).
++; simd_sat_shiftn2_imm integer saturating shift (narrow, high, immediate).
++; simd_shift shift register/vector.
++; simd_shift_acc shift accumulate.
++; simd_shift_imm shift immediate.
++; simd_shift_imm_acc shift immediate and accumulate.
++; simd_shiftl shift register/vector (long).
++; simd_shiftl_imm shift register/vector (long, immediate).
++; simd_shiftn_imm shift register/vector (narrow, immediate).
++; simd_shiftn2_imm shift register/vector (narrow, high, immediate).
++; simd_store1 store multiple structures from one register (ST1).
++; simd_store1s store single structure from one lane of one register (ST1 [index]).
++; simd_store2 store multiple structures from two registers (ST1, ST2).
++; simd_store2s store single structure from one lane of two registers (ST2 [index]).
++; simd_store3 store multiple structures from three registers (ST1, ST3).
++; simd_store3s store single structure from one lane of three register (ST3 [index]).
++; simd_store4 store multiple structures from four registers (ST1, ST2, ST4).
++; simd_store4s store single structure from one lane for four registers (ST4 [index]).
++; simd_tbl table lookup.
++; simd_trn transpose.
++; simd_uzp unzip.
++; simd_zip zip.
++
++(define_attr "simd_type"
++ "simd_abd,\
++ simd_abdl,\
++ simd_adal,\
++ simd_add,\
++ simd_addl,\
++ simd_addlv,\
++ simd_addn,\
++ simd_addn2,\
++ simd_addv,\
++ simd_cls,\
++ simd_cmp,\
++ simd_cnt,\
++ simd_dup,\
++ simd_dupgp,\
++ simd_ext,\
++ simd_fadd,\
++ simd_fcmp,\
++ simd_fcvti,\
++ simd_fcvtl,\
++ simd_fcvtn,\
++ simd_fcvtn2,\
++ simd_fdiv,\
++ simd_fminmax,\
++ simd_fminmaxv,\
++ simd_fmla,\
++ simd_fmla_elt,\
++ simd_fmul,\
++ simd_fmul_elt,\
++ simd_fnegabs,\
++ simd_frcpe,\
++ simd_frcps,\
++ simd_frecx,\
++ simd_frint,\
++ simd_fsqrt,\
++ simd_icvtf,\
++ simd_ins,\
++ simd_insgp,\
++ simd_load1,\
++ simd_load1r,\
++ simd_load1s,\
++ simd_load2,\
++ simd_load2r,\
++ simd_load2s,\
++ simd_load3,\
++ simd_load3r,\
++ simd_load3s,\
++ simd_load4,\
++ simd_load4r,\
++ simd_load4s,\
++ simd_logic,\
++ simd_logic_imm,\
++ simd_minmax,\
++ simd_minmaxv,\
++ simd_mla,\
++ simd_mla_elt,\
++ simd_mlal,\
++ simd_mlal_elt,\
++ simd_movgp,\
++ simd_move,\
++ simd_move_imm,\
++ simd_mul,\
++ simd_mul_elt,\
++ simd_mull,\
++ simd_mull_elt,\
++ simd_negabs,\
++ simd_rbit,\
++ simd_rcpe,\
++ simd_rcps,\
++ simd_rev,\
++ simd_sat_add,\
++ simd_sat_mlal,\
++ simd_sat_mlal_elt,\
++ simd_sat_mul,\
++ simd_sat_mul_elt,\
++ simd_sat_mull,\
++ simd_sat_mull_elt,\
++ simd_sat_negabs,\
++ simd_sat_shift,\
++ simd_sat_shift_imm,\
++ simd_sat_shiftn_imm,\
++ simd_sat_shiftn2_imm,\
++ simd_shift,\
++ simd_shift_acc,\
++ simd_shift_imm,\
++ simd_shift_imm_acc,\
++ simd_shiftl,\
++ simd_shiftl_imm,\
++ simd_shiftn_imm,\
++ simd_shiftn2_imm,\
++ simd_store1,\
++ simd_store1s,\
++ simd_store2,\
++ simd_store2s,\
++ simd_store3,\
++ simd_store3s,\
++ simd_store4,\
++ simd_store4s,\
++ simd_tbl,\
++ simd_trn,\
++ simd_uzp,\
++ simd_zip,\
++ none"
++ (const_string "none"))
++
++
++; The "neon_type" attribute is used by the AArch32 backend. Below is a mapping
++; from "simd_type" to "neon_type".
++
++(define_attr "neon_type"
++ "neon_int_1,neon_int_2,neon_int_3,neon_int_4,neon_int_5,neon_vqneg_vqabs,
++ neon_vmov,neon_vaba,neon_vsma,neon_vaba_qqq,
++ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,neon_mul_qqq_8_16_32_ddd_32,
++ neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar,
++ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,neon_mla_qqq_8_16,
++ neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long,
++ neon_mla_qqq_32_qqd_32_scalar,neon_mul_ddd_16_scalar_32_16_long_scalar,
++ neon_mul_qqd_32_scalar,neon_mla_ddd_16_scalar_qdd_32_16_long_scalar,
++ neon_shift_1,neon_shift_2,neon_shift_3,neon_vshl_ddd,
++ neon_vqshl_vrshl_vqrshl_qqq,neon_vsra_vrsra,neon_fp_vadd_ddd_vabs_dd,
++ neon_fp_vadd_qqq_vabs_qq,neon_fp_vsum,neon_fp_vmul_ddd,neon_fp_vmul_qqd,
++ neon_fp_vmla_ddd,neon_fp_vmla_qqq,neon_fp_vmla_ddd_scalar,
++ neon_fp_vmla_qqq_scalar,neon_fp_vrecps_vrsqrts_ddd,
++ neon_fp_vrecps_vrsqrts_qqq,neon_bp_simple,neon_bp_2cycle,neon_bp_3cycle,
++ neon_ldr,neon_str,neon_vld1_1_2_regs,neon_vld1_3_4_regs,
++ neon_vld2_2_regs_vld1_vld2_all_lanes,neon_vld2_4_regs,neon_vld3_vld4,
++ neon_vst1_1_2_regs_vst2_2_regs,neon_vst1_3_4_regs,
++ neon_vst2_4_regs_vst3_vst4,neon_vst3_vst4,neon_vld1_vld2_lane,
++ neon_vld3_vld4_lane,neon_vst1_vst2_lane,neon_vst3_vst4_lane,
++ neon_vld3_vld4_all_lanes,neon_mcr,neon_mcr_2_mcrr,neon_mrc,neon_mrrc,
++ neon_ldm_2,neon_stm_2,none,unknown"
++ (cond [
++ (eq_attr "simd_type" "simd_dup") (const_string "neon_bp_simple")
++ (eq_attr "simd_type" "simd_movgp") (const_string "neon_bp_simple")
++ (eq_attr "simd_type" "simd_add,simd_logic,simd_logic_imm") (const_string "neon_int_1")
++ (eq_attr "simd_type" "simd_negabs,simd_addlv") (const_string "neon_int_3")
++ (eq_attr "simd_type" "simd_addn,simd_addn2,simd_addl,simd_sat_add,simd_sat_negabs") (const_string "neon_int_4")
++ (eq_attr "simd_type" "simd_move") (const_string "neon_vmov")
++ (eq_attr "simd_type" "simd_ins") (const_string "neon_mcr")
++ (and (eq_attr "simd_type" "simd_mul,simd_sat_mul") (eq_attr "simd_mode" "V8QI,V4HI")) (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
++ (and (eq_attr "simd_type" "simd_mul,simd_sat_mul") (eq_attr "simd_mode" "V2SI,V8QI,V16QI,V2SI")) (const_string "neon_mul_qqq_8_16_32_ddd_32")
++ (and (eq_attr "simd_type" "simd_mull,simd_sat_mull") (eq_attr "simd_mode" "V8QI,V16QI,V4HI,V8HI")) (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
++ (and (eq_attr "simd_type" "simd_mull,simd_sat_mull") (eq_attr "simd_mode" "V2SI,V4SI,V2DI")) (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")
++ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V8QI,V4HI")) (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
++ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V2SI")) (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
++ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V16QI,V8HI")) (const_string "neon_mla_qqq_8_16")
++ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V4SI")) (const_string "neon_mla_qqq_32_qqd_32_scalar")
++ (and (eq_attr "simd_type" "simd_mlal") (eq_attr "simd_mode" "V8QI,V16QI,V4HI,V8HI")) (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
++ (and (eq_attr "simd_type" "simd_mlal") (eq_attr "simd_mode" "V2SI,V4SI,V2DI")) (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
++ (and (eq_attr "simd_type" "simd_fmla") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmla_ddd")
++ (and (eq_attr "simd_type" "simd_fmla") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmla_qqq")
++ (and (eq_attr "simd_type" "simd_fmla_elt") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmla_ddd_scalar")
++ (and (eq_attr "simd_type" "simd_fmla_elt") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmla_qqq_scalar")
++ (and (eq_attr "simd_type" "simd_fmul,simd_fmul_elt,simd_fdiv,simd_fsqrt") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmul_ddd")
++ (and (eq_attr "simd_type" "simd_fmul,simd_fmul_elt,simd_fdiv,simd_fsqrt") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmul_qqd")
++ (and (eq_attr "simd_type" "simd_fadd") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vadd_ddd_vabs_dd")
++ (and (eq_attr "simd_type" "simd_fadd") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vadd_qqq_vabs_qq")
++ (and (eq_attr "simd_type" "simd_fnegabs,simd_fminmax,simd_fminmaxv") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vadd_ddd_vabs_dd")
++ (and (eq_attr "simd_type" "simd_fnegabs,simd_fminmax,simd_fminmaxv") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vadd_qqq_vabs_qq")
++ (and (eq_attr "simd_type" "simd_shift,simd_shift_acc") (eq_attr "simd_mode" "V8QI,V4HI,V2SI")) (const_string "neon_vshl_ddd")
++ (and (eq_attr "simd_type" "simd_shift,simd_shift_acc") (eq_attr "simd_mode" "V16QI,V8HI,V4SI,V2DI")) (const_string "neon_shift_3")
++ (eq_attr "simd_type" "simd_minmax,simd_minmaxv") (const_string "neon_int_5")
++ (eq_attr "simd_type" "simd_shiftn_imm,simd_shiftn2_imm,simd_shiftl_imm,") (const_string "neon_shift_1")
++ (eq_attr "simd_type" "simd_load1,simd_load2") (const_string "neon_vld1_1_2_regs")
++ (eq_attr "simd_type" "simd_load3,simd_load3") (const_string "neon_vld1_3_4_regs")
++ (eq_attr "simd_type" "simd_load1r,simd_load2r,simd_load3r,simd_load4r") (const_string "neon_vld2_2_regs_vld1_vld2_all_lanes")
++ (eq_attr "simd_type" "simd_load1s,simd_load2s") (const_string "neon_vld1_vld2_lane")
++ (eq_attr "simd_type" "simd_load3s,simd_load4s") (const_string "neon_vld3_vld4_lane")
++ (eq_attr "simd_type" "simd_store1,simd_store2") (const_string "neon_vst1_1_2_regs_vst2_2_regs")
++ (eq_attr "simd_type" "simd_store3,simd_store4") (const_string "neon_vst1_3_4_regs")
++ (eq_attr "simd_type" "simd_store1s,simd_store2s") (const_string "neon_vst1_vst2_lane")
++ (eq_attr "simd_type" "simd_store3s,simd_store4s") (const_string "neon_vst3_vst4_lane")
++ (and (eq_attr "simd_type" "simd_frcpe,simd_frcps") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vrecps_vrsqrts_ddd")
++ (and (eq_attr "simd_type" "simd_frcpe,simd_frcps") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vrecps_vrsqrts_qqq")
++ (eq_attr "simd_type" "none") (const_string "none")
++ ]
++ (const_string "unknown")))
++
++
++(define_expand "mov<mode>"
++ [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
++ (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
++ "TARGET_SIMD"
++ "
++ if (GET_CODE (operands[0]) == MEM)
++ operands[1] = force_reg (<MODE>mode, operands[1]);
++ "
++)
++
++(define_expand "movmisalign<mode>"
++ [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
++ (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
++ "TARGET_SIMD"
++{
++ /* This pattern is not permitted to fail during expansion: if both arguments
++ are non-registers (e.g. memory := constant, which can be created by the
++ auto-vectorizer), force operand 1 into a register. */
++ if (!register_operand (operands[0], <MODE>mode)
++ && !register_operand (operands[1], <MODE>mode))
++ operands[1] = force_reg (<MODE>mode, operands[1]);
++})
++
++(define_insn "aarch64_simd_dup<mode>"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (vec_duplicate:VDQ (match_operand:<VEL> 1 "register_operand" "r")))]
++ "TARGET_SIMD"
++ "dup\\t%0.<Vtype>, %<vw>1"
++ [(set_attr "simd_type" "simd_dupgp")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_dup_lane<mode>"
++ [(set (match_operand:VDQ_I 0 "register_operand" "=w")
++ (vec_duplicate:VDQ_I
++ (vec_select:<VEL>
++ (match_operand:<VCON> 1 "register_operand" "w")
++ (parallel [(match_operand:SI 2 "immediate_operand" "i")])
++ )))]
++ "TARGET_SIMD"
++ "dup\\t%<v>0<Vmtype>, %1.<Vetype>[%2]"
++ [(set_attr "simd_type" "simd_dup")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_dup_lane<mode>"
++ [(set (match_operand:SDQ_I 0 "register_operand" "=w")
++ (vec_select:<VEL>
++ (match_operand:<VCON> 1 "register_operand" "w")
++ (parallel [(match_operand:SI 2 "immediate_operand" "i")])
++ ))]
++ "TARGET_SIMD"
++ "dup\\t%<v>0<Vmtype>, %1.<Vetype>[%2]"
++ [(set_attr "simd_type" "simd_dup")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_simd_dup<mode>"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (vec_duplicate:VDQF (match_operand:<VEL> 1 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "dup\\t%0.<Vtype>, %1.<Vetype>[0]"
++ [(set_attr "simd_type" "simd_dup")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "*aarch64_simd_mov<mode>"
++ [(set (match_operand:VD 0 "aarch64_simd_nonimmediate_operand"
++ "=w, Utv, w, ?r, ?w, ?r, w")
++ (match_operand:VD 1 "aarch64_simd_general_operand"
++ "Utv, w, w, w, r, r, Dn"))]
++ "TARGET_SIMD
++ && (register_operand (operands[0], <MODE>mode)
++ || register_operand (operands[1], <MODE>mode))"
++{
++ switch (which_alternative)
++ {
++ case 0: return "ld1\t{%0.<Vtype>}, %1";
++ case 1: return "st1\t{%1.<Vtype>}, %0";
++ case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
++ case 3: return "umov\t%0, %1.d[0]";
++ case 4: return "ins\t%0.d[0], %1";
++ case 5: return "mov\t%0, %1";
++ case 6:
++ return aarch64_output_simd_mov_immediate (&operands[1],
++ <MODE>mode, 64);
++ default: gcc_unreachable ();
++ }
++}
++ [(set_attr "simd_type" "simd_load1,simd_store1,simd_move,simd_movgp,simd_insgp,simd_move,simd_move_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "*aarch64_simd_mov<mode>"
++ [(set (match_operand:VQ 0 "aarch64_simd_nonimmediate_operand"
++ "=w, Utv, w, ?r, ?w, ?r, w")
++ (match_operand:VQ 1 "aarch64_simd_general_operand"
++ "Utv, w, w, w, r, r, Dn"))]
++ "TARGET_SIMD
++ && (register_operand (operands[0], <MODE>mode)
++ || register_operand (operands[1], <MODE>mode))"
++{
++ switch (which_alternative)
++ {
++ case 0: return "ld1\t{%0.<Vtype>}, %1";
++ case 1: return "st1\t{%1.<Vtype>}, %0";
++ case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
++ case 3: return "umov\t%0, %1.d[0]\;umov\t%H0, %1.d[1]";
++ case 4: return "ins\t%0.d[0], %1\;ins\t%0.d[1], %H1";
++ case 5: return "#";
++ case 6:
++ return aarch64_output_simd_mov_immediate (&operands[1],
++ <MODE>mode, 128);
++ default: gcc_unreachable ();
++ }
++}
++ [(set_attr "simd_type" "simd_load1,simd_store1,simd_move,simd_movgp,simd_insgp,simd_move,simd_move_imm")
++ (set_attr "simd_mode" "<MODE>")
++ (set_attr "length" "4,4,4,8,8,8,4")]
++)
++
++(define_split
++ [(set (match_operand:VQ 0 "register_operand" "")
++ (match_operand:VQ 1 "register_operand" ""))]
++ "TARGET_SIMD && reload_completed
++ && GP_REGNUM_P (REGNO (operands[0]))
++ && GP_REGNUM_P (REGNO (operands[1]))"
++ [(set (match_dup 0) (match_dup 1))
++ (set (match_dup 2) (match_dup 3))]
++{
++ int rdest = REGNO (operands[0]);
++ int rsrc = REGNO (operands[1]);
++ rtx dest[2], src[2];
++
++ dest[0] = gen_rtx_REG (DImode, rdest);
++ src[0] = gen_rtx_REG (DImode, rsrc);
++ dest[1] = gen_rtx_REG (DImode, rdest + 1);
++ src[1] = gen_rtx_REG (DImode, rsrc + 1);
++
++ aarch64_simd_disambiguate_copy (operands, dest, src, 2);
++})
++
++(define_insn "orn<mode>3"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (ior:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
++ (match_operand:VDQ 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "orn\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
++ [(set_attr "simd_type" "simd_logic")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "bic<mode>3"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (and:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
++ (match_operand:VDQ 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "bic\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
++ [(set_attr "simd_type" "simd_logic")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "add<mode>3"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (plus:VDQ (match_operand:VDQ 1 "register_operand" "w")
++ (match_operand:VDQ 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "add\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_add")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "sub<mode>3"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (minus:VDQ (match_operand:VDQ 1 "register_operand" "w")
++ (match_operand:VDQ 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "sub\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_add")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "mul<mode>3"
++ [(set (match_operand:VDQM 0 "register_operand" "=w")
++ (mult:VDQM (match_operand:VDQM 1 "register_operand" "w")
++ (match_operand:VDQM 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "mul\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "neg<mode>2"
++ [(set (match_operand:VDQM 0 "register_operand" "=w")
++ (neg:VDQM (match_operand:VDQM 1 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "neg\t%0.<Vtype>, %1.<Vtype>"
++ [(set_attr "simd_type" "simd_negabs")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "abs<mode>2"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (abs:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "abs\t%0.<Vtype>, %1.<Vtype>"
++ [(set_attr "simd_type" "simd_negabs")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "and<mode>3"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (and:VDQ (match_operand:VDQ 1 "register_operand" "w")
++ (match_operand:VDQ 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
++ [(set_attr "simd_type" "simd_logic")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "ior<mode>3"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (ior:VDQ (match_operand:VDQ 1 "register_operand" "w")
++ (match_operand:VDQ 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
++ [(set_attr "simd_type" "simd_logic")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "xor<mode>3"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (xor:VDQ (match_operand:VDQ 1 "register_operand" "w")
++ (match_operand:VDQ 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "eor\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
++ [(set_attr "simd_type" "simd_logic")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "one_cmpl<mode>2"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (not:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "not\t%0.<Vbtype>, %1.<Vbtype>"
++ [(set_attr "simd_type" "simd_logic")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_simd_vec_set<mode>"
++ [(set (match_operand:VQ_S 0 "register_operand" "=w")
++ (vec_merge:VQ_S
++ (vec_duplicate:VQ_S
++ (match_operand:<VEL> 1 "register_operand" "r"))
++ (match_operand:VQ_S 3 "register_operand" "0")
++ (match_operand:SI 2 "immediate_operand" "i")))]
++ "TARGET_SIMD"
++ "ins\t%0.<Vetype>[%p2], %w1";
++ [(set_attr "simd_type" "simd_insgp")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_simd_lshr<mode>"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (lshiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
++ (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))]
++ "TARGET_SIMD"
++ "ushr\t%0.<Vtype>, %1.<Vtype>, %2"
++ [(set_attr "simd_type" "simd_shift_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_simd_ashr<mode>"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (ashiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
++ (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))]
++ "TARGET_SIMD"
++ "sshr\t%0.<Vtype>, %1.<Vtype>, %2"
++ [(set_attr "simd_type" "simd_shift_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_simd_imm_shl<mode>"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
++ (match_operand:VDQ 2 "aarch64_simd_lshift_imm" "Dl")))]
++ "TARGET_SIMD"
++ "shl\t%0.<Vtype>, %1.<Vtype>, %2"
++ [(set_attr "simd_type" "simd_shift_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_simd_reg_sshl<mode>"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
++ (match_operand:VDQ 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_shift")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_simd_reg_shl<mode>_unsigned"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
++ (match_operand:VDQ 2 "register_operand" "w")]
++ UNSPEC_ASHIFT_UNSIGNED))]
++ "TARGET_SIMD"
++ "ushl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_shift")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_simd_reg_shl<mode>_signed"
++ [(set (match_operand:VDQ 0 "register_operand" "=w")
++ (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
++ (match_operand:VDQ 2 "register_operand" "w")]
++ UNSPEC_ASHIFT_SIGNED))]
++ "TARGET_SIMD"
++ "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_shift")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "ashl<mode>3"
++ [(match_operand:VDQ 0 "register_operand" "")
++ (match_operand:VDQ 1 "register_operand" "")
++ (match_operand:SI 2 "general_operand" "")]
++ "TARGET_SIMD"
++{
++ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
++ int shift_amount;
++
++ if (CONST_INT_P (operands[2]))
++ {
++ shift_amount = INTVAL (operands[2]);
++ if (shift_amount >= 0 && shift_amount < bit_width)
++ {
++ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
++ shift_amount);
++ emit_insn (gen_aarch64_simd_imm_shl<mode> (operands[0],
++ operands[1],
++ tmp));
++ DONE;
++ }
++ else
++ {
++ operands[2] = force_reg (SImode, operands[2]);
++ }
++ }
++ else if (MEM_P (operands[2]))
++ {
++ operands[2] = force_reg (SImode, operands[2]);
++ }
++
++ if (REG_P (operands[2]))
++ {
++ rtx tmp = gen_reg_rtx (<MODE>mode);
++ emit_insn (gen_aarch64_simd_dup<mode> (tmp,
++ convert_to_mode (<VEL>mode,
++ operands[2],
++ 0)));
++ emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
++ tmp));
++ DONE;
++ }
++ else
++ FAIL;
++}
++)
++
++(define_expand "lshr<mode>3"
++ [(match_operand:VDQ 0 "register_operand" "")
++ (match_operand:VDQ 1 "register_operand" "")
++ (match_operand:SI 2 "general_operand" "")]
++ "TARGET_SIMD"
++{
++ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
++ int shift_amount;
++
++ if (CONST_INT_P (operands[2]))
++ {
++ shift_amount = INTVAL (operands[2]);
++ if (shift_amount > 0 && shift_amount <= bit_width)
++ {
++ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
++ shift_amount);
++ emit_insn (gen_aarch64_simd_lshr<mode> (operands[0],
++ operands[1],
++ tmp));
++ DONE;
++ }
++ else
++ operands[2] = force_reg (SImode, operands[2]);
++ }
++ else if (MEM_P (operands[2]))
++ {
++ operands[2] = force_reg (SImode, operands[2]);
++ }
++
++ if (REG_P (operands[2]))
++ {
++ rtx tmp = gen_reg_rtx (SImode);
++ rtx tmp1 = gen_reg_rtx (<MODE>mode);
++ emit_insn (gen_negsi2 (tmp, operands[2]));
++ emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
++ convert_to_mode (<VEL>mode,
++ tmp, 0)));
++ emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0],
++ operands[1],
++ tmp1));
++ DONE;
++ }
++ else
++ FAIL;
++}
++)
++
++(define_expand "ashr<mode>3"
++ [(match_operand:VDQ 0 "register_operand" "")
++ (match_operand:VDQ 1 "register_operand" "")
++ (match_operand:SI 2 "general_operand" "")]
++ "TARGET_SIMD"
++{
++ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
++ int shift_amount;
++
++ if (CONST_INT_P (operands[2]))
++ {
++ shift_amount = INTVAL (operands[2]);
++ if (shift_amount > 0 && shift_amount <= bit_width)
++ {
++ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
++ shift_amount);
++ emit_insn (gen_aarch64_simd_ashr<mode> (operands[0],
++ operands[1],
++ tmp));
++ DONE;
++ }
++ else
++ operands[2] = force_reg (SImode, operands[2]);
++ }
++ else if (MEM_P (operands[2]))
++ {
++ operands[2] = force_reg (SImode, operands[2]);
++ }
++
++ if (REG_P (operands[2]))
++ {
++ rtx tmp = gen_reg_rtx (SImode);
++ rtx tmp1 = gen_reg_rtx (<MODE>mode);
++ emit_insn (gen_negsi2 (tmp, operands[2]));
++ emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
++ convert_to_mode (<VEL>mode,
++ tmp, 0)));
++ emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0],
++ operands[1],
++ tmp1));
++ DONE;
++ }
++ else
++ FAIL;
++}
++)
++
++(define_expand "vashl<mode>3"
++ [(match_operand:VDQ 0 "register_operand" "")
++ (match_operand:VDQ 1 "register_operand" "")
++ (match_operand:VDQ 2 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
++ operands[2]));
++ DONE;
++})
++
++;; Using mode VQ_S as there is no V2DImode neg!
++;; Negating individual lanes most certainly offsets the
++;; gain from vectorization.
++(define_expand "vashr<mode>3"
++ [(match_operand:VQ_S 0 "register_operand" "")
++ (match_operand:VQ_S 1 "register_operand" "")
++ (match_operand:VQ_S 2 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ rtx neg = gen_reg_rtx (<MODE>mode);
++ emit (gen_neg<mode>2 (neg, operands[2]));
++ emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0], operands[1],
++ neg));
++ DONE;
++})
++
++(define_expand "vlshr<mode>3"
++ [(match_operand:VQ_S 0 "register_operand" "")
++ (match_operand:VQ_S 1 "register_operand" "")
++ (match_operand:VQ_S 2 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ rtx neg = gen_reg_rtx (<MODE>mode);
++ emit (gen_neg<mode>2 (neg, operands[2]));
++ emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0], operands[1],
++ neg));
++ DONE;
++})
++
++(define_expand "vec_set<mode>"
++ [(match_operand:VQ_S 0 "register_operand" "+w")
++ (match_operand:<VEL> 1 "register_operand" "r")
++ (match_operand:SI 2 "immediate_operand" "")]
++ "TARGET_SIMD"
++ {
++ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
++ emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
++ GEN_INT (elem), operands[0]));
++ DONE;
++ }
++)
++
++(define_insn "aarch64_simd_vec_setv2di"
++ [(set (match_operand:V2DI 0 "register_operand" "=w")
++ (vec_merge:V2DI
++ (vec_duplicate:V2DI
++ (match_operand:DI 1 "register_operand" "r"))
++ (match_operand:V2DI 3 "register_operand" "0")
++ (match_operand:SI 2 "immediate_operand" "i")))]
++ "TARGET_SIMD"
++ "ins\t%0.d[%p2], %1";
++ [(set_attr "simd_type" "simd_insgp")
++ (set_attr "simd_mode" "V2DI")]
++)
++
++(define_expand "vec_setv2di"
++ [(match_operand:V2DI 0 "register_operand" "+w")
++ (match_operand:DI 1 "register_operand" "r")
++ (match_operand:SI 2 "immediate_operand" "")]
++ "TARGET_SIMD"
++ {
++ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
++ emit_insn (gen_aarch64_simd_vec_setv2di (operands[0], operands[1],
++ GEN_INT (elem), operands[0]));
++ DONE;
++ }
++)
++
++(define_insn "aarch64_simd_vec_set<mode>"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (vec_merge:VDQF
++ (vec_duplicate:VDQF
++ (match_operand:<VEL> 1 "register_operand" "w"))
++ (match_operand:VDQF 3 "register_operand" "0")
++ (match_operand:SI 2 "immediate_operand" "i")))]
++ "TARGET_SIMD"
++ "ins\t%0.<Vetype>[%p2], %1.<Vetype>[0]";
++ [(set_attr "simd_type" "simd_ins")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "vec_set<mode>"
++ [(match_operand:VDQF 0 "register_operand" "+w")
++ (match_operand:<VEL> 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "")]
++ "TARGET_SIMD"
++ {
++ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
++ emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
++ GEN_INT (elem), operands[0]));
++ DONE;
++ }
++)
++
++
++(define_insn "aarch64_mla<mode>"
++ [(set (match_operand:VQ_S 0 "register_operand" "=w")
++ (plus:VQ_S (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
++ (match_operand:VQ_S 3 "register_operand" "w"))
++ (match_operand:VQ_S 1 "register_operand" "0")))]
++ "TARGET_SIMD"
++ "mla\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
++ [(set_attr "simd_type" "simd_mla")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_mls<mode>"
++ [(set (match_operand:VQ_S 0 "register_operand" "=w")
++ (minus:VQ_S (match_operand:VQ_S 1 "register_operand" "0")
++ (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
++ (match_operand:VQ_S 3 "register_operand" "w"))))]
++ "TARGET_SIMD"
++ "mls\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
++ [(set_attr "simd_type" "simd_mla")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; Max/Min operations.
++(define_insn "<maxmin><mode>3"
++ [(set (match_operand:VQ_S 0 "register_operand" "=w")
++ (MAXMIN:VQ_S (match_operand:VQ_S 1 "register_operand" "w")
++ (match_operand:VQ_S 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "<maxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_minmax")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; Move into low-half clearing high half to 0.
++
++(define_insn "move_lo_quad_<mode>"
++ [(set (match_operand:VQ 0 "register_operand" "=w")
++ (vec_concat:VQ
++ (match_operand:<VHALF> 1 "register_operand" "w")
++ (vec_duplicate:<VHALF> (const_int 0))))]
++ "TARGET_SIMD"
++ "mov\\t%d0, %d1";
++ [(set_attr "simd_type" "simd_dup")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; Move into high-half.
++
++(define_insn "aarch64_simd_move_hi_quad_<mode>"
++ [(set (match_operand:VQ 0 "register_operand" "+w")
++ (vec_concat:VQ
++ (vec_select:<VHALF>
++ (match_dup 0)
++ (match_operand:VQ 2 "vect_par_cnst_lo_half" ""))
++ (match_operand:<VHALF> 1 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "ins\\t%0.d[1], %1.d[0]";
++ [(set_attr "simd_type" "simd_ins")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "move_hi_quad_<mode>"
++ [(match_operand:VQ 0 "register_operand" "")
++ (match_operand:<VHALF> 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
++ emit_insn (gen_aarch64_simd_move_hi_quad_<mode> (operands[0],
++ operands[1], p));
++ DONE;
++})
++
++;; Narrowing operations.
++
++;; For doubles.
++(define_insn "aarch64_simd_vec_pack_trunc_<mode>"
++ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
++ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "xtn\\t%0.<Vntype>, %1.<Vtype>"
++ [(set_attr "simd_type" "simd_shiftn_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "vec_pack_trunc_<mode>"
++ [(match_operand:<VNARROWD> 0 "register_operand" "")
++ (match_operand:VDN 1 "register_operand" "")
++ (match_operand:VDN 2 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ rtx tempreg = gen_reg_rtx (<VDBL>mode);
++
++ emit_insn (gen_move_lo_quad_<Vdbl> (tempreg, operands[1]));
++ emit_insn (gen_move_hi_quad_<Vdbl> (tempreg, operands[2]));
++ emit_insn (gen_aarch64_simd_vec_pack_trunc_<Vdbl> (operands[0], tempreg));
++ DONE;
++})
++
++;; For quads.
++
++(define_insn "vec_pack_trunc_<mode>"
++ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "+&w")
++ (vec_concat:<VNARROWQ2>
++ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w"))
++ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
++ "TARGET_SIMD"
++ "xtn\\t%0.<Vntype>, %1.<Vtype>\;xtn2\\t%0.<V2ntype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_shiftn2_imm")
++ (set_attr "simd_mode" "<MODE>")
++ (set_attr "length" "8")]
++)
++
++;; Widening operations.
++
++(define_insn "aarch64_simd_vec_unpack<su>_lo_<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
++ (match_operand:VQW 1 "register_operand" "w")
++ (match_operand:VQW 2 "vect_par_cnst_lo_half" "")
++ )))]
++ "TARGET_SIMD"
++ "<su>shll %0.<Vwtype>, %1.<Vhalftype>, 0"
++ [(set_attr "simd_type" "simd_shiftl_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_simd_vec_unpack<su>_hi_<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
++ (match_operand:VQW 1 "register_operand" "w")
++ (match_operand:VQW 2 "vect_par_cnst_hi_half" "")
++ )))]
++ "TARGET_SIMD"
++ "<su>shll2 %0.<Vwtype>, %1.<Vtype>, 0"
++ [(set_attr "simd_type" "simd_shiftl_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "vec_unpack<su>_hi_<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "")
++ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand"))]
++ "TARGET_SIMD"
++ {
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_simd_vec_unpack<su>_hi_<mode> (operands[0],
++ operands[1], p));
++ DONE;
++ }
++)
++
++(define_expand "vec_unpack<su>_lo_<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "")
++ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))]
++ "TARGET_SIMD"
++ {
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
++ emit_insn (gen_aarch64_simd_vec_unpack<su>_lo_<mode> (operands[0],
++ operands[1], p));
++ DONE;
++ }
++)
++
++;; Widening arithmetic.
++
++(define_insn "aarch64_simd_vec_<su>mult_lo_<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
++ (match_operand:VQW 1 "register_operand" "w")
++ (match_operand:VQW 3 "vect_par_cnst_lo_half" "")))
++ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
++ (match_operand:VQW 2 "register_operand" "w")
++ (match_dup 3)))))]
++ "TARGET_SIMD"
++ "<su>mull\\t%0.<Vwtype>, %1.<Vhalftype>, %2.<Vhalftype>"
++ [(set_attr "simd_type" "simd_mull")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "vec_widen_<su>mult_lo_<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "")
++ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
++ (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
++ "TARGET_SIMD"
++ {
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
++ emit_insn (gen_aarch64_simd_vec_<su>mult_lo_<mode> (operands[0],
++ operands[1],
++ operands[2], p));
++ DONE;
++ }
++)
++
++(define_insn "aarch64_simd_vec_<su>mult_hi_<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
++ (match_operand:VQW 1 "register_operand" "w")
++ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
++ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
++ (match_operand:VQW 2 "register_operand" "w")
++ (match_dup 3)))))]
++ "TARGET_SIMD"
++ "<su>mull2\\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_mull")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "vec_widen_<su>mult_hi_<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "")
++ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
++ (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
++ "TARGET_SIMD"
++ {
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_simd_vec_<su>mult_hi_<mode> (operands[0],
++ operands[1],
++ operands[2], p));
++ DONE;
++
++ }
++)
++
++;; FP vector operations.
++;; AArch64 AdvSIMD supports single-precision (32-bit) and
++;; double-precision (64-bit) floating-point data types and arithmetic as
++;; defined by the IEEE 754-2008 standard. This makes them vectorizable
++;; without the need for -ffast-math or -funsafe-math-optimizations.
++;;
++;; Floating-point operations can raise an exception. Vectorizing such
++;; operations are safe because of reasons explained below.
++;;
++;; ARMv8 permits an extension to enable trapped floating-point
++;; exception handling, however this is an optional feature. In the
++;; event of a floating-point exception being raised by vectorised
++;; code then:
++;; 1. If trapped floating-point exceptions are available, then a trap
++;; will be taken when any lane raises an enabled exception. A trap
++;; handler may determine which lane raised the exception.
++;; 2. Alternatively a sticky exception flag is set in the
++;; floating-point status register (FPSR). Software may explicitly
++;; test the exception flags, in which case the tests will either
++;; prevent vectorisation, allowing precise identification of the
++;; failing operation, or if tested outside of vectorisable regions
++;; then the specific operation and lane are not of interest.
++
++;; FP arithmetic operations.
++
++(define_insn "add<mode>3"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (plus:VDQF (match_operand:VDQF 1 "register_operand" "w")
++ (match_operand:VDQF 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "fadd\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_fadd")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "sub<mode>3"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (minus:VDQF (match_operand:VDQF 1 "register_operand" "w")
++ (match_operand:VDQF 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "fsub\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_fadd")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "mul<mode>3"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (mult:VDQF (match_operand:VDQF 1 "register_operand" "w")
++ (match_operand:VDQF 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_fmul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "div<mode>3"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (div:VDQF (match_operand:VDQF 1 "register_operand" "w")
++ (match_operand:VDQF 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "fdiv\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_fdiv")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "neg<mode>2"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (neg:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "fneg\\t%0.<Vtype>, %1.<Vtype>"
++ [(set_attr "simd_type" "simd_fnegabs")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "abs<mode>2"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (abs:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "fabs\\t%0.<Vtype>, %1.<Vtype>"
++ [(set_attr "simd_type" "simd_fnegabs")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "fma<mode>4"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (fma:VDQF (match_operand:VDQF 1 "register_operand" "w")
++ (match_operand:VDQF 2 "register_operand" "w")
++ (match_operand:VDQF 3 "register_operand" "0")))]
++ "TARGET_SIMD"
++ "fmla\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_fmla")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_frint<frint_suffix><mode>"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")]
++ FRINT))]
++ "TARGET_SIMD"
++ "frint<frint_suffix>\\t%0.<Vtype>, %1.<Vtype>"
++ [(set_attr "simd_type" "simd_frint")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; Vector versions of the floating-point frint patterns.
++;; Expands to btrunc, ceil, floor, nearbyint, rint, round.
++(define_expand "<frint_pattern><mode>2"
++ [(set (match_operand:VDQF 0 "register_operand")
++ (unspec:VDQF [(match_operand:VDQF 1 "register_operand")]
++ FRINT))]
++ "TARGET_SIMD"
++ {})
++
++(define_insn "aarch64_fcvt<frint_suffix><su><mode>"
++ [(set (match_operand:<FCVT_TARGET> 0 "register_operand" "=w")
++ (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET>
++ [(match_operand:VDQF 1 "register_operand" "w")]
++ FCVT)))]
++ "TARGET_SIMD"
++ "fcvt<frint_suffix><su>\\t%0.<Vtype>, %1.<Vtype>"
++ [(set_attr "simd_type" "simd_fcvti")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; Vector versions of the fcvt standard patterns.
++;; Expands to lbtrunc, lround, lceil, lfloor
++(define_expand "l<fcvt_pattern><su_optab><fcvt_target><VDQF:mode>2"
++ [(set (match_operand:<FCVT_TARGET> 0 "register_operand")
++ (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET>
++ [(match_operand:VDQF 1 "register_operand")]
++ FCVT)))]
++ "TARGET_SIMD"
++ {})
++
++(define_insn "aarch64_vmls<mode>"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (minus:VDQF (match_operand:VDQF 1 "register_operand" "0")
++ (mult:VDQF (match_operand:VDQF 2 "register_operand" "w")
++ (match_operand:VDQF 3 "register_operand" "w"))))]
++ "TARGET_SIMD"
++ "fmls\\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
++ [(set_attr "simd_type" "simd_fmla")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; FP Max/Min
++;; Max/Min are introduced by idiom recognition by GCC's mid-end. An
++;; expression like:
++;; a = (b < c) ? b : c;
++;; is idiom-matched as MIN_EXPR<b,c> only if -ffinite-math-only is enabled
++;; either explicitly or indirectly via -ffast-math.
++;;
++;; MIN_EXPR and MAX_EXPR eventually map to 'smin' and 'smax' in RTL.
++;; The 'smax' and 'smin' RTL standard pattern names do not specify which
++;; operand will be returned when both operands are zero (i.e. they may not
++;; honour signed zeroes), or when either operand is NaN. Therefore GCC
++;; only introduces MIN_EXPR/MAX_EXPR in fast math mode or when not honouring
++;; NaNs.
++
++(define_insn "smax<mode>3"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (smax:VDQF (match_operand:VDQF 1 "register_operand" "w")
++ (match_operand:VDQF 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "fmaxnm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_fminmax")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "smin<mode>3"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (smin:VDQF (match_operand:VDQF 1 "register_operand" "w")
++ (match_operand:VDQF 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "fminnm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_fminmax")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; FP 'across lanes' max and min ops.
++
++(define_insn "reduc_s<fmaxminv>_v4sf"
++ [(set (match_operand:V4SF 0 "register_operand" "=w")
++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
++ FMAXMINV))]
++ "TARGET_SIMD"
++ "f<fmaxminv>nmv\\t%s0, %1.4s";
++ [(set_attr "simd_type" "simd_fminmaxv")
++ (set_attr "simd_mode" "V4SF")]
++)
++
++(define_insn "reduc_s<fmaxminv>_<mode>"
++ [(set (match_operand:V2F 0 "register_operand" "=w")
++ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
++ FMAXMINV))]
++ "TARGET_SIMD"
++ "f<fmaxminv>nmp\\t%0.<Vtype>, %1.<Vtype>, %1.<Vtype>";
++ [(set_attr "simd_type" "simd_fminmax")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; FP 'across lanes' add.
++
++(define_insn "aarch64_addvv4sf"
++ [(set (match_operand:V4SF 0 "register_operand" "=w")
++ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
++ UNSPEC_FADDV))]
++ "TARGET_SIMD"
++ "faddp\\t%0.4s, %1.4s, %1.4s"
++ [(set_attr "simd_type" "simd_fadd")
++ (set_attr "simd_mode" "V4SF")]
++)
++
++(define_expand "reduc_uplus_v4sf"
++ [(set (match_operand:V4SF 0 "register_operand" "=w")
++ (match_operand:V4SF 1 "register_operand" "w"))]
++ "TARGET_SIMD"
++{
++ rtx tmp = gen_reg_rtx (V4SFmode);
++ emit_insn (gen_aarch64_addvv4sf (tmp, operands[1]));
++ emit_insn (gen_aarch64_addvv4sf (operands[0], tmp));
++ DONE;
++})
++
++(define_expand "reduc_splus_v4sf"
++ [(set (match_operand:V4SF 0 "register_operand" "=w")
++ (match_operand:V4SF 1 "register_operand" "w"))]
++ "TARGET_SIMD"
++{
++ rtx tmp = gen_reg_rtx (V4SFmode);
++ emit_insn (gen_aarch64_addvv4sf (tmp, operands[1]));
++ emit_insn (gen_aarch64_addvv4sf (operands[0], tmp));
++ DONE;
++})
++
++(define_insn "aarch64_addv<mode>"
++ [(set (match_operand:V2F 0 "register_operand" "=w")
++ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
++ UNSPEC_FADDV))]
++ "TARGET_SIMD"
++ "faddp\\t%<Vetype>0, %1.<Vtype>"
++ [(set_attr "simd_type" "simd_fadd")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "reduc_uplus_<mode>"
++ [(set (match_operand:V2F 0 "register_operand" "=w")
++ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
++ UNSPEC_FADDV))]
++ "TARGET_SIMD"
++ ""
++)
++
++(define_expand "reduc_splus_<mode>"
++ [(set (match_operand:V2F 0 "register_operand" "=w")
++ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
++ UNSPEC_FADDV))]
++ "TARGET_SIMD"
++ ""
++)
++
++;; Reduction across lanes.
++
++(define_insn "aarch64_addv<mode>"
++ [(set (match_operand:VDQV 0 "register_operand" "=w")
++ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
++ UNSPEC_ADDV))]
++ "TARGET_SIMD"
++ "addv\\t%<Vetype>0, %1.<Vtype>"
++ [(set_attr "simd_type" "simd_addv")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "reduc_splus_<mode>"
++ [(set (match_operand:VDQV 0 "register_operand" "=w")
++ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
++ UNSPEC_ADDV))]
++ "TARGET_SIMD"
++ ""
++)
++
++(define_expand "reduc_uplus_<mode>"
++ [(set (match_operand:VDQV 0 "register_operand" "=w")
++ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
++ UNSPEC_ADDV))]
++ "TARGET_SIMD"
++ ""
++)
++
++(define_insn "aarch64_addvv2di"
++ [(set (match_operand:V2DI 0 "register_operand" "=w")
++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
++ UNSPEC_ADDV))]
++ "TARGET_SIMD"
++ "addp\\t%d0, %1.2d"
++ [(set_attr "simd_type" "simd_add")
++ (set_attr "simd_mode" "V2DI")]
++)
++
++(define_expand "reduc_uplus_v2di"
++ [(set (match_operand:V2DI 0 "register_operand" "=w")
++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
++ UNSPEC_ADDV))]
++ "TARGET_SIMD"
++ ""
++)
++
++(define_expand "reduc_splus_v2di"
++ [(set (match_operand:V2DI 0 "register_operand" "=w")
++ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
++ UNSPEC_ADDV))]
++ "TARGET_SIMD"
++ ""
++)
++
++(define_insn "aarch64_addvv2si"
++ [(set (match_operand:V2SI 0 "register_operand" "=w")
++ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
++ UNSPEC_ADDV))]
++ "TARGET_SIMD"
++ "addp\\t%0.2s, %1.2s, %1.2s"
++ [(set_attr "simd_type" "simd_add")
++ (set_attr "simd_mode" "V2SI")]
++)
++
++(define_expand "reduc_uplus_v2si"
++ [(set (match_operand:V2SI 0 "register_operand" "=w")
++ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
++ UNSPEC_ADDV))]
++ "TARGET_SIMD"
++ ""
++)
++
++(define_expand "reduc_splus_v2si"
++ [(set (match_operand:V2SI 0 "register_operand" "=w")
++ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
++ UNSPEC_ADDV))]
++ "TARGET_SIMD"
++ ""
++)
++
++(define_insn "reduc_<maxminv>_<mode>"
++ [(set (match_operand:VDQV 0 "register_operand" "=w")
++ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
++ MAXMINV))]
++ "TARGET_SIMD"
++ "<maxminv>v\\t%<Vetype>0, %1.<Vtype>"
++ [(set_attr "simd_type" "simd_minmaxv")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "reduc_<maxminv>_v2si"
++ [(set (match_operand:V2SI 0 "register_operand" "=w")
++ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
++ MAXMINV))]
++ "TARGET_SIMD"
++ "<maxminv>p\\t%0.2s, %1.2s, %1.2s"
++ [(set_attr "simd_type" "simd_minmax")
++ (set_attr "simd_mode" "V2SI")]
++)
++
++;; vbsl_* intrinsics may compile to any of bsl/bif/bit depending on register
++;; allocation. For an intrinsic of form:
++;; vD = bsl_* (vS, vN, vM)
++;; We can use any of:
++;; bsl vS, vN, vM (if D = S)
++;; bit vD, vN, vS (if D = M, so 1-bits in vS choose bits from vN, else vM)
++;; bif vD, vM, vS (if D = N, so 0-bits in vS choose bits from vM, else vN)
++
++(define_insn "aarch64_simd_bsl<mode>_internal"
++ [(set (match_operand:VALL 0 "register_operand" "=w,w,w")
++ (unspec:VALL
++ [(match_operand:<V_cmp_result> 1 "register_operand" " 0,w,w")
++ (match_operand:VALL 2 "register_operand" " w,w,0")
++ (match_operand:VALL 3 "register_operand" " w,0,w")]
++ UNSPEC_BSL))]
++ "TARGET_SIMD"
++ "@
++ bsl\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
++ bit\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
++ bif\\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>"
++)
++
++(define_expand "aarch64_simd_bsl<mode>"
++ [(set (match_operand:VALL 0 "register_operand")
++ (unspec:VALL [(match_operand:<V_cmp_result> 1 "register_operand")
++ (match_operand:VALL 2 "register_operand")
++ (match_operand:VALL 3 "register_operand")]
++ UNSPEC_BSL))]
++ "TARGET_SIMD"
++{
++ /* We can't alias operands together if they have different modes. */
++ operands[1] = gen_lowpart (<V_cmp_result>mode, operands[1]);
++})
++
++(define_expand "aarch64_vcond_internal<mode>"
++ [(set (match_operand:VDQ 0 "register_operand")
++ (if_then_else:VDQ
++ (match_operator 3 "comparison_operator"
++ [(match_operand:VDQ 4 "register_operand")
++ (match_operand:VDQ 5 "nonmemory_operand")])
++ (match_operand:VDQ 1 "register_operand")
++ (match_operand:VDQ 2 "register_operand")))]
++ "TARGET_SIMD"
++{
++ int inverse = 0, has_zero_imm_form = 0;
++ rtx mask = gen_reg_rtx (<MODE>mode);
++
++ switch (GET_CODE (operands[3]))
++ {
++ case LE:
++ case LT:
++ case NE:
++ inverse = 1;
++ /* Fall through. */
++ case GE:
++ case GT:
++ case EQ:
++ has_zero_imm_form = 1;
++ break;
++ case LEU:
++ case LTU:
++ inverse = 1;
++ break;
++ default:
++ break;
++ }
++
++ if (!REG_P (operands[5])
++ && (operands[5] != CONST0_RTX (<MODE>mode) || !has_zero_imm_form))
++ operands[5] = force_reg (<MODE>mode, operands[5]);
++
++ switch (GET_CODE (operands[3]))
++ {
++ case LT:
++ case GE:
++ emit_insn (gen_aarch64_cmge<mode> (mask, operands[4], operands[5]));
++ break;
++
++ case LE:
++ case GT:
++ emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5]));
++ break;
++
++ case LTU:
++ case GEU:
++ emit_insn (gen_aarch64_cmhs<mode> (mask, operands[4], operands[5]));
++ break;
++
++ case LEU:
++ case GTU:
++ emit_insn (gen_aarch64_cmhi<mode> (mask, operands[4], operands[5]));
++ break;
++
++ case NE:
++ case EQ:
++ emit_insn (gen_aarch64_cmeq<mode> (mask, operands[4], operands[5]));
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++
++ if (inverse)
++ emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
++ operands[1]));
++ else
++ emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
++ operands[2]));
++
++ DONE;
++})
++
++(define_expand "aarch64_vcond_internal<mode>"
++ [(set (match_operand:VDQF 0 "register_operand")
++ (if_then_else:VDQF
++ (match_operator 3 "comparison_operator"
++ [(match_operand:VDQF 4 "register_operand")
++ (match_operand:VDQF 5 "nonmemory_operand")])
++ (match_operand:VDQF 1 "register_operand")
++ (match_operand:VDQF 2 "register_operand")))]
++ "TARGET_SIMD"
++{
++ int inverse = 0;
++ int use_zero_form = 0;
++ int swap_bsl_operands = 0;
++ rtx mask = gen_reg_rtx (<V_cmp_result>mode);
++ rtx tmp = gen_reg_rtx (<V_cmp_result>mode);
++
++ rtx (*base_comparison) (rtx, rtx, rtx);
++ rtx (*complimentary_comparison) (rtx, rtx, rtx);
++
++ switch (GET_CODE (operands[3]))
++ {
++ case GE:
++ case GT:
++ case LE:
++ case LT:
++ case EQ:
++ if (operands[5] == CONST0_RTX (<MODE>mode))
++ {
++ use_zero_form = 1;
++ break;
++ }
++ /* Fall through. */
++ default:
++ if (!REG_P (operands[5]))
++ operands[5] = force_reg (<MODE>mode, operands[5]);
++ }
++
++ switch (GET_CODE (operands[3]))
++ {
++ case LT:
++ case UNLT:
++ inverse = 1;
++ /* Fall through. */
++ case GE:
++ case UNGE:
++ case ORDERED:
++ case UNORDERED:
++ base_comparison = gen_aarch64_cmge<mode>;
++ complimentary_comparison = gen_aarch64_cmgt<mode>;
++ break;
++ case LE:
++ case UNLE:
++ inverse = 1;
++ /* Fall through. */
++ case GT:
++ case UNGT:
++ base_comparison = gen_aarch64_cmgt<mode>;
++ complimentary_comparison = gen_aarch64_cmge<mode>;
++ break;
++ case EQ:
++ case NE:
++ case UNEQ:
++ base_comparison = gen_aarch64_cmeq<mode>;
++ complimentary_comparison = gen_aarch64_cmeq<mode>;
++ break;
++ default:
++ gcc_unreachable ();
++ }
++
++ switch (GET_CODE (operands[3]))
++ {
++ case LT:
++ case LE:
++ case GT:
++ case GE:
++ case EQ:
++ /* The easy case. Here we emit one of FCMGE, FCMGT or FCMEQ.
++ As a LT b <=> b GE a && a LE b <=> b GT a. Our transformations are:
++ a GE b -> a GE b
++ a GT b -> a GT b
++ a LE b -> b GE a
++ a LT b -> b GT a
++ a EQ b -> a EQ b
++ Note that there also exist direct comparison against 0 forms,
++ so catch those as a special case. */
++ if (use_zero_form)
++ {
++ inverse = 0;
++ switch (GET_CODE (operands[3]))
++ {
++ case LT:
++ base_comparison = gen_aarch64_cmlt<mode>;
++ break;
++ case LE:
++ base_comparison = gen_aarch64_cmle<mode>;
++ break;
++ default:
++ /* Do nothing, other zero form cases already have the correct
++ base_comparison. */
++ break;
++ }
++ }
++
++ if (!inverse)
++ emit_insn (base_comparison (mask, operands[4], operands[5]));
++ else
++ emit_insn (complimentary_comparison (mask, operands[5], operands[4]));
++ break;
++ case UNLT:
++ case UNLE:
++ case UNGT:
++ case UNGE:
++ case NE:
++ /* FCM returns false for lanes which are unordered, so if we use
++ the inverse of the comparison we actually want to emit, then
++ swap the operands to BSL, we will end up with the correct result.
++ Note that a NE NaN and NaN NE b are true for all a, b.
++
++ Our transformations are:
++ a GE b -> !(b GT a)
++ a GT b -> !(b GE a)
++ a LE b -> !(a GT b)
++ a LT b -> !(a GE b)
++ a NE b -> !(a EQ b) */
++
++ if (inverse)
++ emit_insn (base_comparison (mask, operands[4], operands[5]));
++ else
++ emit_insn (complimentary_comparison (mask, operands[5], operands[4]));
++
++ swap_bsl_operands = 1;
++ break;
++ case UNEQ:
++ /* We check (a > b || b > a). combining these comparisons give us
++ true iff !(a != b && a ORDERED b), swapping the operands to BSL
++ will then give us (a == b || a UNORDERED b) as intended. */
++
++ emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5]));
++ emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[5], operands[4]));
++ emit_insn (gen_ior<v_cmp_result>3 (mask, mask, tmp));
++ swap_bsl_operands = 1;
++ break;
++ case UNORDERED:
++ /* Operands are ORDERED iff (a > b || b >= a).
++ Swapping the operands to BSL will give the UNORDERED case. */
++ swap_bsl_operands = 1;
++ /* Fall through. */
++ case ORDERED:
++ emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[4], operands[5]));
++ emit_insn (gen_aarch64_cmge<mode> (mask, operands[5], operands[4]));
++ emit_insn (gen_ior<v_cmp_result>3 (mask, mask, tmp));
++ break;
++ default:
++ gcc_unreachable ();
++ }
++
++ if (swap_bsl_operands)
++ emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
++ operands[1]));
++ else
++ emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
++ operands[2]));
++ DONE;
++})
++
++(define_expand "vcond<mode><mode>"
++ [(set (match_operand:VALL 0 "register_operand")
++ (if_then_else:VALL
++ (match_operator 3 "comparison_operator"
++ [(match_operand:VALL 4 "register_operand")
++ (match_operand:VALL 5 "nonmemory_operand")])
++ (match_operand:VALL 1 "register_operand")
++ (match_operand:VALL 2 "register_operand")))]
++ "TARGET_SIMD"
++{
++ emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
++ operands[2], operands[3],
++ operands[4], operands[5]));
++ DONE;
++})
++
++
++(define_expand "vcondu<mode><mode>"
++ [(set (match_operand:VDQ 0 "register_operand")
++ (if_then_else:VDQ
++ (match_operator 3 "comparison_operator"
++ [(match_operand:VDQ 4 "register_operand")
++ (match_operand:VDQ 5 "nonmemory_operand")])
++ (match_operand:VDQ 1 "register_operand")
++ (match_operand:VDQ 2 "register_operand")))]
++ "TARGET_SIMD"
++{
++ emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
++ operands[2], operands[3],
++ operands[4], operands[5]));
++ DONE;
++})
++
++;; Patterns for AArch64 SIMD Intrinsics.
++
++(define_expand "aarch64_create<mode>"
++ [(match_operand:VD_RE 0 "register_operand" "")
++ (match_operand:DI 1 "general_operand" "")]
++ "TARGET_SIMD"
++{
++ rtx src = gen_lowpart (<MODE>mode, operands[1]);
++ emit_move_insn (operands[0], src);
++ DONE;
++})
++
++(define_insn "aarch64_get_lane_signed<mode>"
++ [(set (match_operand:<VEL> 0 "register_operand" "=r")
++ (sign_extend:<VEL>
++ (vec_select:<VEL>
++ (match_operand:VQ_S 1 "register_operand" "w")
++ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
++ "TARGET_SIMD"
++ "smov\\t%0, %1.<Vetype>[%2]"
++ [(set_attr "simd_type" "simd_movgp")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_get_lane_unsigned<mode>"
++ [(set (match_operand:<VEL> 0 "register_operand" "=r")
++ (zero_extend:<VEL>
++ (vec_select:<VEL>
++ (match_operand:VDQ 1 "register_operand" "w")
++ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
++ "TARGET_SIMD"
++ "umov\\t%<vw>0, %1.<Vetype>[%2]"
++ [(set_attr "simd_type" "simd_movgp")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_get_lane<mode>"
++ [(set (match_operand:<VEL> 0 "register_operand" "=w")
++ (vec_select:<VEL>
++ (match_operand:VDQF 1 "register_operand" "w")
++ (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
++ "TARGET_SIMD"
++ "mov\\t%0.<Vetype>[0], %1.<Vetype>[%2]"
++ [(set_attr "simd_type" "simd_ins")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_get_lanedi"
++ [(match_operand:DI 0 "register_operand" "=r")
++ (match_operand:DI 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_lane_bounds (operands[2], 0, 1);
++ emit_move_insn (operands[0], operands[1]);
++ DONE;
++})
++
++(define_expand "aarch64_reinterpretv8qi<mode>"
++ [(match_operand:V8QI 0 "register_operand" "")
++ (match_operand:VDC 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_reinterpret (operands[0], operands[1]);
++ DONE;
++})
++
++(define_expand "aarch64_reinterpretv4hi<mode>"
++ [(match_operand:V4HI 0 "register_operand" "")
++ (match_operand:VDC 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_reinterpret (operands[0], operands[1]);
++ DONE;
++})
++
++(define_expand "aarch64_reinterpretv2si<mode>"
++ [(match_operand:V2SI 0 "register_operand" "")
++ (match_operand:VDC 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_reinterpret (operands[0], operands[1]);
++ DONE;
++})
++
++(define_expand "aarch64_reinterpretv2sf<mode>"
++ [(match_operand:V2SF 0 "register_operand" "")
++ (match_operand:VDC 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_reinterpret (operands[0], operands[1]);
++ DONE;
++})
++
++(define_expand "aarch64_reinterpretdi<mode>"
++ [(match_operand:DI 0 "register_operand" "")
++ (match_operand:VD_RE 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_reinterpret (operands[0], operands[1]);
++ DONE;
++})
++
++(define_expand "aarch64_reinterpretv16qi<mode>"
++ [(match_operand:V16QI 0 "register_operand" "")
++ (match_operand:VQ 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_reinterpret (operands[0], operands[1]);
++ DONE;
++})
++
++(define_expand "aarch64_reinterpretv8hi<mode>"
++ [(match_operand:V8HI 0 "register_operand" "")
++ (match_operand:VQ 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_reinterpret (operands[0], operands[1]);
++ DONE;
++})
++
++(define_expand "aarch64_reinterpretv4si<mode>"
++ [(match_operand:V4SI 0 "register_operand" "")
++ (match_operand:VQ 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_reinterpret (operands[0], operands[1]);
++ DONE;
++})
++
++(define_expand "aarch64_reinterpretv4sf<mode>"
++ [(match_operand:V4SF 0 "register_operand" "")
++ (match_operand:VQ 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_reinterpret (operands[0], operands[1]);
++ DONE;
++})
++
++(define_expand "aarch64_reinterpretv2di<mode>"
++ [(match_operand:V2DI 0 "register_operand" "")
++ (match_operand:VQ 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_reinterpret (operands[0], operands[1]);
++ DONE;
++})
++
++(define_expand "aarch64_reinterpretv2df<mode>"
++ [(match_operand:V2DF 0 "register_operand" "")
++ (match_operand:VQ 1 "register_operand" "")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_reinterpret (operands[0], operands[1]);
++ DONE;
++})
++
++;; In this insn, operand 1 should be low, and operand 2 the high part of the
++;; dest vector.
++
++(define_insn "*aarch64_combinez<mode>"
++ [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
++ (vec_concat:<VDBL>
++ (match_operand:VDIC 1 "register_operand" "w")
++ (match_operand:VDIC 2 "aarch64_simd_imm_zero" "Dz")))]
++ "TARGET_SIMD"
++ "mov\\t%0.8b, %1.8b"
++ [(set_attr "simd_type" "simd_move")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_combine<mode>"
++ [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
++ (vec_concat:<VDBL> (match_operand:VDC 1 "register_operand" "w")
++ (match_operand:VDC 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "mov\\t%0.d[0], %1.d[0]\;ins\\t%0.d[1], %2.d[0]"
++ [(set_attr "simd_type" "simd_ins")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; <su><addsub>l<q>.
++
++(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l2<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
++ (match_operand:VQW 1 "register_operand" "w")
++ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
++ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
++ (match_operand:VQW 2 "register_operand" "w")
++ (match_dup 3)))))]
++ "TARGET_SIMD"
++ "<ANY_EXTEND:su><ADDSUB:optab>l2 %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_addl")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_saddl2<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:VQW 1 "register_operand" "w")
++ (match_operand:VQW 2 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_saddl2<mode>_internal (operands[0], operands[1],
++ operands[2], p));
++ DONE;
++})
++
++(define_expand "aarch64_uaddl2<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:VQW 1 "register_operand" "w")
++ (match_operand:VQW 2 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_uaddl2<mode>_internal (operands[0], operands[1],
++ operands[2], p));
++ DONE;
++})
++
++(define_expand "aarch64_ssubl2<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:VQW 1 "register_operand" "w")
++ (match_operand:VQW 2 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_ssubl2<mode>_internal (operands[0], operands[1],
++ operands[2], p));
++ DONE;
++})
++
++(define_expand "aarch64_usubl2<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:VQW 1 "register_operand" "w")
++ (match_operand:VQW 2 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_usubl2<mode>_internal (operands[0], operands[1],
++ operands[2], p));
++ DONE;
++})
++
++(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE>
++ (match_operand:VDW 1 "register_operand" "w"))
++ (ANY_EXTEND:<VWIDE>
++ (match_operand:VDW 2 "register_operand" "w"))))]
++ "TARGET_SIMD"
++ "<ANY_EXTEND:su><ADDSUB:optab>l %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_addl")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; <su><addsub>w<q>.
++
++(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
++ (ANY_EXTEND:<VWIDE>
++ (match_operand:VDW 2 "register_operand" "w"))))]
++ "TARGET_SIMD"
++ "<ANY_EXTEND:su><ADDSUB:optab>w\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_addl")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w2<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
++ (ANY_EXTEND:<VWIDE>
++ (vec_select:<VHALF>
++ (match_operand:VQW 2 "register_operand" "w")
++ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))))]
++ "TARGET_SIMD"
++ "<ANY_EXTEND:su><ADDSUB:optab>w2\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_addl")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_saddw2<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQW 2 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_saddw2<mode>_internal (operands[0], operands[1],
++ operands[2], p));
++ DONE;
++})
++
++(define_expand "aarch64_uaddw2<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQW 2 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_uaddw2<mode>_internal (operands[0], operands[1],
++ operands[2], p));
++ DONE;
++})
++
++
++(define_expand "aarch64_ssubw2<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQW 2 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_ssubw2<mode>_internal (operands[0], operands[1],
++ operands[2], p));
++ DONE;
++})
++
++(define_expand "aarch64_usubw2<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQW 2 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_usubw2<mode>_internal (operands[0], operands[1],
++ operands[2], p));
++ DONE;
++})
++
++;; <su><r>h<addsub>.
++
++(define_insn "aarch64_<sur>h<addsub><mode>"
++ [(set (match_operand:VQ_S 0 "register_operand" "=w")
++ (unspec:VQ_S [(match_operand:VQ_S 1 "register_operand" "w")
++ (match_operand:VQ_S 2 "register_operand" "w")]
++ HADDSUB))]
++ "TARGET_SIMD"
++ "<sur>h<addsub>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_add")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; <r><addsub>hn<q>.
++
++(define_insn "aarch64_<sur><addsub>hn<mode>"
++ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
++ (unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
++ (match_operand:VQN 2 "register_operand" "w")]
++ ADDSUBHN))]
++ "TARGET_SIMD"
++ "<sur><addsub>hn\\t%0.<Vntype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_addn")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_<sur><addsub>hn2<mode>"
++ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
++ (unspec:<VNARROWQ2> [(match_operand:<VNARROWQ> 1 "register_operand" "0")
++ (match_operand:VQN 2 "register_operand" "w")
++ (match_operand:VQN 3 "register_operand" "w")]
++ ADDSUBHN2))]
++ "TARGET_SIMD"
++ "<sur><addsub>hn2\\t%0.<V2ntype>, %2.<Vtype>, %3.<Vtype>"
++ [(set_attr "simd_type" "simd_addn2")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; pmul.
++
++(define_insn "aarch64_pmul<mode>"
++ [(set (match_operand:VB 0 "register_operand" "=w")
++ (unspec:VB [(match_operand:VB 1 "register_operand" "w")
++ (match_operand:VB 2 "register_operand" "w")]
++ UNSPEC_PMUL))]
++ "TARGET_SIMD"
++ "pmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; <su>q<addsub>
++
++(define_insn "aarch64_<su_optab><optab><mode>"
++ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
++ (BINQOPS:VSDQ_I (match_operand:VSDQ_I 1 "register_operand" "w")
++ (match_operand:VSDQ_I 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "<su_optab><optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
++ [(set_attr "simd_type" "simd_add")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; suqadd and usqadd
++
++(define_insn "aarch64_<sur>qadd<mode>"
++ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
++ (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "0")
++ (match_operand:VSDQ_I 2 "register_operand" "w")]
++ USSUQADD))]
++ "TARGET_SIMD"
++ "<sur>qadd\\t%<v>0<Vmtype>, %<v>2<Vmtype>"
++ [(set_attr "simd_type" "simd_sat_add")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; sqmovun
++
++(define_insn "aarch64_sqmovun<mode>"
++ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
++ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
++ UNSPEC_SQXTUN))]
++ "TARGET_SIMD"
++ "sqxtun\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
++ [(set_attr "simd_type" "simd_sat_shiftn_imm")
++ (set_attr "simd_mode" "<MODE>")]
++ )
++
++;; sqmovn and uqmovn
++
++(define_insn "aarch64_<sur>qmovn<mode>"
++ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
++ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
++ SUQMOVN))]
++ "TARGET_SIMD"
++ "<sur>qxtn\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
++ [(set_attr "simd_type" "simd_sat_shiftn_imm")
++ (set_attr "simd_mode" "<MODE>")]
++ )
++
++;; <su>q<absneg>
++
++(define_insn "aarch64_s<optab><mode>"
++ [(set (match_operand:VSDQ_I_BHSI 0 "register_operand" "=w")
++ (UNQOPS:VSDQ_I_BHSI
++ (match_operand:VSDQ_I_BHSI 1 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "s<optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>"
++ [(set_attr "simd_type" "simd_sat_negabs")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; sq<r>dmulh.
++
++(define_insn "aarch64_sq<r>dmulh<mode>"
++ [(set (match_operand:VSDQ_HSI 0 "register_operand" "=w")
++ (unspec:VSDQ_HSI
++ [(match_operand:VSDQ_HSI 1 "register_operand" "w")
++ (match_operand:VSDQ_HSI 2 "register_operand" "w")]
++ VQDMULH))]
++ "TARGET_SIMD"
++ "sq<r>dmulh\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
++ [(set_attr "simd_type" "simd_sat_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; sq<r>dmulh_lane
++
++(define_insn "aarch64_sq<r>dmulh_lane<mode>"
++ [(set (match_operand:VDQHS 0 "register_operand" "=w")
++ (unspec:VDQHS
++ [(match_operand:VDQHS 1 "register_operand" "w")
++ (vec_select:<VEL>
++ (match_operand:<VCOND> 2 "register_operand" "<vwx>")
++ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
++ VQDMULH))]
++ "TARGET_SIMD"
++ "*
++ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCOND>mode));
++ return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";"
++ [(set_attr "simd_type" "simd_sat_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_sq<r>dmulh_laneq<mode>"
++ [(set (match_operand:VDQHS 0 "register_operand" "=w")
++ (unspec:VDQHS
++ [(match_operand:VDQHS 1 "register_operand" "w")
++ (vec_select:<VEL>
++ (match_operand:<VCONQ> 2 "register_operand" "<vwx>")
++ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
++ VQDMULH))]
++ "TARGET_SIMD"
++ "*
++ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
++ return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";"
++ [(set_attr "simd_type" "simd_sat_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_sq<r>dmulh_lane<mode>"
++ [(set (match_operand:SD_HSI 0 "register_operand" "=w")
++ (unspec:SD_HSI
++ [(match_operand:SD_HSI 1 "register_operand" "w")
++ (vec_select:<VEL>
++ (match_operand:<VCONQ> 2 "register_operand" "<vwx>")
++ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
++ VQDMULH))]
++ "TARGET_SIMD"
++ "*
++ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
++ return \"sq<r>dmulh\\t%<v>0, %<v>1, %2.<v>[%3]\";"
++ [(set_attr "simd_type" "simd_sat_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; vqdml[sa]l
++
++(define_insn "aarch64_sqdml<SBINQOPS:as>l<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (SBINQOPS:<VWIDE>
++ (match_operand:<VWIDE> 1 "register_operand" "0")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (match_operand:VSD_HSI 2 "register_operand" "w"))
++ (sign_extend:<VWIDE>
++ (match_operand:VSD_HSI 3 "register_operand" "w")))
++ (const_int 1))))]
++ "TARGET_SIMD"
++ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
++ [(set_attr "simd_type" "simd_sat_mlal")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; vqdml[sa]l_lane
++
++(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (SBINQOPS:<VWIDE>
++ (match_operand:<VWIDE> 1 "register_operand" "0")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (match_operand:VD_HSI 2 "register_operand" "w"))
++ (sign_extend:<VWIDE>
++ (vec_duplicate:VD_HSI
++ (vec_select:<VEL>
++ (match_operand:<VCON> 3 "register_operand" "<vwx>")
++ (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
++ ))
++ (const_int 1))))]
++ "TARGET_SIMD"
++ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
++ [(set_attr "simd_type" "simd_sat_mlal")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (SBINQOPS:<VWIDE>
++ (match_operand:<VWIDE> 1 "register_operand" "0")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (match_operand:SD_HSI 2 "register_operand" "w"))
++ (sign_extend:<VWIDE>
++ (vec_select:<VEL>
++ (match_operand:<VCON> 3 "register_operand" "<vwx>")
++ (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
++ )
++ (const_int 1))))]
++ "TARGET_SIMD"
++ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
++ [(set_attr "simd_type" "simd_sat_mlal")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_sqdmlal_lane<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "0")
++ (match_operand:VSD_HSI 2 "register_operand" "w")
++ (match_operand:<VCON> 3 "register_operand" "<vwx>")
++ (match_operand:SI 4 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
++ emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ operands[4]));
++ DONE;
++})
++
++(define_expand "aarch64_sqdmlal_laneq<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "0")
++ (match_operand:VSD_HSI 2 "register_operand" "w")
++ (match_operand:<VCON> 3 "register_operand" "<vwx>")
++ (match_operand:SI 4 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
++ emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ operands[4]));
++ DONE;
++})
++
++(define_expand "aarch64_sqdmlsl_lane<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "0")
++ (match_operand:VSD_HSI 2 "register_operand" "w")
++ (match_operand:<VCON> 3 "register_operand" "<vwx>")
++ (match_operand:SI 4 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
++ emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ operands[4]));
++ DONE;
++})
++
++(define_expand "aarch64_sqdmlsl_laneq<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "0")
++ (match_operand:VSD_HSI 2 "register_operand" "w")
++ (match_operand:<VCON> 3 "register_operand" "<vwx>")
++ (match_operand:SI 4 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
++ emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ operands[4]));
++ DONE;
++})
++
++;; vqdml[sa]l_n
++
++(define_insn "aarch64_sqdml<SBINQOPS:as>l_n<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (SBINQOPS:<VWIDE>
++ (match_operand:<VWIDE> 1 "register_operand" "0")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (match_operand:VD_HSI 2 "register_operand" "w"))
++ (sign_extend:<VWIDE>
++ (vec_duplicate:VD_HSI
++ (match_operand:<VEL> 3 "register_operand" "w"))))
++ (const_int 1))))]
++ "TARGET_SIMD"
++ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
++ [(set_attr "simd_type" "simd_sat_mlal")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; sqdml[as]l2
++
++(define_insn "aarch64_sqdml<SBINQOPS:as>l2<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (SBINQOPS:<VWIDE>
++ (match_operand:<VWIDE> 1 "register_operand" "0")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (vec_select:<VHALF>
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
++ (sign_extend:<VWIDE>
++ (vec_select:<VHALF>
++ (match_operand:VQ_HSI 3 "register_operand" "w")
++ (match_dup 4))))
++ (const_int 1))))]
++ "TARGET_SIMD"
++ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
++ [(set_attr "simd_type" "simd_sat_mlal")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_sqdmlal2<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_operand:VQ_HSI 3 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_sqdmlal2<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3], p));
++ DONE;
++})
++
++(define_expand "aarch64_sqdmlsl2<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_operand:VQ_HSI 3 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_sqdmlsl2<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3], p));
++ DONE;
++})
++
++;; vqdml[sa]l2_lane
++
++(define_insn "aarch64_sqdml<SBINQOPS:as>l2_lane<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (SBINQOPS:<VWIDE>
++ (match_operand:<VWIDE> 1 "register_operand" "0")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (vec_select:<VHALF>
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_operand:VQ_HSI 5 "vect_par_cnst_hi_half" "")))
++ (sign_extend:<VWIDE>
++ (vec_duplicate:<VHALF>
++ (vec_select:<VEL>
++ (match_operand:<VCON> 3 "register_operand" "<vwx>")
++ (parallel [(match_operand:SI 4 "immediate_operand" "i")])
++ ))))
++ (const_int 1))))]
++ "TARGET_SIMD"
++ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
++ [(set_attr "simd_type" "simd_sat_mlal")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_sqdmlal2_lane<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_operand:<VCON> 3 "register_operand" "<vwx>")
++ (match_operand:SI 4 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
++ emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ operands[4], p));
++ DONE;
++})
++
++(define_expand "aarch64_sqdmlal2_laneq<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_operand:<VCON> 3 "register_operand" "<vwx>")
++ (match_operand:SI 4 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
++ emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ operands[4], p));
++ DONE;
++})
++
++(define_expand "aarch64_sqdmlsl2_lane<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_operand:<VCON> 3 "register_operand" "<vwx>")
++ (match_operand:SI 4 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
++ emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ operands[4], p));
++ DONE;
++})
++
++(define_expand "aarch64_sqdmlsl2_laneq<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_operand:<VCON> 3 "register_operand" "<vwx>")
++ (match_operand:SI 4 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
++ emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ operands[4], p));
++ DONE;
++})
++
++(define_insn "aarch64_sqdml<SBINQOPS:as>l2_n<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (SBINQOPS:<VWIDE>
++ (match_operand:<VWIDE> 1 "register_operand" "0")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (vec_select:<VHALF>
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
++ (sign_extend:<VWIDE>
++ (vec_duplicate:<VHALF>
++ (match_operand:<VEL> 3 "register_operand" "w"))))
++ (const_int 1))))]
++ "TARGET_SIMD"
++ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
++ [(set_attr "simd_type" "simd_sat_mlal")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_sqdmlal2_n<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_operand:<VEL> 3 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_sqdmlal2_n<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ p));
++ DONE;
++})
++
++(define_expand "aarch64_sqdmlsl2_n<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:<VWIDE> 1 "register_operand" "w")
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_operand:<VEL> 3 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_sqdmlsl2_n<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ p));
++ DONE;
++})
++
++;; vqdmull
++
++(define_insn "aarch64_sqdmull<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (match_operand:VSD_HSI 1 "register_operand" "w"))
++ (sign_extend:<VWIDE>
++ (match_operand:VSD_HSI 2 "register_operand" "w")))
++ (const_int 1)))]
++ "TARGET_SIMD"
++ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
++ [(set_attr "simd_type" "simd_sat_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; vqdmull_lane
++
++(define_insn "aarch64_sqdmull_lane<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (match_operand:VD_HSI 1 "register_operand" "w"))
++ (sign_extend:<VWIDE>
++ (vec_duplicate:VD_HSI
++ (vec_select:<VEL>
++ (match_operand:<VCON> 2 "register_operand" "<vwx>")
++ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
++ ))
++ (const_int 1)))]
++ "TARGET_SIMD"
++ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
++ [(set_attr "simd_type" "simd_sat_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_sqdmull_lane<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (match_operand:SD_HSI 1 "register_operand" "w"))
++ (sign_extend:<VWIDE>
++ (vec_select:<VEL>
++ (match_operand:<VCON> 2 "register_operand" "<vwx>")
++ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))
++ ))
++ (const_int 1)))]
++ "TARGET_SIMD"
++ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
++ [(set_attr "simd_type" "simd_sat_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_sqdmull_lane<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:VSD_HSI 1 "register_operand" "w")
++ (match_operand:<VCON> 2 "register_operand" "<vwx>")
++ (match_operand:SI 3 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
++ emit_insn (gen_aarch64_sqdmull_lane<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3]));
++ DONE;
++})
++
++(define_expand "aarch64_sqdmull_laneq<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:VD_HSI 1 "register_operand" "w")
++ (match_operand:<VCON> 2 "register_operand" "<vwx>")
++ (match_operand:SI 3 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode));
++ emit_insn (gen_aarch64_sqdmull_lane<mode>_internal
++ (operands[0], operands[1], operands[2], operands[3]));
++ DONE;
++})
++
++;; vqdmull_n
++
++(define_insn "aarch64_sqdmull_n<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (match_operand:VD_HSI 1 "register_operand" "w"))
++ (sign_extend:<VWIDE>
++ (vec_duplicate:VD_HSI
++ (match_operand:<VEL> 2 "register_operand" "w")))
++ )
++ (const_int 1)))]
++ "TARGET_SIMD"
++ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
++ [(set_attr "simd_type" "simd_sat_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; vqdmull2
++
++
++
++(define_insn "aarch64_sqdmull2<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (vec_select:<VHALF>
++ (match_operand:VQ_HSI 1 "register_operand" "w")
++ (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
++ (sign_extend:<VWIDE>
++ (vec_select:<VHALF>
++ (match_operand:VQ_HSI 2 "register_operand" "w")
++ (match_dup 3)))
++ )
++ (const_int 1)))]
++ "TARGET_SIMD"
++ "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
++ [(set_attr "simd_type" "simd_sat_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_sqdmull2<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:VQ_HSI 1 "register_operand" "w")
++ (match_operand:<VCON> 2 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_sqdmull2<mode>_internal (operands[0], operands[1],
++ operands[2], p));
++ DONE;
++})
++
++;; vqdmull2_lane
++
++(define_insn "aarch64_sqdmull2_lane<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (vec_select:<VHALF>
++ (match_operand:VQ_HSI 1 "register_operand" "w")
++ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
++ (sign_extend:<VWIDE>
++ (vec_duplicate:<VHALF>
++ (vec_select:<VEL>
++ (match_operand:<VCON> 2 "register_operand" "<vwx>")
++ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
++ ))
++ (const_int 1)))]
++ "TARGET_SIMD"
++ "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
++ [(set_attr "simd_type" "simd_sat_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_sqdmull2_lane<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:VQ_HSI 1 "register_operand" "w")
++ (match_operand:<VCON> 2 "register_operand" "<vwx>")
++ (match_operand:SI 3 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
++ emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ p));
++ DONE;
++})
++
++(define_expand "aarch64_sqdmull2_laneq<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:VQ_HSI 1 "register_operand" "w")
++ (match_operand:<VCON> 2 "register_operand" "<vwx>")
++ (match_operand:SI 3 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
++ emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
++ operands[2], operands[3],
++ p));
++ DONE;
++})
++
++;; vqdmull2_n
++
++(define_insn "aarch64_sqdmull2_n<mode>_internal"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (ss_ashift:<VWIDE>
++ (mult:<VWIDE>
++ (sign_extend:<VWIDE>
++ (vec_select:<VHALF>
++ (match_operand:VQ_HSI 1 "register_operand" "w")
++ (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
++ (sign_extend:<VWIDE>
++ (vec_duplicate:<VHALF>
++ (match_operand:<VEL> 2 "register_operand" "w")))
++ )
++ (const_int 1)))]
++ "TARGET_SIMD"
++ "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
++ [(set_attr "simd_type" "simd_sat_mul")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_sqdmull2_n<mode>"
++ [(match_operand:<VWIDE> 0 "register_operand" "=w")
++ (match_operand:VQ_HSI 1 "register_operand" "w")
++ (match_operand:<VEL> 2 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
++ emit_insn (gen_aarch64_sqdmull2_n<mode>_internal (operands[0], operands[1],
++ operands[2], p));
++ DONE;
++})
++
++;; vshl
++
++(define_insn "aarch64_<sur>shl<mode>"
++ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
++ (unspec:VSDQ_I_DI
++ [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
++ (match_operand:VSDQ_I_DI 2 "register_operand" "w")]
++ VSHL))]
++ "TARGET_SIMD"
++ "<sur>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>";
++ [(set_attr "simd_type" "simd_shift")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++
++;; vqshl
++
++(define_insn "aarch64_<sur>q<r>shl<mode>"
++ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
++ (unspec:VSDQ_I
++ [(match_operand:VSDQ_I 1 "register_operand" "w")
++ (match_operand:VSDQ_I 2 "register_operand" "w")]
++ VQSHL))]
++ "TARGET_SIMD"
++ "<sur>q<r>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>";
++ [(set_attr "simd_type" "simd_sat_shift")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; vshl_n
++
++(define_expand "aarch64_sshl_n<mode>"
++ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
++ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
++ DONE;
++})
++
++(define_expand "aarch64_ushl_n<mode>"
++ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
++ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
++ DONE;
++})
++
++;; vshll_n
++
++(define_insn "aarch64_<sur>shll_n<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (unspec:<VWIDE> [(match_operand:VDW 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ VSHLL))]
++ "TARGET_SIMD"
++ "*
++ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
++ aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
++ if (INTVAL (operands[2]) == bit_width)
++ {
++ return \"shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
++ }
++ else {
++ return \"<sur>shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
++ }"
++ [(set_attr "simd_type" "simd_shift_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; vshll_high_n
++
++(define_insn "aarch64_<sur>shll2_n<mode>"
++ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
++ (unspec:<VWIDE> [(match_operand:VQW 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ VSHLL))]
++ "TARGET_SIMD"
++ "*
++ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
++ aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
++ if (INTVAL (operands[2]) == bit_width)
++ {
++ return \"shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
++ }
++ else {
++ return \"<sur>shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
++ }"
++ [(set_attr "simd_type" "simd_shift_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; vshr_n
++
++(define_expand "aarch64_sshr_n<mode>"
++ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
++ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ emit_insn (gen_ashr<mode>3 (operands[0], operands[1], operands[2]));
++ DONE;
++})
++
++(define_expand "aarch64_ushr_n<mode>"
++ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
++ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ emit_insn (gen_lshr<mode>3 (operands[0], operands[1], operands[2]));
++ DONE;
++})
++
++;; vrshr_n
++
++(define_insn "aarch64_<sur>shr_n<mode>"
++ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
++ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ VRSHR_N))]
++ "TARGET_SIMD"
++ "*
++ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
++ aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
++ return \"<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
++ [(set_attr "simd_type" "simd_shift_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; v(r)sra_n
++
++(define_insn "aarch64_<sur>sra_n<mode>"
++ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
++ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
++ (match_operand:VSDQ_I_DI 2 "register_operand" "w")
++ (match_operand:SI 3 "immediate_operand" "i")]
++ VSRA))]
++ "TARGET_SIMD"
++ "*
++ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
++ aarch64_simd_const_bounds (operands[3], 1, bit_width + 1);
++ return \"<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
++ [(set_attr "simd_type" "simd_shift_imm_acc")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; vs<lr>i_n
++
++(define_insn "aarch64_<sur>s<lr>i_n<mode>"
++ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
++ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
++ (match_operand:VSDQ_I_DI 2 "register_operand" "w")
++ (match_operand:SI 3 "immediate_operand" "i")]
++ VSLRI))]
++ "TARGET_SIMD"
++ "*
++ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
++ aarch64_simd_const_bounds (operands[3], 1 - <VSLRI:offsetlr>,
++ bit_width - <VSLRI:offsetlr> + 1);
++ return \"s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
++ [(set_attr "simd_type" "simd_shift_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; vqshl(u)
++
++(define_insn "aarch64_<sur>qshl<u>_n<mode>"
++ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
++ (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ VQSHL_N))]
++ "TARGET_SIMD"
++ "*
++ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
++ aarch64_simd_const_bounds (operands[2], 0, bit_width);
++ return \"<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
++ [(set_attr "simd_type" "simd_sat_shift_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++
++;; vq(r)shr(u)n_n
++
++(define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>"
++ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
++ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ VQSHRN_N))]
++ "TARGET_SIMD"
++ "*
++ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
++ aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
++ return \"<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2\";"
++ [(set_attr "simd_type" "simd_sat_shiftn_imm")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++
++;; cm(eq|ge|le|lt|gt)
++
++(define_insn "aarch64_cm<cmp><mode>"
++ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
++ (unspec:<V_cmp_result>
++ [(match_operand:VSDQ_I_DI 1 "register_operand" "w,w")
++ (match_operand:VSDQ_I_DI 2 "aarch64_simd_reg_or_zero" "w,Z")]
++ VCMP_S))]
++ "TARGET_SIMD"
++ "@
++ cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
++ cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #0"
++ [(set_attr "simd_type" "simd_cmp")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; cm(hs|hi|tst)
++
++(define_insn "aarch64_cm<cmp><mode>"
++ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
++ (unspec:<V_cmp_result>
++ [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
++ (match_operand:VSDQ_I_DI 2 "register_operand" "w")]
++ VCMP_U))]
++ "TARGET_SIMD"
++ "cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
++ [(set_attr "simd_type" "simd_cmp")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; fcm(eq|ge|le|lt|gt)
++
++(define_insn "aarch64_cm<cmp><mode>"
++ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
++ (unspec:<V_cmp_result>
++ [(match_operand:VDQF 1 "register_operand" "w,w")
++ (match_operand:VDQF 2 "aarch64_simd_reg_or_zero" "w,Dz")]
++ VCMP_S))]
++ "TARGET_SIMD"
++ "@
++ fcm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
++ fcm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, 0"
++ [(set_attr "simd_type" "simd_fcmp")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; addp
++
++(define_insn "aarch64_addp<mode>"
++ [(set (match_operand:VD_BHSI 0 "register_operand" "=w")
++ (unspec:VD_BHSI
++ [(match_operand:VD_BHSI 1 "register_operand" "w")
++ (match_operand:VD_BHSI 2 "register_operand" "w")]
++ UNSPEC_ADDP))]
++ "TARGET_SIMD"
++ "addp\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
++ [(set_attr "simd_type" "simd_add")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_addpdi"
++ [(set (match_operand:DI 0 "register_operand" "=w")
++ (unspec:DI
++ [(match_operand:V2DI 1 "register_operand" "w")]
++ UNSPEC_ADDP))]
++ "TARGET_SIMD"
++ "addp\t%d0, %1.2d"
++ [(set_attr "simd_type" "simd_add")
++ (set_attr "simd_mode" "DI")]
++)
++
++;; v(max|min)
++
++(define_expand "aarch64_<maxmin><mode>"
++ [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w")
++ (MAXMIN:VDQ_BHSI (match_operand:VDQ_BHSI 1 "register_operand" "w")
++ (match_operand:VDQ_BHSI 2 "register_operand" "w")))]
++ "TARGET_SIMD"
++{
++ emit_insn (gen_<maxmin><mode>3 (operands[0], operands[1], operands[2]));
++ DONE;
++})
++
++
++(define_insn "aarch64_<fmaxmin><mode>"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")
++ (match_operand:VDQF 2 "register_operand" "w")]
++ FMAXMIN))]
++ "TARGET_SIMD"
++ "<fmaxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_fminmax")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; sqrt
++
++(define_insn "sqrt<mode>2"
++ [(set (match_operand:VDQF 0 "register_operand" "=w")
++ (sqrt:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
++ "TARGET_SIMD"
++ "fsqrt\\t%0.<Vtype>, %1.<Vtype>"
++ [(set_attr "simd_type" "simd_fsqrt")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_expand "aarch64_sqrt<mode>"
++ [(match_operand:VDQF 0 "register_operand" "=w")
++ (match_operand:VDQF 1 "register_operand" "w")]
++ "TARGET_SIMD"
++{
++ emit_insn (gen_sqrt<mode>2 (operands[0], operands[1]));
++ DONE;
++})
++
++
++;; Patterns for vector struct loads and stores.
++
++(define_insn "vec_load_lanesoi<mode>"
++ [(set (match_operand:OI 0 "register_operand" "=w")
++ (unspec:OI [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")
++ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_LD2))]
++ "TARGET_SIMD"
++ "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
++ [(set_attr "simd_type" "simd_load2")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "vec_store_lanesoi<mode>"
++ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
++ (unspec:OI [(match_operand:OI 1 "register_operand" "w")
++ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_ST2))]
++ "TARGET_SIMD"
++ "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
++ [(set_attr "simd_type" "simd_store2")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "vec_load_lanesci<mode>"
++ [(set (match_operand:CI 0 "register_operand" "=w")
++ (unspec:CI [(match_operand:CI 1 "aarch64_simd_struct_operand" "Utv")
++ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_LD3))]
++ "TARGET_SIMD"
++ "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
++ [(set_attr "simd_type" "simd_load3")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "vec_store_lanesci<mode>"
++ [(set (match_operand:CI 0 "aarch64_simd_struct_operand" "=Utv")
++ (unspec:CI [(match_operand:CI 1 "register_operand" "w")
++ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_ST3))]
++ "TARGET_SIMD"
++ "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
++ [(set_attr "simd_type" "simd_store3")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "vec_load_lanesxi<mode>"
++ [(set (match_operand:XI 0 "register_operand" "=w")
++ (unspec:XI [(match_operand:XI 1 "aarch64_simd_struct_operand" "Utv")
++ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_LD4))]
++ "TARGET_SIMD"
++ "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
++ [(set_attr "simd_type" "simd_load4")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "vec_store_lanesxi<mode>"
++ [(set (match_operand:XI 0 "aarch64_simd_struct_operand" "=Utv")
++ (unspec:XI [(match_operand:XI 1 "register_operand" "w")
++ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_ST4))]
++ "TARGET_SIMD"
++ "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
++ [(set_attr "simd_type" "simd_store4")
++ (set_attr "simd_mode" "<MODE>")])
++
++;; Reload patterns for AdvSIMD register list operands.
++
++(define_expand "mov<mode>"
++ [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "")
++ (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" ""))]
++ "TARGET_SIMD"
++{
++ if (can_create_pseudo_p ())
++ {
++ if (GET_CODE (operands[0]) != REG)
++ operands[1] = force_reg (<MODE>mode, operands[1]);
++ }
++})
++
++(define_insn "*aarch64_mov<mode>"
++ [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w")
++ (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" " w,w,Utv"))]
++ "TARGET_SIMD
++ && (register_operand (operands[0], <MODE>mode)
++ || register_operand (operands[1], <MODE>mode))"
++
++{
++ switch (which_alternative)
++ {
++ case 0: return "#";
++ case 1: return "st1\\t{%S1.16b - %<Vendreg>1.16b}, %0";
++ case 2: return "ld1\\t{%S0.16b - %<Vendreg>0.16b}, %1";
++ default: gcc_unreachable ();
++ }
++}
++ [(set_attr "simd_type" "simd_move,simd_store<nregs>,simd_load<nregs>")
++ (set (attr "length") (symbol_ref "aarch64_simd_attr_length_move (insn)"))
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_split
++ [(set (match_operand:OI 0 "register_operand" "")
++ (match_operand:OI 1 "register_operand" ""))]
++ "TARGET_SIMD && reload_completed"
++ [(set (match_dup 0) (match_dup 1))
++ (set (match_dup 2) (match_dup 3))]
++{
++ int rdest = REGNO (operands[0]);
++ int rsrc = REGNO (operands[1]);
++ rtx dest[2], src[2];
++
++ dest[0] = gen_rtx_REG (TFmode, rdest);
++ src[0] = gen_rtx_REG (TFmode, rsrc);
++ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
++ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
++
++ aarch64_simd_disambiguate_copy (operands, dest, src, 2);
++})
++
++(define_split
++ [(set (match_operand:CI 0 "register_operand" "")
++ (match_operand:CI 1 "register_operand" ""))]
++ "TARGET_SIMD && reload_completed"
++ [(set (match_dup 0) (match_dup 1))
++ (set (match_dup 2) (match_dup 3))
++ (set (match_dup 4) (match_dup 5))]
++{
++ int rdest = REGNO (operands[0]);
++ int rsrc = REGNO (operands[1]);
++ rtx dest[3], src[3];
++
++ dest[0] = gen_rtx_REG (TFmode, rdest);
++ src[0] = gen_rtx_REG (TFmode, rsrc);
++ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
++ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
++ dest[2] = gen_rtx_REG (TFmode, rdest + 2);
++ src[2] = gen_rtx_REG (TFmode, rsrc + 2);
++
++ aarch64_simd_disambiguate_copy (operands, dest, src, 3);
++})
++
++(define_split
++ [(set (match_operand:XI 0 "register_operand" "")
++ (match_operand:XI 1 "register_operand" ""))]
++ "TARGET_SIMD && reload_completed"
++ [(set (match_dup 0) (match_dup 1))
++ (set (match_dup 2) (match_dup 3))
++ (set (match_dup 4) (match_dup 5))
++ (set (match_dup 6) (match_dup 7))]
++{
++ int rdest = REGNO (operands[0]);
++ int rsrc = REGNO (operands[1]);
++ rtx dest[4], src[4];
++
++ dest[0] = gen_rtx_REG (TFmode, rdest);
++ src[0] = gen_rtx_REG (TFmode, rsrc);
++ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
++ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
++ dest[2] = gen_rtx_REG (TFmode, rdest + 2);
++ src[2] = gen_rtx_REG (TFmode, rsrc + 2);
++ dest[3] = gen_rtx_REG (TFmode, rdest + 3);
++ src[3] = gen_rtx_REG (TFmode, rsrc + 3);
++
++ aarch64_simd_disambiguate_copy (operands, dest, src, 4);
++})
++
++(define_insn "aarch64_ld2<mode>_dreg"
++ [(set (match_operand:OI 0 "register_operand" "=w")
++ (subreg:OI
++ (vec_concat:<VRL2>
++ (vec_concat:<VDBL>
++ (unspec:VD [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
++ UNSPEC_LD2)
++ (vec_duplicate:VD (const_int 0)))
++ (vec_concat:<VDBL>
++ (unspec:VD [(match_dup 1)]
++ UNSPEC_LD2)
++ (vec_duplicate:VD (const_int 0)))) 0))]
++ "TARGET_SIMD"
++ "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
++ [(set_attr "simd_type" "simd_load2")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "aarch64_ld2<mode>_dreg"
++ [(set (match_operand:OI 0 "register_operand" "=w")
++ (subreg:OI
++ (vec_concat:<VRL2>
++ (vec_concat:<VDBL>
++ (unspec:DX [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
++ UNSPEC_LD2)
++ (const_int 0))
++ (vec_concat:<VDBL>
++ (unspec:DX [(match_dup 1)]
++ UNSPEC_LD2)
++ (const_int 0))) 0))]
++ "TARGET_SIMD"
++ "ld1\\t{%S0.1d - %T0.1d}, %1"
++ [(set_attr "simd_type" "simd_load2")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "aarch64_ld3<mode>_dreg"
++ [(set (match_operand:CI 0 "register_operand" "=w")
++ (subreg:CI
++ (vec_concat:<VRL3>
++ (vec_concat:<VRL2>
++ (vec_concat:<VDBL>
++ (unspec:VD [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
++ UNSPEC_LD3)
++ (vec_duplicate:VD (const_int 0)))
++ (vec_concat:<VDBL>
++ (unspec:VD [(match_dup 1)]
++ UNSPEC_LD3)
++ (vec_duplicate:VD (const_int 0))))
++ (vec_concat:<VDBL>
++ (unspec:VD [(match_dup 1)]
++ UNSPEC_LD3)
++ (vec_duplicate:VD (const_int 0)))) 0))]
++ "TARGET_SIMD"
++ "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
++ [(set_attr "simd_type" "simd_load3")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "aarch64_ld3<mode>_dreg"
++ [(set (match_operand:CI 0 "register_operand" "=w")
++ (subreg:CI
++ (vec_concat:<VRL3>
++ (vec_concat:<VRL2>
++ (vec_concat:<VDBL>
++ (unspec:DX [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
++ UNSPEC_LD3)
++ (const_int 0))
++ (vec_concat:<VDBL>
++ (unspec:DX [(match_dup 1)]
++ UNSPEC_LD3)
++ (const_int 0)))
++ (vec_concat:<VDBL>
++ (unspec:DX [(match_dup 1)]
++ UNSPEC_LD3)
++ (const_int 0))) 0))]
++ "TARGET_SIMD"
++ "ld1\\t{%S0.1d - %U0.1d}, %1"
++ [(set_attr "simd_type" "simd_load3")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "aarch64_ld4<mode>_dreg"
++ [(set (match_operand:XI 0 "register_operand" "=w")
++ (subreg:XI
++ (vec_concat:<VRL4>
++ (vec_concat:<VRL2>
++ (vec_concat:<VDBL>
++ (unspec:VD [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
++ UNSPEC_LD4)
++ (vec_duplicate:VD (const_int 0)))
++ (vec_concat:<VDBL>
++ (unspec:VD [(match_dup 1)]
++ UNSPEC_LD4)
++ (vec_duplicate:VD (const_int 0))))
++ (vec_concat:<VRL2>
++ (vec_concat:<VDBL>
++ (unspec:VD [(match_dup 1)]
++ UNSPEC_LD4)
++ (vec_duplicate:VD (const_int 0)))
++ (vec_concat:<VDBL>
++ (unspec:VD [(match_dup 1)]
++ UNSPEC_LD4)
++ (vec_duplicate:VD (const_int 0))))) 0))]
++ "TARGET_SIMD"
++ "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
++ [(set_attr "simd_type" "simd_load4")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "aarch64_ld4<mode>_dreg"
++ [(set (match_operand:XI 0 "register_operand" "=w")
++ (subreg:XI
++ (vec_concat:<VRL4>
++ (vec_concat:<VRL2>
++ (vec_concat:<VDBL>
++ (unspec:DX [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
++ UNSPEC_LD4)
++ (const_int 0))
++ (vec_concat:<VDBL>
++ (unspec:DX [(match_dup 1)]
++ UNSPEC_LD4)
++ (const_int 0)))
++ (vec_concat:<VRL2>
++ (vec_concat:<VDBL>
++ (unspec:DX [(match_dup 1)]
++ UNSPEC_LD4)
++ (const_int 0))
++ (vec_concat:<VDBL>
++ (unspec:DX [(match_dup 1)]
++ UNSPEC_LD4)
++ (const_int 0)))) 0))]
++ "TARGET_SIMD"
++ "ld1\\t{%S0.1d - %V0.1d}, %1"
++ [(set_attr "simd_type" "simd_load4")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_expand "aarch64_ld<VSTRUCT:nregs><VDC:mode>"
++ [(match_operand:VSTRUCT 0 "register_operand" "=w")
++ (match_operand:DI 1 "register_operand" "r")
++ (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ "TARGET_SIMD"
++{
++ enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
++ rtx mem = gen_rtx_MEM (mode, operands[1]);
++
++ emit_insn (gen_aarch64_ld<VSTRUCT:nregs><VDC:mode>_dreg (operands[0], mem));
++ DONE;
++})
++
++(define_expand "aarch64_ld<VSTRUCT:nregs><VQ:mode>"
++ [(match_operand:VSTRUCT 0 "register_operand" "=w")
++ (match_operand:DI 1 "register_operand" "r")
++ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ "TARGET_SIMD"
++{
++ enum machine_mode mode = <VSTRUCT:MODE>mode;
++ rtx mem = gen_rtx_MEM (mode, operands[1]);
++
++ emit_insn (gen_vec_load_lanes<VSTRUCT:mode><VQ:mode> (operands[0], mem));
++ DONE;
++})
++
++;; Expanders for builtins to extract vector registers from large
++;; opaque integer modes.
++
++;; D-register list.
++
++(define_expand "aarch64_get_dreg<VSTRUCT:mode><VDC:mode>"
++ [(match_operand:VDC 0 "register_operand" "=w")
++ (match_operand:VSTRUCT 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ int part = INTVAL (operands[2]);
++ rtx temp = gen_reg_rtx (<VDC:VDBL>mode);
++ int offset = part * 16;
++
++ emit_move_insn (temp, gen_rtx_SUBREG (<VDC:VDBL>mode, operands[1], offset));
++ emit_move_insn (operands[0], gen_lowpart (<VDC:MODE>mode, temp));
++ DONE;
++})
++
++;; Q-register list.
++
++(define_expand "aarch64_get_qreg<VSTRUCT:mode><VQ:mode>"
++ [(match_operand:VQ 0 "register_operand" "=w")
++ (match_operand:VSTRUCT 1 "register_operand" "w")
++ (match_operand:SI 2 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ int part = INTVAL (operands[2]);
++ int offset = part * 16;
++
++ emit_move_insn (operands[0],
++ gen_rtx_SUBREG (<VQ:MODE>mode, operands[1], offset));
++ DONE;
++})
++
++;; Permuted-store expanders for neon intrinsics.
++
++;; Permute instructions
++
++;; vec_perm support
++
++(define_expand "vec_perm_const<mode>"
++ [(match_operand:VALL 0 "register_operand")
++ (match_operand:VALL 1 "register_operand")
++ (match_operand:VALL 2 "register_operand")
++ (match_operand:<V_cmp_result> 3)]
++ "TARGET_SIMD"
++{
++ if (aarch64_expand_vec_perm_const (operands[0], operands[1],
++ operands[2], operands[3]))
++ DONE;
++ else
++ FAIL;
++})
++
++(define_expand "vec_perm<mode>"
++ [(match_operand:VB 0 "register_operand")
++ (match_operand:VB 1 "register_operand")
++ (match_operand:VB 2 "register_operand")
++ (match_operand:VB 3 "register_operand")]
++ "TARGET_SIMD"
++{
++ aarch64_expand_vec_perm (operands[0], operands[1],
++ operands[2], operands[3]);
++ DONE;
++})
++
++(define_insn "aarch64_tbl1<mode>"
++ [(set (match_operand:VB 0 "register_operand" "=w")
++ (unspec:VB [(match_operand:V16QI 1 "register_operand" "w")
++ (match_operand:VB 2 "register_operand" "w")]
++ UNSPEC_TBL))]
++ "TARGET_SIMD"
++ "tbl\\t%0.<Vtype>, {%1.16b}, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_tbl")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++;; Two source registers.
++
++(define_insn "aarch64_tbl2v16qi"
++ [(set (match_operand:V16QI 0 "register_operand" "=w")
++ (unspec:V16QI [(match_operand:OI 1 "register_operand" "w")
++ (match_operand:V16QI 2 "register_operand" "w")]
++ UNSPEC_TBL))]
++ "TARGET_SIMD"
++ "tbl\\t%0.16b, {%S1.16b - %T1.16b}, %2.16b"
++ [(set_attr "simd_type" "simd_tbl")
++ (set_attr "simd_mode" "V16QI")]
++)
++
++(define_insn_and_split "aarch64_combinev16qi"
++ [(set (match_operand:OI 0 "register_operand" "=w")
++ (unspec:OI [(match_operand:V16QI 1 "register_operand" "w")
++ (match_operand:V16QI 2 "register_operand" "w")]
++ UNSPEC_CONCAT))]
++ "TARGET_SIMD"
++ "#"
++ "&& reload_completed"
++ [(const_int 0)]
++{
++ aarch64_split_combinev16qi (operands);
++ DONE;
++})
++
++(define_insn "aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>"
++ [(set (match_operand:VALL 0 "register_operand" "=w")
++ (unspec:VALL [(match_operand:VALL 1 "register_operand" "w")
++ (match_operand:VALL 2 "register_operand" "w")]
++ PERMUTE))]
++ "TARGET_SIMD"
++ "<PERMUTE:perm_insn><PERMUTE:perm_hilo>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
++ [(set_attr "simd_type" "simd_<PERMUTE:perm_insn>")
++ (set_attr "simd_mode" "<MODE>")]
++)
++
++(define_insn "aarch64_st2<mode>_dreg"
++ [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
++ (unspec:TI [(match_operand:OI 1 "register_operand" "w")
++ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_ST2))]
++ "TARGET_SIMD"
++ "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
++ [(set_attr "simd_type" "simd_store2")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "aarch64_st2<mode>_dreg"
++ [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
++ (unspec:TI [(match_operand:OI 1 "register_operand" "w")
++ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_ST2))]
++ "TARGET_SIMD"
++ "st1\\t{%S1.1d - %T1.1d}, %0"
++ [(set_attr "simd_type" "simd_store2")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "aarch64_st3<mode>_dreg"
++ [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
++ (unspec:EI [(match_operand:CI 1 "register_operand" "w")
++ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_ST3))]
++ "TARGET_SIMD"
++ "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
++ [(set_attr "simd_type" "simd_store3")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "aarch64_st3<mode>_dreg"
++ [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
++ (unspec:EI [(match_operand:CI 1 "register_operand" "w")
++ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_ST3))]
++ "TARGET_SIMD"
++ "st1\\t{%S1.1d - %U1.1d}, %0"
++ [(set_attr "simd_type" "simd_store3")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "aarch64_st4<mode>_dreg"
++ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
++ (unspec:OI [(match_operand:XI 1 "register_operand" "w")
++ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_ST4))]
++ "TARGET_SIMD"
++ "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
++ [(set_attr "simd_type" "simd_store4")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_insn "aarch64_st4<mode>_dreg"
++ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
++ (unspec:OI [(match_operand:XI 1 "register_operand" "w")
++ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ UNSPEC_ST4))]
++ "TARGET_SIMD"
++ "st1\\t{%S1.1d - %V1.1d}, %0"
++ [(set_attr "simd_type" "simd_store4")
++ (set_attr "simd_mode" "<MODE>")])
++
++(define_expand "aarch64_st<VSTRUCT:nregs><VDC:mode>"
++ [(match_operand:DI 0 "register_operand" "r")
++ (match_operand:VSTRUCT 1 "register_operand" "w")
++ (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ "TARGET_SIMD"
++{
++ enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
++ rtx mem = gen_rtx_MEM (mode, operands[0]);
++
++ emit_insn (gen_aarch64_st<VSTRUCT:nregs><VDC:mode>_dreg (mem, operands[1]));
++ DONE;
++})
++
++(define_expand "aarch64_st<VSTRUCT:nregs><VQ:mode>"
++ [(match_operand:DI 0 "register_operand" "r")
++ (match_operand:VSTRUCT 1 "register_operand" "w")
++ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
++ "TARGET_SIMD"
++{
++ enum machine_mode mode = <VSTRUCT:MODE>mode;
++ rtx mem = gen_rtx_MEM (mode, operands[0]);
++
++ emit_insn (gen_vec_store_lanes<VSTRUCT:mode><VQ:mode> (mem, operands[1]));
++ DONE;
++})
++
++;; Expander for builtins to insert vector registers into large
++;; opaque integer modes.
++
++;; Q-register list. We don't need a D-reg inserter as we zero
++;; extend them in arm_neon.h and insert the resulting Q-regs.
++
++(define_expand "aarch64_set_qreg<VSTRUCT:mode><VQ:mode>"
++ [(match_operand:VSTRUCT 0 "register_operand" "+w")
++ (match_operand:VSTRUCT 1 "register_operand" "0")
++ (match_operand:VQ 2 "register_operand" "w")
++ (match_operand:SI 3 "immediate_operand" "i")]
++ "TARGET_SIMD"
++{
++ int part = INTVAL (operands[3]);
++ int offset = part * 16;
++
++ emit_move_insn (operands[0], operands[1]);
++ emit_move_insn (gen_rtx_SUBREG (<VQ:MODE>mode, operands[0], offset),
++ operands[2]);
++ DONE;
++})
++
++;; Standard pattern name vec_init<mode>.
++
++(define_expand "vec_init<mode>"
++ [(match_operand:VALL 0 "register_operand" "")
++ (match_operand 1 "" "")]
++ "TARGET_SIMD"
++{
++ aarch64_expand_vector_init (operands[0], operands[1]);
++ DONE;
++})
++
++(define_insn "*aarch64_simd_ld1r<mode>"
++ [(set (match_operand:VALLDI 0 "register_operand" "=w")
++ (vec_duplicate:VALLDI
++ (match_operand:<VEL> 1 "aarch64_simd_struct_operand" "Utv")))]
++ "TARGET_SIMD"
++ "ld1r\\t{%0.<Vtype>}, %1"
++ [(set_attr "simd_type" "simd_load1r")
++ (set_attr "simd_mode" "<MODE>")])
+--- a/src/gcc/config/aarch64/aarch64-tune.md
++++ b/src/gcc/config/aarch64/aarch64-tune.md
+@@ -0,0 +1,5 @@
++;; -*- buffer-read-only: t -*-
++;; Generated automatically by gentune.sh from aarch64-cores.def
++(define_attr "tune"
++ "large,small"
++ (const (symbol_ref "((enum attr_tune) aarch64_tune)")))
--- a/src/gcc/config/aarch64/aarch64.c
+++ b/src/gcc/config/aarch64/aarch64.c
@@ -0,0 +1,7972 @@
@@ -9904,7 +15104,7 @@
+
+/* Implement REGNO_REG_CLASS. */
+
-+unsigned
++enum reg_class
+aarch64_regno_regclass (unsigned regno)
+{
+ if (GP_REGNUM_P (regno))
@@ -14202,258 +19402,6 @@
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-aarch64.h"
---- a/src/gcc/config/aarch64/aarch64-cores.def
-+++ b/src/gcc/config/aarch64/aarch64-cores.def
-@@ -0,0 +1,38 @@
-+/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
-+ Contributed by ARM Ltd.
-+
-+ This file is part of GCC.
-+
-+ GCC is free software; you can redistribute it and/or modify it
-+ under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3, or (at your option)
-+ any later version.
-+
-+ GCC is distributed in the hope that it will be useful, but
-+ WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with GCC; see the file COPYING3. If not see
-+ <http://www.gnu.org/licenses/>. */
-+
-+/* This is a list of cores that implement AArch64.
-+
-+ Before using #include to read this file, define a macro:
-+
-+ AARCH64_CORE(CORE_NAME, CORE_IDENT, ARCH, FLAGS, COSTS)
-+
-+ The CORE_NAME is the name of the core, represented as a string constant.
-+ The CORE_IDENT is the name of the core, represented as an identifier.
-+ ARCH is the architecture revision implemented by the chip.
-+ FLAGS are the bitwise-or of the traits that apply to that core.
-+ This need not include flags implied by the architecture.
-+ COSTS is the name of the rtx_costs routine to use. */
-+
-+/* V8 Architecture Processors.
-+ This list currently contains example CPUs that implement AArch64, and
-+ therefore serves as a template for adding more CPUs in the future. */
-+
-+AARCH64_CORE("example-1", large, 8, AARCH64_FL_FPSIMD, generic)
-+AARCH64_CORE("example-2", small, 8, AARCH64_FL_FPSIMD, generic)
---- a/src/gcc/config/aarch64/aarch64-elf.h
-+++ b/src/gcc/config/aarch64/aarch64-elf.h
-@@ -0,0 +1,132 @@
-+/* Machine description for AArch64 architecture.
-+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
-+ Contributed by ARM Ltd.
-+
-+ This file is part of GCC.
-+
-+ GCC is free software; you can redistribute it and/or modify it
-+ under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3, or (at your option)
-+ any later version.
-+
-+ GCC is distributed in the hope that it will be useful, but
-+ WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with GCC; see the file COPYING3. If not see
-+ <http://www.gnu.org/licenses/>. */
-+
-+#ifndef GCC_AARCH64_ELF_H
-+#define GCC_AARCH64_ELF_H
-+
-+
-+#define ASM_OUTPUT_LABELREF(FILE, NAME) \
-+ aarch64_asm_output_labelref (FILE, NAME)
-+
-+#define ASM_OUTPUT_DEF(FILE, NAME1, NAME2) \
-+ do \
-+ { \
-+ assemble_name (FILE, NAME1); \
-+ fputs (" = ", FILE); \
-+ assemble_name (FILE, NAME2); \
-+ fputc ('\n', FILE); \
-+ } while (0)
-+
-+#define TEXT_SECTION_ASM_OP "\t.text"
-+#define DATA_SECTION_ASM_OP "\t.data"
-+#define BSS_SECTION_ASM_OP "\t.bss"
-+
-+#define CTORS_SECTION_ASM_OP "\t.section\t.init_array,\"aw\",%init_array"
-+#define DTORS_SECTION_ASM_OP "\t.section\t.fini_array,\"aw\",%fini_array"
-+
-+#undef INIT_SECTION_ASM_OP
-+#undef FINI_SECTION_ASM_OP
-+#define INIT_ARRAY_SECTION_ASM_OP CTORS_SECTION_ASM_OP
-+#define FINI_ARRAY_SECTION_ASM_OP DTORS_SECTION_ASM_OP
-+
-+/* Since we use .init_array/.fini_array we don't need the markers at
-+ the start and end of the ctors/dtors arrays. */
-+#define CTOR_LIST_BEGIN asm (CTORS_SECTION_ASM_OP)
-+#define CTOR_LIST_END /* empty */
-+#define DTOR_LIST_BEGIN asm (DTORS_SECTION_ASM_OP)
-+#define DTOR_LIST_END /* empty */
-+
-+#undef TARGET_ASM_CONSTRUCTOR
-+#define TARGET_ASM_CONSTRUCTOR aarch64_elf_asm_constructor
-+
-+#undef TARGET_ASM_DESTRUCTOR
-+#define TARGET_ASM_DESTRUCTOR aarch64_elf_asm_destructor
-+
-+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
-+/* Support for -falign-* switches. Use .p2align to ensure that code
-+ sections are padded with NOP instructions, rather than zeros. */
-+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE, LOG, MAX_SKIP) \
-+ do \
-+ { \
-+ if ((LOG) != 0) \
-+ { \
-+ if ((MAX_SKIP) == 0) \
-+ fprintf ((FILE), "\t.p2align %d\n", (int) (LOG)); \
-+ else \
-+ fprintf ((FILE), "\t.p2align %d,,%d\n", \
-+ (int) (LOG), (int) (MAX_SKIP)); \
-+ } \
-+ } while (0)
-+
-+#endif /* HAVE_GAS_MAX_SKIP_P2ALIGN */
-+
-+#define JUMP_TABLES_IN_TEXT_SECTION 0
-+
-+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
-+ do { \
-+ switch (GET_MODE (BODY)) \
-+ { \
-+ case QImode: \
-+ asm_fprintf (STREAM, "\t.byte\t(%LL%d - %LLrtx%d) / 4\n", \
-+ VALUE, REL); \
-+ break; \
-+ case HImode: \
-+ asm_fprintf (STREAM, "\t.2byte\t(%LL%d - %LLrtx%d) / 4\n", \
-+ VALUE, REL); \
-+ break; \
-+ case SImode: \
-+ case DImode: /* See comment in aarch64_output_casesi. */ \
-+ asm_fprintf (STREAM, "\t.word\t(%LL%d - %LLrtx%d) / 4\n", \
-+ VALUE, REL); \
-+ break; \
-+ default: \
-+ gcc_unreachable (); \
-+ } \
-+ } while (0)
-+
-+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
-+ fprintf(STREAM, "\t.align\t%d\n", (int)POWER)
-+
-+#define ASM_COMMENT_START "//"
-+
-+#define REGISTER_PREFIX ""
-+#define LOCAL_LABEL_PREFIX "."
-+#define USER_LABEL_PREFIX ""
-+
-+#define GLOBAL_ASM_OP "\t.global\t"
-+
-+#ifndef ASM_SPEC
-+#define ASM_SPEC "\
-+%{mbig-endian:-EB} \
-+%{mlittle-endian:-EL} \
-+%{mcpu=*:-mcpu=%*} \
-+%{march=*:-march=%*}"
-+#endif
-+
-+#undef TYPE_OPERAND_FMT
-+#define TYPE_OPERAND_FMT "%%%s"
-+
-+#undef TARGET_ASM_NAMED_SECTION
-+#define TARGET_ASM_NAMED_SECTION aarch64_elf_asm_named_section
-+
-+/* Stabs debug not required. */
-+#undef DBX_DEBUGGING_INFO
-+
-+#endif /* GCC_AARCH64_ELF_H */
---- a/src/gcc/config/aarch64/aarch64-elf-raw.h
-+++ b/src/gcc/config/aarch64/aarch64-elf-raw.h
-@@ -0,0 +1,32 @@
-+/* Machine description for AArch64 architecture.
-+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
-+ Contributed by ARM Ltd.
-+
-+ This file is part of GCC.
-+
-+ GCC is free software; you can redistribute it and/or modify it
-+ under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3, or (at your option)
-+ any later version.
-+
-+ GCC is distributed in the hope that it will be useful, but
-+ WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with GCC; see the file COPYING3. If not see
-+ <http://www.gnu.org/licenses/>. */
-+
-+/* Support for bare-metal builds. */
-+#ifndef GCC_AARCH64_ELF_RAW_H
-+#define GCC_AARCH64_ELF_RAW_H
-+
-+#define STARTFILE_SPEC " crti%O%s crtbegin%O%s crt0%O%s"
-+#define ENDFILE_SPEC " crtend%O%s crtn%O%s"
-+
-+#ifndef LINK_SPEC
-+#define LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} -X"
-+#endif
-+
-+#endif /* GCC_AARCH64_ELF_RAW_H */
---- a/src/gcc/config/aarch64/aarch64-generic.md
-+++ b/src/gcc/config/aarch64/aarch64-generic.md
-@@ -0,0 +1,38 @@
-+;; Machine description for AArch64 architecture.
-+;; Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
-+;; Contributed by ARM Ltd.
-+;;
-+;; This file is part of GCC.
-+;;
-+;; GCC is free software; you can redistribute it and/or modify it
-+;; under the terms of the GNU General Public License as published by
-+;; the Free Software Foundation; either version 3, or (at your option)
-+;; any later version.
-+;;
-+;; GCC is distributed in the hope that it will be useful, but
-+;; WITHOUT ANY WARRANTY; without even the implied warranty of
-+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+;; General Public License for more details.
-+;;
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3. If not see
-+;; <http://www.gnu.org/licenses/>.
-+
-+;; Generic scheduler
-+
-+(define_automaton "aarch64")
-+
-+(define_cpu_unit "core" "aarch64")
-+
-+(define_attr "is_load" "yes,no"
-+ (if_then_else (eq_attr "v8type" "fpsimd_load,fpsimd_load2,load1,load2")
-+ (const_string "yes")
-+ (const_string "no")))
-+
-+(define_insn_reservation "load" 2
-+ (eq_attr "is_load" "yes")
-+ "core")
-+
-+(define_insn_reservation "nonload" 1
-+ (eq_attr "is_load" "no")
-+ "core")
--- a/src/gcc/config/aarch64/aarch64.h
+++ b/src/gcc/config/aarch64/aarch64.h
@@ -0,0 +1,823 @@
@@ -15280,53 +20228,6 @@
+ || (MODE) == V4SFmode || (MODE) == V2DImode || mode == V2DFmode)
+
+#endif /* GCC_AARCH64_H */
---- a/src/gcc/config/aarch64/aarch64-linux.h
-+++ b/src/gcc/config/aarch64/aarch64-linux.h
-@@ -0,0 +1,44 @@
-+/* Machine description for AArch64 architecture.
-+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
-+ Contributed by ARM Ltd.
-+
-+ This file is part of GCC.
-+
-+ GCC is free software; you can redistribute it and/or modify it
-+ under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3, or (at your option)
-+ any later version.
-+
-+ GCC is distributed in the hope that it will be useful, but
-+ WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with GCC; see the file COPYING3. If not see
-+ <http://www.gnu.org/licenses/>. */
-+
-+#ifndef GCC_AARCH64_LINUX_H
-+#define GCC_AARCH64_LINUX_H
-+
-+#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-aarch64.so.1"
-+
-+#define LINUX_TARGET_LINK_SPEC "%{h*} \
-+ %{static:-Bstatic} \
-+ %{shared:-shared} \
-+ %{symbolic:-Bsymbolic} \
-+ %{rdynamic:-export-dynamic} \
-+ -dynamic-linker " GNU_USER_DYNAMIC_LINKER " \
-+ -X \
-+ %{mbig-endian:-EB} %{mlittle-endian:-EL}"
-+
-+#define LINK_SPEC LINUX_TARGET_LINK_SPEC
-+
-+#define TARGET_OS_CPP_BUILTINS() \
-+ do \
-+ { \
-+ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
-+ } \
-+ while (0)
-+
-+#endif /* GCC_AARCH64_LINUX_H */
--- a/src/gcc/config/aarch64/aarch64.md
+++ b/src/gcc/config/aarch64/aarch64.md
@@ -0,0 +1,3216 @@
@@ -18546,63 +23447,6 @@
+
+;; Atomic Operations
+(include "atomics.md")
---- a/src/gcc/config/aarch64/aarch64-modes.def
-+++ b/src/gcc/config/aarch64/aarch64-modes.def
-@@ -0,0 +1,54 @@
-+/* Machine description for AArch64 architecture.
-+ Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
-+ Contributed by ARM Ltd.
-+
-+ This file is part of GCC.
-+
-+ GCC is free software; you can redistribute it and/or modify it
-+ under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3, or (at your option)
-+ any later version.
-+
-+ GCC is distributed in the hope that it will be useful, but
-+ WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with GCC; see the file COPYING3. If not see
-+ <http://www.gnu.org/licenses/>. */
-+
-+CC_MODE (CCFP);
-+CC_MODE (CCFPE);
-+CC_MODE (CC_SWP);
-+CC_MODE (CC_ZESWP); /* zero-extend LHS (but swap to make it RHS). */
-+CC_MODE (CC_SESWP); /* sign-extend LHS (but swap to make it RHS). */
-+CC_MODE (CC_NZ); /* Only N and Z bits of condition flags are valid. */
-+
-+/* Vector modes. */
-+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI. */
-+VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI. */
-+VECTOR_MODES (FLOAT, 8); /* V2SF. */
-+VECTOR_MODES (FLOAT, 16); /* V4SF V2DF. */
-+
-+/* Oct Int: 256-bit integer mode needed for 32-byte vector arguments. */
-+INT_MODE (OI, 32);
-+
-+/* Opaque integer modes for 3, 6 or 8 Neon double registers (2 is
-+ TImode). */
-+INT_MODE (EI, 24);
-+INT_MODE (CI, 48);
-+INT_MODE (XI, 64);
-+
-+/* Vector modes for register lists. */
-+VECTOR_MODES (INT, 32); /* V32QI V16HI V8SI V4DI. */
-+VECTOR_MODES (FLOAT, 32); /* V8SF V4DF. */
-+
-+VECTOR_MODES (INT, 48); /* V32QI V16HI V8SI V4DI. */
-+VECTOR_MODES (FLOAT, 48); /* V8SF V4DF. */
-+
-+VECTOR_MODES (INT, 64); /* V32QI V16HI V8SI V4DI. */
-+VECTOR_MODES (FLOAT, 64); /* V8SF V4DF. */
-+
-+/* Quad float: 128-bit floating mode for long doubles. */
-+FLOAT_MODE (TF, 16, ieee_quad_format);
--- a/src/gcc/config/aarch64/aarch64.opt
+++ b/src/gcc/config/aarch64/aarch64.opt
@@ -0,0 +1,100 @@
@@ -18706,4358 +23550,6 @@
+mtune=
+Target RejectNegative Joined Var(aarch64_tune_string)
+-mtune=CPU Optimize for CPU
---- a/src/gcc/config/aarch64/aarch64-option-extensions.def
-+++ b/src/gcc/config/aarch64/aarch64-option-extensions.def
-@@ -0,0 +1,37 @@
-+/* Copyright (C) 2012 Free Software Foundation, Inc.
-+ Contributed by ARM Ltd.
-+
-+ This file is part of GCC.
-+
-+ GCC is free software; you can redistribute it and/or modify it
-+ under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3, or (at your option)
-+ any later version.
-+
-+ GCC is distributed in the hope that it will be useful, but
-+ WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with GCC; see the file COPYING3. If not see
-+ <http://www.gnu.org/licenses/>. */
-+
-+/* This is a list of ISA extentsions in AArch64.
-+
-+ Before using #include to read this file, define a macro:
-+
-+ AARCH64_OPT_EXTENSION(EXT_NAME, FLAGS_ON, FLAGS_OFF)
-+
-+ EXT_NAME is the name of the extension, represented as a string constant.
-+ FLAGS_ON are the bitwise-or of the features that the extension adds.
-+ FLAGS_OFF are the bitwise-or of the features that the extension removes. */
-+
-+/* V8 Architecture Extensions.
-+ This list currently contains example extensions for CPUs that implement
-+ AArch64, and therefore serves as a template for adding more CPUs in the
-+ future. */
-+
-+AARCH64_OPT_EXTENSION("fp", AARCH64_FL_FP, AARCH64_FL_FPSIMD | AARCH64_FL_CRYPTO)
-+AARCH64_OPT_EXTENSION("simd", AARCH64_FL_FPSIMD, AARCH64_FL_SIMD | AARCH64_FL_CRYPTO)
-+AARCH64_OPT_EXTENSION("crypto", AARCH64_FL_CRYPTO | AARCH64_FL_FPSIMD, AARCH64_FL_CRYPTO)
---- a/src/gcc/config/aarch64/aarch64-opts.h
-+++ b/src/gcc/config/aarch64/aarch64-opts.h
-@@ -0,0 +1,64 @@
-+/* Copyright (C) 2011, 2012 Free Software Foundation, Inc.
-+ Contributed by ARM Ltd.
-+
-+ This file is part of GCC.
-+
-+ GCC is free software; you can redistribute it and/or modify it
-+ under the terms of the GNU General Public License as published
-+ by the Free Software Foundation; either version 3, or (at your
-+ option) any later version.
-+
-+ GCC is distributed in the hope that it will be useful, but WITHOUT
-+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
-+ License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with GCC; see the file COPYING3. If not see
-+ <http://www.gnu.org/licenses/>. */
-+
-+/* Definitions for option handling for AArch64. */
-+
-+#ifndef GCC_AARCH64_OPTS_H
-+#define GCC_AARCH64_OPTS_H
-+
-+/* The various cores that implement AArch64. */
-+enum aarch64_processor
-+{
-+#define AARCH64_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
-+ IDENT,
-+#include "aarch64-cores.def"
-+#undef AARCH64_CORE
-+ /* Used to indicate that no processor has been specified. */
-+ generic,
-+ /* Used to mark the end of the processor table. */
-+ aarch64_none
-+};
-+
-+/* TLS types. */
-+enum aarch64_tls_type {
-+ TLS_TRADITIONAL,
-+ TLS_DESCRIPTORS
-+};
-+
-+/* The code model defines the address generation strategy.
-+ Most have a PIC and non-PIC variant. */
-+enum aarch64_code_model {
-+ /* Static code and data fit within a 1MB region.
-+ Not fully implemented, mostly treated as SMALL. */
-+ AARCH64_CMODEL_TINY,
-+ /* Static code, data and GOT/PLT fit within a 1MB region.
-+ Not fully implemented, mostly treated as SMALL_PIC. */
-+ AARCH64_CMODEL_TINY_PIC,
-+ /* Static code and data fit within a 4GB region.
-+ The default non-PIC code model. */
-+ AARCH64_CMODEL_SMALL,
-+ /* Static code, data and GOT/PLT fit within a 4GB region.
-+ The default PIC code model. */
-+ AARCH64_CMODEL_SMALL_PIC,
-+ /* No assumptions about addresses of code and data.
-+ The PIC variant is not yet implemented. */
-+ AARCH64_CMODEL_LARGE
-+};
-+
-+#endif
---- a/src/gcc/config/aarch64/aarch64-protos.h
-+++ b/src/gcc/config/aarch64/aarch64-protos.h
-@@ -0,0 +1,254 @@
-+/* Machine description for AArch64 architecture.
-+ Copyright (C) 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
-+ Contributed by ARM Ltd.
-+
-+ This file is part of GCC.
-+
-+ GCC is free software; you can redistribute it and/or modify it
-+ under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3, or (at your option)
-+ any later version.
-+
-+ GCC is distributed in the hope that it will be useful, but
-+ WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with GCC; see the file COPYING3. If not see
-+ <http://www.gnu.org/licenses/>. */
-+
-+
-+#ifndef GCC_AARCH64_PROTOS_H
-+#define GCC_AARCH64_PROTOS_H
-+
-+/*
-+ SYMBOL_CONTEXT_ADR
-+ The symbol is used in a load-address operation.
-+ SYMBOL_CONTEXT_MEM
-+ The symbol is used as the address in a MEM.
-+ */
-+enum aarch64_symbol_context
-+{
-+ SYMBOL_CONTEXT_MEM,
-+ SYMBOL_CONTEXT_ADR
-+};
-+
-+/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
-+ high and lo relocs that calculate the base address using a PC
-+ relative reloc.
-+ So to get the address of foo, we generate
-+ adrp x0, foo
-+ add x0, x0, :lo12:foo
-+
-+ To load or store something to foo, we could use the corresponding
-+ load store variants that generate an
-+ ldr x0, [x0,:lo12:foo]
-+ or
-+ str x1, [x0, :lo12:foo]
-+
-+ This corresponds to the small code model of the compiler.
-+
-+ SYMBOL_SMALL_GOT: Similar to the one above but this
-+ gives us the GOT entry of the symbol being referred to :
-+ Thus calculating the GOT entry for foo is done using the
-+ following sequence of instructions. The ADRP instruction
-+ gets us to the page containing the GOT entry of the symbol
-+ and the got_lo12 gets us the actual offset in it.
-+
-+ adrp x0, :got:foo
-+ ldr x0, [x0, :gotoff_lo12:foo]
-+
-+ This corresponds to the small PIC model of the compiler.
-+
-+ SYMBOL_SMALL_TLSGD
-+ SYMBOL_SMALL_TLSDESC
-+ SYMBOL_SMALL_GOTTPREL
-+ SYMBOL_SMALL_TPREL
-+ Each of of these represents a thread-local symbol, and corresponds to the
-+ thread local storage relocation operator for the symbol being referred to.
-+
-+ SYMBOL_FORCE_TO_MEM : Global variables are addressed using
-+ constant pool. All variable addresses are spilled into constant
-+ pools. The constant pools themselves are addressed using PC
-+ relative accesses. This only works for the large code model.
-+ */
-+enum aarch64_symbol_type
-+{
-+ SYMBOL_SMALL_ABSOLUTE,
-+ SYMBOL_SMALL_GOT,
-+ SYMBOL_SMALL_TLSGD,
-+ SYMBOL_SMALL_TLSDESC,
-+ SYMBOL_SMALL_GOTTPREL,
-+ SYMBOL_SMALL_TPREL,
-+ SYMBOL_FORCE_TO_MEM
-+};
-+
-+/* A set of tuning parameters contains references to size and time
-+ cost models and vectors for address cost calculations, register
-+ move costs and memory move costs. */
-+
-+/* Extra costs for specific insns. Only records the cost above a
-+ single insn. */
-+
-+struct cpu_rtx_cost_table
-+{
-+ const int memory_load;
-+ const int memory_store;
-+ const int register_shift;
-+ const int int_divide;
-+ const int float_divide;
-+ const int double_divide;
-+ const int int_multiply;
-+ const int int_multiply_extend;
-+ const int int_multiply_add;
-+ const int int_multiply_extend_add;
-+ const int float_multiply;
-+ const int double_multiply;
-+};
-+
-+/* Additional cost for addresses. */
-+struct cpu_addrcost_table
-+{
-+ const int pre_modify;
-+ const int post_modify;
-+ const int register_offset;
-+ const int register_extend;
-+ const int imm_offset;
-+};
-+
-+/* Additional costs for register copies. Cost is for one register. */
-+struct cpu_regmove_cost
-+{
-+ const int GP2GP;
-+ const int GP2FP;
-+ const int FP2GP;
-+ const int FP2FP;
-+};
-+
-+struct tune_params
-+{
-+ const struct cpu_rtx_cost_table *const insn_extra_cost;
-+ const struct cpu_addrcost_table *const addr_cost;
-+ const struct cpu_regmove_cost *const regmove_cost;
-+ const int memmov_cost;
-+};
-+
-+HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
-+bool aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode);
-+bool aarch64_constant_address_p (rtx);
-+bool aarch64_float_const_zero_rtx_p (rtx);
-+bool aarch64_function_arg_regno_p (unsigned);
-+bool aarch64_gen_movmemqi (rtx *);
-+bool aarch64_is_extend_from_extract (enum machine_mode, rtx, rtx);
-+bool aarch64_is_long_call_p (rtx);
-+bool aarch64_label_mentioned_p (rtx);
-+bool aarch64_legitimate_pic_operand_p (rtx);
-+bool aarch64_move_imm (HOST_WIDE_INT, enum machine_mode);
-+bool aarch64_pad_arg_upward (enum machine_mode, const_tree);
-+bool aarch64_pad_reg_upward (enum machine_mode, const_tree, bool);
-+bool aarch64_regno_ok_for_base_p (int, bool);
-+bool aarch64_regno_ok_for_index_p (int, bool);
-+bool aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode);
-+bool aarch64_simd_imm_zero_p (rtx, enum machine_mode);
-+bool aarch64_simd_shift_imm_p (rtx, enum machine_mode, bool);
-+bool aarch64_symbolic_address_p (rtx);
-+bool aarch64_symbolic_constant_p (rtx, enum aarch64_symbol_context,
-+ enum aarch64_symbol_type *);
-+bool aarch64_uimm12_shift (HOST_WIDE_INT);
-+const char *aarch64_output_casesi (rtx *);
-+enum aarch64_symbol_type aarch64_classify_symbol (rtx,
-+ enum aarch64_symbol_context);
-+enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
-+int aarch64_asm_preferred_eh_data_format (int, int);
-+int aarch64_hard_regno_mode_ok (unsigned, enum machine_mode);
-+int aarch64_hard_regno_nregs (unsigned, enum machine_mode);
-+int aarch64_simd_attr_length_move (rtx);
-+int aarch64_simd_immediate_valid_for_move (rtx, enum machine_mode, rtx *,
-+ int *, unsigned char *, int *,
-+ int *);
-+int aarch64_uxt_size (int, HOST_WIDE_INT);
-+rtx aarch64_final_eh_return_addr (void);
-+rtx aarch64_legitimize_reload_address (rtx *, enum machine_mode, int, int, int);
-+const char *aarch64_output_move_struct (rtx *operands);
-+rtx aarch64_return_addr (int, rtx);
-+rtx aarch64_simd_gen_const_vector_dup (enum machine_mode, int);
-+bool aarch64_simd_mem_operand_p (rtx);
-+rtx aarch64_simd_vect_par_cnst_half (enum machine_mode, bool);
-+rtx aarch64_tls_get_addr (void);
-+unsigned aarch64_dbx_register_number (unsigned);
-+unsigned aarch64_regno_regclass (unsigned);
-+unsigned aarch64_trampoline_size (void);
-+void aarch64_asm_output_labelref (FILE *, const char *);
-+void aarch64_elf_asm_named_section (const char *, unsigned, tree);
-+void aarch64_expand_epilogue (bool);
-+void aarch64_expand_mov_immediate (rtx, rtx);
-+void aarch64_expand_prologue (void);
-+void aarch64_expand_vector_init (rtx, rtx);
-+void aarch64_function_profiler (FILE *, int);
-+void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
-+ const_tree, unsigned);
-+void aarch64_init_expanders (void);
-+void aarch64_print_operand (FILE *, rtx, char);
-+void aarch64_print_operand_address (FILE *, rtx);
-+
-+/* Initialize builtins for SIMD intrinsics. */
-+void init_aarch64_simd_builtins (void);
-+
-+void aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
-+void aarch64_simd_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);
-+
-+/* Emit code to place a AdvSIMD pair result in memory locations (with equal
-+ registers). */
-+void aarch64_simd_emit_pair_result_insn (enum machine_mode,
-+ rtx (*intfn) (rtx, rtx, rtx), rtx,
-+ rtx);
-+
-+/* Expand builtins for SIMD intrinsics. */
-+rtx aarch64_simd_expand_builtin (int, tree, rtx);
-+
-+void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
-+
-+/* Emit code for reinterprets. */
-+void aarch64_simd_reinterpret (rtx, rtx);
-+
-+void aarch64_split_128bit_move (rtx, rtx);
-+
-+bool aarch64_split_128bit_move_p (rtx, rtx);
-+
-+/* Check for a legitimate floating point constant for FMOV. */
-+bool aarch64_float_const_representable_p (rtx);
-+
-+#if defined (RTX_CODE)
-+
-+bool aarch64_legitimate_address_p (enum machine_mode, rtx, RTX_CODE, bool);
-+enum machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx);
-+rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx);
-+
-+void aarch64_expand_compare_and_swap (rtx op[]);
-+void aarch64_split_compare_and_swap (rtx op[]);
-+void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);
-+
-+#endif /* RTX_CODE */
-+
-+rtx aarch64_load_tp (rtx target);
-+void aarch64_init_builtins (void);
-+rtx aarch64_expand_builtin (tree exp,
-+ rtx target,
-+ rtx subtarget ATTRIBUTE_UNUSED,
-+ enum machine_mode mode ATTRIBUTE_UNUSED,
-+ int ignore ATTRIBUTE_UNUSED);
-+tree aarch64_builtin_decl (unsigned, bool ATTRIBUTE_UNUSED);
-+
-+tree
-+aarch64_builtin_vectorized_function (tree fndecl,
-+ tree type_out,
-+ tree type_in);
-+
-+extern void aarch64_split_combinev16qi (rtx operands[3]);
-+extern void aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
-+extern bool
-+aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);
-+
-+char* aarch64_output_simd_mov_immediate (rtx *, enum machine_mode, unsigned);
-+#endif /* GCC_AARCH64_PROTOS_H */
---- a/src/gcc/config/aarch64/aarch64-simd-builtins.def
-+++ b/src/gcc/config/aarch64/aarch64-simd-builtins.def
-@@ -0,0 +1,258 @@
-+/* Machine description for AArch64 architecture.
-+ Copyright (C) 2012-2013 Free Software Foundation, Inc.
-+ Contributed by ARM Ltd.
-+
-+ This file is part of GCC.
-+
-+ GCC is free software; you can redistribute it and/or modify it
-+ under the terms of the GNU General Public License as published by
-+ the Free Software Foundation; either version 3, or (at your option)
-+ any later version.
-+
-+ GCC is distributed in the hope that it will be useful, but
-+ WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ General Public License for more details.
-+
-+ You should have received a copy of the GNU General Public License
-+ along with GCC; see the file COPYING3. If not see
-+ <http://www.gnu.org/licenses/>. */
-+
-+/* In the list below, the BUILTIN_<ITERATOR> macros should
-+ correspond to the iterator used to construct the instruction's
-+ patterns in aarch64-simd.md. A helpful idiom to follow when
-+ adding new builtins is to add a line for each pattern in the md
-+ file. Thus, ADDP, which has one pattern defined for the VD_BHSI
-+ iterator, and one for DImode, has two entries below. */
-+
-+ BUILTIN_VD_RE (CREATE, create)
-+ BUILTIN_VQ_S (GETLANE, get_lane_signed)
-+ BUILTIN_VDQ (GETLANE, get_lane_unsigned)
-+ BUILTIN_VDQF (GETLANE, get_lane)
-+ VAR1 (GETLANE, get_lane, di)
-+ BUILTIN_VDC (COMBINE, combine)
-+ BUILTIN_VB (BINOP, pmul)
-+ BUILTIN_VDQF (UNOP, sqrt)
-+ BUILTIN_VD_BHSI (BINOP, addp)
-+ VAR1 (UNOP, addp, di)
-+
-+ BUILTIN_VD_RE (REINTERP, reinterpretdi)
-+ BUILTIN_VDC (REINTERP, reinterpretv8qi)
-+ BUILTIN_VDC (REINTERP, reinterpretv4hi)
-+ BUILTIN_VDC (REINTERP, reinterpretv2si)
-+ BUILTIN_VDC (REINTERP, reinterpretv2sf)
-+ BUILTIN_VQ (REINTERP, reinterpretv16qi)
-+ BUILTIN_VQ (REINTERP, reinterpretv8hi)
-+ BUILTIN_VQ (REINTERP, reinterpretv4si)
-+ BUILTIN_VQ (REINTERP, reinterpretv4sf)
-+ BUILTIN_VQ (REINTERP, reinterpretv2di)
-+ BUILTIN_VQ (REINTERP, reinterpretv2df)
-+
-+ BUILTIN_VDQ_I (BINOP, dup_lane)
-+ BUILTIN_SDQ_I (BINOP, dup_lane)
-+ /* Implemented by aarch64_<sur>q<r>shl<mode>. */
-+ BUILTIN_VSDQ_I (BINOP, sqshl)
-+ BUILTIN_VSDQ_I (BINOP, uqshl)
-+ BUILTIN_VSDQ_I (BINOP, sqrshl)
-+ BUILTIN_VSDQ_I (BINOP, uqrshl)
-+ /* Implemented by aarch64_<su_optab><optab><mode>. */
-+ BUILTIN_VSDQ_I (BINOP, sqadd)
-+ BUILTIN_VSDQ_I (BINOP, uqadd)
-+ BUILTIN_VSDQ_I (BINOP, sqsub)
-+ BUILTIN_VSDQ_I (BINOP, uqsub)
-+ /* Implemented by aarch64_<sur>qadd<mode>. */
-+ BUILTIN_VSDQ_I (BINOP, suqadd)
-+ BUILTIN_VSDQ_I (BINOP, usqadd)
-+
-+ /* Implemented by aarch64_get_dreg<VSTRUCT:mode><VDC:mode>. */
-+ BUILTIN_VDC (GETLANE, get_dregoi)
-+ BUILTIN_VDC (GETLANE, get_dregci)
-+ BUILTIN_VDC (GETLANE, get_dregxi)
-+ /* Implemented by aarch64_get_qreg<VSTRUCT:mode><VQ:mode>. */
-+ BUILTIN_VQ (GETLANE, get_qregoi)
-+ BUILTIN_VQ (GETLANE, get_qregci)
-+ BUILTIN_VQ (GETLANE, get_qregxi)
-+ /* Implemented by aarch64_set_qreg<VSTRUCT:mode><VQ:mode>. */
-+ BUILTIN_VQ (SETLANE, set_qregoi)
-+ BUILTIN_VQ (SETLANE, set_qregci)
-+ BUILTIN_VQ (SETLANE, set_qregxi)
-+ /* Implemented by aarch64_ld<VSTRUCT:nregs><VDC:mode>. */
-+ BUILTIN_VDC (LOADSTRUCT, ld2)
-+ BUILTIN_VDC (LOADSTRUCT, ld3)
-+ BUILTIN_VDC (LOADSTRUCT, ld4)
-+ /* Implemented by aarch64_ld<VSTRUCT:nregs><VQ:mode>. */
-+ BUILTIN_VQ (LOADSTRUCT, ld2)
-+ BUILTIN_VQ (LOADSTRUCT, ld3)
-+ BUILTIN_VQ (LOADSTRUCT, ld4)
-+ /* Implemented by aarch64_st<VSTRUCT:nregs><VDC:mode>. */
-+ BUILTIN_VDC (STORESTRUCT, st2)
-+ BUILTIN_VDC (STORESTRUCT, st3)
-+ BUILTIN_VDC (STORESTRUCT, st4)
-+ /* Implemented by aarch64_st<VSTRUCT:nregs><VQ:mode>. */
-+ BUILTIN_VQ (STORESTRUCT, st2)
-+ BUILTIN_VQ (STORESTRUCT, st3)
-+ BUILTIN_VQ (STORESTRUCT, st4)
-+
-+ BUILTIN_VQW (BINOP, saddl2)
-+ BUILTIN_VQW (BINOP, uaddl2)
-+ BUILTIN_VQW (BINOP, ssubl2)
-+ BUILTIN_VQW (BINOP, usubl2)
-+ BUILTIN_VQW (BINOP, saddw2)
-+ BUILTIN_VQW (BINOP, uaddw2)
-+ BUILTIN_VQW (BINOP, ssubw2)
-+ BUILTIN_VQW (BINOP, usubw2)
-+ /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>. */
-+ BUILTIN_VDW (BINOP, saddl)
-+ BUILTIN_VDW (BINOP, uaddl)
-+ BUILTIN_VDW (BINOP, ssubl)
-+ BUILTIN_VDW (BINOP, usubl)
-+ /* Implemented by aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>. */
-+ BUILTIN_VDW (BINOP, saddw)
-+ BUILTIN_VDW (BINOP, uaddw)
-+ BUILTIN_VDW (BINOP, ssubw)
-+ BUILTIN_VDW (BINOP, usubw)
-+ /* Implemented by aarch64_<sur>h<addsub><mode>. */
-+ BUILTIN_VQ_S (BINOP, shadd)
-+ BUILTIN_VQ_S (BINOP, uhadd)
-+ BUILTIN_VQ_S (BINOP, srhadd)
-+ BUILTIN_VQ_S (BINOP, urhadd)
-+ /* Implemented by aarch64_<sur><addsub>hn<mode>. */
-+ BUILTIN_VQN (BINOP, addhn)
-+ BUILTIN_VQN (BINOP, raddhn)
-+ /* Implemented by aarch64_<sur><addsub>hn2<mode>. */
-+ BUILTIN_VQN (TERNOP, addhn2)
-+ BUILTIN_VQN (TERNOP, raddhn2)
-+
-+ BUILTIN_VSQN_HSDI (UNOP, sqmovun)
-+ /* Implemented by aarch64_<sur>qmovn<mode>. */
-+ BUILTIN_VSQN_HSDI (UNOP, sqmovn)
-+ BUILTIN_VSQN_HSDI (UNOP, uqmovn)
-+ /* Implemented by aarch64_s<optab><mode>. */
-+ BUILTIN_VSDQ_I_BHSI (UNOP, sqabs)
-+ BUILTIN_VSDQ_I_BHSI (UNOP, sqneg)
-+
-+ BUILTIN_VSD_HSI (QUADOP, sqdmlal_lane)
-+ BUILTIN_VSD_HSI (QUADOP, sqdmlsl_lane)
-+ BUILTIN_VSD_HSI (QUADOP, sqdmlal_laneq)
-+ BUILTIN_VSD_HSI (QUADOP, sqdmlsl_laneq)
-+ BUILTIN_VQ_HSI (TERNOP, sqdmlal2)
-+ BUILTIN_VQ_HSI (TERNOP, sqdmlsl2)
-+ BUILTIN_VQ_HSI (QUADOP, sqdmlal2_lane)
-+ BUILTIN_VQ_HSI (QUADOP, sqdmlsl2_lane)
-+ BUILTIN_VQ_HSI (QUADOP, sqdmlal2_laneq)
-+ BUILTIN_VQ_HSI (QUADOP, sqdmlsl2_laneq)
-+ BUILTIN_VQ_HSI (TERNOP, sqdmlal2_n)
-+ BUILTIN_VQ_HSI (TERNOP, sqdmlsl2_n)
-+ /* Implemented by aarch64_sqdml<SBINQOPS:as>l<mode>. */
-+ BUILTIN_VSD_HSI (TERNOP, sqdmlal)
-+ BUILTIN_VSD_HSI (TERNOP, sqdmlsl)
-+ /* Implemented by aarch64_sqdml<SBINQOPS:as>l_n<mode>. */
-+ BUILTIN_VD_HSI (TERNOP, sqdmlal_n)
-+ BUILTIN_VD_HSI (TERNOP, sqdmlsl_n)
-+
-+ BUILTIN_VSD_HSI (BINOP, sqdmull)
-+ BUILTIN_VSD_HSI (TERNOP, sqdmull_lane)
-+ BUILTIN_VD_HSI (TERNOP, sqdmull_laneq)
-+ BUILTIN_VD_HSI (BINOP, sqdmull_n)
-+ BUILTIN_VQ_HSI (BINOP, sqdmull2)
-+ BUILTIN_VQ_HSI (TERNOP, sqdmull2_lane)
-+ BUILTIN_VQ_HSI (TERNOP, sqdmull2_laneq)
-+ BUILTIN_VQ_HSI (BINOP, sqdmull2_n)
-+ /* Implemented by aarch64_sq<r>dmulh<mode>. */
-+ BUILTIN_VSDQ_HSI (BINOP, sqdmulh)
-+ BUILTIN_VSDQ_HSI (BINOP, sqrdmulh)
-+ /* Implemented by aarch64_sq<r>dmulh_lane<q><mode>. */
-+ BUILTIN_VDQHS (TERNOP, sqdmulh_lane)
-+ BUILTIN_VDQHS (TERNOP, sqdmulh_laneq)
-+ BUILTIN_VDQHS (TERNOP, sqrdmulh_lane)
-+ BUILTIN_VDQHS (TERNOP, sqrdmulh_laneq)
-+ BUILTIN_SD_HSI (TERNOP, sqdmulh_lane)
-+ BUILTIN_SD_HSI (TERNOP, sqrdmulh_lane)
-+
-+ BUILTIN_VSDQ_I_DI (BINOP, sshl_n)
-+ BUILTIN_VSDQ_I_DI (BINOP, ushl_n)
-+ /* Implemented by aarch64_<sur>shl<mode>. */
-+ BUILTIN_VSDQ_I_DI (BINOP, sshl)
-+ BUILTIN_VSDQ_I_DI (BINOP, ushl)
-+ BUILTIN_VSDQ_I_DI (BINOP, srshl)
-+ BUILTIN_VSDQ_I_DI (BINOP, urshl)
-+
-+ BUILTIN_VSDQ_I_DI (SHIFTIMM, sshr_n)
-+ BUILTIN_VSDQ_I_DI (SHIFTIMM, ushr_n)
-+ /* Implemented by aarch64_<sur>shr_n<mode>. */
-+ BUILTIN_VSDQ_I_DI (SHIFTIMM, srshr_n)
-+ BUILTIN_VSDQ_I_DI (SHIFTIMM, urshr_n)
-+ /* Implemented by aarch64_<sur>sra_n<mode>. */
-+ BUILTIN_VSDQ_I_DI (SHIFTACC, ssra_n)
-+ BUILTIN_VSDQ_I_DI (SHIFTACC, usra_n)
-+ BUILTIN_VSDQ_I_DI (SHIFTACC, srsra_n)
-+ BUILTIN_VSDQ_I_DI (SHIFTACC, ursra_n)
-+ /* Implemented by aarch64_<sur>shll_n<mode>. */
-+ BUILTIN_VDW (SHIFTIMM, sshll_n)
-+ BUILTIN_VDW (SHIFTIMM, ushll_n)
-+ /* Implemented by aarch64_<sur>shll2_n<mode>. */
-+ BUILTIN_VQW (SHIFTIMM, sshll2_n)
-+ BUILTIN_VQW (SHIFTIMM, ushll2_n)
-+ /* Implemented by aarch64_<sur>q<r>shr<u>n_n<mode>. */
-+ BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrun_n)
-+ BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrun_n)
-+ BUILTIN_VSQN_HSDI (SHIFTIMM, sqshrn_n)
-+ BUILTIN_VSQN_HSDI (SHIFTIMM, uqshrn_n)
-+ BUILTIN_VSQN_HSDI (SHIFTIMM, sqrshrn_n)
-+ BUILTIN_VSQN_HSDI (SHIFTIMM, uqrshrn_n)
-+ /* Implemented by aarch64_<sur>s<lr>i_n<mode>. */
-+ BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssri_n)
-+ BUILTIN_VSDQ_I_DI (SHIFTINSERT, usri_n)
-+ BUILTIN_VSDQ_I_DI (SHIFTINSERT, ssli_n)
-+ BUILTIN_VSDQ_I_DI (SHIFTINSERT, usli_n)
-+ /* Implemented by aarch64_<sur>qshl<u>_n<mode>. */
-+ BUILTIN_VSDQ_I (SHIFTIMM, sqshlu_n)
-+ BUILTIN_VSDQ_I (SHIFTIMM, sqshl_n)
-+ BUILTIN_VSDQ_I (SHIFTIMM, uqshl_n)
-+
-+ /* Implemented by aarch64_cm<cmp><mode>. */
-+ BUILTIN_VSDQ_I_DI (BINOP, cmeq)
-+ BUILTIN_VSDQ_I_DI (BINOP, cmge)
-+ BUILTIN_VSDQ_I_DI (BINOP, cmgt)
-+ BUILTIN_VSDQ_I_DI (BINOP, cmle)
-+ BUILTIN_VSDQ_I_DI (BINOP, cmlt)
-+ /* Implemented by aarch64_cm<cmp><mode>. */
-+ BUILTIN_VSDQ_I_DI (BINOP, cmhs)
-+ BUILTIN_VSDQ_I_DI (BINOP, cmhi)
-+ BUILTIN_VSDQ_I_DI (BINOP, cmtst)
-+
-+ /* Implemented by aarch64_<fmaxmin><mode>. */
-+ BUILTIN_VDQF (BINOP, fmax)
-+ BUILTIN_VDQF (BINOP, fmin)
-+ /* Implemented by aarch64_<maxmin><mode>. */
-+ BUILTIN_VDQ_BHSI (BINOP, smax)
-+ BUILTIN_VDQ_BHSI (BINOP, smin)
-+ BUILTIN_VDQ_BHSI (BINOP, umax)
-+ BUILTIN_VDQ_BHSI (BINOP, umin)
-+
-+ /* Implemented by aarch64_frint<frint_suffix><mode>. */
-+ BUILTIN_VDQF (UNOP, frintz)
-+ BUILTIN_VDQF (UNOP, frintp)
-+ BUILTIN_VDQF (UNOP, frintm)
-+ BUILTIN_VDQF (UNOP, frinti)
-+ BUILTIN_VDQF (UNOP, frintx)
-+ BUILTIN_VDQF (UNOP, frinta)
-+
-+ /* Implemented by aarch64_fcvt<frint_suffix><su><mode>. */
-+ BUILTIN_VDQF (UNOP, fcvtzs)
-+ BUILTIN_VDQF (UNOP, fcvtzu)
-+ BUILTIN_VDQF (UNOP, fcvtas)
-+ BUILTIN_VDQF (UNOP, fcvtau)
-+ BUILTIN_VDQF (UNOP, fcvtps)
-+ BUILTIN_VDQF (UNOP, fcvtpu)
-+ BUILTIN_VDQF (UNOP, fcvtms)
-+ BUILTIN_VDQF (UNOP, fcvtmu)
-+
-+ /* Implemented by
-+ aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>. */
-+ BUILTIN_VALL (BINOP, zip1)
-+ BUILTIN_VALL (BINOP, zip2)
-+ BUILTIN_VALL (BINOP, uzp1)
-+ BUILTIN_VALL (BINOP, uzp2)
-+ BUILTIN_VALL (BINOP, trn1)
-+ BUILTIN_VALL (BINOP, trn2)
---- a/src/gcc/config/aarch64/aarch64-simd.md
-+++ b/src/gcc/config/aarch64/aarch64-simd.md
-@@ -0,0 +1,3716 @@
-+;; Machine description for AArch64 AdvSIMD architecture.
-+;; Copyright (C) 2011, 2012, 2013 Free Software Foundation, Inc.
-+;; Contributed by ARM Ltd.
-+;;
-+;; This file is part of GCC.
-+;;
-+;; GCC is free software; you can redistribute it and/or modify it
-+;; under the terms of the GNU General Public License as published by
-+;; the Free Software Foundation; either version 3, or (at your option)
-+;; any later version.
-+;;
-+;; GCC is distributed in the hope that it will be useful, but
-+;; WITHOUT ANY WARRANTY; without even the implied warranty of
-+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+;; General Public License for more details.
-+;;
-+;; You should have received a copy of the GNU General Public License
-+;; along with GCC; see the file COPYING3. If not see
-+;; <http://www.gnu.org/licenses/>.
-+
-+
-+; Main data types used by the insntructions
-+
-+(define_attr "simd_mode" "unknown,none,V8QI,V16QI,V4HI,V8HI,V2SI,V4SI,V2DI,V2SF,V4SF,V2DF,OI,CI,XI,DI,DF,SI,HI,QI"
-+ (const_string "unknown"))
-+
-+
-+; Classification of AdvSIMD instructions for scheduling purposes.
-+; Do not set this attribute and the "v8type" attribute together in
-+; any instruction pattern.
-+
-+; simd_abd integer absolute difference and accumulate.
-+; simd_abdl integer absolute difference and accumulate (long).
-+; simd_adal integer add and accumulate (long).
-+; simd_add integer addition/subtraction.
-+; simd_addl integer addition/subtraction (long).
-+; simd_addlv across lanes integer sum (long).
-+; simd_addn integer addition/subtraction (narrow).
-+; simd_addn2 integer addition/subtraction (narrow, high).
-+; simd_addv across lanes integer sum.
-+; simd_cls count leading sign/zero bits.
-+; simd_cmp compare / create mask.
-+; simd_cnt population count.
-+; simd_dup duplicate element.
-+; simd_dupgp duplicate general purpose register.
-+; simd_ext bitwise extract from pair.
-+; simd_fadd floating point add/sub.
-+; simd_fcmp floating point compare.
-+; simd_fcvti floating point convert to integer.
-+; simd_fcvtl floating-point convert upsize.
-+; simd_fcvtn floating-point convert downsize (narrow).
-+; simd_fcvtn2 floating-point convert downsize (narrow, high).
-+; simd_fdiv floating point division.
-+; simd_fminmax floating point min/max.
-+; simd_fminmaxv across lanes floating point min/max.
-+; simd_fmla floating point multiply-add.
-+; simd_fmla_elt floating point multiply-add (by element).
-+; simd_fmul floating point multiply.
-+; simd_fmul_elt floating point multiply (by element).
-+; simd_fnegabs floating point neg/abs.
-+; simd_frcpe floating point reciprocal estimate.
-+; simd_frcps floating point reciprocal step.
-+; simd_frecx floating point reciprocal exponent.
-+; simd_frint floating point round to integer.
-+; simd_fsqrt floating point square root.
-+; simd_icvtf integer convert to floating point.
-+; simd_ins insert element.
-+; simd_insgp insert general purpose register.
-+; simd_load1 load multiple structures to one register (LD1).
-+; simd_load1r load single structure to all lanes of one register (LD1R).
-+; simd_load1s load single structure to one lane of one register (LD1 [index]).
-+; simd_load2 load multiple structures to two registers (LD1, LD2).
-+; simd_load2r load single structure to all lanes of two registers (LD1R, LD2R).
-+; simd_load2s load single structure to one lane of two registers (LD2 [index]).
-+; simd_load3 load multiple structures to three registers (LD1, LD3).
-+; simd_load3r load single structure to all lanes of three registers (LD3R).
-+; simd_load3s load single structure to one lane of three registers (LD3 [index]).
-+; simd_load4 load multiple structures to four registers (LD1, LD2, LD4).
-+; simd_load4r load single structure to all lanes of four registers (LD4R).
-+; simd_load4s load single structure to one lane of four registers (LD4 [index]).
-+; simd_logic logical operation.
-+; simd_logic_imm logcial operation (immediate).
-+; simd_minmax integer min/max.
-+; simd_minmaxv across lanes integer min/max,
-+; simd_mla integer multiply-accumulate.
-+; simd_mla_elt integer multiply-accumulate (by element).
-+; simd_mlal integer multiply-accumulate (long).
-+; simd_mlal_elt integer multiply-accumulate (by element, long).
-+; simd_move move register.
-+; simd_move_imm move immediate.
-+; simd_movgp move element to general purpose register.
-+; simd_mul integer multiply.
-+; simd_mul_elt integer multiply (by element).
-+; simd_mull integer multiply (long).
-+; simd_mull_elt integer multiply (by element, long).
-+; simd_negabs integer negate/absolute.
-+; simd_rbit bitwise reverse.
-+; simd_rcpe integer reciprocal estimate.
-+; simd_rcps integer reciprocal square root.
-+; simd_rev element reverse.
-+; simd_sat_add integer saturating addition/subtraction.
-+; simd_sat_mlal integer saturating multiply-accumulate (long).
-+; simd_sat_mlal_elt integer saturating multiply-accumulate (by element, long).
-+; simd_sat_mul integer saturating multiply.
-+; simd_sat_mul_elt integer saturating multiply (by element).
-+; simd_sat_mull integer saturating multiply (long).
-+; simd_sat_mull_elt integer saturating multiply (by element, long).
-+; simd_sat_negabs integer saturating negate/absolute.
-+; simd_sat_shift integer saturating shift.
-+; simd_sat_shift_imm integer saturating shift (immediate).
-+; simd_sat_shiftn_imm integer saturating shift (narrow, immediate).
-+; simd_sat_shiftn2_imm integer saturating shift (narrow, high, immediate).
-+; simd_shift shift register/vector.
-+; simd_shift_acc shift accumulate.
-+; simd_shift_imm shift immediate.
-+; simd_shift_imm_acc shift immediate and accumualte.
-+; simd_shiftl shift register/vector (long).
-+; simd_shiftl_imm shift register/vector (long, immediate).
-+; simd_shiftn_imm shift register/vector (narrow, immediate).
-+; simd_shiftn2_imm shift register/vector (narrow, high, immediate).
-+; simd_store1 store multiple structures from one register (ST1).
-+; simd_store1s store single structure from one lane of one register (ST1 [index]).
-+; simd_store2 store multiple structures from two registers (ST1, ST2).
-+; simd_store2s store single structure from one lane of two registers (ST2 [index]).
-+; simd_store3 store multiple structures from three registers (ST1, ST3).
-+; simd_store3s store single structure from one lane of three register (ST3 [index]).
-+; simd_store4 store multiple structures from four registers (ST1, ST2, ST4).
-+; simd_store4s store single structure from one lane for four registers (ST4 [index]).
-+; simd_tbl table lookup.
-+; simd_trn transpose.
-+; simd_uzp unzip.
-+; simd_zip zip.
-+
-+(define_attr "simd_type"
-+ "simd_abd,\
-+ simd_abdl,\
-+ simd_adal,\
-+ simd_add,\
-+ simd_addl,\
-+ simd_addlv,\
-+ simd_addn,\
-+ simd_addn2,\
-+ simd_addv,\
-+ simd_cls,\
-+ simd_cmp,\
-+ simd_cnt,\
-+ simd_dup,\
-+ simd_dupgp,\
-+ simd_ext,\
-+ simd_fadd,\
-+ simd_fcmp,\
-+ simd_fcvti,\
-+ simd_fcvtl,\
-+ simd_fcvtn,\
-+ simd_fcvtn2,\
-+ simd_fdiv,\
-+ simd_fminmax,\
-+ simd_fminmaxv,\
-+ simd_fmla,\
-+ simd_fmla_elt,\
-+ simd_fmul,\
-+ simd_fmul_elt,\
-+ simd_fnegabs,\
-+ simd_frcpe,\
-+ simd_frcps,\
-+ simd_frecx,\
-+ simd_frint,\
-+ simd_fsqrt,\
-+ simd_icvtf,\
-+ simd_ins,\
-+ simd_insgp,\
-+ simd_load1,\
-+ simd_load1r,\
-+ simd_load1s,\
-+ simd_load2,\
-+ simd_load2r,\
-+ simd_load2s,\
-+ simd_load3,\
-+ simd_load3r,\
-+ simd_load3s,\
-+ simd_load4,\
-+ simd_load4r,\
-+ simd_load4s,\
-+ simd_logic,\
-+ simd_logic_imm,\
-+ simd_minmax,\
-+ simd_minmaxv,\
-+ simd_mla,\
-+ simd_mla_elt,\
-+ simd_mlal,\
-+ simd_mlal_elt,\
-+ simd_movgp,\
-+ simd_move,\
-+ simd_move_imm,\
-+ simd_mul,\
-+ simd_mul_elt,\
-+ simd_mull,\
-+ simd_mull_elt,\
-+ simd_negabs,\
-+ simd_rbit,\
-+ simd_rcpe,\
-+ simd_rcps,\
-+ simd_rev,\
-+ simd_sat_add,\
-+ simd_sat_mlal,\
-+ simd_sat_mlal_elt,\
-+ simd_sat_mul,\
-+ simd_sat_mul_elt,\
-+ simd_sat_mull,\
-+ simd_sat_mull_elt,\
-+ simd_sat_negabs,\
-+ simd_sat_shift,\
-+ simd_sat_shift_imm,\
-+ simd_sat_shiftn_imm,\
-+ simd_sat_shiftn2_imm,\
-+ simd_shift,\
-+ simd_shift_acc,\
-+ simd_shift_imm,\
-+ simd_shift_imm_acc,\
-+ simd_shiftl,\
-+ simd_shiftl_imm,\
-+ simd_shiftn_imm,\
-+ simd_shiftn2_imm,\
-+ simd_store1,\
-+ simd_store1s,\
-+ simd_store2,\
-+ simd_store2s,\
-+ simd_store3,\
-+ simd_store3s,\
-+ simd_store4,\
-+ simd_store4s,\
-+ simd_tbl,\
-+ simd_trn,\
-+ simd_uzp,\
-+ simd_zip,\
-+ none"
-+ (const_string "none"))
-+
-+
-+; The "neon_type" attribute is used by the AArch32 backend. Below is a mapping
-+; from "simd_type" to "neon_type".
-+
-+(define_attr "neon_type"
-+ "neon_int_1,neon_int_2,neon_int_3,neon_int_4,neon_int_5,neon_vqneg_vqabs,
-+ neon_vmov,neon_vaba,neon_vsma,neon_vaba_qqq,
-+ neon_mul_ddd_8_16_qdd_16_8_long_32_16_long,neon_mul_qqq_8_16_32_ddd_32,
-+ neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar,
-+ neon_mla_ddd_8_16_qdd_16_8_long_32_16_long,neon_mla_qqq_8_16,
-+ neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long,
-+ neon_mla_qqq_32_qqd_32_scalar,neon_mul_ddd_16_scalar_32_16_long_scalar,
-+ neon_mul_qqd_32_scalar,neon_mla_ddd_16_scalar_qdd_32_16_long_scalar,
-+ neon_shift_1,neon_shift_2,neon_shift_3,neon_vshl_ddd,
-+ neon_vqshl_vrshl_vqrshl_qqq,neon_vsra_vrsra,neon_fp_vadd_ddd_vabs_dd,
-+ neon_fp_vadd_qqq_vabs_qq,neon_fp_vsum,neon_fp_vmul_ddd,neon_fp_vmul_qqd,
-+ neon_fp_vmla_ddd,neon_fp_vmla_qqq,neon_fp_vmla_ddd_scalar,
-+ neon_fp_vmla_qqq_scalar,neon_fp_vrecps_vrsqrts_ddd,
-+ neon_fp_vrecps_vrsqrts_qqq,neon_bp_simple,neon_bp_2cycle,neon_bp_3cycle,
-+ neon_ldr,neon_str,neon_vld1_1_2_regs,neon_vld1_3_4_regs,
-+ neon_vld2_2_regs_vld1_vld2_all_lanes,neon_vld2_4_regs,neon_vld3_vld4,
-+ neon_vst1_1_2_regs_vst2_2_regs,neon_vst1_3_4_regs,
-+ neon_vst2_4_regs_vst3_vst4,neon_vst3_vst4,neon_vld1_vld2_lane,
-+ neon_vld3_vld4_lane,neon_vst1_vst2_lane,neon_vst3_vst4_lane,
-+ neon_vld3_vld4_all_lanes,neon_mcr,neon_mcr_2_mcrr,neon_mrc,neon_mrrc,
-+ neon_ldm_2,neon_stm_2,none,unknown"
-+ (cond [
-+ (eq_attr "simd_type" "simd_dup") (const_string "neon_bp_simple")
-+ (eq_attr "simd_type" "simd_movgp") (const_string "neon_bp_simple")
-+ (eq_attr "simd_type" "simd_add,simd_logic,simd_logic_imm") (const_string "neon_int_1")
-+ (eq_attr "simd_type" "simd_negabs,simd_addlv") (const_string "neon_int_3")
-+ (eq_attr "simd_type" "simd_addn,simd_addn2,simd_addl,simd_sat_add,simd_sat_negabs") (const_string "neon_int_4")
-+ (eq_attr "simd_type" "simd_move") (const_string "neon_vmov")
-+ (eq_attr "simd_type" "simd_ins") (const_string "neon_mcr")
-+ (and (eq_attr "simd_type" "simd_mul,simd_sat_mul") (eq_attr "simd_mode" "V8QI,V4HI")) (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
-+ (and (eq_attr "simd_type" "simd_mul,simd_sat_mul") (eq_attr "simd_mode" "V2SI,V8QI,V16QI,V2SI")) (const_string "neon_mul_qqq_8_16_32_ddd_32")
-+ (and (eq_attr "simd_type" "simd_mull,simd_sat_mull") (eq_attr "simd_mode" "V8QI,V16QI,V4HI,V8HI")) (const_string "neon_mul_ddd_8_16_qdd_16_8_long_32_16_long")
-+ (and (eq_attr "simd_type" "simd_mull,simd_sat_mull") (eq_attr "simd_mode" "V2SI,V4SI,V2DI")) (const_string "neon_mul_qdd_64_32_long_qqd_16_ddd_32_scalar_64_32_long_scalar")
-+ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V8QI,V4HI")) (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
-+ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V2SI")) (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
-+ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V16QI,V8HI")) (const_string "neon_mla_qqq_8_16")
-+ (and (eq_attr "simd_type" "simd_mla,simd_sat_mlal") (eq_attr "simd_mode" "V4SI")) (const_string "neon_mla_qqq_32_qqd_32_scalar")
-+ (and (eq_attr "simd_type" "simd_mlal") (eq_attr "simd_mode" "V8QI,V16QI,V4HI,V8HI")) (const_string "neon_mla_ddd_8_16_qdd_16_8_long_32_16_long")
-+ (and (eq_attr "simd_type" "simd_mlal") (eq_attr "simd_mode" "V2SI,V4SI,V2DI")) (const_string "neon_mla_ddd_32_qqd_16_ddd_32_scalar_qdd_64_32_long_scalar_qdd_64_32_long")
-+ (and (eq_attr "simd_type" "simd_fmla") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmla_ddd")
-+ (and (eq_attr "simd_type" "simd_fmla") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmla_qqq")
-+ (and (eq_attr "simd_type" "simd_fmla_elt") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmla_ddd_scalar")
-+ (and (eq_attr "simd_type" "simd_fmla_elt") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmla_qqq_scalar")
-+ (and (eq_attr "simd_type" "simd_fmul,simd_fmul_elt,simd_fdiv,simd_fsqrt") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vmul_ddd")
-+ (and (eq_attr "simd_type" "simd_fmul,simd_fmul_elt,simd_fdiv,simd_fsqrt") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vmul_qqd")
-+ (and (eq_attr "simd_type" "simd_fadd") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vadd_ddd_vabs_dd")
-+ (and (eq_attr "simd_type" "simd_fadd") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vadd_qqq_vabs_qq")
-+ (and (eq_attr "simd_type" "simd_fnegabs,simd_fminmax,simd_fminmaxv") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vadd_ddd_vabs_dd")
-+ (and (eq_attr "simd_type" "simd_fnegabs,simd_fminmax,simd_fminmaxv") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vadd_qqq_vabs_qq")
-+ (and (eq_attr "simd_type" "simd_shift,simd_shift_acc") (eq_attr "simd_mode" "V8QI,V4HI,V2SI")) (const_string "neon_vshl_ddd")
-+ (and (eq_attr "simd_type" "simd_shift,simd_shift_acc") (eq_attr "simd_mode" "V16QI,V8HI,V4SI,V2DI")) (const_string "neon_shift_3")
-+ (eq_attr "simd_type" "simd_minmax,simd_minmaxv") (const_string "neon_int_5")
-+ (eq_attr "simd_type" "simd_shiftn_imm,simd_shiftn2_imm,simd_shiftl_imm,") (const_string "neon_shift_1")
-+ (eq_attr "simd_type" "simd_load1,simd_load2") (const_string "neon_vld1_1_2_regs")
-+ (eq_attr "simd_type" "simd_load3,simd_load3") (const_string "neon_vld1_3_4_regs")
-+ (eq_attr "simd_type" "simd_load1r,simd_load2r,simd_load3r,simd_load4r") (const_string "neon_vld2_2_regs_vld1_vld2_all_lanes")
-+ (eq_attr "simd_type" "simd_load1s,simd_load2s") (const_string "neon_vld1_vld2_lane")
-+ (eq_attr "simd_type" "simd_load3s,simd_load4s") (const_string "neon_vld3_vld4_lane")
-+ (eq_attr "simd_type" "simd_store1,simd_store2") (const_string "neon_vst1_1_2_regs_vst2_2_regs")
-+ (eq_attr "simd_type" "simd_store3,simd_store4") (const_string "neon_vst1_3_4_regs")
-+ (eq_attr "simd_type" "simd_store1s,simd_store2s") (const_string "neon_vst1_vst2_lane")
-+ (eq_attr "simd_type" "simd_store3s,simd_store4s") (const_string "neon_vst3_vst4_lane")
-+ (and (eq_attr "simd_type" "simd_frcpe,simd_frcps") (eq_attr "simd_mode" "V2SF")) (const_string "neon_fp_vrecps_vrsqrts_ddd")
-+ (and (eq_attr "simd_type" "simd_frcpe,simd_frcps") (eq_attr "simd_mode" "V4SF,V2DF")) (const_string "neon_fp_vrecps_vrsqrts_qqq")
-+ (eq_attr "simd_type" "none") (const_string "none")
-+ ]
-+ (const_string "unknown")))
-+
-+
-+(define_expand "mov<mode>"
-+ [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
-+ (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
-+ "TARGET_SIMD"
-+ "
-+ if (GET_CODE (operands[0]) == MEM)
-+ operands[1] = force_reg (<MODE>mode, operands[1]);
-+ "
-+)
-+
-+(define_expand "movmisalign<mode>"
-+ [(set (match_operand:VALL 0 "aarch64_simd_nonimmediate_operand" "")
-+ (match_operand:VALL 1 "aarch64_simd_general_operand" ""))]
-+ "TARGET_SIMD"
-+{
-+ /* This pattern is not permitted to fail during expansion: if both arguments
-+ are non-registers (e.g. memory := constant, which can be created by the
-+ auto-vectorizer), force operand 1 into a register. */
-+ if (!register_operand (operands[0], <MODE>mode)
-+ && !register_operand (operands[1], <MODE>mode))
-+ operands[1] = force_reg (<MODE>mode, operands[1]);
-+})
-+
-+(define_insn "aarch64_simd_dup<mode>"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (vec_duplicate:VDQ (match_operand:<VEL> 1 "register_operand" "r")))]
-+ "TARGET_SIMD"
-+ "dup\\t%0.<Vtype>, %<vw>1"
-+ [(set_attr "simd_type" "simd_dupgp")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_dup_lane<mode>"
-+ [(set (match_operand:VDQ_I 0 "register_operand" "=w")
-+ (vec_duplicate:VDQ_I
-+ (vec_select:<VEL>
-+ (match_operand:<VCON> 1 "register_operand" "w")
-+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])
-+ )))]
-+ "TARGET_SIMD"
-+ "dup\\t%<v>0<Vmtype>, %1.<Vetype>[%2]"
-+ [(set_attr "simd_type" "simd_dup")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_dup_lane<mode>"
-+ [(set (match_operand:SDQ_I 0 "register_operand" "=w")
-+ (vec_select:<VEL>
-+ (match_operand:<VCON> 1 "register_operand" "w")
-+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])
-+ ))]
-+ "TARGET_SIMD"
-+ "dup\\t%<v>0<Vmtype>, %1.<Vetype>[%2]"
-+ [(set_attr "simd_type" "simd_dup")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_simd_dup<mode>"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (vec_duplicate:VDQF (match_operand:<VEL> 1 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "dup\\t%0.<Vtype>, %1.<Vetype>[0]"
-+ [(set_attr "simd_type" "simd_dup")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "*aarch64_simd_mov<mode>"
-+ [(set (match_operand:VD 0 "aarch64_simd_nonimmediate_operand"
-+ "=w, Utv, w, ?r, ?w, ?r, w")
-+ (match_operand:VD 1 "aarch64_simd_general_operand"
-+ "Utv, w, w, w, r, r, Dn"))]
-+ "TARGET_SIMD
-+ && (register_operand (operands[0], <MODE>mode)
-+ || register_operand (operands[1], <MODE>mode))"
-+{
-+ switch (which_alternative)
-+ {
-+ case 0: return "ld1\t{%0.<Vtype>}, %1";
-+ case 1: return "st1\t{%1.<Vtype>}, %0";
-+ case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
-+ case 3: return "umov\t%0, %1.d[0]";
-+ case 4: return "ins\t%0.d[0], %1";
-+ case 5: return "mov\t%0, %1";
-+ case 6:
-+ return aarch64_output_simd_mov_immediate (&operands[1],
-+ <MODE>mode, 64);
-+ default: gcc_unreachable ();
-+ }
-+}
-+ [(set_attr "simd_type" "simd_load1,simd_store1,simd_move,simd_movgp,simd_insgp,simd_move,simd_move_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "*aarch64_simd_mov<mode>"
-+ [(set (match_operand:VQ 0 "aarch64_simd_nonimmediate_operand"
-+ "=w, Utv, w, ?r, ?w, ?r, w")
-+ (match_operand:VQ 1 "aarch64_simd_general_operand"
-+ "Utv, w, w, w, r, r, Dn"))]
-+ "TARGET_SIMD
-+ && (register_operand (operands[0], <MODE>mode)
-+ || register_operand (operands[1], <MODE>mode))"
-+{
-+ switch (which_alternative)
-+ {
-+ case 0: return "ld1\t{%0.<Vtype>}, %1";
-+ case 1: return "st1\t{%1.<Vtype>}, %0";
-+ case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
-+ case 3: return "umov\t%0, %1.d[0]\;umov\t%H0, %1.d[1]";
-+ case 4: return "ins\t%0.d[0], %1\;ins\t%0.d[1], %H1";
-+ case 5: return "#";
-+ case 6:
-+ return aarch64_output_simd_mov_immediate (&operands[1],
-+ <MODE>mode, 128);
-+ default: gcc_unreachable ();
-+ }
-+}
-+ [(set_attr "simd_type" "simd_load1,simd_store1,simd_move,simd_movgp,simd_insgp,simd_move,simd_move_imm")
-+ (set_attr "simd_mode" "<MODE>")
-+ (set_attr "length" "4,4,4,8,8,8,4")]
-+)
-+
-+(define_split
-+ [(set (match_operand:VQ 0 "register_operand" "")
-+ (match_operand:VQ 1 "register_operand" ""))]
-+ "TARGET_SIMD && reload_completed
-+ && GP_REGNUM_P (REGNO (operands[0]))
-+ && GP_REGNUM_P (REGNO (operands[1]))"
-+ [(set (match_dup 0) (match_dup 1))
-+ (set (match_dup 2) (match_dup 3))]
-+{
-+ int rdest = REGNO (operands[0]);
-+ int rsrc = REGNO (operands[1]);
-+ rtx dest[2], src[2];
-+
-+ dest[0] = gen_rtx_REG (DImode, rdest);
-+ src[0] = gen_rtx_REG (DImode, rsrc);
-+ dest[1] = gen_rtx_REG (DImode, rdest + 1);
-+ src[1] = gen_rtx_REG (DImode, rsrc + 1);
-+
-+ aarch64_simd_disambiguate_copy (operands, dest, src, 2);
-+})
-+
-+(define_insn "orn<mode>3"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (ior:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
-+ (match_operand:VDQ 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "orn\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
-+ [(set_attr "simd_type" "simd_logic")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "bic<mode>3"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (and:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
-+ (match_operand:VDQ 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "bic\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>"
-+ [(set_attr "simd_type" "simd_logic")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "add<mode>3"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (plus:VDQ (match_operand:VDQ 1 "register_operand" "w")
-+ (match_operand:VDQ 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "add\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_add")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "sub<mode>3"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (minus:VDQ (match_operand:VDQ 1 "register_operand" "w")
-+ (match_operand:VDQ 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "sub\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_add")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "mul<mode>3"
-+ [(set (match_operand:VDQM 0 "register_operand" "=w")
-+ (mult:VDQM (match_operand:VDQM 1 "register_operand" "w")
-+ (match_operand:VDQM 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "mul\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "neg<mode>2"
-+ [(set (match_operand:VDQM 0 "register_operand" "=w")
-+ (neg:VDQM (match_operand:VDQM 1 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "neg\t%0.<Vtype>, %1.<Vtype>"
-+ [(set_attr "simd_type" "simd_negabs")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "abs<mode>2"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (abs:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "abs\t%0.<Vtype>, %1.<Vtype>"
-+ [(set_attr "simd_type" "simd_negabs")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "and<mode>3"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (and:VDQ (match_operand:VDQ 1 "register_operand" "w")
-+ (match_operand:VDQ 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "and\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
-+ [(set_attr "simd_type" "simd_logic")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "ior<mode>3"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (ior:VDQ (match_operand:VDQ 1 "register_operand" "w")
-+ (match_operand:VDQ 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "orr\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
-+ [(set_attr "simd_type" "simd_logic")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "xor<mode>3"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (xor:VDQ (match_operand:VDQ 1 "register_operand" "w")
-+ (match_operand:VDQ 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "eor\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
-+ [(set_attr "simd_type" "simd_logic")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "one_cmpl<mode>2"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (not:VDQ (match_operand:VDQ 1 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "not\t%0.<Vbtype>, %1.<Vbtype>"
-+ [(set_attr "simd_type" "simd_logic")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_simd_vec_set<mode>"
-+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
-+ (vec_merge:VQ_S
-+ (vec_duplicate:VQ_S
-+ (match_operand:<VEL> 1 "register_operand" "r"))
-+ (match_operand:VQ_S 3 "register_operand" "0")
-+ (match_operand:SI 2 "immediate_operand" "i")))]
-+ "TARGET_SIMD"
-+ "ins\t%0.<Vetype>[%p2], %w1";
-+ [(set_attr "simd_type" "simd_insgp")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_simd_lshr<mode>"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (lshiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
-+ (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))]
-+ "TARGET_SIMD"
-+ "ushr\t%0.<Vtype>, %1.<Vtype>, %2"
-+ [(set_attr "simd_type" "simd_shift_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_simd_ashr<mode>"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (ashiftrt:VDQ (match_operand:VDQ 1 "register_operand" "w")
-+ (match_operand:VDQ 2 "aarch64_simd_rshift_imm" "Dr")))]
-+ "TARGET_SIMD"
-+ "sshr\t%0.<Vtype>, %1.<Vtype>, %2"
-+ [(set_attr "simd_type" "simd_shift_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_simd_imm_shl<mode>"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
-+ (match_operand:VDQ 2 "aarch64_simd_lshift_imm" "Dl")))]
-+ "TARGET_SIMD"
-+ "shl\t%0.<Vtype>, %1.<Vtype>, %2"
-+ [(set_attr "simd_type" "simd_shift_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_simd_reg_sshl<mode>"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (ashift:VDQ (match_operand:VDQ 1 "register_operand" "w")
-+ (match_operand:VDQ 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_shift")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_simd_reg_shl<mode>_unsigned"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
-+ (match_operand:VDQ 2 "register_operand" "w")]
-+ UNSPEC_ASHIFT_UNSIGNED))]
-+ "TARGET_SIMD"
-+ "ushl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_shift")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_simd_reg_shl<mode>_signed"
-+ [(set (match_operand:VDQ 0 "register_operand" "=w")
-+ (unspec:VDQ [(match_operand:VDQ 1 "register_operand" "w")
-+ (match_operand:VDQ 2 "register_operand" "w")]
-+ UNSPEC_ASHIFT_SIGNED))]
-+ "TARGET_SIMD"
-+ "sshl\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_shift")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "ashl<mode>3"
-+ [(match_operand:VDQ 0 "register_operand" "")
-+ (match_operand:VDQ 1 "register_operand" "")
-+ (match_operand:SI 2 "general_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-+ int shift_amount;
-+
-+ if (CONST_INT_P (operands[2]))
-+ {
-+ shift_amount = INTVAL (operands[2]);
-+ if (shift_amount >= 0 && shift_amount < bit_width)
-+ {
-+ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
-+ shift_amount);
-+ emit_insn (gen_aarch64_simd_imm_shl<mode> (operands[0],
-+ operands[1],
-+ tmp));
-+ DONE;
-+ }
-+ else
-+ {
-+ operands[2] = force_reg (SImode, operands[2]);
-+ }
-+ }
-+ else if (MEM_P (operands[2]))
-+ {
-+ operands[2] = force_reg (SImode, operands[2]);
-+ }
-+
-+ if (REG_P (operands[2]))
-+ {
-+ rtx tmp = gen_reg_rtx (<MODE>mode);
-+ emit_insn (gen_aarch64_simd_dup<mode> (tmp,
-+ convert_to_mode (<VEL>mode,
-+ operands[2],
-+ 0)));
-+ emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
-+ tmp));
-+ DONE;
-+ }
-+ else
-+ FAIL;
-+}
-+)
-+
-+(define_expand "lshr<mode>3"
-+ [(match_operand:VDQ 0 "register_operand" "")
-+ (match_operand:VDQ 1 "register_operand" "")
-+ (match_operand:SI 2 "general_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-+ int shift_amount;
-+
-+ if (CONST_INT_P (operands[2]))
-+ {
-+ shift_amount = INTVAL (operands[2]);
-+ if (shift_amount > 0 && shift_amount <= bit_width)
-+ {
-+ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
-+ shift_amount);
-+ emit_insn (gen_aarch64_simd_lshr<mode> (operands[0],
-+ operands[1],
-+ tmp));
-+ DONE;
-+ }
-+ else
-+ operands[2] = force_reg (SImode, operands[2]);
-+ }
-+ else if (MEM_P (operands[2]))
-+ {
-+ operands[2] = force_reg (SImode, operands[2]);
-+ }
-+
-+ if (REG_P (operands[2]))
-+ {
-+ rtx tmp = gen_reg_rtx (SImode);
-+ rtx tmp1 = gen_reg_rtx (<MODE>mode);
-+ emit_insn (gen_negsi2 (tmp, operands[2]));
-+ emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
-+ convert_to_mode (<VEL>mode,
-+ tmp, 0)));
-+ emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0],
-+ operands[1],
-+ tmp1));
-+ DONE;
-+ }
-+ else
-+ FAIL;
-+}
-+)
-+
-+(define_expand "ashr<mode>3"
-+ [(match_operand:VDQ 0 "register_operand" "")
-+ (match_operand:VDQ 1 "register_operand" "")
-+ (match_operand:SI 2 "general_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-+ int shift_amount;
-+
-+ if (CONST_INT_P (operands[2]))
-+ {
-+ shift_amount = INTVAL (operands[2]);
-+ if (shift_amount > 0 && shift_amount <= bit_width)
-+ {
-+ rtx tmp = aarch64_simd_gen_const_vector_dup (<MODE>mode,
-+ shift_amount);
-+ emit_insn (gen_aarch64_simd_ashr<mode> (operands[0],
-+ operands[1],
-+ tmp));
-+ DONE;
-+ }
-+ else
-+ operands[2] = force_reg (SImode, operands[2]);
-+ }
-+ else if (MEM_P (operands[2]))
-+ {
-+ operands[2] = force_reg (SImode, operands[2]);
-+ }
-+
-+ if (REG_P (operands[2]))
-+ {
-+ rtx tmp = gen_reg_rtx (SImode);
-+ rtx tmp1 = gen_reg_rtx (<MODE>mode);
-+ emit_insn (gen_negsi2 (tmp, operands[2]));
-+ emit_insn (gen_aarch64_simd_dup<mode> (tmp1,
-+ convert_to_mode (<VEL>mode,
-+ tmp, 0)));
-+ emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0],
-+ operands[1],
-+ tmp1));
-+ DONE;
-+ }
-+ else
-+ FAIL;
-+}
-+)
-+
-+(define_expand "vashl<mode>3"
-+ [(match_operand:VDQ 0 "register_operand" "")
-+ (match_operand:VDQ 1 "register_operand" "")
-+ (match_operand:VDQ 2 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ emit_insn (gen_aarch64_simd_reg_sshl<mode> (operands[0], operands[1],
-+ operands[2]));
-+ DONE;
-+})
-+
-+;; Using mode VQ_S as there is no V2DImode neg!
-+;; Negating individual lanes most certainly offsets the
-+;; gain from vectorization.
-+(define_expand "vashr<mode>3"
-+ [(match_operand:VQ_S 0 "register_operand" "")
-+ (match_operand:VQ_S 1 "register_operand" "")
-+ (match_operand:VQ_S 2 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ rtx neg = gen_reg_rtx (<MODE>mode);
-+ emit (gen_neg<mode>2 (neg, operands[2]));
-+ emit_insn (gen_aarch64_simd_reg_shl<mode>_signed (operands[0], operands[1],
-+ neg));
-+ DONE;
-+})
-+
-+(define_expand "vlshr<mode>3"
-+ [(match_operand:VQ_S 0 "register_operand" "")
-+ (match_operand:VQ_S 1 "register_operand" "")
-+ (match_operand:VQ_S 2 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ rtx neg = gen_reg_rtx (<MODE>mode);
-+ emit (gen_neg<mode>2 (neg, operands[2]));
-+ emit_insn (gen_aarch64_simd_reg_shl<mode>_unsigned (operands[0], operands[1],
-+ neg));
-+ DONE;
-+})
-+
-+(define_expand "vec_set<mode>"
-+ [(match_operand:VQ_S 0 "register_operand" "+w")
-+ (match_operand:<VEL> 1 "register_operand" "r")
-+ (match_operand:SI 2 "immediate_operand" "")]
-+ "TARGET_SIMD"
-+ {
-+ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
-+ emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
-+ GEN_INT (elem), operands[0]));
-+ DONE;
-+ }
-+)
-+
-+(define_insn "aarch64_simd_vec_setv2di"
-+ [(set (match_operand:V2DI 0 "register_operand" "=w")
-+ (vec_merge:V2DI
-+ (vec_duplicate:V2DI
-+ (match_operand:DI 1 "register_operand" "r"))
-+ (match_operand:V2DI 3 "register_operand" "0")
-+ (match_operand:SI 2 "immediate_operand" "i")))]
-+ "TARGET_SIMD"
-+ "ins\t%0.d[%p2], %1";
-+ [(set_attr "simd_type" "simd_insgp")
-+ (set_attr "simd_mode" "V2DI")]
-+)
-+
-+(define_expand "vec_setv2di"
-+ [(match_operand:V2DI 0 "register_operand" "+w")
-+ (match_operand:DI 1 "register_operand" "r")
-+ (match_operand:SI 2 "immediate_operand" "")]
-+ "TARGET_SIMD"
-+ {
-+ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
-+ emit_insn (gen_aarch64_simd_vec_setv2di (operands[0], operands[1],
-+ GEN_INT (elem), operands[0]));
-+ DONE;
-+ }
-+)
-+
-+(define_insn "aarch64_simd_vec_set<mode>"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (vec_merge:VDQF
-+ (vec_duplicate:VDQF
-+ (match_operand:<VEL> 1 "register_operand" "w"))
-+ (match_operand:VDQF 3 "register_operand" "0")
-+ (match_operand:SI 2 "immediate_operand" "i")))]
-+ "TARGET_SIMD"
-+ "ins\t%0.<Vetype>[%p2], %1.<Vetype>[0]";
-+ [(set_attr "simd_type" "simd_ins")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "vec_set<mode>"
-+ [(match_operand:VDQF 0 "register_operand" "+w")
-+ (match_operand:<VEL> 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "")]
-+ "TARGET_SIMD"
-+ {
-+ HOST_WIDE_INT elem = (HOST_WIDE_INT) 1 << INTVAL (operands[2]);
-+ emit_insn (gen_aarch64_simd_vec_set<mode> (operands[0], operands[1],
-+ GEN_INT (elem), operands[0]));
-+ DONE;
-+ }
-+)
-+
-+
-+(define_insn "aarch64_mla<mode>"
-+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
-+ (plus:VQ_S (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
-+ (match_operand:VQ_S 3 "register_operand" "w"))
-+ (match_operand:VQ_S 1 "register_operand" "0")))]
-+ "TARGET_SIMD"
-+ "mla\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
-+ [(set_attr "simd_type" "simd_mla")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_mls<mode>"
-+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
-+ (minus:VQ_S (match_operand:VQ_S 1 "register_operand" "0")
-+ (mult:VQ_S (match_operand:VQ_S 2 "register_operand" "w")
-+ (match_operand:VQ_S 3 "register_operand" "w"))))]
-+ "TARGET_SIMD"
-+ "mls\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
-+ [(set_attr "simd_type" "simd_mla")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; Max/Min operations.
-+(define_insn "<maxmin><mode>3"
-+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
-+ (MAXMIN:VQ_S (match_operand:VQ_S 1 "register_operand" "w")
-+ (match_operand:VQ_S 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "<maxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_minmax")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; Move into low-half clearing high half to 0.
-+
-+(define_insn "move_lo_quad_<mode>"
-+ [(set (match_operand:VQ 0 "register_operand" "=w")
-+ (vec_concat:VQ
-+ (match_operand:<VHALF> 1 "register_operand" "w")
-+ (vec_duplicate:<VHALF> (const_int 0))))]
-+ "TARGET_SIMD"
-+ "mov\\t%d0, %d1";
-+ [(set_attr "simd_type" "simd_dup")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; Move into high-half.
-+
-+(define_insn "aarch64_simd_move_hi_quad_<mode>"
-+ [(set (match_operand:VQ 0 "register_operand" "+w")
-+ (vec_concat:VQ
-+ (vec_select:<VHALF>
-+ (match_dup 0)
-+ (match_operand:VQ 2 "vect_par_cnst_lo_half" ""))
-+ (match_operand:<VHALF> 1 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "ins\\t%0.d[1], %1.d[0]";
-+ [(set_attr "simd_type" "simd_ins")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "move_hi_quad_<mode>"
-+ [(match_operand:VQ 0 "register_operand" "")
-+ (match_operand:<VHALF> 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
-+ emit_insn (gen_aarch64_simd_move_hi_quad_<mode> (operands[0],
-+ operands[1], p));
-+ DONE;
-+})
-+
-+;; Narrowing operations.
-+
-+;; For doubles.
-+(define_insn "aarch64_simd_vec_pack_trunc_<mode>"
-+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
-+ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "xtn\\t%0.<Vntype>, %1.<Vtype>"
-+ [(set_attr "simd_type" "simd_shiftn_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "vec_pack_trunc_<mode>"
-+ [(match_operand:<VNARROWD> 0 "register_operand" "")
-+ (match_operand:VDN 1 "register_operand" "")
-+ (match_operand:VDN 2 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ rtx tempreg = gen_reg_rtx (<VDBL>mode);
-+
-+ emit_insn (gen_move_lo_quad_<Vdbl> (tempreg, operands[1]));
-+ emit_insn (gen_move_hi_quad_<Vdbl> (tempreg, operands[2]));
-+ emit_insn (gen_aarch64_simd_vec_pack_trunc_<Vdbl> (operands[0], tempreg));
-+ DONE;
-+})
-+
-+;; For quads.
-+
-+(define_insn "vec_pack_trunc_<mode>"
-+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "+&w")
-+ (vec_concat:<VNARROWQ2>
-+ (truncate:<VNARROWQ> (match_operand:VQN 1 "register_operand" "w"))
-+ (truncate:<VNARROWQ> (match_operand:VQN 2 "register_operand" "w"))))]
-+ "TARGET_SIMD"
-+ "xtn\\t%0.<Vntype>, %1.<Vtype>\;xtn2\\t%0.<V2ntype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_shiftn2_imm")
-+ (set_attr "simd_mode" "<MODE>")
-+ (set_attr "length" "8")]
-+)
-+
-+;; Widening operations.
-+
-+(define_insn "aarch64_simd_vec_unpack<su>_lo_<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
-+ (match_operand:VQW 1 "register_operand" "w")
-+ (match_operand:VQW 2 "vect_par_cnst_lo_half" "")
-+ )))]
-+ "TARGET_SIMD"
-+ "<su>shll %0.<Vwtype>, %1.<Vhalftype>, 0"
-+ [(set_attr "simd_type" "simd_shiftl_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_simd_vec_unpack<su>_hi_<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
-+ (match_operand:VQW 1 "register_operand" "w")
-+ (match_operand:VQW 2 "vect_par_cnst_hi_half" "")
-+ )))]
-+ "TARGET_SIMD"
-+ "<su>shll2 %0.<Vwtype>, %1.<Vtype>, 0"
-+ [(set_attr "simd_type" "simd_shiftl_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "vec_unpack<su>_hi_<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "")
-+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand"))]
-+ "TARGET_SIMD"
-+ {
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_simd_vec_unpack<su>_hi_<mode> (operands[0],
-+ operands[1], p));
-+ DONE;
-+ }
-+)
-+
-+(define_expand "vec_unpack<su>_lo_<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "")
-+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))]
-+ "TARGET_SIMD"
-+ {
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
-+ emit_insn (gen_aarch64_simd_vec_unpack<su>_lo_<mode> (operands[0],
-+ operands[1], p));
-+ DONE;
-+ }
-+)
-+
-+;; Widening arithmetic.
-+
-+(define_insn "aarch64_simd_vec_<su>mult_lo_<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
-+ (match_operand:VQW 1 "register_operand" "w")
-+ (match_operand:VQW 3 "vect_par_cnst_lo_half" "")))
-+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
-+ (match_operand:VQW 2 "register_operand" "w")
-+ (match_dup 3)))))]
-+ "TARGET_SIMD"
-+ "<su>mull\\t%0.<Vwtype>, %1.<Vhalftype>, %2.<Vhalftype>"
-+ [(set_attr "simd_type" "simd_mull")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "vec_widen_<su>mult_lo_<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "")
-+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
-+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
-+ "TARGET_SIMD"
-+ {
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
-+ emit_insn (gen_aarch64_simd_vec_<su>mult_lo_<mode> (operands[0],
-+ operands[1],
-+ operands[2], p));
-+ DONE;
-+ }
-+)
-+
-+(define_insn "aarch64_simd_vec_<su>mult_hi_<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (mult:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
-+ (match_operand:VQW 1 "register_operand" "w")
-+ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
-+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
-+ (match_operand:VQW 2 "register_operand" "w")
-+ (match_dup 3)))))]
-+ "TARGET_SIMD"
-+ "<su>mull2\\t%0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_mull")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "vec_widen_<su>mult_hi_<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "")
-+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 1 "register_operand" ""))
-+ (ANY_EXTEND:<VWIDE> (match_operand:VQW 2 "register_operand" ""))]
-+ "TARGET_SIMD"
-+ {
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_simd_vec_<su>mult_hi_<mode> (operands[0],
-+ operands[1],
-+ operands[2], p));
-+ DONE;
-+
-+ }
-+)
-+
-+;; FP vector operations.
-+;; AArch64 AdvSIMD supports single-precision (32-bit) and
-+;; double-precision (64-bit) floating-point data types and arithmetic as
-+;; defined by the IEEE 754-2008 standard. This makes them vectorizable
-+;; without the need for -ffast-math or -funsafe-math-optimizations.
-+;;
-+;; Floating-point operations can raise an exception. Vectorizing such
-+;; operations are safe because of reasons explained below.
-+;;
-+;; ARMv8 permits an extension to enable trapped floating-point
-+;; exception handling, however this is an optional feature. In the
-+;; event of a floating-point exception being raised by vectorised
-+;; code then:
-+;; 1. If trapped floating-point exceptions are available, then a trap
-+;; will be taken when any lane raises an enabled exception. A trap
-+;; handler may determine which lane raised the exception.
-+;; 2. Alternatively a sticky exception flag is set in the
-+;; floating-point status register (FPSR). Software may explicitly
-+;; test the exception flags, in which case the tests will either
-+;; prevent vectorisation, allowing precise identification of the
-+;; failing operation, or if tested outside of vectorisable regions
-+;; then the specific operation and lane are not of interest.
-+
-+;; FP arithmetic operations.
-+
-+(define_insn "add<mode>3"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (plus:VDQF (match_operand:VDQF 1 "register_operand" "w")
-+ (match_operand:VDQF 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "fadd\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_fadd")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "sub<mode>3"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (minus:VDQF (match_operand:VDQF 1 "register_operand" "w")
-+ (match_operand:VDQF 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "fsub\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_fadd")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "mul<mode>3"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (mult:VDQF (match_operand:VDQF 1 "register_operand" "w")
-+ (match_operand:VDQF 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "fmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_fmul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "div<mode>3"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (div:VDQF (match_operand:VDQF 1 "register_operand" "w")
-+ (match_operand:VDQF 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "fdiv\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_fdiv")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "neg<mode>2"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (neg:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "fneg\\t%0.<Vtype>, %1.<Vtype>"
-+ [(set_attr "simd_type" "simd_fnegabs")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "abs<mode>2"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (abs:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "fabs\\t%0.<Vtype>, %1.<Vtype>"
-+ [(set_attr "simd_type" "simd_fnegabs")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "fma<mode>4"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (fma:VDQF (match_operand:VDQF 1 "register_operand" "w")
-+ (match_operand:VDQF 2 "register_operand" "w")
-+ (match_operand:VDQF 3 "register_operand" "0")))]
-+ "TARGET_SIMD"
-+ "fmla\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_fmla")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_frint<frint_suffix><mode>"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")]
-+ FRINT))]
-+ "TARGET_SIMD"
-+ "frint<frint_suffix>\\t%0.<Vtype>, %1.<Vtype>"
-+ [(set_attr "simd_type" "simd_frint")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; Vector versions of the floating-point frint patterns.
-+;; Expands to btrunc, ceil, floor, nearbyint, rint, round.
-+(define_expand "<frint_pattern><mode>2"
-+ [(set (match_operand:VDQF 0 "register_operand")
-+ (unspec:VDQF [(match_operand:VDQF 1 "register_operand")]
-+ FRINT))]
-+ "TARGET_SIMD"
-+ {})
-+
-+(define_insn "aarch64_fcvt<frint_suffix><su><mode>"
-+ [(set (match_operand:<FCVT_TARGET> 0 "register_operand" "=w")
-+ (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET>
-+ [(match_operand:VDQF 1 "register_operand" "w")]
-+ FCVT)))]
-+ "TARGET_SIMD"
-+ "fcvt<frint_suffix><su>\\t%0.<Vtype>, %1.<Vtype>"
-+ [(set_attr "simd_type" "simd_fcvti")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; Vector versions of the fcvt standard patterns.
-+;; Expands to lbtrunc, lround, lceil, lfloor
-+(define_expand "l<fcvt_pattern><su_optab><fcvt_target><VDQF:mode>2"
-+ [(set (match_operand:<FCVT_TARGET> 0 "register_operand")
-+ (FIXUORS:<FCVT_TARGET> (unspec:<FCVT_TARGET>
-+ [(match_operand:VDQF 1 "register_operand")]
-+ FCVT)))]
-+ "TARGET_SIMD"
-+ {})
-+
-+(define_insn "aarch64_vmls<mode>"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (minus:VDQF (match_operand:VDQF 1 "register_operand" "0")
-+ (mult:VDQF (match_operand:VDQF 2 "register_operand" "w")
-+ (match_operand:VDQF 3 "register_operand" "w"))))]
-+ "TARGET_SIMD"
-+ "fmls\\t%0.<Vtype>, %2.<Vtype>, %3.<Vtype>"
-+ [(set_attr "simd_type" "simd_fmla")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; FP Max/Min
-+;; Max/Min are introduced by idiom recognition by GCC's mid-end. An
-+;; expression like:
-+;; a = (b < c) ? b : c;
-+;; is idiom-matched as MIN_EXPR<b,c> only if -ffinite-math-only is enabled
-+;; either explicitly or indirectly via -ffast-math.
-+;;
-+;; MIN_EXPR and MAX_EXPR eventually map to 'smin' and 'smax' in RTL.
-+;; The 'smax' and 'smin' RTL standard pattern names do not specify which
-+;; operand will be returned when both operands are zero (i.e. they may not
-+;; honour signed zeroes), or when either operand is NaN. Therefore GCC
-+;; only introduces MIN_EXPR/MAX_EXPR in fast math mode or when not honouring
-+;; NaNs.
-+
-+(define_insn "smax<mode>3"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (smax:VDQF (match_operand:VDQF 1 "register_operand" "w")
-+ (match_operand:VDQF 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "fmaxnm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_fminmax")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "smin<mode>3"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (smin:VDQF (match_operand:VDQF 1 "register_operand" "w")
-+ (match_operand:VDQF 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "fminnm\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_fminmax")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; FP 'across lanes' max and min ops.
-+
-+(define_insn "reduc_s<fmaxminv>_v4sf"
-+ [(set (match_operand:V4SF 0 "register_operand" "=w")
-+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
-+ FMAXMINV))]
-+ "TARGET_SIMD"
-+ "f<fmaxminv>nmv\\t%s0, %1.4s";
-+ [(set_attr "simd_type" "simd_fminmaxv")
-+ (set_attr "simd_mode" "V4SF")]
-+)
-+
-+(define_insn "reduc_s<fmaxminv>_<mode>"
-+ [(set (match_operand:V2F 0 "register_operand" "=w")
-+ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
-+ FMAXMINV))]
-+ "TARGET_SIMD"
-+ "f<fmaxminv>nmp\\t%0.<Vtype>, %1.<Vtype>, %1.<Vtype>";
-+ [(set_attr "simd_type" "simd_fminmax")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; FP 'across lanes' add.
-+
-+(define_insn "aarch64_addvv4sf"
-+ [(set (match_operand:V4SF 0 "register_operand" "=w")
-+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "w")]
-+ UNSPEC_FADDV))]
-+ "TARGET_SIMD"
-+ "faddp\\t%0.4s, %1.4s, %1.4s"
-+ [(set_attr "simd_type" "simd_fadd")
-+ (set_attr "simd_mode" "V4SF")]
-+)
-+
-+(define_expand "reduc_uplus_v4sf"
-+ [(set (match_operand:V4SF 0 "register_operand" "=w")
-+ (match_operand:V4SF 1 "register_operand" "w"))]
-+ "TARGET_SIMD"
-+{
-+ rtx tmp = gen_reg_rtx (V4SFmode);
-+ emit_insn (gen_aarch64_addvv4sf (tmp, operands[1]));
-+ emit_insn (gen_aarch64_addvv4sf (operands[0], tmp));
-+ DONE;
-+})
-+
-+(define_expand "reduc_splus_v4sf"
-+ [(set (match_operand:V4SF 0 "register_operand" "=w")
-+ (match_operand:V4SF 1 "register_operand" "w"))]
-+ "TARGET_SIMD"
-+{
-+ rtx tmp = gen_reg_rtx (V4SFmode);
-+ emit_insn (gen_aarch64_addvv4sf (tmp, operands[1]));
-+ emit_insn (gen_aarch64_addvv4sf (operands[0], tmp));
-+ DONE;
-+})
-+
-+(define_insn "aarch64_addv<mode>"
-+ [(set (match_operand:V2F 0 "register_operand" "=w")
-+ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
-+ UNSPEC_FADDV))]
-+ "TARGET_SIMD"
-+ "faddp\\t%<Vetype>0, %1.<Vtype>"
-+ [(set_attr "simd_type" "simd_fadd")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "reduc_uplus_<mode>"
-+ [(set (match_operand:V2F 0 "register_operand" "=w")
-+ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
-+ UNSPEC_FADDV))]
-+ "TARGET_SIMD"
-+ ""
-+)
-+
-+(define_expand "reduc_splus_<mode>"
-+ [(set (match_operand:V2F 0 "register_operand" "=w")
-+ (unspec:V2F [(match_operand:V2F 1 "register_operand" "w")]
-+ UNSPEC_FADDV))]
-+ "TARGET_SIMD"
-+ ""
-+)
-+
-+;; Reduction across lanes.
-+
-+(define_insn "aarch64_addv<mode>"
-+ [(set (match_operand:VDQV 0 "register_operand" "=w")
-+ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
-+ UNSPEC_ADDV))]
-+ "TARGET_SIMD"
-+ "addv\\t%<Vetype>0, %1.<Vtype>"
-+ [(set_attr "simd_type" "simd_addv")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "reduc_splus_<mode>"
-+ [(set (match_operand:VDQV 0 "register_operand" "=w")
-+ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
-+ UNSPEC_ADDV))]
-+ "TARGET_SIMD"
-+ ""
-+)
-+
-+(define_expand "reduc_uplus_<mode>"
-+ [(set (match_operand:VDQV 0 "register_operand" "=w")
-+ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
-+ UNSPEC_ADDV))]
-+ "TARGET_SIMD"
-+ ""
-+)
-+
-+(define_insn "aarch64_addvv2di"
-+ [(set (match_operand:V2DI 0 "register_operand" "=w")
-+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
-+ UNSPEC_ADDV))]
-+ "TARGET_SIMD"
-+ "addp\\t%d0, %1.2d"
-+ [(set_attr "simd_type" "simd_add")
-+ (set_attr "simd_mode" "V2DI")]
-+)
-+
-+(define_expand "reduc_uplus_v2di"
-+ [(set (match_operand:V2DI 0 "register_operand" "=w")
-+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
-+ UNSPEC_ADDV))]
-+ "TARGET_SIMD"
-+ ""
-+)
-+
-+(define_expand "reduc_splus_v2di"
-+ [(set (match_operand:V2DI 0 "register_operand" "=w")
-+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "w")]
-+ UNSPEC_ADDV))]
-+ "TARGET_SIMD"
-+ ""
-+)
-+
-+(define_insn "aarch64_addvv2si"
-+ [(set (match_operand:V2SI 0 "register_operand" "=w")
-+ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
-+ UNSPEC_ADDV))]
-+ "TARGET_SIMD"
-+ "addp\\t%0.2s, %1.2s, %1.2s"
-+ [(set_attr "simd_type" "simd_add")
-+ (set_attr "simd_mode" "V2SI")]
-+)
-+
-+(define_expand "reduc_uplus_v2si"
-+ [(set (match_operand:V2SI 0 "register_operand" "=w")
-+ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
-+ UNSPEC_ADDV))]
-+ "TARGET_SIMD"
-+ ""
-+)
-+
-+(define_expand "reduc_splus_v2si"
-+ [(set (match_operand:V2SI 0 "register_operand" "=w")
-+ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
-+ UNSPEC_ADDV))]
-+ "TARGET_SIMD"
-+ ""
-+)
-+
-+(define_insn "reduc_<maxminv>_<mode>"
-+ [(set (match_operand:VDQV 0 "register_operand" "=w")
-+ (unspec:VDQV [(match_operand:VDQV 1 "register_operand" "w")]
-+ MAXMINV))]
-+ "TARGET_SIMD"
-+ "<maxminv>v\\t%<Vetype>0, %1.<Vtype>"
-+ [(set_attr "simd_type" "simd_minmaxv")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "reduc_<maxminv>_v2si"
-+ [(set (match_operand:V2SI 0 "register_operand" "=w")
-+ (unspec:V2SI [(match_operand:V2SI 1 "register_operand" "w")]
-+ MAXMINV))]
-+ "TARGET_SIMD"
-+ "<maxminv>p\\t%0.2s, %1.2s, %1.2s"
-+ [(set_attr "simd_type" "simd_minmax")
-+ (set_attr "simd_mode" "V2SI")]
-+)
-+
-+;; vbsl_* intrinsics may compile to any of bsl/bif/bit depending on register
-+;; allocation. For an intrinsic of form:
-+;; vD = bsl_* (vS, vN, vM)
-+;; We can use any of:
-+;; bsl vS, vN, vM (if D = S)
-+;; bit vD, vN, vS (if D = M, so 1-bits in vS choose bits from vN, else vM)
-+;; bif vD, vM, vS (if D = N, so 0-bits in vS choose bits from vM, else vN)
-+
-+(define_insn "aarch64_simd_bsl<mode>_internal"
-+ [(set (match_operand:VALL 0 "register_operand" "=w,w,w")
-+ (unspec:VALL
-+ [(match_operand:<V_cmp_result> 1 "register_operand" " 0,w,w")
-+ (match_operand:VALL 2 "register_operand" " w,w,0")
-+ (match_operand:VALL 3 "register_operand" " w,0,w")]
-+ UNSPEC_BSL))]
-+ "TARGET_SIMD"
-+ "@
-+ bsl\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
-+ bit\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
-+ bif\\t%0.<Vbtype>, %3.<Vbtype>, %1.<Vbtype>"
-+)
-+
-+(define_expand "aarch64_simd_bsl<mode>"
-+ [(set (match_operand:VALL 0 "register_operand")
-+ (unspec:VALL [(match_operand:<V_cmp_result> 1 "register_operand")
-+ (match_operand:VALL 2 "register_operand")
-+ (match_operand:VALL 3 "register_operand")]
-+ UNSPEC_BSL))]
-+ "TARGET_SIMD"
-+{
-+ /* We can't alias operands together if they have different modes. */
-+ operands[1] = gen_lowpart (<V_cmp_result>mode, operands[1]);
-+})
-+
-+(define_expand "aarch64_vcond_internal<mode>"
-+ [(set (match_operand:VDQ 0 "register_operand")
-+ (if_then_else:VDQ
-+ (match_operator 3 "comparison_operator"
-+ [(match_operand:VDQ 4 "register_operand")
-+ (match_operand:VDQ 5 "nonmemory_operand")])
-+ (match_operand:VDQ 1 "register_operand")
-+ (match_operand:VDQ 2 "register_operand")))]
-+ "TARGET_SIMD"
-+{
-+ int inverse = 0, has_zero_imm_form = 0;
-+ rtx mask = gen_reg_rtx (<MODE>mode);
-+
-+ switch (GET_CODE (operands[3]))
-+ {
-+ case LE:
-+ case LT:
-+ case NE:
-+ inverse = 1;
-+ /* Fall through. */
-+ case GE:
-+ case GT:
-+ case EQ:
-+ has_zero_imm_form = 1;
-+ break;
-+ case LEU:
-+ case LTU:
-+ inverse = 1;
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ if (!REG_P (operands[5])
-+ && (operands[5] != CONST0_RTX (<MODE>mode) || !has_zero_imm_form))
-+ operands[5] = force_reg (<MODE>mode, operands[5]);
-+
-+ switch (GET_CODE (operands[3]))
-+ {
-+ case LT:
-+ case GE:
-+ emit_insn (gen_aarch64_cmge<mode> (mask, operands[4], operands[5]));
-+ break;
-+
-+ case LE:
-+ case GT:
-+ emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5]));
-+ break;
-+
-+ case LTU:
-+ case GEU:
-+ emit_insn (gen_aarch64_cmhs<mode> (mask, operands[4], operands[5]));
-+ break;
-+
-+ case LEU:
-+ case GTU:
-+ emit_insn (gen_aarch64_cmhi<mode> (mask, operands[4], operands[5]));
-+ break;
-+
-+ case NE:
-+ case EQ:
-+ emit_insn (gen_aarch64_cmeq<mode> (mask, operands[4], operands[5]));
-+ break;
-+
-+ default:
-+ gcc_unreachable ();
-+ }
-+
-+ if (inverse)
-+ emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
-+ operands[1]));
-+ else
-+ emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
-+ operands[2]));
-+
-+ DONE;
-+})
-+
-+(define_expand "aarch64_vcond_internal<mode>"
-+ [(set (match_operand:VDQF 0 "register_operand")
-+ (if_then_else:VDQF
-+ (match_operator 3 "comparison_operator"
-+ [(match_operand:VDQF 4 "register_operand")
-+ (match_operand:VDQF 5 "nonmemory_operand")])
-+ (match_operand:VDQF 1 "register_operand")
-+ (match_operand:VDQF 2 "register_operand")))]
-+ "TARGET_SIMD"
-+{
-+ int inverse = 0;
-+ int use_zero_form = 0;
-+ int swap_bsl_operands = 0;
-+ rtx mask = gen_reg_rtx (<V_cmp_result>mode);
-+ rtx tmp = gen_reg_rtx (<V_cmp_result>mode);
-+
-+ rtx (*base_comparison) (rtx, rtx, rtx);
-+ rtx (*complimentary_comparison) (rtx, rtx, rtx);
-+
-+ switch (GET_CODE (operands[3]))
-+ {
-+ case GE:
-+ case GT:
-+ case LE:
-+ case LT:
-+ case EQ:
-+ if (operands[5] == CONST0_RTX (<MODE>mode))
-+ {
-+ use_zero_form = 1;
-+ break;
-+ }
-+ /* Fall through. */
-+ default:
-+ if (!REG_P (operands[5]))
-+ operands[5] = force_reg (<MODE>mode, operands[5]);
-+ }
-+
-+ switch (GET_CODE (operands[3]))
-+ {
-+ case LT:
-+ case UNLT:
-+ inverse = 1;
-+ /* Fall through. */
-+ case GE:
-+ case UNGE:
-+ case ORDERED:
-+ case UNORDERED:
-+ base_comparison = gen_aarch64_cmge<mode>;
-+ complimentary_comparison = gen_aarch64_cmgt<mode>;
-+ break;
-+ case LE:
-+ case UNLE:
-+ inverse = 1;
-+ /* Fall through. */
-+ case GT:
-+ case UNGT:
-+ base_comparison = gen_aarch64_cmgt<mode>;
-+ complimentary_comparison = gen_aarch64_cmge<mode>;
-+ break;
-+ case EQ:
-+ case NE:
-+ case UNEQ:
-+ base_comparison = gen_aarch64_cmeq<mode>;
-+ complimentary_comparison = gen_aarch64_cmeq<mode>;
-+ break;
-+ default:
-+ gcc_unreachable ();
-+ }
-+
-+ switch (GET_CODE (operands[3]))
-+ {
-+ case LT:
-+ case LE:
-+ case GT:
-+ case GE:
-+ case EQ:
-+ /* The easy case. Here we emit one of FCMGE, FCMGT or FCMEQ.
-+ As a LT b <=> b GE a && a LE b <=> b GT a. Our transformations are:
-+ a GE b -> a GE b
-+ a GT b -> a GT b
-+ a LE b -> b GE a
-+ a LT b -> b GT a
-+ a EQ b -> a EQ b
-+ Note that there also exist direct comparison against 0 forms,
-+ so catch those as a special case. */
-+ if (use_zero_form)
-+ {
-+ inverse = 0;
-+ switch (GET_CODE (operands[3]))
-+ {
-+ case LT:
-+ base_comparison = gen_aarch64_cmlt<mode>;
-+ break;
-+ case LE:
-+ base_comparison = gen_aarch64_cmle<mode>;
-+ break;
-+ default:
-+ /* Do nothing, other zero form cases already have the correct
-+ base_comparison. */
-+ break;
-+ }
-+ }
-+
-+ if (!inverse)
-+ emit_insn (base_comparison (mask, operands[4], operands[5]));
-+ else
-+ emit_insn (complimentary_comparison (mask, operands[5], operands[4]));
-+ break;
-+ case UNLT:
-+ case UNLE:
-+ case UNGT:
-+ case UNGE:
-+ case NE:
-+ /* FCM returns false for lanes which are unordered, so if we use
-+ the inverse of the comparison we actually want to emit, then
-+ swap the operands to BSL, we will end up with the correct result.
-+ Note that a NE NaN and NaN NE b are true for all a, b.
-+
-+ Our transformations are:
-+ a GE b -> !(b GT a)
-+ a GT b -> !(b GE a)
-+ a LE b -> !(a GT b)
-+ a LT b -> !(a GE b)
-+ a NE b -> !(a EQ b) */
-+
-+ if (inverse)
-+ emit_insn (base_comparison (mask, operands[4], operands[5]));
-+ else
-+ emit_insn (complimentary_comparison (mask, operands[5], operands[4]));
-+
-+ swap_bsl_operands = 1;
-+ break;
-+ case UNEQ:
-+ /* We check (a > b || b > a). combining these comparisons give us
-+ true iff !(a != b && a ORDERED b), swapping the operands to BSL
-+ will then give us (a == b || a UNORDERED b) as intended. */
-+
-+ emit_insn (gen_aarch64_cmgt<mode> (mask, operands[4], operands[5]));
-+ emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[5], operands[4]));
-+ emit_insn (gen_ior<v_cmp_result>3 (mask, mask, tmp));
-+ swap_bsl_operands = 1;
-+ break;
-+ case UNORDERED:
-+ /* Operands are ORDERED iff (a > b || b >= a).
-+ Swapping the operands to BSL will give the UNORDERED case. */
-+ swap_bsl_operands = 1;
-+ /* Fall through. */
-+ case ORDERED:
-+ emit_insn (gen_aarch64_cmgt<mode> (tmp, operands[4], operands[5]));
-+ emit_insn (gen_aarch64_cmge<mode> (mask, operands[5], operands[4]));
-+ emit_insn (gen_ior<v_cmp_result>3 (mask, mask, tmp));
-+ break;
-+ default:
-+ gcc_unreachable ();
-+ }
-+
-+ if (swap_bsl_operands)
-+ emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[2],
-+ operands[1]));
-+ else
-+ emit_insn (gen_aarch64_simd_bsl<mode> (operands[0], mask, operands[1],
-+ operands[2]));
-+ DONE;
-+})
-+
-+(define_expand "vcond<mode><mode>"
-+ [(set (match_operand:VALL 0 "register_operand")
-+ (if_then_else:VALL
-+ (match_operator 3 "comparison_operator"
-+ [(match_operand:VALL 4 "register_operand")
-+ (match_operand:VALL 5 "nonmemory_operand")])
-+ (match_operand:VALL 1 "register_operand")
-+ (match_operand:VALL 2 "register_operand")))]
-+ "TARGET_SIMD"
-+{
-+ emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
-+ operands[2], operands[3],
-+ operands[4], operands[5]));
-+ DONE;
-+})
-+
-+
-+(define_expand "vcondu<mode><mode>"
-+ [(set (match_operand:VDQ 0 "register_operand")
-+ (if_then_else:VDQ
-+ (match_operator 3 "comparison_operator"
-+ [(match_operand:VDQ 4 "register_operand")
-+ (match_operand:VDQ 5 "nonmemory_operand")])
-+ (match_operand:VDQ 1 "register_operand")
-+ (match_operand:VDQ 2 "register_operand")))]
-+ "TARGET_SIMD"
-+{
-+ emit_insn (gen_aarch64_vcond_internal<mode> (operands[0], operands[1],
-+ operands[2], operands[3],
-+ operands[4], operands[5]));
-+ DONE;
-+})
-+
-+;; Patterns for AArch64 SIMD Intrinsics.
-+
-+(define_expand "aarch64_create<mode>"
-+ [(match_operand:VD_RE 0 "register_operand" "")
-+ (match_operand:DI 1 "general_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ rtx src = gen_lowpart (<MODE>mode, operands[1]);
-+ emit_move_insn (operands[0], src);
-+ DONE;
-+})
-+
-+(define_insn "aarch64_get_lane_signed<mode>"
-+ [(set (match_operand:<VEL> 0 "register_operand" "=r")
-+ (sign_extend:<VEL>
-+ (vec_select:<VEL>
-+ (match_operand:VQ_S 1 "register_operand" "w")
-+ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
-+ "TARGET_SIMD"
-+ "smov\\t%0, %1.<Vetype>[%2]"
-+ [(set_attr "simd_type" "simd_movgp")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_get_lane_unsigned<mode>"
-+ [(set (match_operand:<VEL> 0 "register_operand" "=r")
-+ (zero_extend:<VEL>
-+ (vec_select:<VEL>
-+ (match_operand:VDQ 1 "register_operand" "w")
-+ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
-+ "TARGET_SIMD"
-+ "umov\\t%<vw>0, %1.<Vetype>[%2]"
-+ [(set_attr "simd_type" "simd_movgp")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_get_lane<mode>"
-+ [(set (match_operand:<VEL> 0 "register_operand" "=w")
-+ (vec_select:<VEL>
-+ (match_operand:VDQF 1 "register_operand" "w")
-+ (parallel [(match_operand:SI 2 "immediate_operand" "i")])))]
-+ "TARGET_SIMD"
-+ "mov\\t%0.<Vetype>[0], %1.<Vetype>[%2]"
-+ [(set_attr "simd_type" "simd_ins")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_get_lanedi"
-+ [(match_operand:DI 0 "register_operand" "=r")
-+ (match_operand:DI 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_lane_bounds (operands[2], 0, 1);
-+ emit_move_insn (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_expand "aarch64_reinterpretv8qi<mode>"
-+ [(match_operand:V8QI 0 "register_operand" "")
-+ (match_operand:VDC 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_reinterpret (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_expand "aarch64_reinterpretv4hi<mode>"
-+ [(match_operand:V4HI 0 "register_operand" "")
-+ (match_operand:VDC 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_reinterpret (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_expand "aarch64_reinterpretv2si<mode>"
-+ [(match_operand:V2SI 0 "register_operand" "")
-+ (match_operand:VDC 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_reinterpret (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_expand "aarch64_reinterpretv2sf<mode>"
-+ [(match_operand:V2SF 0 "register_operand" "")
-+ (match_operand:VDC 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_reinterpret (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_expand "aarch64_reinterpretdi<mode>"
-+ [(match_operand:DI 0 "register_operand" "")
-+ (match_operand:VD_RE 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_reinterpret (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_expand "aarch64_reinterpretv16qi<mode>"
-+ [(match_operand:V16QI 0 "register_operand" "")
-+ (match_operand:VQ 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_reinterpret (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_expand "aarch64_reinterpretv8hi<mode>"
-+ [(match_operand:V8HI 0 "register_operand" "")
-+ (match_operand:VQ 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_reinterpret (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_expand "aarch64_reinterpretv4si<mode>"
-+ [(match_operand:V4SI 0 "register_operand" "")
-+ (match_operand:VQ 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_reinterpret (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_expand "aarch64_reinterpretv4sf<mode>"
-+ [(match_operand:V4SF 0 "register_operand" "")
-+ (match_operand:VQ 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_reinterpret (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_expand "aarch64_reinterpretv2di<mode>"
-+ [(match_operand:V2DI 0 "register_operand" "")
-+ (match_operand:VQ 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_reinterpret (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_expand "aarch64_reinterpretv2df<mode>"
-+ [(match_operand:V2DF 0 "register_operand" "")
-+ (match_operand:VQ 1 "register_operand" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_reinterpret (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+;; In this insn, operand 1 should be low, and operand 2 the high part of the
-+;; dest vector.
-+
-+(define_insn "*aarch64_combinez<mode>"
-+ [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
-+ (vec_concat:<VDBL>
-+ (match_operand:VDIC 1 "register_operand" "w")
-+ (match_operand:VDIC 2 "aarch64_simd_imm_zero" "Dz")))]
-+ "TARGET_SIMD"
-+ "mov\\t%0.8b, %1.8b"
-+ [(set_attr "simd_type" "simd_move")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_combine<mode>"
-+ [(set (match_operand:<VDBL> 0 "register_operand" "=&w")
-+ (vec_concat:<VDBL> (match_operand:VDC 1 "register_operand" "w")
-+ (match_operand:VDC 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "mov\\t%0.d[0], %1.d[0]\;ins\\t%0.d[1], %2.d[0]"
-+ [(set_attr "simd_type" "simd_ins")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; <su><addsub>l<q>.
-+
-+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l2<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
-+ (match_operand:VQW 1 "register_operand" "w")
-+ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))
-+ (ANY_EXTEND:<VWIDE> (vec_select:<VHALF>
-+ (match_operand:VQW 2 "register_operand" "w")
-+ (match_dup 3)))))]
-+ "TARGET_SIMD"
-+ "<ANY_EXTEND:su><ADDSUB:optab>l2 %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_addl")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_saddl2<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:VQW 1 "register_operand" "w")
-+ (match_operand:VQW 2 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_saddl2<mode>_internal (operands[0], operands[1],
-+ operands[2], p));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_uaddl2<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:VQW 1 "register_operand" "w")
-+ (match_operand:VQW 2 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_uaddl2<mode>_internal (operands[0], operands[1],
-+ operands[2], p));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_ssubl2<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:VQW 1 "register_operand" "w")
-+ (match_operand:VQW 2 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_ssubl2<mode>_internal (operands[0], operands[1],
-+ operands[2], p));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_usubl2<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:VQW 1 "register_operand" "w")
-+ (match_operand:VQW 2 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_usubl2<mode>_internal (operands[0], operands[1],
-+ operands[2], p));
-+ DONE;
-+})
-+
-+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>l<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ADDSUB:<VWIDE> (ANY_EXTEND:<VWIDE>
-+ (match_operand:VDW 1 "register_operand" "w"))
-+ (ANY_EXTEND:<VWIDE>
-+ (match_operand:VDW 2 "register_operand" "w"))))]
-+ "TARGET_SIMD"
-+ "<ANY_EXTEND:su><ADDSUB:optab>l %0.<Vwtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_addl")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; <su><addsub>w<q>.
-+
-+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (ANY_EXTEND:<VWIDE>
-+ (match_operand:VDW 2 "register_operand" "w"))))]
-+ "TARGET_SIMD"
-+ "<ANY_EXTEND:su><ADDSUB:optab>w\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_addl")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_<ANY_EXTEND:su><ADDSUB:optab>w2<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ADDSUB:<VWIDE> (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (ANY_EXTEND:<VWIDE>
-+ (vec_select:<VHALF>
-+ (match_operand:VQW 2 "register_operand" "w")
-+ (match_operand:VQW 3 "vect_par_cnst_hi_half" "")))))]
-+ "TARGET_SIMD"
-+ "<ANY_EXTEND:su><ADDSUB:optab>w2\\t%0.<Vwtype>, %1.<Vwtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_addl")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_saddw2<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQW 2 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_saddw2<mode>_internal (operands[0], operands[1],
-+ operands[2], p));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_uaddw2<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQW 2 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_uaddw2<mode>_internal (operands[0], operands[1],
-+ operands[2], p));
-+ DONE;
-+})
-+
-+
-+(define_expand "aarch64_ssubw2<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQW 2 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_ssubw2<mode>_internal (operands[0], operands[1],
-+ operands[2], p));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_usubw2<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQW 2 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_usubw2<mode>_internal (operands[0], operands[1],
-+ operands[2], p));
-+ DONE;
-+})
-+
-+;; <su><r>h<addsub>.
-+
-+(define_insn "aarch64_<sur>h<addsub><mode>"
-+ [(set (match_operand:VQ_S 0 "register_operand" "=w")
-+ (unspec:VQ_S [(match_operand:VQ_S 1 "register_operand" "w")
-+ (match_operand:VQ_S 2 "register_operand" "w")]
-+ HADDSUB))]
-+ "TARGET_SIMD"
-+ "<sur>h<addsub>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_add")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; <r><addsub>hn<q>.
-+
-+(define_insn "aarch64_<sur><addsub>hn<mode>"
-+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
-+ (unspec:<VNARROWQ> [(match_operand:VQN 1 "register_operand" "w")
-+ (match_operand:VQN 2 "register_operand" "w")]
-+ ADDSUBHN))]
-+ "TARGET_SIMD"
-+ "<sur><addsub>hn\\t%0.<Vntype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_addn")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_<sur><addsub>hn2<mode>"
-+ [(set (match_operand:<VNARROWQ2> 0 "register_operand" "=w")
-+ (unspec:<VNARROWQ2> [(match_operand:<VNARROWQ> 1 "register_operand" "0")
-+ (match_operand:VQN 2 "register_operand" "w")
-+ (match_operand:VQN 3 "register_operand" "w")]
-+ ADDSUBHN2))]
-+ "TARGET_SIMD"
-+ "<sur><addsub>hn2\\t%0.<V2ntype>, %2.<Vtype>, %3.<Vtype>"
-+ [(set_attr "simd_type" "simd_addn2")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; pmul.
-+
-+(define_insn "aarch64_pmul<mode>"
-+ [(set (match_operand:VB 0 "register_operand" "=w")
-+ (unspec:VB [(match_operand:VB 1 "register_operand" "w")
-+ (match_operand:VB 2 "register_operand" "w")]
-+ UNSPEC_PMUL))]
-+ "TARGET_SIMD"
-+ "pmul\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; <su>q<addsub>
-+
-+(define_insn "aarch64_<su_optab><optab><mode>"
-+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
-+ (BINQOPS:VSDQ_I (match_operand:VSDQ_I 1 "register_operand" "w")
-+ (match_operand:VSDQ_I 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "<su_optab><optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
-+ [(set_attr "simd_type" "simd_add")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; suqadd and usqadd
-+
-+(define_insn "aarch64_<sur>qadd<mode>"
-+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
-+ (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "0")
-+ (match_operand:VSDQ_I 2 "register_operand" "w")]
-+ USSUQADD))]
-+ "TARGET_SIMD"
-+ "<sur>qadd\\t%<v>0<Vmtype>, %<v>2<Vmtype>"
-+ [(set_attr "simd_type" "simd_sat_add")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; sqmovun
-+
-+(define_insn "aarch64_sqmovun<mode>"
-+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
-+ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
-+ UNSPEC_SQXTUN))]
-+ "TARGET_SIMD"
-+ "sqxtun\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
-+ [(set_attr "simd_type" "simd_sat_shiftn_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+ )
-+
-+;; sqmovn and uqmovn
-+
-+(define_insn "aarch64_<sur>qmovn<mode>"
-+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
-+ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")]
-+ SUQMOVN))]
-+ "TARGET_SIMD"
-+ "<sur>qxtn\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>"
-+ [(set_attr "simd_type" "simd_sat_shiftn_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+ )
-+
-+;; <su>q<absneg>
-+
-+(define_insn "aarch64_s<optab><mode>"
-+ [(set (match_operand:VSDQ_I_BHSI 0 "register_operand" "=w")
-+ (UNQOPS:VSDQ_I_BHSI
-+ (match_operand:VSDQ_I_BHSI 1 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "s<optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>"
-+ [(set_attr "simd_type" "simd_sat_negabs")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; sq<r>dmulh.
-+
-+(define_insn "aarch64_sq<r>dmulh<mode>"
-+ [(set (match_operand:VSDQ_HSI 0 "register_operand" "=w")
-+ (unspec:VSDQ_HSI
-+ [(match_operand:VSDQ_HSI 1 "register_operand" "w")
-+ (match_operand:VSDQ_HSI 2 "register_operand" "w")]
-+ VQDMULH))]
-+ "TARGET_SIMD"
-+ "sq<r>dmulh\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
-+ [(set_attr "simd_type" "simd_sat_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; sq<r>dmulh_lane
-+
-+(define_insn "aarch64_sq<r>dmulh_lane<mode>"
-+ [(set (match_operand:VDQHS 0 "register_operand" "=w")
-+ (unspec:VDQHS
-+ [(match_operand:VDQHS 1 "register_operand" "w")
-+ (vec_select:<VEL>
-+ (match_operand:<VCOND> 2 "register_operand" "<vwx>")
-+ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
-+ VQDMULH))]
-+ "TARGET_SIMD"
-+ "*
-+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCOND>mode));
-+ return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";"
-+ [(set_attr "simd_type" "simd_sat_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_sq<r>dmulh_laneq<mode>"
-+ [(set (match_operand:VDQHS 0 "register_operand" "=w")
-+ (unspec:VDQHS
-+ [(match_operand:VDQHS 1 "register_operand" "w")
-+ (vec_select:<VEL>
-+ (match_operand:<VCONQ> 2 "register_operand" "<vwx>")
-+ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
-+ VQDMULH))]
-+ "TARGET_SIMD"
-+ "*
-+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
-+ return \"sq<r>dmulh\\t%0.<Vtype>, %1.<Vtype>, %2.<Vetype>[%3]\";"
-+ [(set_attr "simd_type" "simd_sat_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_sq<r>dmulh_lane<mode>"
-+ [(set (match_operand:SD_HSI 0 "register_operand" "=w")
-+ (unspec:SD_HSI
-+ [(match_operand:SD_HSI 1 "register_operand" "w")
-+ (vec_select:<VEL>
-+ (match_operand:<VCONQ> 2 "register_operand" "<vwx>")
-+ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))]
-+ VQDMULH))]
-+ "TARGET_SIMD"
-+ "*
-+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCONQ>mode));
-+ return \"sq<r>dmulh\\t%<v>0, %<v>1, %2.<v>[%3]\";"
-+ [(set_attr "simd_type" "simd_sat_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; vqdml[sa]l
-+
-+(define_insn "aarch64_sqdml<SBINQOPS:as>l<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (SBINQOPS:<VWIDE>
-+ (match_operand:<VWIDE> 1 "register_operand" "0")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (match_operand:VSD_HSI 2 "register_operand" "w"))
-+ (sign_extend:<VWIDE>
-+ (match_operand:VSD_HSI 3 "register_operand" "w")))
-+ (const_int 1))))]
-+ "TARGET_SIMD"
-+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
-+ [(set_attr "simd_type" "simd_sat_mlal")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; vqdml[sa]l_lane
-+
-+(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (SBINQOPS:<VWIDE>
-+ (match_operand:<VWIDE> 1 "register_operand" "0")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (match_operand:VD_HSI 2 "register_operand" "w"))
-+ (sign_extend:<VWIDE>
-+ (vec_duplicate:VD_HSI
-+ (vec_select:<VEL>
-+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
-+ (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
-+ ))
-+ (const_int 1))))]
-+ "TARGET_SIMD"
-+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
-+ [(set_attr "simd_type" "simd_sat_mlal")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_sqdml<SBINQOPS:as>l_lane<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (SBINQOPS:<VWIDE>
-+ (match_operand:<VWIDE> 1 "register_operand" "0")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (match_operand:SD_HSI 2 "register_operand" "w"))
-+ (sign_extend:<VWIDE>
-+ (vec_select:<VEL>
-+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
-+ (parallel [(match_operand:SI 4 "immediate_operand" "i")])))
-+ )
-+ (const_int 1))))]
-+ "TARGET_SIMD"
-+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
-+ [(set_attr "simd_type" "simd_sat_mlal")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_sqdmlal_lane<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "0")
-+ (match_operand:VSD_HSI 2 "register_operand" "w")
-+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
-+ (match_operand:SI 4 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
-+ emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ operands[4]));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_sqdmlal_laneq<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "0")
-+ (match_operand:VSD_HSI 2 "register_operand" "w")
-+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
-+ (match_operand:SI 4 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
-+ emit_insn (gen_aarch64_sqdmlal_lane<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ operands[4]));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_sqdmlsl_lane<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "0")
-+ (match_operand:VSD_HSI 2 "register_operand" "w")
-+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
-+ (match_operand:SI 4 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
-+ emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ operands[4]));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_sqdmlsl_laneq<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "0")
-+ (match_operand:VSD_HSI 2 "register_operand" "w")
-+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
-+ (match_operand:SI 4 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<VCON>mode));
-+ emit_insn (gen_aarch64_sqdmlsl_lane<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ operands[4]));
-+ DONE;
-+})
-+
-+;; vqdml[sa]l_n
-+
-+(define_insn "aarch64_sqdml<SBINQOPS:as>l_n<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (SBINQOPS:<VWIDE>
-+ (match_operand:<VWIDE> 1 "register_operand" "0")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (match_operand:VD_HSI 2 "register_operand" "w"))
-+ (sign_extend:<VWIDE>
-+ (vec_duplicate:VD_HSI
-+ (match_operand:<VEL> 3 "register_operand" "w"))))
-+ (const_int 1))))]
-+ "TARGET_SIMD"
-+ "sqdml<SBINQOPS:as>l\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
-+ [(set_attr "simd_type" "simd_sat_mlal")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; sqdml[as]l2
-+
-+(define_insn "aarch64_sqdml<SBINQOPS:as>l2<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (SBINQOPS:<VWIDE>
-+ (match_operand:<VWIDE> 1 "register_operand" "0")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (vec_select:<VHALF>
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
-+ (sign_extend:<VWIDE>
-+ (vec_select:<VHALF>
-+ (match_operand:VQ_HSI 3 "register_operand" "w")
-+ (match_dup 4))))
-+ (const_int 1))))]
-+ "TARGET_SIMD"
-+ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %<v>3<Vmtype>"
-+ [(set_attr "simd_type" "simd_sat_mlal")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_sqdmlal2<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_operand:VQ_HSI 3 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_sqdmlal2<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3], p));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_sqdmlsl2<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_operand:VQ_HSI 3 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_sqdmlsl2<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3], p));
-+ DONE;
-+})
-+
-+;; vqdml[sa]l2_lane
-+
-+(define_insn "aarch64_sqdml<SBINQOPS:as>l2_lane<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (SBINQOPS:<VWIDE>
-+ (match_operand:<VWIDE> 1 "register_operand" "0")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (vec_select:<VHALF>
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_operand:VQ_HSI 5 "vect_par_cnst_hi_half" "")))
-+ (sign_extend:<VWIDE>
-+ (vec_duplicate:<VHALF>
-+ (vec_select:<VEL>
-+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
-+ (parallel [(match_operand:SI 4 "immediate_operand" "i")])
-+ ))))
-+ (const_int 1))))]
-+ "TARGET_SIMD"
-+ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[%4]"
-+ [(set_attr "simd_type" "simd_sat_mlal")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_sqdmlal2_lane<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
-+ (match_operand:SI 4 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
-+ emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ operands[4], p));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_sqdmlal2_laneq<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
-+ (match_operand:SI 4 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
-+ emit_insn (gen_aarch64_sqdmlal2_lane<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ operands[4], p));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_sqdmlsl2_lane<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
-+ (match_operand:SI 4 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
-+ emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ operands[4], p));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_sqdmlsl2_laneq<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_operand:<VCON> 3 "register_operand" "<vwx>")
-+ (match_operand:SI 4 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ aarch64_simd_lane_bounds (operands[4], 0, GET_MODE_NUNITS (<MODE>mode));
-+ emit_insn (gen_aarch64_sqdmlsl2_lane<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ operands[4], p));
-+ DONE;
-+})
-+
-+(define_insn "aarch64_sqdml<SBINQOPS:as>l2_n<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (SBINQOPS:<VWIDE>
-+ (match_operand:<VWIDE> 1 "register_operand" "0")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (vec_select:<VHALF>
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
-+ (sign_extend:<VWIDE>
-+ (vec_duplicate:<VHALF>
-+ (match_operand:<VEL> 3 "register_operand" "w"))))
-+ (const_int 1))))]
-+ "TARGET_SIMD"
-+ "sqdml<SBINQOPS:as>l2\\t%<vw2>0<Vmwtype>, %<v>2<Vmtype>, %3.<Vetype>[0]"
-+ [(set_attr "simd_type" "simd_sat_mlal")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_sqdmlal2_n<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_operand:<VEL> 3 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_sqdmlal2_n<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ p));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_sqdmlsl2_n<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:<VWIDE> 1 "register_operand" "w")
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_operand:<VEL> 3 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_sqdmlsl2_n<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ p));
-+ DONE;
-+})
-+
-+;; vqdmull
-+
-+(define_insn "aarch64_sqdmull<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (match_operand:VSD_HSI 1 "register_operand" "w"))
-+ (sign_extend:<VWIDE>
-+ (match_operand:VSD_HSI 2 "register_operand" "w")))
-+ (const_int 1)))]
-+ "TARGET_SIMD"
-+ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
-+ [(set_attr "simd_type" "simd_sat_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; vqdmull_lane
-+
-+(define_insn "aarch64_sqdmull_lane<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (match_operand:VD_HSI 1 "register_operand" "w"))
-+ (sign_extend:<VWIDE>
-+ (vec_duplicate:VD_HSI
-+ (vec_select:<VEL>
-+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
-+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
-+ ))
-+ (const_int 1)))]
-+ "TARGET_SIMD"
-+ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
-+ [(set_attr "simd_type" "simd_sat_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_sqdmull_lane<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (match_operand:SD_HSI 1 "register_operand" "w"))
-+ (sign_extend:<VWIDE>
-+ (vec_select:<VEL>
-+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
-+ (parallel [(match_operand:SI 3 "immediate_operand" "i")]))
-+ ))
-+ (const_int 1)))]
-+ "TARGET_SIMD"
-+ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
-+ [(set_attr "simd_type" "simd_sat_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_sqdmull_lane<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:VSD_HSI 1 "register_operand" "w")
-+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
-+ (match_operand:SI 3 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode) / 2);
-+ emit_insn (gen_aarch64_sqdmull_lane<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3]));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_sqdmull_laneq<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:VD_HSI 1 "register_operand" "w")
-+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
-+ (match_operand:SI 3 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<VCON>mode));
-+ emit_insn (gen_aarch64_sqdmull_lane<mode>_internal
-+ (operands[0], operands[1], operands[2], operands[3]));
-+ DONE;
-+})
-+
-+;; vqdmull_n
-+
-+(define_insn "aarch64_sqdmull_n<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (match_operand:VD_HSI 1 "register_operand" "w"))
-+ (sign_extend:<VWIDE>
-+ (vec_duplicate:VD_HSI
-+ (match_operand:<VEL> 2 "register_operand" "w")))
-+ )
-+ (const_int 1)))]
-+ "TARGET_SIMD"
-+ "sqdmull\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
-+ [(set_attr "simd_type" "simd_sat_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; vqdmull2
-+
-+
-+
-+(define_insn "aarch64_sqdmull2<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (vec_select:<VHALF>
-+ (match_operand:VQ_HSI 1 "register_operand" "w")
-+ (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
-+ (sign_extend:<VWIDE>
-+ (vec_select:<VHALF>
-+ (match_operand:VQ_HSI 2 "register_operand" "w")
-+ (match_dup 3)))
-+ )
-+ (const_int 1)))]
-+ "TARGET_SIMD"
-+ "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
-+ [(set_attr "simd_type" "simd_sat_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_sqdmull2<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:VQ_HSI 1 "register_operand" "w")
-+ (match_operand:<VCON> 2 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_sqdmull2<mode>_internal (operands[0], operands[1],
-+ operands[2], p));
-+ DONE;
-+})
-+
-+;; vqdmull2_lane
-+
-+(define_insn "aarch64_sqdmull2_lane<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (vec_select:<VHALF>
-+ (match_operand:VQ_HSI 1 "register_operand" "w")
-+ (match_operand:VQ_HSI 4 "vect_par_cnst_hi_half" "")))
-+ (sign_extend:<VWIDE>
-+ (vec_duplicate:<VHALF>
-+ (vec_select:<VEL>
-+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
-+ (parallel [(match_operand:SI 3 "immediate_operand" "i")])))
-+ ))
-+ (const_int 1)))]
-+ "TARGET_SIMD"
-+ "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[%3]"
-+ [(set_attr "simd_type" "simd_sat_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_sqdmull2_lane<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:VQ_HSI 1 "register_operand" "w")
-+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
-+ (match_operand:SI 3 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode) / 2);
-+ emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ p));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_sqdmull2_laneq<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:VQ_HSI 1 "register_operand" "w")
-+ (match_operand:<VCON> 2 "register_operand" "<vwx>")
-+ (match_operand:SI 3 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ aarch64_simd_lane_bounds (operands[3], 0, GET_MODE_NUNITS (<MODE>mode));
-+ emit_insn (gen_aarch64_sqdmull2_lane<mode>_internal (operands[0], operands[1],
-+ operands[2], operands[3],
-+ p));
-+ DONE;
-+})
-+
-+;; vqdmull2_n
-+
-+(define_insn "aarch64_sqdmull2_n<mode>_internal"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (ss_ashift:<VWIDE>
-+ (mult:<VWIDE>
-+ (sign_extend:<VWIDE>
-+ (vec_select:<VHALF>
-+ (match_operand:VQ_HSI 1 "register_operand" "w")
-+ (match_operand:VQ_HSI 3 "vect_par_cnst_hi_half" "")))
-+ (sign_extend:<VWIDE>
-+ (vec_duplicate:<VHALF>
-+ (match_operand:<VEL> 2 "register_operand" "w")))
-+ )
-+ (const_int 1)))]
-+ "TARGET_SIMD"
-+ "sqdmull2\\t%<vw2>0<Vmwtype>, %<v>1<Vmtype>, %2.<Vetype>[0]"
-+ [(set_attr "simd_type" "simd_sat_mul")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_sqdmull2_n<mode>"
-+ [(match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (match_operand:VQ_HSI 1 "register_operand" "w")
-+ (match_operand:<VEL> 2 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ rtx p = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
-+ emit_insn (gen_aarch64_sqdmull2_n<mode>_internal (operands[0], operands[1],
-+ operands[2], p));
-+ DONE;
-+})
-+
-+;; vshl
-+
-+(define_insn "aarch64_<sur>shl<mode>"
-+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-+ (unspec:VSDQ_I_DI
-+ [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
-+ (match_operand:VSDQ_I_DI 2 "register_operand" "w")]
-+ VSHL))]
-+ "TARGET_SIMD"
-+ "<sur>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>";
-+ [(set_attr "simd_type" "simd_shift")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+
-+;; vqshl
-+
-+(define_insn "aarch64_<sur>q<r>shl<mode>"
-+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
-+ (unspec:VSDQ_I
-+ [(match_operand:VSDQ_I 1 "register_operand" "w")
-+ (match_operand:VSDQ_I 2 "register_operand" "w")]
-+ VQSHL))]
-+ "TARGET_SIMD"
-+ "<sur>q<r>shl\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>";
-+ [(set_attr "simd_type" "simd_sat_shift")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; vshl_n
-+
-+(define_expand "aarch64_sshl_n<mode>"
-+ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-+ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_ushl_n<mode>"
-+ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-+ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ emit_insn (gen_ashl<mode>3 (operands[0], operands[1], operands[2]));
-+ DONE;
-+})
-+
-+;; vshll_n
-+
-+(define_insn "aarch64_<sur>shll_n<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (unspec:<VWIDE> [(match_operand:VDW 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ VSHLL))]
-+ "TARGET_SIMD"
-+ "*
-+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-+ aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
-+ if (INTVAL (operands[2]) == bit_width)
-+ {
-+ return \"shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
-+ }
-+ else {
-+ return \"<sur>shll\\t%0.<Vwtype>, %1.<Vtype>, %2\";
-+ }"
-+ [(set_attr "simd_type" "simd_shift_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; vshll_high_n
-+
-+(define_insn "aarch64_<sur>shll2_n<mode>"
-+ [(set (match_operand:<VWIDE> 0 "register_operand" "=w")
-+ (unspec:<VWIDE> [(match_operand:VQW 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ VSHLL))]
-+ "TARGET_SIMD"
-+ "*
-+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-+ aarch64_simd_const_bounds (operands[2], 0, bit_width + 1);
-+ if (INTVAL (operands[2]) == bit_width)
-+ {
-+ return \"shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
-+ }
-+ else {
-+ return \"<sur>shll2\\t%0.<Vwtype>, %1.<Vtype>, %2\";
-+ }"
-+ [(set_attr "simd_type" "simd_shift_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; vshr_n
-+
-+(define_expand "aarch64_sshr_n<mode>"
-+ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-+ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ emit_insn (gen_ashr<mode>3 (operands[0], operands[1], operands[2]));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_ushr_n<mode>"
-+ [(match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-+ (match_operand:VSDQ_I_DI 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ emit_insn (gen_lshr<mode>3 (operands[0], operands[1], operands[2]));
-+ DONE;
-+})
-+
-+;; vrshr_n
-+
-+(define_insn "aarch64_<sur>shr_n<mode>"
-+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-+ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ VRSHR_N))]
-+ "TARGET_SIMD"
-+ "*
-+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-+ aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
-+ return \"<sur>shr\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
-+ [(set_attr "simd_type" "simd_shift_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; v(r)sra_n
-+
-+(define_insn "aarch64_<sur>sra_n<mode>"
-+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-+ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
-+ (match_operand:VSDQ_I_DI 2 "register_operand" "w")
-+ (match_operand:SI 3 "immediate_operand" "i")]
-+ VSRA))]
-+ "TARGET_SIMD"
-+ "*
-+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-+ aarch64_simd_const_bounds (operands[3], 1, bit_width + 1);
-+ return \"<sur>sra\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
-+ [(set_attr "simd_type" "simd_shift_imm_acc")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; vs<lr>i_n
-+
-+(define_insn "aarch64_<sur>s<lr>i_n<mode>"
-+ [(set (match_operand:VSDQ_I_DI 0 "register_operand" "=w")
-+ (unspec:VSDQ_I_DI [(match_operand:VSDQ_I_DI 1 "register_operand" "0")
-+ (match_operand:VSDQ_I_DI 2 "register_operand" "w")
-+ (match_operand:SI 3 "immediate_operand" "i")]
-+ VSLRI))]
-+ "TARGET_SIMD"
-+ "*
-+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-+ aarch64_simd_const_bounds (operands[3], 1 - <VSLRI:offsetlr>,
-+ bit_width - <VSLRI:offsetlr> + 1);
-+ return \"s<lr>i\\t%<v>0<Vmtype>, %<v>2<Vmtype>, %3\";"
-+ [(set_attr "simd_type" "simd_shift_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; vqshl(u)
-+
-+(define_insn "aarch64_<sur>qshl<u>_n<mode>"
-+ [(set (match_operand:VSDQ_I 0 "register_operand" "=w")
-+ (unspec:VSDQ_I [(match_operand:VSDQ_I 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ VQSHL_N))]
-+ "TARGET_SIMD"
-+ "*
-+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-+ aarch64_simd_const_bounds (operands[2], 0, bit_width);
-+ return \"<sur>qshl<u>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %2\";"
-+ [(set_attr "simd_type" "simd_sat_shift_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+
-+;; vq(r)shr(u)n_n
-+
-+(define_insn "aarch64_<sur>q<r>shr<u>n_n<mode>"
-+ [(set (match_operand:<VNARROWQ> 0 "register_operand" "=w")
-+ (unspec:<VNARROWQ> [(match_operand:VSQN_HSDI 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ VQSHRN_N))]
-+ "TARGET_SIMD"
-+ "*
-+ int bit_width = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
-+ aarch64_simd_const_bounds (operands[2], 1, bit_width + 1);
-+ return \"<sur>q<r>shr<u>n\\t%<vn2>0<Vmntype>, %<v>1<Vmtype>, %2\";"
-+ [(set_attr "simd_type" "simd_sat_shiftn_imm")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+
-+;; cm(eq|ge|le|lt|gt)
-+
-+(define_insn "aarch64_cm<cmp><mode>"
-+ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
-+ (unspec:<V_cmp_result>
-+ [(match_operand:VSDQ_I_DI 1 "register_operand" "w,w")
-+ (match_operand:VSDQ_I_DI 2 "aarch64_simd_reg_or_zero" "w,Z")]
-+ VCMP_S))]
-+ "TARGET_SIMD"
-+ "@
-+ cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
-+ cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, #0"
-+ [(set_attr "simd_type" "simd_cmp")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; cm(hs|hi|tst)
-+
-+(define_insn "aarch64_cm<cmp><mode>"
-+ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w")
-+ (unspec:<V_cmp_result>
-+ [(match_operand:VSDQ_I_DI 1 "register_operand" "w")
-+ (match_operand:VSDQ_I_DI 2 "register_operand" "w")]
-+ VCMP_U))]
-+ "TARGET_SIMD"
-+ "cm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
-+ [(set_attr "simd_type" "simd_cmp")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; fcm(eq|ge|le|lt|gt)
-+
-+(define_insn "aarch64_cm<cmp><mode>"
-+ [(set (match_operand:<V_cmp_result> 0 "register_operand" "=w,w")
-+ (unspec:<V_cmp_result>
-+ [(match_operand:VDQF 1 "register_operand" "w,w")
-+ (match_operand:VDQF 2 "aarch64_simd_reg_or_zero" "w,Dz")]
-+ VCMP_S))]
-+ "TARGET_SIMD"
-+ "@
-+ fcm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>
-+ fcm<cmp>\t%<v>0<Vmtype>, %<v>1<Vmtype>, 0"
-+ [(set_attr "simd_type" "simd_fcmp")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; addp
-+
-+(define_insn "aarch64_addp<mode>"
-+ [(set (match_operand:VD_BHSI 0 "register_operand" "=w")
-+ (unspec:VD_BHSI
-+ [(match_operand:VD_BHSI 1 "register_operand" "w")
-+ (match_operand:VD_BHSI 2 "register_operand" "w")]
-+ UNSPEC_ADDP))]
-+ "TARGET_SIMD"
-+ "addp\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
-+ [(set_attr "simd_type" "simd_add")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_addpdi"
-+ [(set (match_operand:DI 0 "register_operand" "=w")
-+ (unspec:DI
-+ [(match_operand:V2DI 1 "register_operand" "w")]
-+ UNSPEC_ADDP))]
-+ "TARGET_SIMD"
-+ "addp\t%d0, %1.2d"
-+ [(set_attr "simd_type" "simd_add")
-+ (set_attr "simd_mode" "DI")]
-+)
-+
-+;; v(max|min)
-+
-+(define_expand "aarch64_<maxmin><mode>"
-+ [(set (match_operand:VDQ_BHSI 0 "register_operand" "=w")
-+ (MAXMIN:VDQ_BHSI (match_operand:VDQ_BHSI 1 "register_operand" "w")
-+ (match_operand:VDQ_BHSI 2 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+{
-+ emit_insn (gen_<maxmin><mode>3 (operands[0], operands[1], operands[2]));
-+ DONE;
-+})
-+
-+
-+(define_insn "aarch64_<fmaxmin><mode>"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (unspec:VDQF [(match_operand:VDQF 1 "register_operand" "w")
-+ (match_operand:VDQF 2 "register_operand" "w")]
-+ FMAXMIN))]
-+ "TARGET_SIMD"
-+ "<fmaxmin>\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_fminmax")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; sqrt
-+
-+(define_insn "sqrt<mode>2"
-+ [(set (match_operand:VDQF 0 "register_operand" "=w")
-+ (sqrt:VDQF (match_operand:VDQF 1 "register_operand" "w")))]
-+ "TARGET_SIMD"
-+ "fsqrt\\t%0.<Vtype>, %1.<Vtype>"
-+ [(set_attr "simd_type" "simd_fsqrt")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_expand "aarch64_sqrt<mode>"
-+ [(match_operand:VDQF 0 "register_operand" "=w")
-+ (match_operand:VDQF 1 "register_operand" "w")]
-+ "TARGET_SIMD"
-+{
-+ emit_insn (gen_sqrt<mode>2 (operands[0], operands[1]));
-+ DONE;
-+})
-+
-+
-+;; Patterns for vector struct loads and stores.
-+
-+(define_insn "vec_load_lanesoi<mode>"
-+ [(set (match_operand:OI 0 "register_operand" "=w")
-+ (unspec:OI [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")
-+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_LD2))]
-+ "TARGET_SIMD"
-+ "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
-+ [(set_attr "simd_type" "simd_load2")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "vec_store_lanesoi<mode>"
-+ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
-+ (unspec:OI [(match_operand:OI 1 "register_operand" "w")
-+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_ST2))]
-+ "TARGET_SIMD"
-+ "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
-+ [(set_attr "simd_type" "simd_store2")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "vec_load_lanesci<mode>"
-+ [(set (match_operand:CI 0 "register_operand" "=w")
-+ (unspec:CI [(match_operand:CI 1 "aarch64_simd_struct_operand" "Utv")
-+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_LD3))]
-+ "TARGET_SIMD"
-+ "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
-+ [(set_attr "simd_type" "simd_load3")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "vec_store_lanesci<mode>"
-+ [(set (match_operand:CI 0 "aarch64_simd_struct_operand" "=Utv")
-+ (unspec:CI [(match_operand:CI 1 "register_operand" "w")
-+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_ST3))]
-+ "TARGET_SIMD"
-+ "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
-+ [(set_attr "simd_type" "simd_store3")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "vec_load_lanesxi<mode>"
-+ [(set (match_operand:XI 0 "register_operand" "=w")
-+ (unspec:XI [(match_operand:XI 1 "aarch64_simd_struct_operand" "Utv")
-+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_LD4))]
-+ "TARGET_SIMD"
-+ "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
-+ [(set_attr "simd_type" "simd_load4")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "vec_store_lanesxi<mode>"
-+ [(set (match_operand:XI 0 "aarch64_simd_struct_operand" "=Utv")
-+ (unspec:XI [(match_operand:XI 1 "register_operand" "w")
-+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_ST4))]
-+ "TARGET_SIMD"
-+ "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
-+ [(set_attr "simd_type" "simd_store4")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+;; Reload patterns for AdvSIMD register list operands.
-+
-+(define_expand "mov<mode>"
-+ [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "")
-+ (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" ""))]
-+ "TARGET_SIMD"
-+{
-+ if (can_create_pseudo_p ())
-+ {
-+ if (GET_CODE (operands[0]) != REG)
-+ operands[1] = force_reg (<MODE>mode, operands[1]);
-+ }
-+})
-+
-+(define_insn "*aarch64_mov<mode>"
-+ [(set (match_operand:VSTRUCT 0 "aarch64_simd_nonimmediate_operand" "=w,Utv,w")
-+ (match_operand:VSTRUCT 1 "aarch64_simd_general_operand" " w,w,Utv"))]
-+ "TARGET_SIMD
-+ && (register_operand (operands[0], <MODE>mode)
-+ || register_operand (operands[1], <MODE>mode))"
-+
-+{
-+ switch (which_alternative)
-+ {
-+ case 0: return "#";
-+ case 1: return "st1\\t{%S1.16b - %<Vendreg>1.16b}, %0";
-+ case 2: return "ld1\\t{%S0.16b - %<Vendreg>0.16b}, %1";
-+ default: gcc_unreachable ();
-+ }
-+}
-+ [(set_attr "simd_type" "simd_move,simd_store<nregs>,simd_load<nregs>")
-+ (set (attr "length") (symbol_ref "aarch64_simd_attr_length_move (insn)"))
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_split
-+ [(set (match_operand:OI 0 "register_operand" "")
-+ (match_operand:OI 1 "register_operand" ""))]
-+ "TARGET_SIMD && reload_completed"
-+ [(set (match_dup 0) (match_dup 1))
-+ (set (match_dup 2) (match_dup 3))]
-+{
-+ int rdest = REGNO (operands[0]);
-+ int rsrc = REGNO (operands[1]);
-+ rtx dest[2], src[2];
-+
-+ dest[0] = gen_rtx_REG (TFmode, rdest);
-+ src[0] = gen_rtx_REG (TFmode, rsrc);
-+ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
-+ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
-+
-+ aarch64_simd_disambiguate_copy (operands, dest, src, 2);
-+})
-+
-+(define_split
-+ [(set (match_operand:CI 0 "register_operand" "")
-+ (match_operand:CI 1 "register_operand" ""))]
-+ "TARGET_SIMD && reload_completed"
-+ [(set (match_dup 0) (match_dup 1))
-+ (set (match_dup 2) (match_dup 3))
-+ (set (match_dup 4) (match_dup 5))]
-+{
-+ int rdest = REGNO (operands[0]);
-+ int rsrc = REGNO (operands[1]);
-+ rtx dest[3], src[3];
-+
-+ dest[0] = gen_rtx_REG (TFmode, rdest);
-+ src[0] = gen_rtx_REG (TFmode, rsrc);
-+ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
-+ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
-+ dest[2] = gen_rtx_REG (TFmode, rdest + 2);
-+ src[2] = gen_rtx_REG (TFmode, rsrc + 2);
-+
-+ aarch64_simd_disambiguate_copy (operands, dest, src, 3);
-+})
-+
-+(define_split
-+ [(set (match_operand:XI 0 "register_operand" "")
-+ (match_operand:XI 1 "register_operand" ""))]
-+ "TARGET_SIMD && reload_completed"
-+ [(set (match_dup 0) (match_dup 1))
-+ (set (match_dup 2) (match_dup 3))
-+ (set (match_dup 4) (match_dup 5))
-+ (set (match_dup 6) (match_dup 7))]
-+{
-+ int rdest = REGNO (operands[0]);
-+ int rsrc = REGNO (operands[1]);
-+ rtx dest[4], src[4];
-+
-+ dest[0] = gen_rtx_REG (TFmode, rdest);
-+ src[0] = gen_rtx_REG (TFmode, rsrc);
-+ dest[1] = gen_rtx_REG (TFmode, rdest + 1);
-+ src[1] = gen_rtx_REG (TFmode, rsrc + 1);
-+ dest[2] = gen_rtx_REG (TFmode, rdest + 2);
-+ src[2] = gen_rtx_REG (TFmode, rsrc + 2);
-+ dest[3] = gen_rtx_REG (TFmode, rdest + 3);
-+ src[3] = gen_rtx_REG (TFmode, rsrc + 3);
-+
-+ aarch64_simd_disambiguate_copy (operands, dest, src, 4);
-+})
-+
-+(define_insn "aarch64_ld2<mode>_dreg"
-+ [(set (match_operand:OI 0 "register_operand" "=w")
-+ (subreg:OI
-+ (vec_concat:<VRL2>
-+ (vec_concat:<VDBL>
-+ (unspec:VD [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
-+ UNSPEC_LD2)
-+ (vec_duplicate:VD (const_int 0)))
-+ (vec_concat:<VDBL>
-+ (unspec:VD [(match_dup 1)]
-+ UNSPEC_LD2)
-+ (vec_duplicate:VD (const_int 0)))) 0))]
-+ "TARGET_SIMD"
-+ "ld2\\t{%S0.<Vtype> - %T0.<Vtype>}, %1"
-+ [(set_attr "simd_type" "simd_load2")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "aarch64_ld2<mode>_dreg"
-+ [(set (match_operand:OI 0 "register_operand" "=w")
-+ (subreg:OI
-+ (vec_concat:<VRL2>
-+ (vec_concat:<VDBL>
-+ (unspec:DX [(match_operand:TI 1 "aarch64_simd_struct_operand" "Utv")]
-+ UNSPEC_LD2)
-+ (const_int 0))
-+ (vec_concat:<VDBL>
-+ (unspec:DX [(match_dup 1)]
-+ UNSPEC_LD2)
-+ (const_int 0))) 0))]
-+ "TARGET_SIMD"
-+ "ld1\\t{%S0.1d - %T0.1d}, %1"
-+ [(set_attr "simd_type" "simd_load2")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "aarch64_ld3<mode>_dreg"
-+ [(set (match_operand:CI 0 "register_operand" "=w")
-+ (subreg:CI
-+ (vec_concat:<VRL3>
-+ (vec_concat:<VRL2>
-+ (vec_concat:<VDBL>
-+ (unspec:VD [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
-+ UNSPEC_LD3)
-+ (vec_duplicate:VD (const_int 0)))
-+ (vec_concat:<VDBL>
-+ (unspec:VD [(match_dup 1)]
-+ UNSPEC_LD3)
-+ (vec_duplicate:VD (const_int 0))))
-+ (vec_concat:<VDBL>
-+ (unspec:VD [(match_dup 1)]
-+ UNSPEC_LD3)
-+ (vec_duplicate:VD (const_int 0)))) 0))]
-+ "TARGET_SIMD"
-+ "ld3\\t{%S0.<Vtype> - %U0.<Vtype>}, %1"
-+ [(set_attr "simd_type" "simd_load3")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "aarch64_ld3<mode>_dreg"
-+ [(set (match_operand:CI 0 "register_operand" "=w")
-+ (subreg:CI
-+ (vec_concat:<VRL3>
-+ (vec_concat:<VRL2>
-+ (vec_concat:<VDBL>
-+ (unspec:DX [(match_operand:EI 1 "aarch64_simd_struct_operand" "Utv")]
-+ UNSPEC_LD3)
-+ (const_int 0))
-+ (vec_concat:<VDBL>
-+ (unspec:DX [(match_dup 1)]
-+ UNSPEC_LD3)
-+ (const_int 0)))
-+ (vec_concat:<VDBL>
-+ (unspec:DX [(match_dup 1)]
-+ UNSPEC_LD3)
-+ (const_int 0))) 0))]
-+ "TARGET_SIMD"
-+ "ld1\\t{%S0.1d - %U0.1d}, %1"
-+ [(set_attr "simd_type" "simd_load3")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "aarch64_ld4<mode>_dreg"
-+ [(set (match_operand:XI 0 "register_operand" "=w")
-+ (subreg:XI
-+ (vec_concat:<VRL4>
-+ (vec_concat:<VRL2>
-+ (vec_concat:<VDBL>
-+ (unspec:VD [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
-+ UNSPEC_LD4)
-+ (vec_duplicate:VD (const_int 0)))
-+ (vec_concat:<VDBL>
-+ (unspec:VD [(match_dup 1)]
-+ UNSPEC_LD4)
-+ (vec_duplicate:VD (const_int 0))))
-+ (vec_concat:<VRL2>
-+ (vec_concat:<VDBL>
-+ (unspec:VD [(match_dup 1)]
-+ UNSPEC_LD4)
-+ (vec_duplicate:VD (const_int 0)))
-+ (vec_concat:<VDBL>
-+ (unspec:VD [(match_dup 1)]
-+ UNSPEC_LD4)
-+ (vec_duplicate:VD (const_int 0))))) 0))]
-+ "TARGET_SIMD"
-+ "ld4\\t{%S0.<Vtype> - %V0.<Vtype>}, %1"
-+ [(set_attr "simd_type" "simd_load4")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "aarch64_ld4<mode>_dreg"
-+ [(set (match_operand:XI 0 "register_operand" "=w")
-+ (subreg:XI
-+ (vec_concat:<VRL4>
-+ (vec_concat:<VRL2>
-+ (vec_concat:<VDBL>
-+ (unspec:DX [(match_operand:OI 1 "aarch64_simd_struct_operand" "Utv")]
-+ UNSPEC_LD4)
-+ (const_int 0))
-+ (vec_concat:<VDBL>
-+ (unspec:DX [(match_dup 1)]
-+ UNSPEC_LD4)
-+ (const_int 0)))
-+ (vec_concat:<VRL2>
-+ (vec_concat:<VDBL>
-+ (unspec:DX [(match_dup 1)]
-+ UNSPEC_LD4)
-+ (const_int 0))
-+ (vec_concat:<VDBL>
-+ (unspec:DX [(match_dup 1)]
-+ UNSPEC_LD4)
-+ (const_int 0)))) 0))]
-+ "TARGET_SIMD"
-+ "ld1\\t{%S0.1d - %V0.1d}, %1"
-+ [(set_attr "simd_type" "simd_load4")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_expand "aarch64_ld<VSTRUCT:nregs><VDC:mode>"
-+ [(match_operand:VSTRUCT 0 "register_operand" "=w")
-+ (match_operand:DI 1 "register_operand" "r")
-+ (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ "TARGET_SIMD"
-+{
-+ enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
-+ rtx mem = gen_rtx_MEM (mode, operands[1]);
-+
-+ emit_insn (gen_aarch64_ld<VSTRUCT:nregs><VDC:mode>_dreg (operands[0], mem));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_ld<VSTRUCT:nregs><VQ:mode>"
-+ [(match_operand:VSTRUCT 0 "register_operand" "=w")
-+ (match_operand:DI 1 "register_operand" "r")
-+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ "TARGET_SIMD"
-+{
-+ enum machine_mode mode = <VSTRUCT:MODE>mode;
-+ rtx mem = gen_rtx_MEM (mode, operands[1]);
-+
-+ emit_insn (gen_vec_load_lanes<VSTRUCT:mode><VQ:mode> (operands[0], mem));
-+ DONE;
-+})
-+
-+;; Expanders for builtins to extract vector registers from large
-+;; opaque integer modes.
-+
-+;; D-register list.
-+
-+(define_expand "aarch64_get_dreg<VSTRUCT:mode><VDC:mode>"
-+ [(match_operand:VDC 0 "register_operand" "=w")
-+ (match_operand:VSTRUCT 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ int part = INTVAL (operands[2]);
-+ rtx temp = gen_reg_rtx (<VDC:VDBL>mode);
-+ int offset = part * 16;
-+
-+ emit_move_insn (temp, gen_rtx_SUBREG (<VDC:VDBL>mode, operands[1], offset));
-+ emit_move_insn (operands[0], gen_lowpart (<VDC:MODE>mode, temp));
-+ DONE;
-+})
-+
-+;; Q-register list.
-+
-+(define_expand "aarch64_get_qreg<VSTRUCT:mode><VQ:mode>"
-+ [(match_operand:VQ 0 "register_operand" "=w")
-+ (match_operand:VSTRUCT 1 "register_operand" "w")
-+ (match_operand:SI 2 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ int part = INTVAL (operands[2]);
-+ int offset = part * 16;
-+
-+ emit_move_insn (operands[0],
-+ gen_rtx_SUBREG (<VQ:MODE>mode, operands[1], offset));
-+ DONE;
-+})
-+
-+;; Permuted-store expanders for neon intrinsics.
-+
-+;; Permute instructions
-+
-+;; vec_perm support
-+
-+(define_expand "vec_perm_const<mode>"
-+ [(match_operand:VALL 0 "register_operand")
-+ (match_operand:VALL 1 "register_operand")
-+ (match_operand:VALL 2 "register_operand")
-+ (match_operand:<V_cmp_result> 3)]
-+ "TARGET_SIMD"
-+{
-+ if (aarch64_expand_vec_perm_const (operands[0], operands[1],
-+ operands[2], operands[3]))
-+ DONE;
-+ else
-+ FAIL;
-+})
-+
-+(define_expand "vec_perm<mode>"
-+ [(match_operand:VB 0 "register_operand")
-+ (match_operand:VB 1 "register_operand")
-+ (match_operand:VB 2 "register_operand")
-+ (match_operand:VB 3 "register_operand")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_expand_vec_perm (operands[0], operands[1],
-+ operands[2], operands[3]);
-+ DONE;
-+})
-+
-+(define_insn "aarch64_tbl1<mode>"
-+ [(set (match_operand:VB 0 "register_operand" "=w")
-+ (unspec:VB [(match_operand:V16QI 1 "register_operand" "w")
-+ (match_operand:VB 2 "register_operand" "w")]
-+ UNSPEC_TBL))]
-+ "TARGET_SIMD"
-+ "tbl\\t%0.<Vtype>, {%1.16b}, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_tbl")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+;; Two source registers.
-+
-+(define_insn "aarch64_tbl2v16qi"
-+ [(set (match_operand:V16QI 0 "register_operand" "=w")
-+ (unspec:V16QI [(match_operand:OI 1 "register_operand" "w")
-+ (match_operand:V16QI 2 "register_operand" "w")]
-+ UNSPEC_TBL))]
-+ "TARGET_SIMD"
-+ "tbl\\t%0.16b, {%S1.16b - %T1.16b}, %2.16b"
-+ [(set_attr "simd_type" "simd_tbl")
-+ (set_attr "simd_mode" "V16QI")]
-+)
-+
-+(define_insn_and_split "aarch64_combinev16qi"
-+ [(set (match_operand:OI 0 "register_operand" "=w")
-+ (unspec:OI [(match_operand:V16QI 1 "register_operand" "w")
-+ (match_operand:V16QI 2 "register_operand" "w")]
-+ UNSPEC_CONCAT))]
-+ "TARGET_SIMD"
-+ "#"
-+ "&& reload_completed"
-+ [(const_int 0)]
-+{
-+ aarch64_split_combinev16qi (operands);
-+ DONE;
-+})
-+
-+(define_insn "aarch64_<PERMUTE:perm_insn><PERMUTE:perm_hilo><mode>"
-+ [(set (match_operand:VALL 0 "register_operand" "=w")
-+ (unspec:VALL [(match_operand:VALL 1 "register_operand" "w")
-+ (match_operand:VALL 2 "register_operand" "w")]
-+ PERMUTE))]
-+ "TARGET_SIMD"
-+ "<PERMUTE:perm_insn><PERMUTE:perm_hilo>\\t%0.<Vtype>, %1.<Vtype>, %2.<Vtype>"
-+ [(set_attr "simd_type" "simd_<PERMUTE:perm_insn>")
-+ (set_attr "simd_mode" "<MODE>")]
-+)
-+
-+(define_insn "aarch64_st2<mode>_dreg"
-+ [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
-+ (unspec:TI [(match_operand:OI 1 "register_operand" "w")
-+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_ST2))]
-+ "TARGET_SIMD"
-+ "st2\\t{%S1.<Vtype> - %T1.<Vtype>}, %0"
-+ [(set_attr "simd_type" "simd_store2")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "aarch64_st2<mode>_dreg"
-+ [(set (match_operand:TI 0 "aarch64_simd_struct_operand" "=Utv")
-+ (unspec:TI [(match_operand:OI 1 "register_operand" "w")
-+ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_ST2))]
-+ "TARGET_SIMD"
-+ "st1\\t{%S1.1d - %T1.1d}, %0"
-+ [(set_attr "simd_type" "simd_store2")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "aarch64_st3<mode>_dreg"
-+ [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
-+ (unspec:EI [(match_operand:CI 1 "register_operand" "w")
-+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_ST3))]
-+ "TARGET_SIMD"
-+ "st3\\t{%S1.<Vtype> - %U1.<Vtype>}, %0"
-+ [(set_attr "simd_type" "simd_store3")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "aarch64_st3<mode>_dreg"
-+ [(set (match_operand:EI 0 "aarch64_simd_struct_operand" "=Utv")
-+ (unspec:EI [(match_operand:CI 1 "register_operand" "w")
-+ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_ST3))]
-+ "TARGET_SIMD"
-+ "st1\\t{%S1.1d - %U1.1d}, %0"
-+ [(set_attr "simd_type" "simd_store3")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "aarch64_st4<mode>_dreg"
-+ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
-+ (unspec:OI [(match_operand:XI 1 "register_operand" "w")
-+ (unspec:VD [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_ST4))]
-+ "TARGET_SIMD"
-+ "st4\\t{%S1.<Vtype> - %V1.<Vtype>}, %0"
-+ [(set_attr "simd_type" "simd_store4")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_insn "aarch64_st4<mode>_dreg"
-+ [(set (match_operand:OI 0 "aarch64_simd_struct_operand" "=Utv")
-+ (unspec:OI [(match_operand:XI 1 "register_operand" "w")
-+ (unspec:DX [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ UNSPEC_ST4))]
-+ "TARGET_SIMD"
-+ "st1\\t{%S1.1d - %V1.1d}, %0"
-+ [(set_attr "simd_type" "simd_store4")
-+ (set_attr "simd_mode" "<MODE>")])
-+
-+(define_expand "aarch64_st<VSTRUCT:nregs><VDC:mode>"
-+ [(match_operand:DI 0 "register_operand" "r")
-+ (match_operand:VSTRUCT 1 "register_operand" "w")
-+ (unspec:VDC [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ "TARGET_SIMD"
-+{
-+ enum machine_mode mode = <VSTRUCT:VSTRUCT_DREG>mode;
-+ rtx mem = gen_rtx_MEM (mode, operands[0]);
-+
-+ emit_insn (gen_aarch64_st<VSTRUCT:nregs><VDC:mode>_dreg (mem, operands[1]));
-+ DONE;
-+})
-+
-+(define_expand "aarch64_st<VSTRUCT:nregs><VQ:mode>"
-+ [(match_operand:DI 0 "register_operand" "r")
-+ (match_operand:VSTRUCT 1 "register_operand" "w")
-+ (unspec:VQ [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-+ "TARGET_SIMD"
-+{
-+ enum machine_mode mode = <VSTRUCT:MODE>mode;
-+ rtx mem = gen_rtx_MEM (mode, operands[0]);
-+
-+ emit_insn (gen_vec_store_lanes<VSTRUCT:mode><VQ:mode> (mem, operands[1]));
-+ DONE;
-+})
-+
-+;; Expander for builtins to insert vector registers into large
-+;; opaque integer modes.
-+
-+;; Q-register list. We don't need a D-reg inserter as we zero
-+;; extend them in arm_neon.h and insert the resulting Q-regs.
-+
-+(define_expand "aarch64_set_qreg<VSTRUCT:mode><VQ:mode>"
-+ [(match_operand:VSTRUCT 0 "register_operand" "+w")
-+ (match_operand:VSTRUCT 1 "register_operand" "0")
-+ (match_operand:VQ 2 "register_operand" "w")
-+ (match_operand:SI 3 "immediate_operand" "i")]
-+ "TARGET_SIMD"
-+{
-+ int part = INTVAL (operands[3]);
-+ int offset = part * 16;
-+
-+ emit_move_insn (operands[0], operands[1]);
-+ emit_move_insn (gen_rtx_SUBREG (<VQ:MODE>mode, operands[0], offset),
-+ operands[2]);
-+ DONE;
-+})
-+
-+;; Standard pattern name vec_init<mode>.
-+
-+(define_expand "vec_init<mode>"
-+ [(match_operand:VALL 0 "register_operand" "")
-+ (match_operand 1 "" "")]
-+ "TARGET_SIMD"
-+{
-+ aarch64_expand_vector_init (operands[0], operands[1]);
-+ DONE;
-+})
-+
-+(define_insn "*aarch64_simd_ld1r<mode>"
-+ [(set (match_operand:VALLDI 0 "register_operand" "=w")
-+ (vec_duplicate:VALLDI
-+ (match_operand:<VEL> 1 "aarch64_simd_struct_operand" "Utv")))]
-+ "TARGET_SIMD"
-+ "ld1r\\t{%0.<Vtype>}, %1"
-+ [(set_attr "simd_type" "simd_load1r")
-+ (set_attr "simd_mode" "<MODE>")])
---- a/src/gcc/config/aarch64/aarch64-tune.md
-+++ b/src/gcc/config/aarch64/aarch64-tune.md
-@@ -0,0 +1,5 @@
-+;; -*- buffer-read-only: t -*-
-+;; Generated automatically by gentune.sh from aarch64-cores.def
-+(define_attr "tune"
-+ "large,small"
-+ (const (symbol_ref "((enum attr_tune) aarch64_tune)")))
--- a/src/gcc/config/aarch64/arm_neon.h
+++ b/src/gcc/config/aarch64/arm_neon.h
@@ -0,0 +1,25535 @@
@@ -50971,7 +51463,15 @@
+LIB1ASMFUNCS = _aarch64_sync_cache_range
--- a/src/gcc/config/alpha/alpha.c
+++ b/src/gcc/config/alpha/alpha.c
-@@ -2658,12 +2658,12 @@
+@@ -2617,6 +2617,7 @@
+ cmp_mode = cmp_mode == DImode ? DFmode : DImode;
+ op0 = gen_lowpart (cmp_mode, tem);
+ op1 = CONST0_RTX (cmp_mode);
++ cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
+ local_fast_math = 1;
+ }
+
+@@ -2658,12 +2659,12 @@
break;
case GE: case GT: case GEU: case GTU:
@@ -50990,7 +51490,7 @@
break;
default:
-@@ -3025,12 +3025,9 @@
+@@ -3025,12 +3026,9 @@
operands[1] = op1;
out = gen_reg_rtx (DImode);
@@ -51016,80 +51516,100 @@
UNSPEC_ARG_HOME
UNSPEC_LDGP1
UNSPEC_INSXH
---- a/src/gcc/config/arm/arm1020e.md
-+++ b/src/gcc/config/arm/arm1020e.md
-@@ -66,13 +66,13 @@
- ;; ALU operations with no shifted operand
- (define_insn_reservation "1020alu_op" 1
- (and (eq_attr "tune" "arm1020e,arm1022e")
-- (eq_attr "type" "alu"))
-+ (eq_attr "type" "alu_reg,simple_alu_imm"))
- "1020a_e,1020a_m,1020a_w")
-
- ;; ALU operations with a shift-by-constant operand
- (define_insn_reservation "1020alu_shift_op" 1
- (and (eq_attr "tune" "arm1020e,arm1022e")
-- (eq_attr "type" "alu_shift"))
-+ (eq_attr "type" "simple_alu_shift,alu_shift"))
- "1020a_e,1020a_m,1020a_w")
+--- a/src/gcc/config/arm/arm-fixed.md
++++ b/src/gcc/config/arm/arm-fixed.md
+@@ -374,6 +374,8 @@
+ "TARGET_32BIT && arm_arch6"
+ "ssat%?\\t%0, #16, %2%S1"
+ [(set_attr "predicable" "yes")
++ (set_attr "insn" "sat")
++ (set_attr "shift" "1")
+ (set_attr "type" "alu_shift")])
- ;; ALU operations with a shift-by-register operand
-@@ -284,7 +284,7 @@
+ (define_insn "arm_usatsihi"
+@@ -381,4 +383,5 @@
+ (us_truncate:HI (match_operand:SI 1 "s_register_operand")))]
+ "TARGET_INT_SIMD"
+ "usat%?\\t%0, #16, %1"
+- [(set_attr "predicable" "yes")])
++ [(set_attr "predicable" "yes")
++ (set_attr "insn" "sat")])
+--- a/src/gcc/config/arm/arm-protos.h
++++ b/src/gcc/config/arm/arm-protos.h
+@@ -49,6 +49,7 @@
+ extern bool arm_modes_tieable_p (enum machine_mode, enum machine_mode);
+ extern int const_ok_for_arm (HOST_WIDE_INT);
+ extern int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
++extern int const_ok_for_dimode_op (HOST_WIDE_INT, enum rtx_code);
+ extern int arm_split_constant (RTX_CODE, enum machine_mode, rtx,
+ HOST_WIDE_INT, rtx, rtx, int);
+ extern RTX_CODE arm_canonicalize_comparison (RTX_CODE, rtx *, rtx *);
+@@ -101,12 +102,14 @@
+ extern int arm_no_early_alu_shift_dep (rtx, rtx);
+ extern int arm_no_early_alu_shift_value_dep (rtx, rtx);
+ extern int arm_no_early_mul_dep (rtx, rtx);
++extern int arm_mac_accumulator_is_result (rtx, rtx);
+ extern int arm_mac_accumulator_is_mul_result (rtx, rtx);
- (define_insn_reservation "v10_fmul" 6
- (and (eq_attr "vfp10" "yes")
-- (eq_attr "type" "fmuls,fmacs,fmuld,fmacd"))
-+ (eq_attr "type" "fmuls,fmacs,ffmas,fmuld,fmacd,ffmad"))
- "1020a_e+v10_fmac*2")
+ extern int tls_mentioned_p (rtx);
+ extern int symbol_mentioned_p (rtx);
+ extern int label_mentioned_p (rtx);
+ extern RTX_CODE minmax_code (rtx);
++extern bool arm_sat_operator_match (rtx, rtx, int *, bool *);
+ extern int adjacent_mem_locations (rtx, rtx);
+ extern bool gen_ldm_seq (rtx *, int, bool);
+ extern bool gen_stm_seq (rtx *, int);
+@@ -222,6 +225,27 @@
- (define_insn_reservation "v10_fdivs" 18
---- a/src/gcc/config/arm/arm1026ejs.md
-+++ b/src/gcc/config/arm/arm1026ejs.md
-@@ -66,13 +66,13 @@
- ;; ALU operations with no shifted operand
- (define_insn_reservation "alu_op" 1
- (and (eq_attr "tune" "arm1026ejs")
-- (eq_attr "type" "alu"))
-+ (eq_attr "type" "alu_reg,simple_alu_imm"))
- "a_e,a_m,a_w")
+ extern void arm_order_regs_for_local_alloc (void);
- ;; ALU operations with a shift-by-constant operand
- (define_insn_reservation "alu_shift_op" 1
- (and (eq_attr "tune" "arm1026ejs")
-- (eq_attr "type" "alu_shift"))
-+ (eq_attr "type" "simple_alu_shift,alu_shift"))
- "a_e,a_m,a_w")
++/* Vectorizer cost model implementation. */
++struct cpu_vec_costs {
++ const int scalar_stmt_cost; /* Cost of any scalar operation, excluding
++ load and store. */
++ const int scalar_load_cost; /* Cost of scalar load. */
++ const int scalar_store_cost; /* Cost of scalar store. */
++ const int vec_stmt_cost; /* Cost of any vector operation, excluding
++ load, store, vector-to-scalar and
++ scalar-to-vector operation. */
++ const int vec_to_scalar_cost; /* Cost of vect-to-scalar operation. */
++ const int scalar_to_vec_cost; /* Cost of scalar-to-vector operation. */
++ const int vec_align_load_cost; /* Cost of aligned vector load. */
++ const int vec_unalign_load_cost; /* Cost of unaligned vector load. */
++ const int vec_unalign_store_cost; /* Cost of unaligned vector load. */
++ const int vec_store_cost; /* Cost of vector store. */
++ const int cond_taken_branch_cost; /* Cost of taken branch for vectorizer
++ cost model. */
++ const int cond_not_taken_branch_cost;/* Cost of not taken branch for
++ vectorizer cost model. */
++};
++
+ #ifdef RTX_CODE
+ /* This needs to be here because we need RTX_CODE and similar. */
- ;; ALU operations with a shift-by-register operand
---- a/src/gcc/config/arm/arm1136jfs.md
-+++ b/src/gcc/config/arm/arm1136jfs.md
-@@ -75,13 +75,13 @@
- ;; ALU operations with no shifted operand
- (define_insn_reservation "11_alu_op" 2
- (and (eq_attr "tune" "arm1136js,arm1136jfs")
-- (eq_attr "type" "alu"))
-+ (eq_attr "type" "alu_reg,simple_alu_imm"))
- "e_1,e_2,e_3,e_wb")
+@@ -238,13 +262,22 @@
+ int l1_cache_line_size;
+ bool prefer_constant_pool;
+ int (*branch_cost) (bool, bool);
++ /* Prefer Neon for 64-bit bitops. */
++ bool prefer_neon_for_64bits;
++ /* Vectorizer costs. */
++ const struct cpu_vec_costs* vec_costs;
+ };
- ;; ALU operations with a shift-by-constant operand
- (define_insn_reservation "11_alu_shift_op" 2
- (and (eq_attr "tune" "arm1136js,arm1136jfs")
-- (eq_attr "type" "alu_shift"))
-+ (eq_attr "type" "simple_alu_shift,alu_shift"))
- "e_1,e_2,e_3,e_wb")
+ extern const struct tune_params *current_tune;
+ extern int vfp3_const_double_for_fract_bits (rtx);
++
++extern void arm_emit_coreregs_64bit_shift (enum rtx_code, rtx, rtx, rtx, rtx,
++ rtx);
+ #endif /* RTX_CODE */
- ;; ALU operations with a shift-by-register operand
---- a/src/gcc/config/arm/arm926ejs.md
-+++ b/src/gcc/config/arm/arm926ejs.md
-@@ -58,7 +58,7 @@
- ;; ALU operations with no shifted operand
- (define_insn_reservation "9_alu_op" 1
- (and (eq_attr "tune" "arm926ejs")
-- (eq_attr "type" "alu,alu_shift"))
-+ (eq_attr "type" "alu_reg,simple_alu_imm,simple_alu_shift,alu_shift"))
- "e,m,w")
+ extern void arm_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
+ extern bool arm_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);
- ;; ALU operations with a shift-by-register operand
++extern bool arm_autoinc_modes_ok_p (enum machine_mode, enum arm_auto_incmodes);
++
+ #endif /* ! GCC_ARM_PROTOS_H */
--- a/src/gcc/config/arm/arm.c
+++ b/src/gcc/config/arm/arm.c
@@ -133,6 +133,7 @@
@@ -51362,7 +51882,18 @@
/* Calculate a few attributes that may be useful for specific
optimizations. */
/* Count number of leading zeros. */
-@@ -7640,6 +7757,28 @@
+@@ -4355,7 +4472,9 @@
+ if (((pcum->aapcs_vfp_regs_free >> regno) & mask) == mask)
+ {
+ pcum->aapcs_vfp_reg_alloc = mask << regno;
+- if (mode == BLKmode || (mode == TImode && !TARGET_NEON))
++ if (mode == BLKmode
++ || (mode == TImode && ! TARGET_NEON)
++ || ! arm_hard_regno_mode_ok (FIRST_VFP_REGNUM + regno, mode))
+ {
+ int i;
+ int rcount = pcum->aapcs_vfp_rcount;
+@@ -7640,6 +7759,28 @@
return true;
case SET:
@@ -51391,7 +51922,7 @@
return false;
case UNSPEC:
-@@ -7651,6 +7790,17 @@
+@@ -7651,6 +7792,17 @@
}
return true;
@@ -51409,7 +51940,7 @@
default:
*total = COSTS_N_INSNS (4);
return false;
-@@ -7991,6 +8141,17 @@
+@@ -7991,6 +8143,17 @@
*total = COSTS_N_INSNS (4);
return true;
@@ -51427,7 +51958,7 @@
case HIGH:
case LO_SUM:
/* We prefer constant pool entries to MOVW/MOVT pairs, so bump the
-@@ -8578,6 +8739,222 @@
+@@ -8578,6 +8741,222 @@
}
}
@@ -51650,7 +52181,7 @@
/* This function implements the target macro TARGET_SCHED_ADJUST_COST.
It corrects the value of COST based on the relationship between
INSN and DEP through the dependence LINK. It returns the new
-@@ -8858,11 +9235,14 @@
+@@ -8858,11 +9237,14 @@
vmov i64 17 aaaaaaaa bbbbbbbb cccccccc dddddddd
eeeeeeee ffffffff gggggggg hhhhhhhh
vmov f32 18 aBbbbbbc defgh000 00000000 00000000
@@ -51665,7 +52196,7 @@
Variants 0-5 (inclusive) may also be used as immediates for the second
operand of VORR/VBIC instructions.
-@@ -8893,11 +9273,25 @@
+@@ -8893,11 +9275,25 @@
break; \
}
@@ -51693,7 +52224,7 @@
/* Vectors of float constants. */
if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
-@@ -8905,7 +9299,7 @@
+@@ -8905,7 +9301,7 @@
rtx el0 = CONST_VECTOR_ELT (op, 0);
REAL_VALUE_TYPE r0;
@@ -51702,7 +52233,7 @@
return -1;
REAL_VALUE_FROM_CONST_DOUBLE (r0, el0);
-@@ -8927,13 +9321,16 @@
+@@ -8927,13 +9323,16 @@
if (elementwidth)
*elementwidth = 0;
@@ -51721,7 +52252,7 @@
unsigned HOST_WIDE_INT elpart;
unsigned int part, parts;
-@@ -9644,7 +10041,11 @@
+@@ -9644,7 +10043,11 @@
&& REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
&& GET_CODE (XEXP (ind, 1)) == CONST_INT
&& INTVAL (XEXP (ind, 1)) > -1024
@@ -51734,7 +52265,7 @@
&& (INTVAL (XEXP (ind, 1)) & 3) == 0)
return TRUE;
-@@ -10047,6 +10448,42 @@
+@@ -10047,6 +10450,42 @@
}
}
@@ -51777,7 +52308,7 @@
/* Return 1 if memory locations are adjacent. */
int
adjacent_mem_locations (rtx a, rtx b)
-@@ -13277,47 +13714,148 @@
+@@ -13277,47 +13716,148 @@
FOR_BB_INSNS_REVERSE (bb, insn)
{
if (NONJUMP_INSN_P (insn)
@@ -51954,7 +52485,7 @@
}
}
-@@ -14546,15 +15084,16 @@
+@@ -14546,15 +15086,16 @@
return "";
}
@@ -51978,7 +52509,7 @@
For example, the in-memory ordering of a big-endian a quadword
vector with 16-bit elements when stored from register pair {d0,d1}
-@@ -14568,13 +15107,28 @@
+@@ -14568,13 +15109,28 @@
dN -> (rN+1, rN), dN+1 -> (rN+3, rN+2)
So that STM/LDM can be used on vectors in ARM registers, and the
@@ -52009,7 +52540,7 @@
const char *templ;
char buff[50];
enum machine_mode mode;
-@@ -14586,6 +15140,7 @@
+@@ -14586,6 +15142,7 @@
gcc_assert (REG_P (reg));
regno = REGNO (reg);
@@ -52017,7 +52548,7 @@
gcc_assert (VFP_REGNO_OK_FOR_DOUBLE (regno)
|| NEON_REGNO_OK_FOR_QUAD (regno));
gcc_assert (VALID_NEON_DREG_MODE (mode)
-@@ -14602,13 +15157,23 @@
+@@ -14602,13 +15159,23 @@
switch (GET_CODE (addr))
{
case POST_INC:
@@ -52044,7 +52575,7 @@
templ = "v%smdb%%?\t%%0!, %%h1";
ops[0] = XEXP (addr, 0);
ops[1] = reg;
-@@ -14621,7 +15186,6 @@
+@@ -14621,7 +15188,6 @@
case LABEL_REF:
case PLUS:
{
@@ -52052,7 +52583,7 @@
int i;
int overlap = -1;
for (i = 0; i < nregs; i++)
-@@ -14652,7 +15216,12 @@
+@@ -14652,7 +15218,12 @@
}
default:
@@ -52066,7 +52597,7 @@
ops[0] = mem;
ops[1] = reg;
}
-@@ -17287,6 +17856,19 @@
+@@ -17287,6 +17858,19 @@
}
return;
@@ -52086,7 +52617,7 @@
case 'B':
if (GET_CODE (x) == CONST_INT)
{
-@@ -19101,6 +19683,8 @@
+@@ -19101,6 +19685,8 @@
VAR8 (BINOP, vmul, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf),
VAR8 (TERNOP, vmla, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf),
VAR3 (TERNOP, vmlal, v8qi, v4hi, v2si),
@@ -52095,7 +52626,7 @@
VAR8 (TERNOP, vmls, v8qi, v4hi, v2si, v2sf, v16qi, v8hi, v4si, v4sf),
VAR3 (TERNOP, vmlsl, v8qi, v4hi, v2si),
VAR4 (BINOP, vqdmulh, v4hi, v2si, v8hi, v4si),
-@@ -23485,6 +24069,62 @@
+@@ -23485,6 +24071,62 @@
return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
}
@@ -52158,7 +52689,7 @@
/* Return non-zero if the consumer (a multiply-accumulate instruction)
has an accumulator dependency on the result of the producer (a
multiplication instruction) and no other dependency on that result. */
-@@ -24439,6 +25079,7 @@
+@@ -24439,6 +25081,7 @@
case cortexr5:
case genericv7a:
case cortexa5:
@@ -52166,7 +52697,7 @@
case cortexa8:
case cortexa9:
case fa726te:
-@@ -25374,20 +26015,20 @@
+@@ -25374,20 +26017,20 @@
default:
return false;
}
@@ -52194,7 +52725,7 @@
/* Success! */
if (d->testing_p)
return true;
-@@ -25462,6 +26103,72 @@
+@@ -25462,6 +26105,72 @@
return true;
}
@@ -52267,7 +52798,7 @@
/* The NEON VTBL instruction is a fully variable permuation that's even
stronger than what we expose via VEC_PERM_EXPR. What it doesn't do
is mask the index operand as VEC_PERM_EXPR requires. Therefore we
-@@ -25501,6 +26208,12 @@
+@@ -25501,6 +26210,12 @@
static bool
arm_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
{
@@ -52280,7 +52811,7 @@
/* The pattern matching functions above are written to look for a small
number to begin the sequence (0, 1, N/2). If we begin with an index
from the second operand, we can swap the operands. */
-@@ -25631,5 +26344,302 @@
+@@ -25631,5 +26346,302 @@
return ret;
}
@@ -52584,24 +53115,6 @@
+}
+
#include "gt-arm.h"
---- a/src/gcc/config/arm/arm-fixed.md
-+++ b/src/gcc/config/arm/arm-fixed.md
-@@ -374,6 +374,8 @@
- "TARGET_32BIT && arm_arch6"
- "ssat%?\\t%0, #16, %2%S1"
- [(set_attr "predicable" "yes")
-+ (set_attr "insn" "sat")
-+ (set_attr "shift" "1")
- (set_attr "type" "alu_shift")])
-
- (define_insn "arm_usatsihi"
-@@ -381,4 +383,5 @@
- (us_truncate:HI (match_operand:SI 1 "s_register_operand")))]
- "TARGET_INT_SIMD"
- "usat%?\\t%0, #16, %1"
-- [(set_attr "predicable" "yes")])
-+ [(set_attr "predicable" "yes")
-+ (set_attr "insn" "sat")])
--- a/src/gcc/config/arm/arm.h
+++ b/src/gcc/config/arm/arm.h
@@ -79,6 +79,9 @@
@@ -54127,6 +54640,90 @@
;; Load the load/store multiple patterns
(include "ldmstm.md")
;; Load the FPA co-processor patterns
+--- a/src/gcc/config/arm/arm.opt
++++ b/src/gcc/config/arm/arm.opt
+@@ -267,3 +267,7 @@
+ munaligned-access
+ Target Report Var(unaligned_access) Init(2)
+ Enable unaligned word and halfword accesses to packed data.
++
++mneon-for-64bits
++Target Report RejectNegative Var(use_neon_for_64bits) Init(0)
++Use Neon to perform 64-bits operations rather than core registers.
+--- a/src/gcc/config/arm/arm1020e.md
++++ b/src/gcc/config/arm/arm1020e.md
+@@ -66,13 +66,13 @@
+ ;; ALU operations with no shifted operand
+ (define_insn_reservation "1020alu_op" 1
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+- (eq_attr "type" "alu"))
++ (eq_attr "type" "alu_reg,simple_alu_imm"))
+ "1020a_e,1020a_m,1020a_w")
+
+ ;; ALU operations with a shift-by-constant operand
+ (define_insn_reservation "1020alu_shift_op" 1
+ (and (eq_attr "tune" "arm1020e,arm1022e")
+- (eq_attr "type" "alu_shift"))
++ (eq_attr "type" "simple_alu_shift,alu_shift"))
+ "1020a_e,1020a_m,1020a_w")
+
+ ;; ALU operations with a shift-by-register operand
+@@ -284,7 +284,7 @@
+
+ (define_insn_reservation "v10_fmul" 6
+ (and (eq_attr "vfp10" "yes")
+- (eq_attr "type" "fmuls,fmacs,fmuld,fmacd"))
++ (eq_attr "type" "fmuls,fmacs,ffmas,fmuld,fmacd,ffmad"))
+ "1020a_e+v10_fmac*2")
+
+ (define_insn_reservation "v10_fdivs" 18
+--- a/src/gcc/config/arm/arm1026ejs.md
++++ b/src/gcc/config/arm/arm1026ejs.md
+@@ -66,13 +66,13 @@
+ ;; ALU operations with no shifted operand
+ (define_insn_reservation "alu_op" 1
+ (and (eq_attr "tune" "arm1026ejs")
+- (eq_attr "type" "alu"))
++ (eq_attr "type" "alu_reg,simple_alu_imm"))
+ "a_e,a_m,a_w")
+
+ ;; ALU operations with a shift-by-constant operand
+ (define_insn_reservation "alu_shift_op" 1
+ (and (eq_attr "tune" "arm1026ejs")
+- (eq_attr "type" "alu_shift"))
++ (eq_attr "type" "simple_alu_shift,alu_shift"))
+ "a_e,a_m,a_w")
+
+ ;; ALU operations with a shift-by-register operand
+--- a/src/gcc/config/arm/arm1136jfs.md
++++ b/src/gcc/config/arm/arm1136jfs.md
+@@ -75,13 +75,13 @@
+ ;; ALU operations with no shifted operand
+ (define_insn_reservation "11_alu_op" 2
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+- (eq_attr "type" "alu"))
++ (eq_attr "type" "alu_reg,simple_alu_imm"))
+ "e_1,e_2,e_3,e_wb")
+
+ ;; ALU operations with a shift-by-constant operand
+ (define_insn_reservation "11_alu_shift_op" 2
+ (and (eq_attr "tune" "arm1136js,arm1136jfs")
+- (eq_attr "type" "alu_shift"))
++ (eq_attr "type" "simple_alu_shift,alu_shift"))
+ "e_1,e_2,e_3,e_wb")
+
+ ;; ALU operations with a shift-by-register operand
+--- a/src/gcc/config/arm/arm926ejs.md
++++ b/src/gcc/config/arm/arm926ejs.md
+@@ -58,7 +58,7 @@
+ ;; ALU operations with no shifted operand
+ (define_insn_reservation "9_alu_op" 1
+ (and (eq_attr "tune" "arm926ejs")
+- (eq_attr "type" "alu,alu_shift"))
++ (eq_attr "type" "alu_reg,simple_alu_imm,simple_alu_shift,alu_shift"))
+ "e,m,w")
+
+ ;; ALU operations with a shift-by-register operand
--- a/src/gcc/config/arm/arm_neon.h
+++ b/src/gcc/config/arm/arm_neon.h
@@ -1350,6 +1350,38 @@
@@ -54168,92 +54765,6 @@
__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
vsub_s8 (int8x8_t __a, int8x8_t __b)
{
---- a/src/gcc/config/arm/arm.opt
-+++ b/src/gcc/config/arm/arm.opt
-@@ -267,3 +267,7 @@
- munaligned-access
- Target Report Var(unaligned_access) Init(2)
- Enable unaligned word and halfword accesses to packed data.
-+
-+mneon-for-64bits
-+Target Report RejectNegative Var(use_neon_for_64bits) Init(0)
-+Use Neon to perform 64-bits operations rather than core registers.
---- a/src/gcc/config/arm/arm-protos.h
-+++ b/src/gcc/config/arm/arm-protos.h
-@@ -49,6 +49,7 @@
- extern bool arm_modes_tieable_p (enum machine_mode, enum machine_mode);
- extern int const_ok_for_arm (HOST_WIDE_INT);
- extern int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
-+extern int const_ok_for_dimode_op (HOST_WIDE_INT, enum rtx_code);
- extern int arm_split_constant (RTX_CODE, enum machine_mode, rtx,
- HOST_WIDE_INT, rtx, rtx, int);
- extern RTX_CODE arm_canonicalize_comparison (RTX_CODE, rtx *, rtx *);
-@@ -101,12 +102,14 @@
- extern int arm_no_early_alu_shift_dep (rtx, rtx);
- extern int arm_no_early_alu_shift_value_dep (rtx, rtx);
- extern int arm_no_early_mul_dep (rtx, rtx);
-+extern int arm_mac_accumulator_is_result (rtx, rtx);
- extern int arm_mac_accumulator_is_mul_result (rtx, rtx);
-
- extern int tls_mentioned_p (rtx);
- extern int symbol_mentioned_p (rtx);
- extern int label_mentioned_p (rtx);
- extern RTX_CODE minmax_code (rtx);
-+extern bool arm_sat_operator_match (rtx, rtx, int *, bool *);
- extern int adjacent_mem_locations (rtx, rtx);
- extern bool gen_ldm_seq (rtx *, int, bool);
- extern bool gen_stm_seq (rtx *, int);
-@@ -222,6 +225,27 @@
-
- extern void arm_order_regs_for_local_alloc (void);
-
-+/* Vectorizer cost model implementation. */
-+struct cpu_vec_costs {
-+ const int scalar_stmt_cost; /* Cost of any scalar operation, excluding
-+ load and store. */
-+ const int scalar_load_cost; /* Cost of scalar load. */
-+ const int scalar_store_cost; /* Cost of scalar store. */
-+ const int vec_stmt_cost; /* Cost of any vector operation, excluding
-+ load, store, vector-to-scalar and
-+ scalar-to-vector operation. */
-+ const int vec_to_scalar_cost; /* Cost of vect-to-scalar operation. */
-+ const int scalar_to_vec_cost; /* Cost of scalar-to-vector operation. */
-+ const int vec_align_load_cost; /* Cost of aligned vector load. */
-+ const int vec_unalign_load_cost; /* Cost of unaligned vector load. */
-+ const int vec_unalign_store_cost; /* Cost of unaligned vector load. */
-+ const int vec_store_cost; /* Cost of vector store. */
-+ const int cond_taken_branch_cost; /* Cost of taken branch for vectorizer
-+ cost model. */
-+ const int cond_not_taken_branch_cost;/* Cost of not taken branch for
-+ vectorizer cost model. */
-+};
-+
- #ifdef RTX_CODE
- /* This needs to be here because we need RTX_CODE and similar. */
-
-@@ -238,13 +262,22 @@
- int l1_cache_line_size;
- bool prefer_constant_pool;
- int (*branch_cost) (bool, bool);
-+ /* Prefer Neon for 64-bit bitops. */
-+ bool prefer_neon_for_64bits;
-+ /* Vectorizer costs. */
-+ const struct cpu_vec_costs* vec_costs;
- };
-
- extern const struct tune_params *current_tune;
- extern int vfp3_const_double_for_fract_bits (rtx);
-+
-+extern void arm_emit_coreregs_64bit_shift (enum rtx_code, rtx, rtx, rtx, rtx,
-+ rtx);
- #endif /* RTX_CODE */
-
- extern void arm_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
- extern bool arm_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);
-
-+extern bool arm_autoinc_modes_ok_p (enum machine_mode, enum arm_auto_incmodes);
-+
- #endif /* ! GCC_ARM_PROTOS_H */
--- a/src/gcc/config/arm/constraints.md
+++ b/src/gcc/config/arm/constraints.md
@@ -29,7 +29,7 @@
@@ -54291,87 +54802,6 @@
(match_test "TARGET_32BIT
&& imm_for_neon_mov_operand (op, GET_MODE (op))")))
---- a/src/gcc/config/arm/cortex-a15.md
-+++ b/src/gcc/config/arm/cortex-a15.md
-@@ -24,7 +24,7 @@
- ;; The Cortex-A15 core is modelled as a triple issue pipeline that has
- ;; the following dispatch units.
- ;; 1. Two pipelines for simple integer operations: SX1, SX2
--;; 2. Two pipelines for Neon and FP data-processing operations: CX1, CX2
-+;; 2. Individual units for Neon and FP operations as in cortex-a15-neon.md
- ;; 3. One pipeline for branch operations: BX
- ;; 4. One pipeline for integer multiply and divide operations: MX
- ;; 5. Two pipelines for load and store operations: LS1, LS2
-@@ -44,7 +44,6 @@
-
- ;; The main dispatch units
- (define_cpu_unit "ca15_sx1, ca15_sx2" "cortex_a15")
--(define_cpu_unit "ca15_cx1, ca15_cx2" "cortex_a15")
- (define_cpu_unit "ca15_ls1, ca15_ls2" "cortex_a15")
- (define_cpu_unit "ca15_bx, ca15_mx" "cortex_a15")
-
-@@ -62,14 +61,14 @@
- ;; Simple ALU without shift
- (define_insn_reservation "cortex_a15_alu" 2
- (and (eq_attr "tune" "cortexa15")
-- (and (eq_attr "type" "alu")
-+ (and (eq_attr "type" "alu_reg,simple_alu_imm")
- (eq_attr "neon_type" "none")))
- "ca15_issue1,(ca15_sx1,ca15_sx1_alu)|(ca15_sx2,ca15_sx2_alu)")
-
- ;; ALU ops with immediate shift
- (define_insn_reservation "cortex_a15_alu_shift" 3
- (and (eq_attr "tune" "cortexa15")
-- (and (eq_attr "type" "alu_shift")
-+ (and (eq_attr "type" "simple_alu_shift,alu_shift")
- (eq_attr "neon_type" "none")))
- "ca15_issue1,(ca15_sx1,ca15_sx1+ca15_sx1_shf,ca15_sx1_alu)\
- |(ca15_sx2,ca15_sx2+ca15_sx2_shf,ca15_sx2_alu)")
-@@ -129,20 +128,6 @@
- (eq_attr "neon_type" "none")))
- "ca15_issue1,ca15_bx")
-
--
--;; We lie with calls. They take up all issue slots, and form a block in the
--;; pipeline. The result however is available the next cycle.
--;;
--;; Addition of new units requires this to be updated.
--(define_insn_reservation "cortex_a15_call" 1
-- (and (eq_attr "tune" "cortexa15")
-- (and (eq_attr "type" "call")
-- (eq_attr "neon_type" "none")))
-- "ca15_issue3,\
-- ca15_sx1+ca15_sx2+ca15_bx+ca15_mx+ca15_cx1+ca15_cx2+ca15_ls1+ca15_ls2,\
-- ca15_sx1_alu+ca15_sx1_shf+ca15_sx1_sat+ca15_sx2_alu+ca15_sx2_shf\
-- +ca15_sx2_sat+ca15_ldr+ca15_str")
--
- ;; Load-store execution Unit
- ;;
- ;; Loads of up to two words.
-@@ -173,6 +158,23 @@
- (eq_attr "neon_type" "none")))
- "ca15_issue2,ca15_ls1+ca15_ls2,ca15_str,ca15_str")
-
-+;; We include Neon.md here to ensure that the branch can block the Neon units.
-+(include "cortex-a15-neon.md")
-+
-+;; We lie with calls. They take up all issue slots, and form a block in the
-+;; pipeline. The result however is available the next cycle.
-+(define_insn_reservation "cortex_a15_call" 1
-+ (and (eq_attr "tune" "cortexa15")
-+ (and (eq_attr "type" "call")
-+ (eq_attr "neon_type" "none")))
-+ "ca15_issue3,\
-+ ca15_sx1+ca15_sx2+ca15_bx+ca15_mx+ca15_cx_ij+ca15_cx_ik+ca15_ls1+ca15_ls2+\
-+ ca15_cx_imac1+ca15_cx_ialu1+ca15_cx_ialu2+ca15_cx_ishf+\
-+ ca15_cx_acc+ca15_cx_fmul1+ca15_cx_fmul2+ca15_cx_fmul3+ca15_cx_fmul4+\
-+ ca15_cx_falu1+ca15_cx_falu2+ca15_cx_falu3+ca15_cx_falu4+ca15_cx_vfp_i,\
-+ ca15_sx1_alu+ca15_sx1_shf+ca15_sx1_sat+ca15_sx2_alu+\
-+ ca15_sx2_shf+ca15_sx2_sat+ca15_ldr+ca15_str")
-+
- ;; Simple execution unit bypasses
- (define_bypass 1 "cortex_a15_alu"
- "cortex_a15_alu,cortex_a15_alu_shift,cortex_a15_alu_shift_reg")
--- a/src/gcc/config/arm/cortex-a15-neon.md
+++ b/src/gcc/config/arm/cortex-a15-neon.md
@@ -0,0 +1,1215 @@
@@ -55590,6 +56020,87 @@
+ cortex_a15_neon_fp_vrecps_vrsqrts_ddd,\
+ cortex_a15_neon_fp_vrecps_vrsqrts_qqq")
+
+--- a/src/gcc/config/arm/cortex-a15.md
++++ b/src/gcc/config/arm/cortex-a15.md
+@@ -24,7 +24,7 @@
+ ;; The Cortex-A15 core is modelled as a triple issue pipeline that has
+ ;; the following dispatch units.
+ ;; 1. Two pipelines for simple integer operations: SX1, SX2
+-;; 2. Two pipelines for Neon and FP data-processing operations: CX1, CX2
++;; 2. Individual units for Neon and FP operations as in cortex-a15-neon.md
+ ;; 3. One pipeline for branch operations: BX
+ ;; 4. One pipeline for integer multiply and divide operations: MX
+ ;; 5. Two pipelines for load and store operations: LS1, LS2
+@@ -44,7 +44,6 @@
+
+ ;; The main dispatch units
+ (define_cpu_unit "ca15_sx1, ca15_sx2" "cortex_a15")
+-(define_cpu_unit "ca15_cx1, ca15_cx2" "cortex_a15")
+ (define_cpu_unit "ca15_ls1, ca15_ls2" "cortex_a15")
+ (define_cpu_unit "ca15_bx, ca15_mx" "cortex_a15")
+
+@@ -62,14 +61,14 @@
+ ;; Simple ALU without shift
+ (define_insn_reservation "cortex_a15_alu" 2
+ (and (eq_attr "tune" "cortexa15")
+- (and (eq_attr "type" "alu")
++ (and (eq_attr "type" "alu_reg,simple_alu_imm")
+ (eq_attr "neon_type" "none")))
+ "ca15_issue1,(ca15_sx1,ca15_sx1_alu)|(ca15_sx2,ca15_sx2_alu)")
+
+ ;; ALU ops with immediate shift
+ (define_insn_reservation "cortex_a15_alu_shift" 3
+ (and (eq_attr "tune" "cortexa15")
+- (and (eq_attr "type" "alu_shift")
++ (and (eq_attr "type" "simple_alu_shift,alu_shift")
+ (eq_attr "neon_type" "none")))
+ "ca15_issue1,(ca15_sx1,ca15_sx1+ca15_sx1_shf,ca15_sx1_alu)\
+ |(ca15_sx2,ca15_sx2+ca15_sx2_shf,ca15_sx2_alu)")
+@@ -129,20 +128,6 @@
+ (eq_attr "neon_type" "none")))
+ "ca15_issue1,ca15_bx")
+
+-
+-;; We lie with calls. They take up all issue slots, and form a block in the
+-;; pipeline. The result however is available the next cycle.
+-;;
+-;; Addition of new units requires this to be updated.
+-(define_insn_reservation "cortex_a15_call" 1
+- (and (eq_attr "tune" "cortexa15")
+- (and (eq_attr "type" "call")
+- (eq_attr "neon_type" "none")))
+- "ca15_issue3,\
+- ca15_sx1+ca15_sx2+ca15_bx+ca15_mx+ca15_cx1+ca15_cx2+ca15_ls1+ca15_ls2,\
+- ca15_sx1_alu+ca15_sx1_shf+ca15_sx1_sat+ca15_sx2_alu+ca15_sx2_shf\
+- +ca15_sx2_sat+ca15_ldr+ca15_str")
+-
+ ;; Load-store execution Unit
+ ;;
+ ;; Loads of up to two words.
+@@ -173,6 +158,23 @@
+ (eq_attr "neon_type" "none")))
+ "ca15_issue2,ca15_ls1+ca15_ls2,ca15_str,ca15_str")
+
++;; We include Neon.md here to ensure that the branch can block the Neon units.
++(include "cortex-a15-neon.md")
++
++;; We lie with calls. They take up all issue slots, and form a block in the
++;; pipeline. The result however is available the next cycle.
++(define_insn_reservation "cortex_a15_call" 1
++ (and (eq_attr "tune" "cortexa15")
++ (and (eq_attr "type" "call")
++ (eq_attr "neon_type" "none")))
++ "ca15_issue3,\
++ ca15_sx1+ca15_sx2+ca15_bx+ca15_mx+ca15_cx_ij+ca15_cx_ik+ca15_ls1+ca15_ls2+\
++ ca15_cx_imac1+ca15_cx_ialu1+ca15_cx_ialu2+ca15_cx_ishf+\
++ ca15_cx_acc+ca15_cx_fmul1+ca15_cx_fmul2+ca15_cx_fmul3+ca15_cx_fmul4+\
++ ca15_cx_falu1+ca15_cx_falu2+ca15_cx_falu3+ca15_cx_falu4+ca15_cx_vfp_i,\
++ ca15_sx1_alu+ca15_sx1_shf+ca15_sx1_sat+ca15_sx2_alu+\
++ ca15_sx2_shf+ca15_sx2_sat+ca15_ldr+ca15_str")
++
+ ;; Simple execution unit bypasses
+ (define_bypass 1 "cortex_a15_alu"
+ "cortex_a15_alu,cortex_a15_alu_shift,cortex_a15_alu_shift_reg")
--- a/src/gcc/config/arm/cortex-a5.md
+++ b/src/gcc/config/arm/cortex-a5.md
@@ -58,12 +58,12 @@
@@ -56035,6 +56546,23 @@
+ neon_fp_vmla_ddd_scalar,\
+ neon_fp_vmla_qqq_scalar"))
+ "cortex_a7_both*2")
+--- a/src/gcc/config/arm/cortex-a8-neon.md
++++ b/src/gcc/config/arm/cortex-a8-neon.md
+@@ -149,12 +149,12 @@
+
+ (define_insn_reservation "cortex_a8_vfp_macs" 21
+ (and (eq_attr "tune" "cortexa8")
+- (eq_attr "type" "fmacs"))
++ (eq_attr "type" "fmacs,ffmas"))
+ "cortex_a8_vfp,cortex_a8_vfplite*20")
+
+ (define_insn_reservation "cortex_a8_vfp_macd" 26
+ (and (eq_attr "tune" "cortexa8")
+- (eq_attr "type" "fmacd"))
++ (eq_attr "type" "fmacd,ffmad"))
+ "cortex_a8_vfp,cortex_a8_vfplite*25")
+
+ (define_insn_reservation "cortex_a8_vfp_divs" 37
--- a/src/gcc/config/arm/cortex-a8.md
+++ b/src/gcc/config/arm/cortex-a8.md
@@ -85,7 +85,7 @@
@@ -56064,23 +56592,6 @@
(eq_attr "insn" "mov,mvn")))
"cortex_a8_default")
---- a/src/gcc/config/arm/cortex-a8-neon.md
-+++ b/src/gcc/config/arm/cortex-a8-neon.md
-@@ -149,12 +149,12 @@
-
- (define_insn_reservation "cortex_a8_vfp_macs" 21
- (and (eq_attr "tune" "cortexa8")
-- (eq_attr "type" "fmacs"))
-+ (eq_attr "type" "fmacs,ffmas"))
- "cortex_a8_vfp,cortex_a8_vfplite*20")
-
- (define_insn_reservation "cortex_a8_vfp_macd" 26
- (and (eq_attr "tune" "cortexa8")
-- (eq_attr "type" "fmacd"))
-+ (eq_attr "type" "fmacd,ffmad"))
- "cortex_a8_vfp,cortex_a8_vfplite*25")
-
- (define_insn_reservation "cortex_a8_vfp_divs" 37
--- a/src/gcc/config/arm/cortex-a9.md
+++ b/src/gcc/config/arm/cortex-a9.md
@@ -80,9 +80,9 @@
@@ -56150,26 +56661,6 @@
"cortex_m4_ex")
;; Byte, half-word and word load is two cycles.
---- a/src/gcc/config/arm/cortex-r4f.md
-+++ b/src/gcc/config/arm/cortex-r4f.md
-@@ -63,7 +63,7 @@
-
- (define_insn_reservation "cortex_r4_fmacs" 6
- (and (eq_attr "tune_cortexr4" "yes")
-- (eq_attr "type" "fmacs"))
-+ (eq_attr "type" "fmacs,ffmas"))
- "(cortex_r4_issue_a+cortex_r4_v1)|(cortex_r4_issue_b+cortex_r4_vmla)")
-
- (define_insn_reservation "cortex_r4_fdivs" 17
-@@ -119,7 +119,7 @@
-
- (define_insn_reservation "cortex_r4_fmacd" 20
- (and (eq_attr "tune_cortexr4" "yes")
-- (eq_attr "type" "fmacd"))
-+ (eq_attr "type" "fmacd,ffmad"))
- "cortex_r4_single_issue*13")
-
- (define_insn_reservation "cortex_r4_farith" 10
--- a/src/gcc/config/arm/cortex-r4.md
+++ b/src/gcc/config/arm/cortex-r4.md
@@ -78,19 +78,19 @@
@@ -56195,6 +56686,26 @@
"cortex_r4_alu")
(define_insn_reservation "cortex_r4_alu_shift_reg" 2
+--- a/src/gcc/config/arm/cortex-r4f.md
++++ b/src/gcc/config/arm/cortex-r4f.md
+@@ -63,7 +63,7 @@
+
+ (define_insn_reservation "cortex_r4_fmacs" 6
+ (and (eq_attr "tune_cortexr4" "yes")
+- (eq_attr "type" "fmacs"))
++ (eq_attr "type" "fmacs,ffmas"))
+ "(cortex_r4_issue_a+cortex_r4_v1)|(cortex_r4_issue_b+cortex_r4_vmla)")
+
+ (define_insn_reservation "cortex_r4_fdivs" 17
+@@ -119,7 +119,7 @@
+
+ (define_insn_reservation "cortex_r4_fmacd" 20
+ (and (eq_attr "tune_cortexr4" "yes")
+- (eq_attr "type" "fmacd"))
++ (eq_attr "type" "fmacd,ffmad"))
+ "cortex_r4_single_issue*13")
+
+ (define_insn_reservation "cortex_r4_farith" 10
--- a/src/gcc/config/arm/driver-arm.c
+++ b/src/gcc/config/arm/driver-arm.c
@@ -37,6 +37,7 @@
@@ -56450,6 +56961,70 @@
(* When this function processes the element types in the ops table, it rewrites
them in a list of tuples (a,b,c):
+--- a/src/gcc/config/arm/neon-testgen.ml
++++ b/src/gcc/config/arm/neon-testgen.ml
+@@ -46,13 +46,14 @@
+ failwith ("Could not create test source file " ^ name ^ ": " ^ str)
+
+ (* Emit prologue code to a test source file. *)
+-let emit_prologue chan test_name =
++let emit_prologue chan test_name effective_target =
+ Printf.fprintf chan "/* Test the `%s' ARM Neon intrinsic. */\n" test_name;
+ Printf.fprintf chan "/* This file was autogenerated by neon-testgen. */\n\n";
+ Printf.fprintf chan "/* { dg-do assemble } */\n";
+- Printf.fprintf chan "/* { dg-require-effective-target arm_neon_ok } */\n";
++ Printf.fprintf chan "/* { dg-require-effective-target %s_ok } */\n"
++ effective_target;
+ Printf.fprintf chan "/* { dg-options \"-save-temps -O0\" } */\n";
+- Printf.fprintf chan "/* { dg-add-options arm_neon } */\n";
++ Printf.fprintf chan "/* { dg-add-options %s } */\n" effective_target;
+ Printf.fprintf chan "\n#include \"arm_neon.h\"\n\n";
+ Printf.fprintf chan "void test_%s (void)\n{\n" test_name
+
+@@ -79,9 +80,12 @@
+ (* The intrinsic returns a value. We need to do explict register
+ allocation for vget_low tests or they fail because of copy
+ elimination. *)
+- ((if List.mem Fixed_return_reg features then
++ ((if List.mem Fixed_vector_reg features then
+ Printf.fprintf chan " register %s out_%s asm (\"d18\");\n"
+ return_ty return_ty
++ else if List.mem Fixed_core_reg features then
++ Printf.fprintf chan " register %s out_%s asm (\"r0\");\n"
++ return_ty return_ty
+ else
+ Printf.fprintf chan " %s out_%s;\n" return_ty return_ty);
+ emit ())
+@@ -153,6 +157,17 @@
+ then (Const :: flags, String.sub ty 6 ((String.length ty) - 6))
+ else (flags, ty)) tys'
+
++(* Work out what the effective target should be. *)
++let effective_target features =
++ try
++ match List.find (fun feature ->
++ match feature with Requires_feature _ -> true
++ | _ -> false)
++ features with
++ Requires_feature "FMA" -> "arm_neonv2"
++ | _ -> assert false
++ with Not_found -> "arm_neon"
++
+ (* Given an intrinsic shape, produce a regexp that will match
+ the right-hand sides of instructions generated by an intrinsic of
+ that shape. *)
+@@ -260,8 +275,10 @@
+ "!?\\(\\[ \t\\]+@\\[a-zA-Z0-9 \\]+\\)?\\n")
+ (analyze_all_shapes features shape analyze_shape)
+ in
++ let effective_target = effective_target features
++ in
+ (* Emit file and function prologues. *)
+- emit_prologue chan test_name;
++ emit_prologue chan test_name effective_target;
+ (* Emit local variable declarations. *)
+ emit_automatics chan c_types features;
+ Printf.fprintf chan "\n";
--- a/src/gcc/config/arm/neon.md
+++ b/src/gcc/config/arm/neon.md
@@ -23,6 +23,7 @@
@@ -57321,70 +57896,6 @@
Use_operands [| Dreg; Qreg |], "vget_low",
notype_1, pf_su_8_32;
Vget_low, [No_op],
---- a/src/gcc/config/arm/neon-testgen.ml
-+++ b/src/gcc/config/arm/neon-testgen.ml
-@@ -46,13 +46,14 @@
- failwith ("Could not create test source file " ^ name ^ ": " ^ str)
-
- (* Emit prologue code to a test source file. *)
--let emit_prologue chan test_name =
-+let emit_prologue chan test_name effective_target =
- Printf.fprintf chan "/* Test the `%s' ARM Neon intrinsic. */\n" test_name;
- Printf.fprintf chan "/* This file was autogenerated by neon-testgen. */\n\n";
- Printf.fprintf chan "/* { dg-do assemble } */\n";
-- Printf.fprintf chan "/* { dg-require-effective-target arm_neon_ok } */\n";
-+ Printf.fprintf chan "/* { dg-require-effective-target %s_ok } */\n"
-+ effective_target;
- Printf.fprintf chan "/* { dg-options \"-save-temps -O0\" } */\n";
-- Printf.fprintf chan "/* { dg-add-options arm_neon } */\n";
-+ Printf.fprintf chan "/* { dg-add-options %s } */\n" effective_target;
- Printf.fprintf chan "\n#include \"arm_neon.h\"\n\n";
- Printf.fprintf chan "void test_%s (void)\n{\n" test_name
-
-@@ -79,9 +80,12 @@
- (* The intrinsic returns a value. We need to do explict register
- allocation for vget_low tests or they fail because of copy
- elimination. *)
-- ((if List.mem Fixed_return_reg features then
-+ ((if List.mem Fixed_vector_reg features then
- Printf.fprintf chan " register %s out_%s asm (\"d18\");\n"
- return_ty return_ty
-+ else if List.mem Fixed_core_reg features then
-+ Printf.fprintf chan " register %s out_%s asm (\"r0\");\n"
-+ return_ty return_ty
- else
- Printf.fprintf chan " %s out_%s;\n" return_ty return_ty);
- emit ())
-@@ -153,6 +157,17 @@
- then (Const :: flags, String.sub ty 6 ((String.length ty) - 6))
- else (flags, ty)) tys'
-
-+(* Work out what the effective target should be. *)
-+let effective_target features =
-+ try
-+ match List.find (fun feature ->
-+ match feature with Requires_feature _ -> true
-+ | _ -> false)
-+ features with
-+ Requires_feature "FMA" -> "arm_neonv2"
-+ | _ -> assert false
-+ with Not_found -> "arm_neon"
-+
- (* Given an intrinsic shape, produce a regexp that will match
- the right-hand sides of instructions generated by an intrinsic of
- that shape. *)
-@@ -260,8 +275,10 @@
- "!?\\(\\[ \t\\]+@\\[a-zA-Z0-9 \\]+\\)?\\n")
- (analyze_all_shapes features shape analyze_shape)
- in
-+ let effective_target = effective_target features
-+ in
- (* Emit file and function prologues. *)
-- emit_prologue chan test_name;
-+ emit_prologue chan test_name effective_target;
- (* Emit local variable declarations. *)
- emit_automatics chan c_types features;
- Printf.fprintf chan "\n";
--- a/src/gcc/config/arm/predicates.md
+++ b/src/gcc/config/arm/predicates.md
@@ -89,6 +89,15 @@
@@ -57460,6 +57971,62 @@
$(srcdir)/config/arm/cortex-a8.md \
$(srcdir)/config/arm/cortex-a8-neon.md \
$(srcdir)/config/arm/cortex-a9.md \
+--- a/src/gcc/config/arm/t-rtems-eabi
++++ b/src/gcc/config/arm/t-rtems-eabi
+@@ -1,8 +1,47 @@
+ # Custom RTEMS EABI multilibs
+
+-MULTILIB_OPTIONS = mthumb march=armv6-m/march=armv7/march=armv7-m
+-MULTILIB_DIRNAMES = thumb armv6-m armv7 armv7-m
+-MULTILIB_EXCEPTIONS = march=armv6-m march=armv7 march=armv7-m
+-MULTILIB_MATCHES =
+-MULTILIB_EXCLUSIONS =
+-MULTILIB_OSDIRNAMES =
++MULTILIB_OPTIONS = mthumb march=armv6-m/march=armv7-a/march=armv7-r/march=armv7-m mfpu=neon mfloat-abi=hard
++MULTILIB_DIRNAMES = thumb armv6-m armv7-a armv7-r armv7-m neon hard
++
++# Enumeration of multilibs
++
++MULTILIB_EXCEPTIONS =
++MULTILIB_EXCEPTIONS += mthumb/march=armv6-m/mfpu=neon/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += mthumb/march=armv6-m/mfpu=neon
++MULTILIB_EXCEPTIONS += mthumb/march=armv6-m/mfloat-abi=hard
++# MULTILIB_EXCEPTIONS += mthumb/march=armv6-m
++# MULTILIB_EXCEPTIONS += mthumb/march=armv7-a/mfpu=neon/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += mthumb/march=armv7-a/mfpu=neon
++MULTILIB_EXCEPTIONS += mthumb/march=armv7-a/mfloat-abi=hard
++# MULTILIB_EXCEPTIONS += mthumb/march=armv7-a
++MULTILIB_EXCEPTIONS += mthumb/march=armv7-r/mfpu=neon/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += mthumb/march=armv7-r/mfpu=neon
++MULTILIB_EXCEPTIONS += mthumb/march=armv7-r/mfloat-abi=hard
++# MULTILIB_EXCEPTIONS += mthumb/march=armv7-r
++MULTILIB_EXCEPTIONS += mthumb/march=armv7-m/mfpu=neon/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += mthumb/march=armv7-m/mfpu=neon
++MULTILIB_EXCEPTIONS += mthumb/march=armv7-m/mfloat-abi=hard
++# MULTILIB_EXCEPTIONS += mthumb/march=armv7-m
++MULTILIB_EXCEPTIONS += mthumb/mfpu=neon/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += mthumb/mfpu=neon
++MULTILIB_EXCEPTIONS += mthumb/mfloat-abi=hard
++# MULTILIB_EXCEPTIONS += mthumb
++MULTILIB_EXCEPTIONS += march=armv6-m/mfpu=neon/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += march=armv6-m/mfpu=neon
++MULTILIB_EXCEPTIONS += march=armv6-m/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += march=armv6-m
++MULTILIB_EXCEPTIONS += march=armv7-a/mfpu=neon/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += march=armv7-a/mfpu=neon
++MULTILIB_EXCEPTIONS += march=armv7-a/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += march=armv7-a
++MULTILIB_EXCEPTIONS += march=armv7-r/mfpu=neon/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += march=armv7-r/mfpu=neon
++MULTILIB_EXCEPTIONS += march=armv7-r/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += march=armv7-r
++MULTILIB_EXCEPTIONS += march=armv7-m/mfpu=neon/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += march=armv7-m/mfpu=neon
++MULTILIB_EXCEPTIONS += march=armv7-m/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += march=armv7-m
++MULTILIB_EXCEPTIONS += mfpu=neon/mfloat-abi=hard
++MULTILIB_EXCEPTIONS += mfpu=neon
++MULTILIB_EXCEPTIONS += mfloat-abi=hard
--- a/src/gcc/config/arm/thumb2.md
+++ b/src/gcc/config/arm/thumb2.md
@@ -1,5 +1,5 @@
@@ -57710,79 +58277,6 @@
(define_insn "*thumb2_negsi2_short"
[(set (match_operand:SI 0 "low_register_operand" "=l")
(neg:SI (match_operand:SI 1 "low_register_operand" "l")))
---- a/src/gcc/config/arm/t-rtems-eabi
-+++ b/src/gcc/config/arm/t-rtems-eabi
-@@ -1,8 +1,47 @@
- # Custom RTEMS EABI multilibs
-
--MULTILIB_OPTIONS = mthumb march=armv6-m/march=armv7/march=armv7-m
--MULTILIB_DIRNAMES = thumb armv6-m armv7 armv7-m
--MULTILIB_EXCEPTIONS = march=armv6-m march=armv7 march=armv7-m
--MULTILIB_MATCHES =
--MULTILIB_EXCLUSIONS =
--MULTILIB_OSDIRNAMES =
-+MULTILIB_OPTIONS = mthumb march=armv6-m/march=armv7-a/march=armv7-r/march=armv7-m mfpu=neon mfloat-abi=hard
-+MULTILIB_DIRNAMES = thumb armv6-m armv7-a armv7-r armv7-m neon hard
-+
-+# Enumeration of multilibs
-+
-+MULTILIB_EXCEPTIONS =
-+MULTILIB_EXCEPTIONS += mthumb/march=armv6-m/mfpu=neon/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += mthumb/march=armv6-m/mfpu=neon
-+MULTILIB_EXCEPTIONS += mthumb/march=armv6-m/mfloat-abi=hard
-+# MULTILIB_EXCEPTIONS += mthumb/march=armv6-m
-+# MULTILIB_EXCEPTIONS += mthumb/march=armv7-a/mfpu=neon/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += mthumb/march=armv7-a/mfpu=neon
-+MULTILIB_EXCEPTIONS += mthumb/march=armv7-a/mfloat-abi=hard
-+# MULTILIB_EXCEPTIONS += mthumb/march=armv7-a
-+MULTILIB_EXCEPTIONS += mthumb/march=armv7-r/mfpu=neon/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += mthumb/march=armv7-r/mfpu=neon
-+MULTILIB_EXCEPTIONS += mthumb/march=armv7-r/mfloat-abi=hard
-+# MULTILIB_EXCEPTIONS += mthumb/march=armv7-r
-+MULTILIB_EXCEPTIONS += mthumb/march=armv7-m/mfpu=neon/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += mthumb/march=armv7-m/mfpu=neon
-+MULTILIB_EXCEPTIONS += mthumb/march=armv7-m/mfloat-abi=hard
-+# MULTILIB_EXCEPTIONS += mthumb/march=armv7-m
-+MULTILIB_EXCEPTIONS += mthumb/mfpu=neon/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += mthumb/mfpu=neon
-+MULTILIB_EXCEPTIONS += mthumb/mfloat-abi=hard
-+# MULTILIB_EXCEPTIONS += mthumb
-+MULTILIB_EXCEPTIONS += march=armv6-m/mfpu=neon/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += march=armv6-m/mfpu=neon
-+MULTILIB_EXCEPTIONS += march=armv6-m/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += march=armv6-m
-+MULTILIB_EXCEPTIONS += march=armv7-a/mfpu=neon/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += march=armv7-a/mfpu=neon
-+MULTILIB_EXCEPTIONS += march=armv7-a/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += march=armv7-a
-+MULTILIB_EXCEPTIONS += march=armv7-r/mfpu=neon/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += march=armv7-r/mfpu=neon
-+MULTILIB_EXCEPTIONS += march=armv7-r/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += march=armv7-r
-+MULTILIB_EXCEPTIONS += march=armv7-m/mfpu=neon/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += march=armv7-m/mfpu=neon
-+MULTILIB_EXCEPTIONS += march=armv7-m/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += march=armv7-m
-+MULTILIB_EXCEPTIONS += mfpu=neon/mfloat-abi=hard
-+MULTILIB_EXCEPTIONS += mfpu=neon
-+MULTILIB_EXCEPTIONS += mfloat-abi=hard
---- a/src/gcc/config/arm/vfp11.md
-+++ b/src/gcc/config/arm/vfp11.md
-@@ -56,12 +56,12 @@
-
- (define_insn_reservation "vfp_farith" 8
- (and (eq_attr "generic_vfp" "yes")
-- (eq_attr "type" "fadds,faddd,fconsts,fconstd,f_cvt,fmuls,fmacs"))
-+ (eq_attr "type" "fadds,faddd,fconsts,fconstd,f_cvt,fmuls,fmacs,ffmas"))
- "fmac")
-
- (define_insn_reservation "vfp_fmul" 9
- (and (eq_attr "generic_vfp" "yes")
-- (eq_attr "type" "fmuld,fmacd"))
-+ (eq_attr "type" "fmuld,fmacd,ffmad"))
- "fmac*2")
-
- (define_insn_reservation "vfp_fdivs" 19
--- a/src/gcc/config/arm/vfp.md
+++ b/src/gcc/config/arm/vfp.md
@@ -38,6 +38,8 @@
@@ -58003,6 +58497,69 @@
;; Conversion routines
+@@ -1144,18 +1210,18 @@
+ (set_attr "type" "fcmpd")]
+ )
+
+-;; Fixed point to floating point conversions.
++;; Fixed point to floating point conversions.
+ (define_code_iterator FCVT [unsigned_float float])
+ (define_code_attr FCVTI32typename [(unsigned_float "u32") (float "s32")])
+
+ (define_insn "*combine_vcvt_f32_<FCVTI32typename>"
+ [(set (match_operand:SF 0 "s_register_operand" "=t")
+ (mult:SF (FCVT:SF (match_operand:SI 1 "s_register_operand" "0"))
+- (match_operand 2
++ (match_operand 2
+ "const_double_vcvt_power_of_two_reciprocal" "Dt")))]
+ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP3 && !flag_rounding_math"
+- "vcvt.f32.<FCVTI32typename>\\t%0, %1, %v2"
+- [(set_attr "predicable" "no")
++ "vcvt%?.f32.<FCVTI32typename>\\t%0, %1, %v2"
++ [(set_attr "predicable" "yes")
+ (set_attr "type" "f_cvt")]
+ )
+
+@@ -1164,15 +1230,16 @@
+ (define_insn "*combine_vcvt_f64_<FCVTI32typename>"
+ [(set (match_operand:DF 0 "s_register_operand" "=x,x,w")
+ (mult:DF (FCVT:DF (match_operand:SI 1 "s_register_operand" "r,t,r"))
+- (match_operand 2
++ (match_operand 2
+ "const_double_vcvt_power_of_two_reciprocal" "Dt,Dt,Dt")))]
+- "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP3 && !flag_rounding_math
++ "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP3 && !flag_rounding_math
+ && !TARGET_VFP_SINGLE"
+ "@
+- vmov.f32\\t%0, %1\;vcvt.f64.<FCVTI32typename>\\t%P0, %P0, %v2
+- vmov.f32\\t%0, %1\;vcvt.f64.<FCVTI32typename>\\t%P0, %P0, %v2
+- vmov.f64\\t%P0, %1, %1\;vcvt.f64.<FCVTI32typename>\\t%P0, %P0, %v2"
+- [(set_attr "predicable" "no")
++ vmov%?.f32\\t%0, %1\;vcvt%?.f64.<FCVTI32typename>\\t%P0, %P0, %v2
++ vmov%?.f32\\t%0, %1\;vcvt%?.f64.<FCVTI32typename>\\t%P0, %P0, %v2
++ vmov%?.f64\\t%P0, %1, %1\;vcvt%?.f64.<FCVTI32typename>\\t%P0, %P0, %v2"
++ [(set_attr "predicable" "yes")
++ (set_attr "ce_count" "2")
+ (set_attr "type" "f_cvt")
+ (set_attr "length" "8")]
+ )
+--- a/src/gcc/config/arm/vfp11.md
++++ b/src/gcc/config/arm/vfp11.md
+@@ -56,12 +56,12 @@
+
+ (define_insn_reservation "vfp_farith" 8
+ (and (eq_attr "generic_vfp" "yes")
+- (eq_attr "type" "fadds,faddd,fconsts,fconstd,f_cvt,fmuls,fmacs"))
++ (eq_attr "type" "fadds,faddd,fconsts,fconstd,f_cvt,fmuls,fmacs,ffmas"))
+ "fmac")
+
+ (define_insn_reservation "vfp_fmul" 9
+ (and (eq_attr "generic_vfp" "yes")
+- (eq_attr "type" "fmuld,fmacd"))
++ (eq_attr "type" "fmuld,fmacd,ffmad"))
+ "fmac*2")
+
+ (define_insn_reservation "vfp_fdivs" 19
--- a/src/gcc/config/avr/avr.c
+++ b/src/gcc/config/avr/avr.c
@@ -549,7 +549,12 @@
@@ -58045,6 +58602,148 @@
if (frame_pointer_needed)
{
RTX_FRAME_RELATED_P (insn) = 1;
+--- a/src/gcc/config/darwin-c.c
++++ b/src/gcc/config/darwin-c.c
+@@ -25,6 +25,7 @@
+ #include "tm.h"
+ #include "cpplib.h"
+ #include "tree.h"
++#include "target.h"
+ #include "incpath.h"
+ #include "c-family/c-common.h"
+ #include "c-family/c-pragma.h"
+@@ -36,6 +37,7 @@
+ #include "prefix.h"
+ #include "c-family/c-target.h"
+ #include "c-family/c-target-def.h"
++#include "cgraph.h"
+
+ /* Pragmas. */
+
+@@ -711,13 +713,60 @@
+ }
+ };
+
+-#undef TARGET_HANDLE_C_OPTION
++
++/* Support routines to dump the class references for NeXT ABI v1, aka
++ 32-bits ObjC-2.0, as top-level asms.
++ The following two functions should only be called from
++ objc/objc-next-runtime-abi-01.c. */
++
++static void
++darwin_objc_declare_unresolved_class_reference (const char *name)
++{
++ const char *lazy_reference = ".lazy_reference\t";
++ const char *hard_reference = ".reference\t";
++ const char *reference = MACHOPIC_INDIRECT ? lazy_reference : hard_reference;
++ size_t len = strlen (reference) + strlen(name) + 2;
++ char *buf = (char *) alloca (len);
++
++ gcc_checking_assert (!strncmp (name, ".objc_class_name_", 17));
++
++ snprintf (buf, len, "%s%s", reference, name);
++ cgraph_add_asm_node (build_string (strlen (buf), buf));
++}
++
++static void
++darwin_objc_declare_class_definition (const char *name)
++{
++ const char *xname = targetm.strip_name_encoding (name);
++ size_t len = strlen (xname) + 7 + 5;
++ char *buf = (char *) alloca (len);
++
++ gcc_checking_assert (!strncmp (name, ".objc_class_name_", 17)
++ || !strncmp (name, "*.objc_category_name_", 21));
++
++ /* Mimic default_globalize_label. */
++ snprintf (buf, len, ".globl\t%s", xname);
++ cgraph_add_asm_node (build_string (strlen (buf), buf));
++
++ snprintf (buf, len, "%s = 0", xname);
++ cgraph_add_asm_node (build_string (strlen (buf), buf));
++}
++
++#undef TARGET_HANDLE_C_OPTION
+ #define TARGET_HANDLE_C_OPTION handle_c_option
+
+-#undef TARGET_OBJC_CONSTRUCT_STRING_OBJECT
++#undef TARGET_OBJC_CONSTRUCT_STRING_OBJECT
+ #define TARGET_OBJC_CONSTRUCT_STRING_OBJECT darwin_objc_construct_string
+
+-#undef TARGET_STRING_OBJECT_REF_TYPE_P
++#undef TARGET_OBJC_DECLARE_UNRESOLVED_CLASS_REFERENCE
++#define TARGET_OBJC_DECLARE_UNRESOLVED_CLASS_REFERENCE \
++ darwin_objc_declare_unresolved_class_reference
++
++#undef TARGET_OBJC_DECLARE_CLASS_DEFINITION
++#define TARGET_OBJC_DECLARE_CLASS_DEFINITION \
++ darwin_objc_declare_class_definition
++
++#undef TARGET_STRING_OBJECT_REF_TYPE_P
+ #define TARGET_STRING_OBJECT_REF_TYPE_P darwin_cfstring_ref_p
+
+ #undef TARGET_CHECK_STRING_OBJECT_FORMAT_ARG
+--- a/src/gcc/config/darwin-protos.h
++++ b/src/gcc/config/darwin-protos.h
+@@ -26,6 +26,7 @@
+ extern void machopic_output_function_base_name (FILE *);
+ extern const char *machopic_indirection_name (rtx, bool);
+ extern const char *machopic_mcount_stub_name (void);
++extern bool machopic_should_output_picbase_label (void);
+
+ #ifdef RTX_CODE
+
+--- a/src/gcc/config/darwin.c
++++ b/src/gcc/config/darwin.c
+@@ -362,14 +362,13 @@
+
+ static GTY(()) const char * function_base_func_name;
+ static GTY(()) int current_pic_label_num;
++static GTY(()) int emitted_pic_label_num;
+
+-void
+-machopic_output_function_base_name (FILE *file)
++static void
++update_pic_label_number_if_needed (void)
+ {
+ const char *current_name;
+
+- /* If dynamic-no-pic is on, we should not get here. */
+- gcc_assert (!MACHO_DYNAMIC_NO_PIC_P);
+ /* When we are generating _get_pc thunks within stubs, there is no current
+ function. */
+ if (current_function_decl)
+@@ -387,7 +386,28 @@
+ ++current_pic_label_num;
+ function_base_func_name = "L_machopic_stub_dummy";
+ }
+- fprintf (file, "L%011d$pb", current_pic_label_num);
++}
++
++void
++machopic_output_function_base_name (FILE *file)
++{
++ /* If dynamic-no-pic is on, we should not get here. */
++ gcc_assert (!MACHO_DYNAMIC_NO_PIC_P);
++
++ update_pic_label_number_if_needed ();
++ fprintf (file, "L%d$pb", current_pic_label_num);
++}
++
++bool
++machopic_should_output_picbase_label (void)
++{
++ update_pic_label_number_if_needed ();
++
++ if (current_pic_label_num == emitted_pic_label_num)
++ return false;
++
++ emitted_pic_label_num = current_pic_label_num;
++ return true;
+ }
+
+ /* The suffix attached to non-lazy pointer symbols. */
--- a/src/gcc/config/darwin.h
+++ b/src/gcc/config/darwin.h
@@ -356,7 +356,9 @@
@@ -58076,6 +58775,45 @@
/* When generating stabs debugging, use N_BINCL entries. */
#define DBX_USE_BINCL
+@@ -612,8 +616,6 @@
+ fprintf (FILE, "\"%s\"", xname); \
+ else if (darwin_label_is_anonymous_local_objc_name (xname)) \
+ fprintf (FILE, "L%s", xname); \
+- else if (!strncmp (xname, ".objc_class_name_", 17)) \
+- fprintf (FILE, "%s", xname); \
+ else if (xname[0] != '"' && name_needs_quotes (xname)) \
+ asm_fprintf (FILE, "\"%U%s\"", xname); \
+ else \
+@@ -696,29 +698,6 @@
+ #undef TARGET_ASM_RELOC_RW_MASK
+ #define TARGET_ASM_RELOC_RW_MASK machopic_reloc_rw_mask
+
+-
+-#define ASM_DECLARE_UNRESOLVED_REFERENCE(FILE,NAME) \
+- do { \
+- if (FILE) { \
+- if (MACHOPIC_INDIRECT) \
+- fprintf (FILE, "\t.lazy_reference "); \
+- else \
+- fprintf (FILE, "\t.reference "); \
+- assemble_name (FILE, NAME); \
+- fprintf (FILE, "\n"); \
+- } \
+- } while (0)
+-
+-#define ASM_DECLARE_CLASS_REFERENCE(FILE,NAME) \
+- do { \
+- if (FILE) { \
+- fprintf (FILE, "\t"); \
+- assemble_name (FILE, NAME); \
+- fprintf (FILE, "=0\n"); \
+- (*targetm.asm_out.globalize_label) (FILE, NAME); \
+- } \
+- } while (0)
+-
+ /* Globalizing directive for a label. */
+ #define GLOBAL_ASM_OP "\t.globl "
+ #define TARGET_ASM_GLOBALIZE_LABEL darwin_globalize_label
--- a/src/gcc/config/i386/driver-i386.c
+++ b/src/gcc/config/i386/driver-i386.c
@@ -350,7 +350,10 @@
@@ -58140,6 +58878,46 @@
else
{
switch (family)
+@@ -593,13 +630,18 @@
+ /* Atom. */
+ cpu = "atom";
+ break;
++ case 0x0f:
++ /* Merom. */
++ case 0x17:
++ case 0x1d:
++ /* Penryn. */
++ cpu = "core2";
++ break;
+ case 0x1a:
+ case 0x1e:
+ case 0x1f:
+ case 0x2e:
+ /* Nehalem. */
+- cpu = "corei7";
+- break;
+ case 0x25:
+ case 0x2c:
+ case 0x2f:
+@@ -611,14 +653,10 @@
+ /* Sandy Bridge. */
+ cpu = "corei7-avx";
+ break;
+- case 0x17:
+- case 0x1d:
+- /* Penryn. */
+- cpu = "core2";
+- break;
+- case 0x0f:
+- /* Merom. */
+- cpu = "core2";
++ case 0x3a:
++ case 0x3e:
++ /* Ivy Bridge. */
++ cpu = "core-avx-i";
+ break;
+ default:
+ if (arch)
--- a/src/gcc/config/i386/i386.c
+++ b/src/gcc/config/i386/i386.c
@@ -2979,7 +2979,7 @@
@@ -58160,7 +58938,62 @@
for (i = 0; i < n; i++)
if (regclass[i] == X86_64_X87_CLASS
|| regclass[i] == X86_64_X87UP_CLASS
-@@ -20026,7 +20026,7 @@
+@@ -8613,17 +8613,12 @@
+
+ if (!flag_pic)
+ {
+- xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
++ if (TARGET_MACHO)
++ /* We don't need a pic base, we're not producing pic. */
++ gcc_unreachable ();
+
++ xops[2] = gen_rtx_LABEL_REF (Pmode, label ? label : gen_label_rtx ());
+ output_asm_insn ("mov%z0\t{%2, %0|%0, %2}", xops);
+-
+-#if TARGET_MACHO
+- /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
+- is what will be referenced by the Mach-O PIC subsystem. */
+- if (!label)
+- ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
+-#endif
+-
+ targetm.asm_out.internal_label (asm_out_file, "L",
+ CODE_LABEL_NUMBER (XEXP (xops[2], 0)));
+ }
+@@ -8636,12 +8631,18 @@
+ xops[2] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
+ xops[2] = gen_rtx_MEM (QImode, xops[2]);
+ output_asm_insn ("call\t%X2", xops);
+- /* Output the Mach-O "canonical" label name ("Lxx$pb") here too. This
+- is what will be referenced by the Mach-O PIC subsystem. */
++
+ #if TARGET_MACHO
+- if (!label)
++ /* Output the Mach-O "canonical" pic base label name ("Lxx$pb") here.
++ This is what will be referenced by the Mach-O PIC subsystem. */
++ if (machopic_should_output_picbase_label () || !label)
+ ASM_OUTPUT_LABEL (asm_out_file, MACHOPIC_FUNCTION_BASE_NAME);
+- else
++
++ /* When we are restoring the pic base at the site of a nonlocal label,
++ and we decided to emit the pic base above, we will still output a
++ local label used for calculating the correction offset (even though
++ the offset will be 0 in that case). */
++ if (label)
+ targetm.asm_out.internal_label (asm_out_file, "L",
+ CODE_LABEL_NUMBER (label));
+ #endif
+@@ -8717,7 +8718,8 @@
+ && (df_regs_ever_live_p (REAL_PIC_OFFSET_TABLE_REGNUM)
+ || crtl->profile
+ || crtl->calls_eh_return
+- || crtl->uses_const_pool))
++ || crtl->uses_const_pool
++ || cfun->has_nonlocal_label))
+ return ix86_select_alt_pic_regnum () == INVALID_REGNUM;
+
+ if (crtl->calls_eh_return && maybe_eh_return)
+@@ -20026,7 +20028,7 @@
vec[i * 2 + 1] = const1_rtx;
}
vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
@@ -58169,7 +59002,7 @@
t1 = expand_simple_binop (maskmode, PLUS, t1, vt, t1, 1,
OPTAB_DIRECT);
-@@ -20223,7 +20223,7 @@
+@@ -20223,7 +20225,7 @@
for (i = 0; i < 16; ++i)
vec[i] = GEN_INT (i/e * e);
vt = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, vec));
@@ -58178,7 +59011,7 @@
if (TARGET_XOP)
emit_insn (gen_xop_pperm (mask, mask, mask, vt));
else
-@@ -20234,7 +20234,7 @@
+@@ -20234,7 +20236,7 @@
for (i = 0; i < 16; ++i)
vec[i] = GEN_INT (i % e);
vt = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, vec));
@@ -58197,7 +59030,19 @@
;; For SSE/MMX support:
UNSPEC_FIX_NOTRUNC
-@@ -1855,18 +1856,16 @@
+@@ -201,7 +202,10 @@
+
+ ;; For RDRAND support
+ UNSPECV_RDRAND
+-])
++
++ ;; Non-local goto.
++ UNSPECV_NLGR
++ ])
+
+ ;; Constants to represent rounding modes in the ROUND instruction
+ (define_constants
+@@ -1855,18 +1859,16 @@
[(set_attr "type" "*,*,sselog1,ssemov,ssemov")
(set_attr "prefix" "*,*,maybe_vex,maybe_vex,maybe_vex")
(set (attr "mode")
@@ -58226,7 +59071,25 @@
(define_split
[(set (match_operand:TI 0 "nonimmediate_operand" "")
-@@ -3444,9 +3443,9 @@
+@@ -2328,7 +2330,7 @@
+ "TARGET_LP64 && ix86_check_movabs (insn, 0)"
+ "@
+ movabs{<imodesuffix>}\t{%1, %P0|[%P0], %1}
+- mov{<imodesuffix>}\t{%1, %a0|%a0, %1}"
++ mov{<imodesuffix>}\t{%1, %a0|<iptrsize> PTR %a0, %1}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0,*")
+ (set_attr "length_address" "8,0")
+@@ -2342,7 +2344,7 @@
+ "TARGET_LP64 && ix86_check_movabs (insn, 1)"
+ "@
+ movabs{<imodesuffix>}\t{%P1, %0|%0, [%P1]}
+- mov{<imodesuffix>}\t{%a1, %0|%0, %a1}"
++ mov{<imodesuffix>}\t{%a1, %0|%0, <iptrsize> PTR %a1}"
+ [(set_attr "type" "imov")
+ (set_attr "modrm" "0,*")
+ (set_attr "length_address" "8,0")
+@@ -3444,9 +3446,9 @@
})
(define_insn "*zero_extendsidi2_rex64"
@@ -58238,7 +59101,7 @@
"TARGET_64BIT"
"@
mov{l}\t{%1, %k0|%k0, %1}
-@@ -3469,9 +3468,9 @@
+@@ -3469,9 +3471,9 @@
;; %%% Kill me once multi-word ops are sane.
(define_insn "zero_extendsidi2_1"
@@ -58250,7 +59113,7 @@
(clobber (reg:CC FLAGS_REG))]
"!TARGET_64BIT"
"@
-@@ -15912,7 +15911,8 @@
+@@ -15912,7 +15914,8 @@
[(parallel [(set (match_operand 1 "memory_operand" "")
(match_operand 2 "register_operand" ""))
(set (match_operand 0 "register_operand" "")
@@ -58260,7 +59123,7 @@
""
"ix86_current_function_needs_cld = 1;")
-@@ -15921,7 +15921,8 @@
+@@ -15921,7 +15924,8 @@
(match_operand:DI 2 "register_operand" "a"))
(set (match_operand:DI 0 "register_operand" "=D")
(plus:DI (match_dup 1)
@@ -58270,7 +59133,7 @@
"TARGET_64BIT
&& !(fixed_regs[AX_REG] || fixed_regs[DI_REG])"
"stosq"
-@@ -15934,7 +15935,8 @@
+@@ -15934,7 +15938,8 @@
(match_operand:SI 2 "register_operand" "a"))
(set (match_operand:P 0 "register_operand" "=D")
(plus:P (match_dup 1)
@@ -58280,7 +59143,7 @@
"!(fixed_regs[AX_REG] || fixed_regs[DI_REG])"
"stos{l|d}"
[(set_attr "type" "str")
-@@ -15946,7 +15948,8 @@
+@@ -15946,7 +15951,8 @@
(match_operand:HI 2 "register_operand" "a"))
(set (match_operand:P 0 "register_operand" "=D")
(plus:P (match_dup 1)
@@ -58290,7 +59153,7 @@
"!(fixed_regs[AX_REG] || fixed_regs[DI_REG])"
"stosw"
[(set_attr "type" "str")
-@@ -15958,7 +15961,8 @@
+@@ -15958,7 +15964,8 @@
(match_operand:QI 2 "register_operand" "a"))
(set (match_operand:P 0 "register_operand" "=D")
(plus:P (match_dup 1)
@@ -58300,7 +59163,46 @@
"!(fixed_regs[AX_REG] || fixed_regs[DI_REG])"
"stosb"
[(set_attr "type" "str")
-@@ -17190,6 +17194,7 @@
+@@ -16797,7 +16804,37 @@
+ emit_insn (gen_set_got (pic_offset_table_rtx));
+ DONE;
+ })
+-
++
++(define_insn_and_split "nonlocal_goto_receiver"
++ [(unspec_volatile [(const_int 0)] UNSPECV_NLGR)]
++ "TARGET_MACHO && !TARGET_64BIT && flag_pic"
++ "#"
++ "&& reload_completed"
++ [(const_int 0)]
++{
++ if (crtl->uses_pic_offset_table)
++ {
++ rtx xops[3];
++ rtx label_rtx = gen_label_rtx ();
++ rtx tmp;
++
++ /* Get a new pic base. */
++ emit_insn (gen_set_got_labelled (pic_offset_table_rtx, label_rtx));
++ /* Correct this with the offset from the new to the old. */
++ xops[0] = xops[1] = pic_offset_table_rtx;
++ label_rtx = gen_rtx_LABEL_REF (SImode, label_rtx);
++ tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, label_rtx),
++ UNSPEC_MACHOPIC_OFFSET);
++ xops[2] = gen_rtx_CONST (Pmode, tmp);
++ ix86_expand_binary_operator (MINUS, SImode, xops);
++ }
++ else
++ /* No pic reg restore needed. */
++ emit_note (NOTE_INSN_DELETED);
++
++ DONE;
++})
++
+ ;; Avoid redundant prefixes by splitting HImode arithmetic to SImode.
+
+ (define_split
+@@ -17190,6 +17227,7 @@
"(TARGET_READ_MODIFY_WRITE || optimize_insn_for_size_p ())
&& peep2_reg_dead_p (4, operands[0])
&& !reg_overlap_mentioned_p (operands[0], operands[1])
@@ -58308,7 +59210,7 @@
&& (<MODE>mode != QImode
|| immediate_operand (operands[2], QImode)
|| q_regs_operand (operands[2], QImode))
-@@ -17254,6 +17259,7 @@
+@@ -17254,6 +17292,7 @@
|| immediate_operand (operands[2], SImode)
|| q_regs_operand (operands[2], SImode))
&& !reg_overlap_mentioned_p (operands[0], operands[1])
@@ -58361,6 +59263,110 @@
}
crtl->uses_pic_offset_table = 1;
return reg;
+@@ -4001,7 +4003,8 @@
+ || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
+ {
+ rtx addr, insn, reg;
+- addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
++ addr = gen_rtx_MEM (DFmode,
++ gen_rtx_POST_INC (word_mode, tmpreg));
+ reg = gen_rtx_REG (DFmode, i);
+ insn = emit_move_insn (addr, reg);
+ if (DO_FRAME_NOTES)
+@@ -4291,7 +4294,8 @@
+ if (df_regs_ever_live_p (i)
+ || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
+ {
+- rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
++ rtx src = gen_rtx_MEM (DFmode,
++ gen_rtx_POST_INC (word_mode, tmpreg));
+ rtx dest = gen_rtx_REG (DFmode, i);
+ emit_move_insn (dest, src);
+ }
+--- a/src/gcc/config/pa/pa.md
++++ b/src/gcc/config/pa/pa.md
+@@ -730,46 +730,46 @@
+ (define_insn "scc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 3 "comparison_operator"
+- [(match_operand:SI 1 "register_operand" "r")
++ [(match_operand:SI 1 "reg_or_0_operand" "rM")
+ (match_operand:SI 2 "arith11_operand" "rI")]))]
+ ""
+- "{com%I2clr|cmp%I2clr},%B3 %2,%1,%0\;ldi 1,%0"
++ "{com%I2clr|cmp%I2clr},%B3 %2,%r1,%0\;ldi 1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
+ (define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operator:DI 3 "comparison_operator"
+- [(match_operand:DI 1 "register_operand" "r")
++ [(match_operand:DI 1 "reg_or_0_operand" "rM")
+ (match_operand:DI 2 "arith11_operand" "rI")]))]
+ "TARGET_64BIT"
+- "cmp%I2clr,*%B3 %2,%1,%0\;ldi 1,%0"
++ "cmp%I2clr,*%B3 %2,%r1,%0\;ldi 1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
+ (define_insn "iorscc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ior:SI (match_operator:SI 3 "comparison_operator"
+- [(match_operand:SI 1 "register_operand" "r")
++ [(match_operand:SI 1 "reg_or_0_operand" "rM")
+ (match_operand:SI 2 "arith11_operand" "rI")])
+ (match_operator:SI 6 "comparison_operator"
+- [(match_operand:SI 4 "register_operand" "r")
++ [(match_operand:SI 4 "reg_or_0_operand" "rM")
+ (match_operand:SI 5 "arith11_operand" "rI")])))]
+ ""
+- "{com%I2clr|cmp%I2clr},%S3 %2,%1,%%r0\;{com%I5clr|cmp%I5clr},%B6 %5,%4,%0\;ldi 1,%0"
++ "{com%I2clr|cmp%I2clr},%S3 %2,%r1,%%r0\;{com%I5clr|cmp%I5clr},%B6 %5,%r4,%0\;ldi 1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "12")])
+
+ (define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (match_operator:DI 3 "comparison_operator"
+- [(match_operand:DI 1 "register_operand" "r")
++ [(match_operand:DI 1 "reg_or_0_operand" "rM")
+ (match_operand:DI 2 "arith11_operand" "rI")])
+ (match_operator:DI 6 "comparison_operator"
+- [(match_operand:DI 4 "register_operand" "r")
++ [(match_operand:DI 4 "reg_or_0_operand" "rM")
+ (match_operand:DI 5 "arith11_operand" "rI")])))]
+ "TARGET_64BIT"
+- "cmp%I2clr,*%S3 %2,%1,%%r0\;cmp%I5clr,*%B6 %5,%4,%0\;ldi 1,%0"
++ "cmp%I2clr,*%S3 %2,%r1,%%r0\;cmp%I5clr,*%B6 %5,%r4,%0\;ldi 1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "12")])
+
+@@ -778,20 +778,20 @@
+ (define_insn "negscc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operator:SI 3 "comparison_operator"
+- [(match_operand:SI 1 "register_operand" "r")
++ [(match_operand:SI 1 "reg_or_0_operand" "rM")
+ (match_operand:SI 2 "arith11_operand" "rI")])))]
+ ""
+- "{com%I2clr|cmp%I2clr},%B3 %2,%1,%0\;ldi -1,%0"
++ "{com%I2clr|cmp%I2clr},%B3 %2,%r1,%0\;ldi -1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
+ (define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operator:DI 3 "comparison_operator"
+- [(match_operand:DI 1 "register_operand" "r")
++ [(match_operand:DI 1 "reg_or_0_operand" "rM")
+ (match_operand:DI 2 "arith11_operand" "rI")])))]
+ "TARGET_64BIT"
+- "cmp%I2clr,*%B3 %2,%1,%0\;ldi -1,%0"
++ "cmp%I2clr,*%B3 %2,%r1,%0\;ldi -1,%0"
+ [(set_attr "type" "binary")
+ (set_attr "length" "8")])
+
--- a/src/gcc/config/rs6000/rs6000-builtin.def
+++ b/src/gcc/config/rs6000/rs6000-builtin.def
@@ -1430,9 +1430,6 @@
@@ -58607,7 +59613,15 @@
alpha*-*-linux*)
tm_file="${tm_file} alpha/elf.h alpha/linux.h alpha/linux-elf.h glibc-stdint.h"
tmake_file="${tmake_file} alpha/t-linux"
-@@ -3042,6 +3070,92 @@
+@@ -1064,7 +1092,6 @@
+ tm_file="pa/pa64-start.h ${tm_file} dbxelf.h elfos.h gnu-user.h linux.h \
+ glibc-stdint.h pa/pa-linux.h pa/pa64-regs.h pa/pa-64.h \
+ pa/pa64-linux.h"
+- tmake_file="${tmake_file} pa/t-linux"
+ gas=yes gnu_ld=yes
+ need_64bit_hwint=yes
+ ;;
+@@ -3042,6 +3069,92 @@
supported_defaults=
case "${target}" in
@@ -58700,7 +59714,7 @@
alpha*-*-*)
supported_defaults="cpu tune"
for which in cpu tune; do
-@@ -3530,6 +3644,15 @@
+@@ -3530,6 +3643,15 @@
# Set some miscellaneous flags for particular targets.
target_cpu_default2=
case ${target} in
@@ -58830,7 +59844,21 @@
/* Performs file-level cleanup. Close graph file, generate coverage
--- a/src/gcc/cp/ChangeLog
+++ b/src/gcc/cp/ChangeLog
-@@ -1,3 +1,48 @@
+@@ -1,3 +1,62 @@
++2013-09-13 Jason Merrill <jason@redhat.com>
++
++ PR c++/58273
++ * pt.c (any_type_dependent_elements_p): Actually check for
++ type-dependence, not value-dependence.
++
++2013-08-20 Jason Merrill <jason@redhat.com>
++
++ PR c++/58119
++ * cp-tree.h (WILDCARD_TYPE_P): Split out from...
++ (MAYBE_CLASS_TYPE_P): ...here.
++ * cvt.c (build_expr_type_conversion): Don't complain about a
++ template that can't match the desired type category.
++
+2012-12-03 Paolo Carlini <paolo.carlini@oracle.com>
+
+ PR c++/54170
@@ -58879,6 +59907,36 @@
2013-04-11 Release Manager
* GCC 4.7.3 released.
+--- a/src/gcc/cp/cp-tree.h
++++ b/src/gcc/cp/cp-tree.h
+@@ -1191,17 +1191,20 @@
+ /* The _DECL for this _TYPE. */
+ #define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
+
+-/* Nonzero if T is a class (or struct or union) type. Also nonzero
+- for template type parameters, typename types, and instantiated
+- template template parameters. Keep these checks in ascending code
+- order. */
+-#define MAYBE_CLASS_TYPE_P(T) \
++/* Nonzero if T is a type that could resolve to any kind of concrete type
++ at instantiation time. */
++#define WILDCARD_TYPE_P(T) \
+ (TREE_CODE (T) == TEMPLATE_TYPE_PARM \
+ || TREE_CODE (T) == TYPENAME_TYPE \
+ || TREE_CODE (T) == TYPEOF_TYPE \
+ || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
+- || TREE_CODE (T) == DECLTYPE_TYPE \
+- || CLASS_TYPE_P (T))
++ || TREE_CODE (T) == DECLTYPE_TYPE)
++
++/* Nonzero if T is a class (or struct or union) type. Also nonzero
++ for template type parameters, typename types, and instantiated
++ template template parameters. Keep these checks in ascending code
++ order. */
++#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
+
+ /* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or
+ union type. */
--- a/src/gcc/cp/cvt.c
+++ b/src/gcc/cp/cvt.c
@@ -198,6 +198,8 @@
@@ -58914,6 +59972,48 @@
}
else if (TYPE_PTR_TO_MEMBER_P (type) && INTEGRAL_CODE_P (form))
{
+@@ -1539,17 +1539,6 @@
+ if (DECL_NONCONVERTING_P (cand))
+ continue;
+
+- if (TREE_CODE (cand) == TEMPLATE_DECL)
+- {
+- if (complain)
+- {
+- error ("ambiguous default type conversion from %qT",
+- basetype);
+- error (" candidate conversions include %qD", cand);
+- }
+- return error_mark_node;
+- }
+-
+ candidate = non_reference (TREE_TYPE (TREE_TYPE (cand)));
+
+ switch (TREE_CODE (candidate))
+@@ -1583,11 +1572,23 @@
+ break;
+
+ default:
++ /* A wildcard could be instantiated to match any desired
++ type, but we can't deduce the template argument. */
++ if (WILDCARD_TYPE_P (candidate))
++ win = true;
+ break;
+ }
+
+ if (win)
+ {
++ if (TREE_CODE (cand) == TEMPLATE_DECL)
++ {
++ if (complain)
++ error ("default type conversion can't deduce template"
++ " argument for %qD", cand);
++ return error_mark_node;
++ }
++
+ if (winner)
+ {
+ if (complain)
--- a/src/gcc/cp/parser.c
+++ b/src/gcc/cp/parser.c
@@ -16691,7 +16691,7 @@
@@ -58959,6 +60059,15 @@
case IDENTIFIER_NODE:
if (IDENTIFIER_TYPENAME_P (t))
{
+@@ -19545,7 +19552,7 @@
+ any_type_dependent_elements_p (const_tree list)
+ {
+ for (; list; list = TREE_CHAIN (list))
+- if (value_dependent_expression_p (TREE_VALUE (list)))
++ if (type_dependent_expression_p (TREE_VALUE (list)))
+ return true;
+
+ return false;
--- a/src/gcc/cp/semantics.c
+++ b/src/gcc/cp/semantics.c
@@ -7412,15 +7412,17 @@
@@ -59067,11 +60176,6 @@
|| TREE_CODE (retval) == PARM_DECL)
&& DECL_CONTEXT (retval) == current_function_decl
&& !TREE_STATIC (retval)
---- a/src/gcc/DATESTAMP
-+++ b/src/gcc/DATESTAMP
-@@ -1 +1 @@
--20130411
-+20130812
--- a/src/gcc/dwarf2out.c
+++ b/src/gcc/dwarf2out.c
@@ -22538,7 +22538,7 @@
@@ -59724,7 +60828,7 @@
#define YY_RESTORE_YY_MORE_OFFSET
char *yytext;
-#line 1 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 1 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 1 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
/* -*- indented-text -*- */
/* Process source files and output type information.
Copyright (C) 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010
@@ -59733,7 +60837,7 @@
<http://www.gnu.org/licenses/>. */
#define YY_NO_INPUT 1
-#line 25 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 25 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 25 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
#ifdef GENERATOR_FILE
#include "bconfig.h"
#else
@@ -59782,7 +60886,7 @@
register int yy_act;
-#line 63 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 63 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 63 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
/* Do this on entry to yylex(): */
*yylval = 0;
@@ -59800,7 +60904,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 74 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 74 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 74 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
BEGIN(in_struct);
return TYPEDEF;
@@ -59809,7 +60913,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 78 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 78 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 78 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
BEGIN(in_struct);
return STRUCT;
@@ -59818,7 +60922,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 82 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 82 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 82 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
BEGIN(in_struct);
return UNION;
@@ -59827,7 +60931,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 86 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 86 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 86 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
BEGIN(in_struct);
return EXTERN;
@@ -59836,7 +60940,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 90 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 90 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 90 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
BEGIN(in_struct);
return STATIC;
@@ -59845,7 +60949,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 95 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 95 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 95 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
BEGIN(in_struct);
return DEFVEC_OP;
@@ -59854,7 +60958,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 99 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 99 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 99 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
BEGIN(in_struct);
return DEFVEC_I;
@@ -59863,7 +60967,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 103 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 103 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 103 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
BEGIN(in_struct);
return DEFVEC_ALLOC;
@@ -59872,21 +60976,21 @@
case 9:
YY_RULE_SETUP
-#line 111 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 111 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 111 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ BEGIN(in_struct_comment); }
YY_BREAK
case 10:
/* rule 10 can match eol */
YY_RULE_SETUP
-#line 113 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 113 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 113 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ update_lineno (yytext, yyleng); }
YY_BREAK
case 11:
/* rule 11 can match eol */
YY_RULE_SETUP
-#line 114 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 114 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 114 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ lexer_line.line++; }
YY_BREAK
case 12:
@@ -59895,7 +60999,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 116 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 116 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 116 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
/* don't care */
YY_BREAK
case 13:
@@ -59904,7 +61008,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 117 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 117 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 117 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ return GTY_TOKEN; }
YY_BREAK
case 14:
@@ -59913,7 +61017,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 118 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 118 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 118 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ return VEC_TOKEN; }
YY_BREAK
case 15:
@@ -59922,7 +61026,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 119 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 119 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 119 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ return UNION; }
YY_BREAK
case 16:
@@ -59931,7 +61035,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 120 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 120 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 120 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ return STRUCT; }
YY_BREAK
case 17:
@@ -59940,7 +61044,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 121 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 121 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 121 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ return ENUM; }
YY_BREAK
case 18:
@@ -59949,7 +61053,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 122 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 122 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 122 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ return PTR_ALIAS; }
YY_BREAK
case 19:
@@ -59958,13 +61062,13 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 123 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 123 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 123 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ return NESTED_PTR; }
YY_BREAK
case 20:
YY_RULE_SETUP
-#line 124 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 124 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 124 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ return NUM; }
YY_BREAK
case 21:
@@ -59973,7 +61077,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 125 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 125 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 125 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
*yylval = XDUPVAR (const char, yytext, yyleng, yyleng+1);
return PARAM_IS;
@@ -59982,12 +61086,12 @@
(yy_c_buf_p) = yy_cp -= 1;
YY_DO_BEFORE_ACTION; /* set up yytext again */
-#line 131 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 131 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 131 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
case 23:
/* rule 23 can match eol */
YY_RULE_SETUP
-#line 131 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 131 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 131 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
size_t len;
@@ -59996,7 +61100,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 143 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 143 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 143 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
*yylval = XDUPVAR (const char, yytext, yyleng, yyleng+1);
return ID;
@@ -60005,7 +61109,7 @@
/* rule 25 can match eol */
YY_RULE_SETUP
-#line 148 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 148 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 148 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
*yylval = XDUPVAR (const char, yytext+1, yyleng-2, yyleng-1);
return STRING;
@@ -60014,7 +61118,7 @@
/* rule 26 can match eol */
YY_RULE_SETUP
-#line 153 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 153 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 153 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
*yylval = XDUPVAR (const char, yytext+1, yyleng-2, yyleng-1);
return ARRAY;
@@ -60023,7 +61127,7 @@
/* rule 27 can match eol */
YY_RULE_SETUP
-#line 157 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 157 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 157 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
*yylval = XDUPVAR (const char, yytext+1, yyleng-2, yyleng);
return CHAR;
@@ -60032,13 +61136,13 @@
case 28:
YY_RULE_SETUP
-#line 162 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 162 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 162 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ return ELLIPSIS; }
YY_BREAK
case 29:
YY_RULE_SETUP
-#line 163 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 163 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 163 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ return yytext[0]; }
YY_BREAK
/* ignore pp-directives */
@@ -60046,13 +61150,13 @@
/* rule 30 can match eol */
YY_RULE_SETUP
-#line 166 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 166 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 166 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{lexer_line.line++;}
YY_BREAK
case 31:
YY_RULE_SETUP
-#line 168 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 168 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 168 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
error_at_line (&lexer_line, "unexpected character `%s'", yytext);
}
@@ -60061,35 +61165,35 @@
case 32:
YY_RULE_SETUP
-#line 173 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 173 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 173 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ BEGIN(in_comment); }
YY_BREAK
case 33:
/* rule 33 can match eol */
YY_RULE_SETUP
-#line 174 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 174 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 174 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ lexer_line.line++; }
YY_BREAK
case 34:
-#line 176 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 176 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 176 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
case 35:
/* rule 35 can match eol */
-#line 177 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 177 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 177 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
case 36:
/* rule 36 can match eol */
YY_RULE_SETUP
-#line 177 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 177 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 177 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
/* do nothing */
YY_BREAK
case 37:
/* rule 37 can match eol */
YY_RULE_SETUP
-#line 178 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 178 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 178 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ update_lineno (yytext, yyleng); }
YY_BREAK
case 38:
@@ -60098,7 +61202,7 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 179 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 179 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 179 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
/* do nothing */
YY_BREAK
@@ -60106,16 +61210,16 @@
/* rule 39 can match eol */
YY_RULE_SETUP
-#line 182 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 182 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 182 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ lexer_line.line++; }
YY_BREAK
case 40:
-#line 184 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 184 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 184 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
case 41:
YY_RULE_SETUP
-#line 184 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 184 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 184 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
/* do nothing */
YY_BREAK
case 42:
@@ -60124,29 +61228,29 @@
YY_DO_BEFORE_ACTION; /* set up yytext again */
YY_RULE_SETUP
-#line 185 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 185 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 185 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
/* do nothing */
YY_BREAK
case 43:
YY_RULE_SETUP
-#line 187 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 187 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 187 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ BEGIN(INITIAL); }
YY_BREAK
case 44:
YY_RULE_SETUP
-#line 188 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 188 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 188 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{ BEGIN(in_struct); }
YY_BREAK
case 45:
-#line 191 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 191 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 191 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
case 46:
YY_RULE_SETUP
-#line 191 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 191 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 191 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
{
error_at_line (&lexer_line,
"unterminated comment or string; unexpected EOF");
@@ -60155,13 +61259,13 @@
/* rule 47 can match eol */
YY_RULE_SETUP
-#line 196 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 196 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 196 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
/* do nothing */
YY_BREAK
case 48:
YY_RULE_SETUP
-#line 198 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 198 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 198 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
YY_FATAL_ERROR( "flex scanner jammed" );
YY_BREAK
-#line 1654 "gengtype-lex.c"
@@ -60185,7 +61289,7 @@
#define YYTABLES_NAME "yytables"
-#line 198 "/space/rguenther/gcc-4.7.3/gcc-4.7.3/gcc/gengtype-lex.l"
-+#line 198 "/home/yvan/release/4.7-2013.08/gcc-linaro-4.7-2013.08/gcc/gengtype-lex.l"
++#line 198 "/home/lyon/src/GCC/release/4.7-2013.10/gcc-linaro-4.7-2013.10/gcc/gengtype-lex.l"
@@ -62199,10 +63303,6 @@
/* Likewise with X. In particular this can happen when
noce_get_condition looks farther back in the instruction
stream than one might expect. */
---- a/src/gcc/LINARO-VERSION
-+++ b/src/gcc/LINARO-VERSION
-@@ -0,0 +1 @@
-+4.7-2013.08
--- a/src/gcc/lower-subreg.c
+++ b/src/gcc/lower-subreg.c
@@ -233,9 +233,9 @@
@@ -62295,40 +63395,146 @@
return 0;
}
---- a/src/gcc/Makefile.in
-+++ b/src/gcc/Makefile.in
-@@ -1848,11 +1848,12 @@
- "$(MULTILIB_EXTRA_OPTS)" \
- "$(MULTILIB_EXCLUSIONS)" \
- "$(MULTILIB_OSDIRNAMES)" \
-+ "$(MULTILIB_REQUIRED)" \
- "$(MULTIARCH_DIRNAME)" \
- "@enable_multilib@" \
- > tmp-mlib.h; \
- else \
-- $(SHELL) $(srcdir)/genmultilib '' '' '' '' '' '' '' "$(MULTIARCH_DIRNAME)" no \
-+ $(SHELL) $(srcdir)/genmultilib '' '' '' '' '' '' '' '' "$(MULTIARCH_DIRNAME)" no \
- > tmp-mlib.h; \
- fi
- $(SHELL) $(srcdir)/../move-if-change tmp-mlib.h multilib.h
-@@ -2570,7 +2571,7 @@
- $(TM_H) coretypes.h $(TREE_DUMP_H) $(TREE_PASS_H) $(FLAGS_H) \
- tree-iterator.h $(BASIC_BLOCK_H) $(GIMPLE_H) $(TREE_INLINE_H) \
- $(VEC_H) langhooks.h alloc-pool.h pointer-set.h $(CFGLOOP_H) \
-- tree-pretty-print.h gimple-pretty-print.h $(DIAGNOSTIC_CORE_H)
-+ tree-pretty-print.h gimple-pretty-print.h $(DIAGNOSTIC_CORE_H) $(PARAMS_H)
- tree-optimize.o : tree-optimize.c $(TREE_FLOW_H) $(CONFIG_H) $(SYSTEM_H) \
- $(TREE_H) $(TM_P_H) $(GGC_H) output.h \
- $(DIAGNOSTIC_H) $(BASIC_BLOCK_H) $(FLAGS_H) $(TIMEVAR_H) $(TM_H) \
-@@ -3904,7 +3905,7 @@
- $(SYSTEM_H) coretypes.h $(GTM_H) errors.h $(READ_MD_H) gensupport.h
- build/gengenrtl.o : gengenrtl.c $(BCONFIG_H) $(SYSTEM_H) rtl.def
- gengtype-lex.o build/gengtype-lex.o : gengtype-lex.c gengtype.h $(SYSTEM_H)
--gengtype-lex.o: $(CONFIG_H)
-+gengtype-lex.o: $(CONFIG_H) $(BCONFIG_H)
- build/gengtype-lex.o: $(BCONFIG_H)
- gengtype-parse.o build/gengtype-parse.o : gengtype-parse.c gengtype.h \
- $(SYSTEM_H)
+--- a/src/gcc/objc/ChangeLog
++++ b/src/gcc/objc/ChangeLog
+@@ -1,3 +1,14 @@
++2013-09-01 Iain Sandoe <iain@codesourcery.com>
++
++ Backported from 4.8
++ 2012-06-19 Steven Bosscher <steven@gcc.gnu.org>
++
++ * objc-next-runtime-abi-01.c: Do not include tm.h and output.h.
++ Include c-family/c-target.h.
++ (handle_next_class_ref): Rewrite to emit top-level asm statements.
++ (handle_next_impent): Likewise.
++ * objc/Make-lang.in: Fix dependencies for objc-next-runtime-abi-01.o.
++
+ 2013-04-11 Release Manager
+
+ * GCC 4.7.3 released.
+--- a/src/gcc/objc/Make-lang.in
++++ b/src/gcc/objc/Make-lang.in
+@@ -106,7 +106,7 @@
+ gt-objc-objc-next-runtime-abi-01.h \
+ $(START_HDRS) \
+ $(GGC_H) $(DIAGNOSTIC_CORE_H) $(FLAGS_H) input.h \
+- $(TARGET_H) output.h \
++ $(TARGET_H) \
+ objc/objc-encoding.h \
+ objc/objc-next-metadata-tags.h \
+ objc/objc-runtime-hooks.h \
+--- a/src/gcc/objc/objc-next-runtime-abi-01.c
++++ b/src/gcc/objc/objc-next-runtime-abi-01.c
+@@ -26,7 +26,6 @@
+ #include "config.h"
+ #include "system.h"
+ #include "coretypes.h"
+-#include "tm.h"
+ #include "tree.h"
+
+ #ifdef OBJCPLUS
+@@ -49,7 +48,7 @@
+
+ #include "ggc.h"
+ #include "target.h"
+-#include "output.h"
++#include "c-family/c-target.h"
+ #include "tree-iterator.h"
+
+ #include "objc-runtime-hooks.h"
+@@ -2268,47 +2267,50 @@
+ init_objc_symtab (TREE_TYPE (UOBJC_SYMBOLS_decl)));
+ }
+
++/* Any target implementing NeXT ObjC m32 ABI has to ensure that objects
++ refer to, and define, symbols that enforce linkage of classes into the
++ executable image, preserving unix archive semantics.
++
++ At present (4.8), the only targets implementing this are Darwin; these
++ use top level asms to implement a scheme (see config/darwin-c.c). The
++ latter method is a hack, but compatible with LTO see also PR48109 for
++ further discussion and other possible methods. */
+
+ static void
+-handle_next_class_ref (tree chain)
++handle_next_class_ref (tree chain ATTRIBUTE_UNUSED)
+ {
+- const char *name = IDENTIFIER_POINTER (TREE_VALUE (chain));
+- char *string = (char *) alloca (strlen (name) + 30);
+-
+- sprintf (string, ".objc_class_name_%s", name);
+-
+-#ifdef ASM_DECLARE_UNRESOLVED_REFERENCE
+- ASM_DECLARE_UNRESOLVED_REFERENCE (asm_out_file, string);
+-#else
+- return ; /* NULL build for targets other than Darwin. */
+-#endif
++ if (targetcm.objc_declare_unresolved_class_reference)
++ {
++ const char *name = IDENTIFIER_POINTER (TREE_VALUE (chain));
++ char *string = (char *) alloca (strlen (name) + 30);
++ sprintf (string, ".objc_class_name_%s", name);
++ targetcm.objc_declare_unresolved_class_reference (string);
++ }
+ }
+
+ static void
+-handle_next_impent (struct imp_entry *impent)
++handle_next_impent (struct imp_entry *impent ATTRIBUTE_UNUSED)
+ {
+- char buf[BUFSIZE];
+-
+- switch (TREE_CODE (impent->imp_context))
++ if (targetcm.objc_declare_class_definition)
+ {
+- case CLASS_IMPLEMENTATION_TYPE:
+- snprintf (buf, BUFSIZE, ".objc_class_name_%s",
+- IDENTIFIER_POINTER (CLASS_NAME (impent->imp_context)));
+- break;
+- case CATEGORY_IMPLEMENTATION_TYPE:
+- snprintf (buf, BUFSIZE, "*.objc_category_name_%s_%s",
+- IDENTIFIER_POINTER (CLASS_NAME (impent->imp_context)),
+- IDENTIFIER_POINTER (CLASS_SUPER_NAME (impent->imp_context)));
+- break;
+- default:
+- return;
+- }
++ char buf[BUFSIZE];
+
+-#ifdef ASM_DECLARE_CLASS_REFERENCE
+- ASM_DECLARE_CLASS_REFERENCE (asm_out_file, buf);
+-#else
+- return ; /* NULL build for targets other than Darwin. */
+-#endif
++ switch (TREE_CODE (impent->imp_context))
++ {
++ case CLASS_IMPLEMENTATION_TYPE:
++ snprintf (buf, BUFSIZE, ".objc_class_name_%s",
++ IDENTIFIER_POINTER (CLASS_NAME (impent->imp_context)));
++ break;
++ case CATEGORY_IMPLEMENTATION_TYPE:
++ snprintf (buf, BUFSIZE, "*.objc_category_name_%s_%s",
++ IDENTIFIER_POINTER (CLASS_NAME (impent->imp_context)),
++ IDENTIFIER_POINTER (CLASS_SUPER_NAME (impent->imp_context)));
++ break;
++ default:
++ return;
++ }
++ targetcm.objc_declare_class_definition (buf);
++ }
+ }
+
+ static void
+@@ -2415,8 +2417,7 @@
+
+ /* Dump the class references. This forces the appropriate classes
+ to be linked into the executable image, preserving unix archive
+- semantics. This can be removed when we move to a more dynamically
+- linked environment. */
++ semantics. */
+
+ for (chain = cls_ref_chain; chain; chain = TREE_CHAIN (chain))
+ {
--- a/src/gcc/optabs.c
+++ b/src/gcc/optabs.c
@@ -3028,6 +3028,47 @@
@@ -63353,7 +64559,29 @@
bool, false)
--- a/src/gcc/testsuite/ChangeLog
+++ b/src/gcc/testsuite/ChangeLog
-@@ -1,3 +1,197 @@
+@@ -1,3 +1,219 @@
++2013-09-23 Eric Botcazou <ebotcazou@adacore.com>
++
++ * gnat.dg/opt28.ad[sb]: New test.
++ * gnat.dg/opt28_pkg.ads: New helper.
++
++2013-09-18 Eric Botcazou <ebotcazou@adacore.com>
++
++ * gnat.dg/in_out_parameter4.adb: New test.
++
++2013-08-13 Eric Botcazou <ebotcazou@adacore.com>
++
++ * gnat.dg/loop_optimization16.adb: New test.
++ * gnat.dg/loop_optimization16_pkg.ad[sb]: New helper.
++
++2013-08-13 Marek Polacek <polacek@redhat.com>
++
++ Backport from 4.8:
++ 2013-08-13 Marek Polacek <polacek@redhat.com>
++
++ PR tree-optimization/57980
++ * gcc.dg/pr57980.c: New test.
++
+2013-08-11 Janus Weil <janus@gcc.gnu.org>
+
+ Backport from trunk:
@@ -64000,6 +65228,492 @@
+ * gcc.dg/const-float128-ped.c: Likewise.
+ * gcc.dg/const-float128.c: Likewise.
+ * gcc.dg/stack-usage-1.c: Likewise.
+--- a/src/gcc/testsuite/g++.dg/abi/aarch64_guard1.C
++++ b/src/gcc/testsuite/g++.dg/abi/aarch64_guard1.C
+@@ -0,0 +1,17 @@
++// Check that the initialization guard variable is an 8-byte aligned,
++// 8-byte doubleword and that only the least significant bit is used
++// for initialization guard variables.
++// { dg-do compile { target aarch64*-*-* } }
++// { dg-options "-O -fdump-tree-original -fno-section-anchors" }
++
++int bar();
++
++int *foo ()
++{
++ static int x = bar ();
++ return &x;
++}
++
++// { dg-final { scan-assembler _ZGVZ3foovE1x,8,8 } }
++// { dg-final { scan-tree-dump "_ZGVZ3foovE1x & 1" "original" } }
++// { dg-final { cleanup-tree-dump "original" } }
+--- a/src/gcc/testsuite/g++.dg/abi/arm_va_list.C
++++ b/src/gcc/testsuite/g++.dg/abi/arm_va_list.C
+@@ -1,9 +1,10 @@
+-// { dg-do compile }
++// { dg-do compile { target { aarch64*-*-* arm*-*-* } } }
+ // { dg-options "-Wno-abi" }
+-// { dg-require-effective-target arm_eabi }
++// { dg-require-effective-target arm_eabi { target arm*-*-* } }
+
+ // AAPCS \S 7.1.4 requires that va_list be a typedef for "struct
+ // __va_list". The mangling is as if it were "std::__va_list".
++// AAPCS64 \S 7.1.4 has the same requirement for AArch64 targets.
+ // #include <stdarg.h>
+ typedef __builtin_va_list va_list;
+
+--- a/src/gcc/testsuite/g++.dg/abi/mangle-neon-aarch64.C
++++ b/src/gcc/testsuite/g++.dg/abi/mangle-neon-aarch64.C
+@@ -0,0 +1,55 @@
++// Test that AArch64 AdvSIMD (NEON) vector types have their names mangled
++// correctly.
++
++// { dg-do compile { target { aarch64*-*-* } } }
++
++#include <arm_neon.h>
++
++void f0 (int8x8_t a) {}
++void f1 (int16x4_t a) {}
++void f2 (int32x2_t a) {}
++void f3 (uint8x8_t a) {}
++void f4 (uint16x4_t a) {}
++void f5 (uint32x2_t a) {}
++void f6 (float32x2_t a) {}
++void f7 (poly8x8_t a) {}
++void f8 (poly16x4_t a) {}
++
++void f9 (int8x16_t a) {}
++void f10 (int16x8_t a) {}
++void f11 (int32x4_t a) {}
++void f12 (int64x2_t a) {}
++void f13 (uint8x16_t a) {}
++void f14 (uint16x8_t a) {}
++void f15 (uint32x4_t a) {}
++void f16 (uint64x2_t a) {}
++void f17 (float32x4_t a) {}
++void f18 (float64x2_t a) {}
++void f19 (poly8x16_t a) {}
++void f20 (poly16x8_t a) {}
++
++void f21 (int8x16_t, int8x16_t) {}
++
++
++// { dg-final { scan-assembler "_Z2f010__Int8x8_t:" } }
++// { dg-final { scan-assembler "_Z2f111__Int16x4_t:" } }
++// { dg-final { scan-assembler "_Z2f211__Int32x2_t:" } }
++// { dg-final { scan-assembler "_Z2f311__Uint8x8_t:" } }
++// { dg-final { scan-assembler "_Z2f412__Uint16x4_t:" } }
++// { dg-final { scan-assembler "_Z2f512__Uint32x2_t:" } }
++// { dg-final { scan-assembler "_Z2f613__Float32x2_t:" } }
++// { dg-final { scan-assembler "_Z2f711__Poly8x8_t:" } }
++// { dg-final { scan-assembler "_Z2f812__Poly16x4_t:" } }
++// { dg-final { scan-assembler "_Z2f911__Int8x16_t:" } }
++// { dg-final { scan-assembler "_Z3f1011__Int16x8_t:" } }
++// { dg-final { scan-assembler "_Z3f1111__Int32x4_t:" } }
++// { dg-final { scan-assembler "_Z3f1211__Int64x2_t:" } }
++// { dg-final { scan-assembler "_Z3f1312__Uint8x16_t:" } }
++// { dg-final { scan-assembler "_Z3f1412__Uint16x8_t:" } }
++// { dg-final { scan-assembler "_Z3f1512__Uint32x4_t:" } }
++// { dg-final { scan-assembler "_Z3f1612__Uint64x2_t:" } }
++// { dg-final { scan-assembler "_Z3f1713__Float32x4_t:" } }
++// { dg-final { scan-assembler "_Z3f1813__Float64x2_t:" } }
++// { dg-final { scan-assembler "_Z3f1912__Poly8x16_t:" } }
++// { dg-final { scan-assembler "_Z3f2012__Poly16x8_t:" } }
++// { dg-final { scan-assembler "_Z3f2111__Int8x16_tS_:" } }
+--- a/src/gcc/testsuite/g++.dg/cpp0x/constexpr-array-ptr8.C
++++ b/src/gcc/testsuite/g++.dg/cpp0x/constexpr-array-ptr8.C
+@@ -0,0 +1,54 @@
++// PR c++/57047
++// { dg-require-effective-target c++11 }
++
++template <typename>
++struct A;
++template <typename T>
++struct A <T &>
++{
++ typedef T type;
++};
++template <typename T>
++constexpr T && foo (typename A <T>::type & __t) noexcept
++{
++ return static_cast <T &&>(__t);
++}
++template <class T1, class T2>
++struct B
++{
++ T1 t1;
++ T2 t2;
++ template <class U>
++ constexpr B (U && __x, const T2 & __y) : t1 (foo <U> (__x)), t2 (__y) {}
++};
++static inline constexpr bool
++fn1 (const char c)
++{
++ return ('0' <= c) && (c <= '9');
++}
++static inline constexpr bool
++fn2 (const char c)
++{
++ return (('A' <= c) && (c <= 'Z')) || (('a' <= c) && (c <= 'z'));
++}
++static constexpr bool
++fn3 (const char *const x)
++{
++ return (x[1] == '\0' && x[0] == ']') ? true : (!fn1 (x[0])) ? false : fn3 (&x[1]);
++}
++static constexpr bool
++fn4 (const char *const x)
++{
++ return (x[0] == '\0') ? fn3 (&x[1]) : fn4 (&x[1]);
++}
++static inline constexpr bool
++fn5 (const char *const x)
++{
++ return fn2 (x[0]) ? fn4 (x) : false;
++}
++struct C final
++{
++ constexpr C (const char *const t1) : c (fn5 (t1) ? 199 : 69) {}
++ unsigned c;
++};
++B <C, C> p ("a", "b");
+--- a/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-eh3.C
++++ b/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-eh3.C
+@@ -0,0 +1,14 @@
++// PR c++/56388
++// { dg-require-effective-target c++11 }
++
++int main()
++{
++ bool /*const*/ condition = false;
++
++ [&]{
++ try{}
++ catch(...){
++ if(condition){}
++ }
++ }();
++}
+--- a/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nullptr.C
++++ b/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nullptr.C
+@@ -0,0 +1,47 @@
++// PR c++/54170
++// { dg-do run { target c++11 } }
++
++#include <cassert>
++
++struct A;
++typedef A* ptr;
++typedef int (A::*pmf) (int);
++typedef int (A::*pdm);
++
++int total;
++
++void add(int n)
++{
++ total += n;
++}
++
++template <typename RType, typename Callable>
++RType Call(Callable native_func, int arg)
++{
++ return native_func(arg);
++}
++
++template <typename RType>
++RType do_test(int delta)
++{
++ return Call<RType>([=](int delta) { add(delta); return nullptr; }, delta);
++}
++
++template <typename RType>
++void test()
++{
++ total = 0;
++ assert (!do_test<RType>(5));
++ assert (total == 5);
++ assert (!do_test<RType>(20));
++ assert (total == 25);
++ assert (!do_test<RType>(-256));
++ assert (total == -231);
++}
++
++int main()
++{
++ test<ptr>();
++ test<pdm>();
++ test<pmf>();
++}
+--- a/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-return1.C
++++ b/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-return1.C
+@@ -0,0 +1,26 @@
++// PR c++/57437
++// { dg-require-effective-target c++11 }
++
++struct A {
++ int i;
++
++ A(): i(42) {}
++ A(const A&) = default;
++ A(A&& a): i(a.i) { a.i = 0; }
++};
++
++int main()
++{
++ A x;
++
++ auto y = [x] () mutable {
++ x.i++;
++ return x;
++ };
++
++ if (y().i != 43)
++ __builtin_abort ();
++
++ if (y().i != 44)
++ __builtin_abort ();
++}
+--- a/src/gcc/testsuite/g++.dg/debug/template2.C
++++ b/src/gcc/testsuite/g++.dg/debug/template2.C
+@@ -0,0 +1,14 @@
++// PR c++/57545
++
++template<typename T, long unsigned int N>
++struct array {
++ T data[N];
++};
++
++template<typename T>
++struct derived {
++ typedef long unsigned int size_type;
++ static const size_type n = 42;
++
++ array<int, n> a;
++};
+--- a/src/gcc/testsuite/g++.dg/expr/const1.C
++++ b/src/gcc/testsuite/g++.dg/expr/const1.C
+@@ -0,0 +1,9 @@
++// PR c++/57551
++
++extern unsigned long ADDR;
++
++unsigned long f(){
++ const unsigned long* const var=&ADDR;
++ const unsigned long retval=var[1];
++ return retval;
++}
+--- a/src/gcc/testsuite/g++.dg/other/PR23205.C
++++ b/src/gcc/testsuite/g++.dg/other/PR23205.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-skip-if "No stabs" { mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* *-*-vxworks } { "*" } { "" } } */
++/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* *-*-vxworks } { "*" } { "" } } */
+ /* { dg-options "-gstabs+ -fno-eliminate-unused-debug-types" } */
+
+ const int foobar = 4;
+--- a/src/gcc/testsuite/g++.dg/other/pr23205-2.C
++++ b/src/gcc/testsuite/g++.dg/other/pr23205-2.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-skip-if "No stabs" { mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* } { "*" } { "" } } */
++/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* } { "*" } { "" } } */
+ /* { dg-options "-gstabs+ -fno-eliminate-unused-debug-types -ftoplevel-reorder" } */
+
+ const int foobar = 4;
+--- a/src/gcc/testsuite/g++.dg/template/array26.C
++++ b/src/gcc/testsuite/g++.dg/template/array26.C
+@@ -0,0 +1,40 @@
++// PR c++/57325
++
++class valarray { int _M_data; };
++template < typename > struct SimpleJet { valarray partials; };
++
++template < class C > struct scoped_ptr_impl
++{
++ scoped_ptr_impl (C *):data_ () { }
++ struct Data
++ {
++ C ptr;
++ };
++ Data data_;
++};
++
++template < class, class = int >struct scoped_ptr;
++template < class C, class D > struct scoped_ptr <C[], D >
++{
++ scoped_ptr ():impl_ (0) { }
++ scoped_ptr_impl < C > impl_;
++};
++
++template < typename JetsT > void
++TestJets (JetsT *)
++{
++ typedef typename JetsT::JetType JetT;
++ scoped_ptr < JetT[] > a;
++}
++
++template < typename T > struct SimpleJets
++{
++ typedef SimpleJet < T > JetType;
++ scoped_ptr < SimpleJet < T >[] > vars_;
++};
++
++void fn ()
++{
++ SimpleJets < double >b;
++ TestJets (&b);
++}
+--- a/src/gcc/testsuite/g++.dg/template/delete2.C
++++ b/src/gcc/testsuite/g++.dg/template/delete2.C
+@@ -0,0 +1,26 @@
++// PR c++/58119
++
++template <class T>
++struct A
++{
++ operator T*();
++ template <class U>
++ operator A<U>();
++};
++
++template <class T>
++struct B
++{
++ operator T*();
++ template <class U>
++ operator A<U>*();
++};
++
++int main()
++{
++ A<int> a;
++ delete a;
++
++ B<int> b;
++ delete b; // { dg-error "template|delete" }
++}
+--- a/src/gcc/testsuite/g++.dg/template/inherit9.C
++++ b/src/gcc/testsuite/g++.dg/template/inherit9.C
+@@ -0,0 +1,15 @@
++// PR c++/58273
++
++class A {};
++class B
++{
++ int goo(A);
++};
++template<typename E>
++class D : public B
++{
++ void foo(A t)
++ {
++ int const i(B::goo(t));
++ }
++};
+--- a/src/gcc/testsuite/g++.dg/template/using23.C
++++ b/src/gcc/testsuite/g++.dg/template/using23.C
+@@ -0,0 +1,15 @@
++// PR c++/57831
++
++struct A {
++ void f();
++};
++template <class T> struct B : T {
++ typedef T base;
++ using base::f; // If I write "using B<T>::f" it's ok
++ void g( ) {
++ B<T>::f(); // This is OK as expected
++ (this->*&T::f)(); // This is also OK
++ (this->*&B<T>::f)(); // This causes error
++ }
++};
++template struct B< A >;
+--- a/src/gcc/testsuite/g++.dg/torture/pr54684.C
++++ b/src/gcc/testsuite/g++.dg/torture/pr54684.C
+@@ -0,0 +1,62 @@
++// { dg-do compile }
++
++typedef union tree_node *tree;
++typedef union gimple_statement_d *gimple;
++struct vec_prefix { unsigned num_; };
++template<typename T> struct vec_t {
++ unsigned length (void) const;
++ T &operator[] (unsigned);
++ vec_prefix prefix_;
++ T vec_[1];
++};
++template<typename T> inline unsigned vec_t<T>::length (void) const {
++ return prefix_.num_;
++}
++template<typename T> T & vec_t<T>::operator[] (unsigned ix) {
++ ((void)(__builtin_expect(!(ix < prefix_.num_), 0) ? __builtin_unreachable(), 0 : 0));
++ return vec_[ix];
++}
++enum tree_code { PARM_DECL };
++struct tree_base {
++ enum tree_code code : 16;
++ unsigned default_def_flag : 1;
++};
++union tree_node {
++ struct tree_base base;
++};
++struct ipa_param_descriptor {
++ tree decl;
++ unsigned used : 1;
++};
++typedef struct ipa_param_descriptor ipa_param_descriptor_t;
++struct ipa_node_params {
++ vec_t<ipa_param_descriptor_t> *descriptors;
++};
++static inline int ipa_get_param_count (struct ipa_node_params *info) {
++ return ((info->descriptors) ? (info->descriptors)->length () : 0);
++}
++static inline tree ipa_get_param (struct ipa_node_params *info, int i) {
++ return ((*(info->descriptors))[i]).decl;
++}
++static inline void ipa_set_param_used (struct ipa_node_params *info, int i, bool val) {
++ ((*(info->descriptors))[i]).used = val;
++}
++int ipa_get_param_decl_index (struct ipa_node_params *info, tree ptree)
++{
++ int i, count;
++ count = ipa_get_param_count (info);
++ for (i = 0; i < count; i++)
++ if (ipa_get_param (info, i) == ptree) return i;
++ return -1;
++}
++bool visit_ref_for_mod_analysis (gimple stmt __attribute__ ((__unused__)),
++ tree op, void *data)
++{
++ struct ipa_node_params *info = (struct ipa_node_params *) data;
++ if (op && ((enum tree_code) (op)->base.code) == PARM_DECL)
++ {
++ int index = ipa_get_param_decl_index (info, op);
++ ((void)(__builtin_expect(!(index >= 0), 0) ? __builtin_unreachable(), 0 : 0));
++ ipa_set_param_used (info, index, true);
++ }
++}
+--- a/src/gcc/testsuite/g++.dg/tree-ssa/ivopts-2.C
++++ b/src/gcc/testsuite/g++.dg/tree-ssa/ivopts-2.C
+@@ -7,5 +7,5 @@
+ *p = 1;
+ }
+
+-/* { dg-final { scan-tree-dump-times "PHI <p" 1 "ivopts"} } */
++/* { dg-final { scan-tree-dump-times "PHI <\[pb\]" 1 "ivopts"} } */
+ /* { dg-final { cleanup-tree-dump "ivopts" } } */
+--- a/src/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C
++++ b/src/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C
+@@ -7,7 +7,7 @@
+ function. However, some platforms use all bits to encode a
+ function pointer. Such platforms use the lowest bit of the delta,
+ that is shifted left by one bit. */
+-#if defined __MN10300__ || defined __SH5__ || defined __arm__ || defined __thumb__ || defined __mips__
++#if defined __MN10300__ || defined __SH5__ || defined __arm__ || defined __thumb__ || defined __mips__ || defined __aarch64__
+ #define ADJUST_PTRFN(func, virt) ((void (*)())(func))
+ #define ADJUST_DELTA(delta, virt) (((delta) << 1) + !!(virt))
+ #else
--- a/src/gcc/testsuite/gcc.c-torture/compile/pr56484.c
+++ b/src/gcc/testsuite/gcc.c-torture/compile/pr56484.c
@@ -0,0 +1,17 @@
@@ -64364,6 +66078,28 @@
+ }
+ return number;
+}
+--- a/src/gcc/testsuite/gcc.dg/pr57980.c
++++ b/src/gcc/testsuite/gcc.dg/pr57980.c
+@@ -0,0 +1,19 @@
++/* PR tree-optimization/57980 */
++/* { dg-do compile } */
++/* { dg-options "-O -foptimize-sibling-calls -w" } */
++
++typedef int V __attribute__ ((vector_size (2 * sizeof (int))));
++extern V f (void);
++
++V
++bar (void)
++{
++ return -f ();
++}
++
++V
++foo (void)
++{
++ V v = { };
++ return v - f ();
++}
--- a/src/gcc/testsuite/gcc.dg/stack-usage-1.c
+++ b/src/gcc/testsuite/gcc.dg/stack-usage-1.c
@@ -7,7 +7,9 @@
@@ -65344,6 +67080,68 @@
+#endif
+#define RUNTIME_ENDIANNESS_CHECK_FUNCTION_DEFINED
+#endif
+--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.S
++++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.S
+@@ -0,0 +1,59 @@
++ .global dumpregs
++ .global myfunc
++ .type dumpregs,%function
++ .type myfunc,%function
++dumpregs:
++myfunc:
++ mov x16, sp
++ mov x17, sp
++ sub sp, sp, 352 // 336 for registers and 16 for old sp and lr
++
++ stp x8, x9, [x17, #-16]! //320
++
++ stp q6, q7, [x17, #-32]! //288
++ stp q4, q5, [x17, #-32]! //256
++ stp q2, q3, [x17, #-32]! //224
++ stp q0, q1, [x17, #-32]! //192
++
++ stp x6, x7, [x17, #-16]! //176
++ stp x4, x5, [x17, #-16]! //160
++ stp x2, x3, [x17, #-16]! //144
++ stp x0, x1, [x17, #-16]! //128
++
++ stp w6, w7, [x17, #-8]! //120
++ stp w4, w5, [x17, #-8]! //112
++ stp w2, w3, [x17, #-8]! //104
++ stp w0, w1, [x17, #-8]! // 96
++
++ stp s6, s7, [x17, #-8]! // 88
++ stp s4, s5, [x17, #-8]! // 80
++ stp s2, s3, [x17, #-8]! // 72
++ stp s0, s1, [x17, #-8]! // 64
++
++ stp d6, d7, [x17, #-16]! // 48
++ stp d4, d5, [x17, #-16]! // 32
++ stp d2, d3, [x17, #-16]! // 16
++ stp d0, d1, [x17, #-16]! // 0
++
++ add x0, sp, #16
++ stp x16, x30, [x17, #-16]!
++
++ adrp x9, which_kind_of_test // determine the type of test
++ add x9, x9, :lo12:which_kind_of_test
++ ldr w9, [x9, #0]
++ cmp w9, #1
++ bgt LABEL_TEST_FUNC_RETURN
++ bl testfunc // parameter passing test or va_arg code gen test
++ b LABEL_RET
++LABEL_TEST_FUNC_RETURN:
++ adrp x9, testfunc_ptr
++ add x9, x9, :lo12:testfunc_ptr
++ ldr x9, [x9, #0]
++ blr x9 // function return value test
++LABEL_RET:
++ ldp x0, x30, [sp]
++ mov sp, x0
++ ret
++
++.weak testfunc
++.weak testfunc_ptr
--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.h
+++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.h
@@ -0,0 +1,159 @@
@@ -65506,68 +67304,6 @@
+ return 0;
+}
+
---- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.S
-+++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/abitest.S
-@@ -0,0 +1,59 @@
-+ .global dumpregs
-+ .global myfunc
-+ .type dumpregs,%function
-+ .type myfunc,%function
-+dumpregs:
-+myfunc:
-+ mov x16, sp
-+ mov x17, sp
-+ sub sp, sp, 352 // 336 for registers and 16 for old sp and lr
-+
-+ stp x8, x9, [x17, #-16]! //320
-+
-+ stp q6, q7, [x17, #-32]! //288
-+ stp q4, q5, [x17, #-32]! //256
-+ stp q2, q3, [x17, #-32]! //224
-+ stp q0, q1, [x17, #-32]! //192
-+
-+ stp x6, x7, [x17, #-16]! //176
-+ stp x4, x5, [x17, #-16]! //160
-+ stp x2, x3, [x17, #-16]! //144
-+ stp x0, x1, [x17, #-16]! //128
-+
-+ stp w6, w7, [x17, #-8]! //120
-+ stp w4, w5, [x17, #-8]! //112
-+ stp w2, w3, [x17, #-8]! //104
-+ stp w0, w1, [x17, #-8]! // 96
-+
-+ stp s6, s7, [x17, #-8]! // 88
-+ stp s4, s5, [x17, #-8]! // 80
-+ stp s2, s3, [x17, #-8]! // 72
-+ stp s0, s1, [x17, #-8]! // 64
-+
-+ stp d6, d7, [x17, #-16]! // 48
-+ stp d4, d5, [x17, #-16]! // 32
-+ stp d2, d3, [x17, #-16]! // 16
-+ stp d0, d1, [x17, #-16]! // 0
-+
-+ add x0, sp, #16
-+ stp x16, x30, [x17, #-16]!
-+
-+ adrp x9, which_kind_of_test // determine the type of test
-+ add x9, x9, :lo12:which_kind_of_test
-+ ldr w9, [x9, #0]
-+ cmp w9, #1
-+ bgt LABEL_TEST_FUNC_RETURN
-+ bl testfunc // parameter passing test or va_arg code gen test
-+ b LABEL_RET
-+LABEL_TEST_FUNC_RETURN:
-+ adrp x9, testfunc_ptr
-+ add x9, x9, :lo12:testfunc_ptr
-+ ldr x9, [x9, #0]
-+ blr x9 // function return value test
-+LABEL_RET:
-+ ldp x0, x30, [sp]
-+ mov sp, x0
-+ ret
-+
-+.weak testfunc
-+.weak testfunc_ptr
--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-1.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/func-ret-1.c
@@ -0,0 +1,44 @@
@@ -66198,6 +67934,40 @@
+ ANON(type_promoted, val_promoted, offset, __VA_ARGS__)
+
+#endif /* AARCH64_VARIADIC_MACRO_DEF_GEN_ARGUMENT_LIST */
+--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_1.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_1.c
+@@ -0,0 +1,31 @@
++/* Test AAPCS64 layout */
++
++/* C.7 If the argument is an Integral Type, the size of the argument is
++ less than or equal to 8 bytes and the NGRN is less than 8, the
++ argument is copied to the least significant bits in x[NGRN]. The
++ NGRN is incremented by one. The argument has now been allocated. */
++
++/* { dg-do run { target aarch64*-*-* } } */
++
++#ifndef IN_FRAMEWORK
++#define TESTFILE "test_1.c"
++/* TODO: review if we need this */
++#define RUNTIME_ENDIANNESS_CHECK
++#include "abitest.h"
++#else
++ ARG(int, 4, W0)
++ ARG(double, 4.0, D0)
++ ARG(int, 3, W1)
++ /* TODO: review the way of memcpy char, short, etc. */
++#ifndef __AAPCS64_BIG_ENDIAN__
++ ARG(char, 0xEF, X2)
++ ARG(short, 0xBEEF, X3)
++ ARG(int, 0xDEADBEEF, X4)
++#else
++ /* TODO: need the model/qemu to be big-endian as well */
++ ARG(char, 0xEF, X2+7)
++ ARG(short, 0xBEEF, X3+6)
++ ARG(int, 0xDEADBEEF, X4+4)
++#endif
++ LAST_ARG(long long, 0xDEADBEEFCAFEBABELL, X5)
++#endif
--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_10.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_10.c
@@ -0,0 +1,26 @@
@@ -66560,39 +68330,24 @@
+ PTR_ANON(struct y, v, X1)
+ LAST_ANON(int, 10, W2)
+#endif
---- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_1.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_1.c
-@@ -0,0 +1,31 @@
+--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_2.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_2.c
+@@ -0,0 +1,16 @@
+/* Test AAPCS64 layout */
+
-+/* C.7 If the argument is an Integral Type, the size of the argument is
-+ less than or equal to 8 bytes and the NGRN is less than 8, the
-+ argument is copied to the least significant bits in x[NGRN]. The
-+ NGRN is incremented by one. The argument has now been allocated. */
-+
+/* { dg-do run { target aarch64*-*-* } } */
+
+#ifndef IN_FRAMEWORK
-+#define TESTFILE "test_1.c"
-+/* TODO: review if we need this */
-+#define RUNTIME_ENDIANNESS_CHECK
++#define VFP
++#define TESTFILE "test_2.c"
+#include "abitest.h"
++
+#else
-+ ARG(int, 4, W0)
-+ ARG(double, 4.0, D0)
-+ ARG(int, 3, W1)
-+ /* TODO: review the way of memcpy char, short, etc. */
-+#ifndef __AAPCS64_BIG_ENDIAN__
-+ ARG(char, 0xEF, X2)
-+ ARG(short, 0xBEEF, X3)
-+ ARG(int, 0xDEADBEEF, X4)
-+#else
-+ /* TODO: need the model/qemu to be big-endian as well */
-+ ARG(char, 0xEF, X2+7)
-+ ARG(short, 0xBEEF, X3+6)
-+ ARG(int, 0xDEADBEEF, X4+4)
-+#endif
-+ LAST_ARG(long long, 0xDEADBEEFCAFEBABELL, X5)
++ ARG(float, 1.0f, S0)
++ ARG(double, 4.0, D1)
++ ARG(float, 2.0f, S2)
++ ARG(double, 5.0, D3)
++ LAST_ARG(int, 3, W0)
+#endif
--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_20.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_20.c
@@ -66856,25 +68611,6 @@
+ARG (struct x1, s1, D1)
+LAST_ARG_NONFLAT (int, 89012, X5, i32in64)
+#endif
---- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_2.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_2.c
-@@ -0,0 +1,16 @@
-+/* Test AAPCS64 layout */
-+
-+/* { dg-do run { target aarch64*-*-* } } */
-+
-+#ifndef IN_FRAMEWORK
-+#define VFP
-+#define TESTFILE "test_2.c"
-+#include "abitest.h"
-+
-+#else
-+ ARG(float, 1.0f, S0)
-+ ARG(double, 4.0, D1)
-+ ARG(float, 2.0f, S2)
-+ ARG(double, 5.0, D3)
-+ LAST_ARG(int, 3, W0)
-+#endif
--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_3.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/test_3.c
@@ -0,0 +1,18 @@
@@ -67568,6 +69304,59 @@
+ double a;
+ float b;
+};
+--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-1.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-1.c
+@@ -0,0 +1,50 @@
++/* Test AAPCS64 layout and __builtin_va_arg.
++
++ This test covers fundamental data types as specified in AAPCS64 \S 4.1.
++ It is focus on unnamed parameter passed in registers. */
++
++/* { dg-do run { target aarch64*-*-* } } */
++
++#ifndef IN_FRAMEWORK
++#define AAPCS64_TEST_STDARG
++#define TESTFILE "va_arg-1.c"
++#include "type-def.h"
++
++vf2_t vf2 = (vf2_t){ 17.f, 18.f };
++vi4_t vi4 = (vi4_t){ 0xdeadbabe, 0xbabecafe, 0xcafebeef, 0xbeefdead };
++union int128_t qword;
++signed char sc = 0xed;
++signed int sc_promoted = 0xffffffed;
++signed short ss = 0xcba9;
++signed int ss_promoted = 0xffffcba9;
++float fp = 65432.12345f;
++double fp_promoted = (double)65432.12345f;
++
++#define HAS_DATA_INIT_FUNC
++void init_data ()
++{
++ /* Init signed quad-word integer. */
++ qword.l64 = 0xfdb9753102468aceLL;
++ qword.h64 = 0xeca8642013579bdfLL;
++}
++
++#include "abitest.h"
++#else
++ ARG ( int , 0xff , X0, LAST_NAMED_ARG_ID)
++ DOTS
++ ANON_PROMOTED(unsigned char , 0xfe , unsigned int, 0xfe , X1, 1)
++ ANON_PROMOTED( signed char , sc , signed int, sc_promoted, X2, 2)
++ ANON_PROMOTED(unsigned short , 0xdcba, unsigned int, 0xdcba , X3, 3)
++ ANON_PROMOTED( signed short , ss , signed int, ss_promoted, X4, 4)
++ ANON (unsigned int , 0xdeadbeef, X5, 5)
++ ANON ( signed int , 0xcafebabe, X6, 6)
++ ANON (unsigned long long, 0xba98765432101234ULL, X7, 7)
++ ANON ( signed long long, 0xa987654321012345LL , STACK, 8)
++ ANON ( __int128, qword.i , STACK+16, 9)
++ ANON_PROMOTED( float , fp , double, fp_promoted, D0, 10)
++ ANON ( double , 9876543.212345, D1, 11)
++ ANON ( long double , 98765432123456789.987654321L, Q2, 12)
++ ANON ( vf2_t, vf2 , D3, 13)
++ ANON ( vi4_t, vi4 , Q4, 14)
++ LAST_ANON ( int , 0xeeee, STACK+32,15)
++#endif
--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-10.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-10.c
@@ -0,0 +1,29 @@
@@ -67698,59 +69487,6 @@
+ LAST_ANON(double, 123.45, D0, 10)
+
+#endif
---- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-1.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-1.c
-@@ -0,0 +1,50 @@
-+/* Test AAPCS64 layout and __builtin_va_arg.
-+
-+ This test covers fundamental data types as specified in AAPCS64 \S 4.1.
-+ It is focus on unnamed parameter passed in registers. */
-+
-+/* { dg-do run { target aarch64*-*-* } } */
-+
-+#ifndef IN_FRAMEWORK
-+#define AAPCS64_TEST_STDARG
-+#define TESTFILE "va_arg-1.c"
-+#include "type-def.h"
-+
-+vf2_t vf2 = (vf2_t){ 17.f, 18.f };
-+vi4_t vi4 = (vi4_t){ 0xdeadbabe, 0xbabecafe, 0xcafebeef, 0xbeefdead };
-+union int128_t qword;
-+signed char sc = 0xed;
-+signed int sc_promoted = 0xffffffed;
-+signed short ss = 0xcba9;
-+signed int ss_promoted = 0xffffcba9;
-+float fp = 65432.12345f;
-+double fp_promoted = (double)65432.12345f;
-+
-+#define HAS_DATA_INIT_FUNC
-+void init_data ()
-+{
-+ /* Init signed quad-word integer. */
-+ qword.l64 = 0xfdb9753102468aceLL;
-+ qword.h64 = 0xeca8642013579bdfLL;
-+}
-+
-+#include "abitest.h"
-+#else
-+ ARG ( int , 0xff , X0, LAST_NAMED_ARG_ID)
-+ DOTS
-+ ANON_PROMOTED(unsigned char , 0xfe , unsigned int, 0xfe , X1, 1)
-+ ANON_PROMOTED( signed char , sc , signed int, sc_promoted, X2, 2)
-+ ANON_PROMOTED(unsigned short , 0xdcba, unsigned int, 0xdcba , X3, 3)
-+ ANON_PROMOTED( signed short , ss , signed int, ss_promoted, X4, 4)
-+ ANON (unsigned int , 0xdeadbeef, X5, 5)
-+ ANON ( signed int , 0xcafebabe, X6, 6)
-+ ANON (unsigned long long, 0xba98765432101234ULL, X7, 7)
-+ ANON ( signed long long, 0xa987654321012345LL , STACK, 8)
-+ ANON ( __int128, qword.i , STACK+16, 9)
-+ ANON_PROMOTED( float , fp , double, fp_promoted, D0, 10)
-+ ANON ( double , 9876543.212345, D1, 11)
-+ ANON ( long double , 98765432123456789.987654321L, Q2, 12)
-+ ANON ( vf2_t, vf2 , D3, 13)
-+ ANON ( vi4_t, vi4 , Q4, 14)
-+ LAST_ANON ( int , 0xeeee, STACK+32,15)
-+#endif
--- a/src/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-2.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/aapcs64/va_arg-2.c
@@ -0,0 +1,59 @@
@@ -69903,6 +71639,64 @@
+ /* { dg-final { scan-assembler "add\tw\[0-9\]+,.*sxth\n" } } */
+ return a + (int)i;
+}
+--- a/src/gcc/testsuite/gcc.target/aarch64/fcvt.x
++++ b/src/gcc/testsuite/gcc.target/aarch64/fcvt.x
+@@ -0,0 +1,55 @@
++extern GPF SUFFIX(trunc) (GPF);
++extern GPF SUFFIX(ceil) (GPF);
++extern GPF SUFFIX(floor) (GPF);
++extern GPF SUFFIX(round) (GPF);
++
++GPI test1a (GPF x) {
++ return SUFFIX(__builtin_trunc)(x);
++}
++
++GPI test1b (GPF x)
++{
++ return SUFFIX(trunc)(x);
++}
++
++GPI test2a (GPF x)
++{
++ return SUFFIX(__builtin_lceil)(x);
++}
++
++GPI test2b (GPF x)
++{
++ return SUFFIX(ceil)(x);
++}
++
++GPI test2c (GPF x)
++{
++ return SUFFIX(__builtin_ceil)(x);
++}
++
++GPI test3a (GPF x)
++{
++ return SUFFIX(__builtin_lfloor)(x);
++}
++
++GPI test3b (GPF x)
++{
++ return SUFFIX(floor)(x);
++}
++
++GPI test3c (GPF x)
++{
++ return SUFFIX(__builtin_floor)(x);
++}
++
++GPI test4a (GPF x)
++{
++ return SUFFIX(__builtin_round)(x);
++}
++
++GPI test4b (GPF x)
++{
++ return SUFFIX(round)(x);
++}
++
++
--- a/src/gcc/testsuite/gcc.target/aarch64/fcvt_double_int.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/fcvt_double_int.c
@@ -0,0 +1,15 @@
@@ -70043,64 +71837,6 @@
+/* { dg-final { scan-assembler-times "fcvtms\tx\[0-9\]+, *s\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fcvtmu\tx\[0-9\]+, *s\[0-9\]" 2 } } */
+/* { dg-final { scan-assembler-times "fcvtau\tx\[0-9\]+, *s\[0-9\]" 2 } } */
---- a/src/gcc/testsuite/gcc.target/aarch64/fcvt.x
-+++ b/src/gcc/testsuite/gcc.target/aarch64/fcvt.x
-@@ -0,0 +1,55 @@
-+extern GPF SUFFIX(trunc) (GPF);
-+extern GPF SUFFIX(ceil) (GPF);
-+extern GPF SUFFIX(floor) (GPF);
-+extern GPF SUFFIX(round) (GPF);
-+
-+GPI test1a (GPF x) {
-+ return SUFFIX(__builtin_trunc)(x);
-+}
-+
-+GPI test1b (GPF x)
-+{
-+ return SUFFIX(trunc)(x);
-+}
-+
-+GPI test2a (GPF x)
-+{
-+ return SUFFIX(__builtin_lceil)(x);
-+}
-+
-+GPI test2b (GPF x)
-+{
-+ return SUFFIX(ceil)(x);
-+}
-+
-+GPI test2c (GPF x)
-+{
-+ return SUFFIX(__builtin_ceil)(x);
-+}
-+
-+GPI test3a (GPF x)
-+{
-+ return SUFFIX(__builtin_lfloor)(x);
-+}
-+
-+GPI test3b (GPF x)
-+{
-+ return SUFFIX(floor)(x);
-+}
-+
-+GPI test3c (GPF x)
-+{
-+ return SUFFIX(__builtin_floor)(x);
-+}
-+
-+GPI test4a (GPF x)
-+{
-+ return SUFFIX(__builtin_round)(x);
-+}
-+
-+GPI test4b (GPF x)
-+{
-+ return SUFFIX(round)(x);
-+}
-+
-+
--- a/src/gcc/testsuite/gcc.target/aarch64/ffs.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/ffs.c
@@ -0,0 +1,12 @@
@@ -70174,8 +71910,8 @@
+/* { dg-final { scan-assembler-times "fnmadd\td\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fnmadd\ts\[0-9\]" 1 } } */
+
---- a/src/gcc/testsuite/gcc.target/aarch64/fmovd.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/fmovd.c
+--- a/src/gcc/testsuite/gcc.target/aarch64/fmovd-zero.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/fmovd-zero.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
@@ -70183,12 +71919,12 @@
+void
+foo (double *output)
+{
-+ *output = 4.25;
++ *output = 0.0;
+}
+
-+/* { dg-final { scan-assembler "fmov\\td\[0-9\]+, 4\\.25" } } */
---- a/src/gcc/testsuite/gcc.target/aarch64/fmovd-zero.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/fmovd-zero.c
++/* { dg-final { scan-assembler "fmov\\td\[0-9\]+, xzr" } } */
+--- a/src/gcc/testsuite/gcc.target/aarch64/fmovd.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/fmovd.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
@@ -70196,12 +71932,12 @@
+void
+foo (double *output)
+{
-+ *output = 0.0;
++ *output = 4.25;
+}
+
-+/* { dg-final { scan-assembler "fmov\\td\[0-9\]+, xzr" } } */
---- a/src/gcc/testsuite/gcc.target/aarch64/fmovf.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/fmovf.c
++/* { dg-final { scan-assembler "fmov\\td\[0-9\]+, 4\\.25" } } */
+--- a/src/gcc/testsuite/gcc.target/aarch64/fmovf-zero.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/fmovf-zero.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
@@ -70209,12 +71945,12 @@
+void
+foo (float *output)
+{
-+ *output = 4.25;
++ *output = 0.0;
+}
+
-+/* { dg-final { scan-assembler "fmov\\ts\[0-9\]+, 4\\.25" } } */
---- a/src/gcc/testsuite/gcc.target/aarch64/fmovf-zero.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/fmovf-zero.c
++/* { dg-final { scan-assembler "fmov\\ts\[0-9\]+, wzr" } } */
+--- a/src/gcc/testsuite/gcc.target/aarch64/fmovf.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/fmovf.c
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
@@ -70222,10 +71958,10 @@
+void
+foo (float *output)
+{
-+ *output = 0.0;
++ *output = 4.25;
+}
+
-+/* { dg-final { scan-assembler "fmov\\ts\[0-9\]+, wzr" } } */
++/* { dg-final { scan-assembler "fmov\\ts\[0-9\]+, 4\\.25" } } */
--- a/src/gcc/testsuite/gcc.target/aarch64/fnmadd-fastmath.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/fnmadd-fastmath.c
@@ -0,0 +1,19 @@
@@ -70248,40 +71984,6 @@
+/* { dg-final { scan-assembler-times "fnmadd\td\[0-9\]" 1 } } */
+/* { dg-final { scan-assembler-times "fnmadd\ts\[0-9\]" 1 } } */
+
---- a/src/gcc/testsuite/gcc.target/aarch64/frint_double.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/frint_double.c
-@@ -0,0 +1,14 @@
-+/* { dg-do compile } */
-+/* { dg-options "-O2" } */
-+
-+#define GPF double
-+#define SUFFIX(x) x
-+
-+#include "frint.x"
-+
-+/* { dg-final { scan-assembler-times "frintz\td\[0-9\]" 2 } } */
-+/* { dg-final { scan-assembler-times "frintp\td\[0-9\]" 2 } } */
-+/* { dg-final { scan-assembler-times "frintm\td\[0-9\]" 2 } } */
-+/* { dg-final { scan-assembler-times "frinti\td\[0-9\]" 2 } } */
-+/* { dg-final { scan-assembler-times "frintx\td\[0-9\]" 2 } } */
-+/* { dg-final { scan-assembler-times "frinta\td\[0-9\]" 2 } } */
---- a/src/gcc/testsuite/gcc.target/aarch64/frint_float.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/frint_float.c
-@@ -0,0 +1,14 @@
-+/* { dg-do compile } */
-+/* { dg-options "-O2" } */
-+
-+#define GPF float
-+#define SUFFIX(x) x##f
-+
-+#include "frint.x"
-+
-+/* { dg-final { scan-assembler-times "frintz\ts\[0-9\]" 2 } } */
-+/* { dg-final { scan-assembler-times "frintp\ts\[0-9\]" 2 } } */
-+/* { dg-final { scan-assembler-times "frintm\ts\[0-9\]" 2 } } */
-+/* { dg-final { scan-assembler-times "frinti\ts\[0-9\]" 2 } } */
-+/* { dg-final { scan-assembler-times "frintx\ts\[0-9\]" 2 } } */
-+/* { dg-final { scan-assembler-times "frinta\ts\[0-9\]" 2 } } */
--- a/src/gcc/testsuite/gcc.target/aarch64/frint.x
+++ b/src/gcc/testsuite/gcc.target/aarch64/frint.x
@@ -0,0 +1,66 @@
@@ -70351,6 +72053,40 @@
+{
+ return SUFFIX(round)(x);
+}
+--- a/src/gcc/testsuite/gcc.target/aarch64/frint_double.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/frint_double.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++
++#define GPF double
++#define SUFFIX(x) x
++
++#include "frint.x"
++
++/* { dg-final { scan-assembler-times "frintz\td\[0-9\]" 2 } } */
++/* { dg-final { scan-assembler-times "frintp\td\[0-9\]" 2 } } */
++/* { dg-final { scan-assembler-times "frintm\td\[0-9\]" 2 } } */
++/* { dg-final { scan-assembler-times "frinti\td\[0-9\]" 2 } } */
++/* { dg-final { scan-assembler-times "frintx\td\[0-9\]" 2 } } */
++/* { dg-final { scan-assembler-times "frinta\td\[0-9\]" 2 } } */
+--- a/src/gcc/testsuite/gcc.target/aarch64/frint_float.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/frint_float.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++
++#define GPF float
++#define SUFFIX(x) x##f
++
++#include "frint.x"
++
++/* { dg-final { scan-assembler-times "frintz\ts\[0-9\]" 2 } } */
++/* { dg-final { scan-assembler-times "frintp\ts\[0-9\]" 2 } } */
++/* { dg-final { scan-assembler-times "frintm\ts\[0-9\]" 2 } } */
++/* { dg-final { scan-assembler-times "frinti\ts\[0-9\]" 2 } } */
++/* { dg-final { scan-assembler-times "frintx\ts\[0-9\]" 2 } } */
++/* { dg-final { scan-assembler-times "frinta\ts\[0-9\]" 2 } } */
--- a/src/gcc/testsuite/gcc.target/aarch64/index.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/index.c
@@ -0,0 +1,111 @@
@@ -72760,6 +74496,21 @@
+jumpto:
+ if (!(x1 & 0x08)) goto jumpto;
+}
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect-abs-compile.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect-abs-compile.c
+@@ -0,0 +1,12 @@
++
++/* { dg-do compile } */
++/* { dg-options "-O3" } */
++
++#define N 16
++
++#include "vect-abs.x"
++
++/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.16b" } } */
++/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.8h" } } */
++/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.4s" } } */
++/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.2d" } } */
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-abs.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-abs.c
@@ -0,0 +1,131 @@
@@ -72894,21 +74645,6 @@
+
+ return 0;
+}
---- a/src/gcc/testsuite/gcc.target/aarch64/vect-abs-compile.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-abs-compile.c
-@@ -0,0 +1,12 @@
-+
-+/* { dg-do compile } */
-+/* { dg-options "-O3" } */
-+
-+#define N 16
-+
-+#include "vect-abs.x"
-+
-+/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.16b" } } */
-+/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.8h" } } */
-+/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.4s" } } */
-+/* { dg-final { scan-assembler "abs\\tv\[0-9\]+\.2d" } } */
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-abs.x
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-abs.x
@@ -0,0 +1,36 @@
@@ -72948,102 +74684,6 @@
+ for (i=0; i<N; i++)
+ a[i] = labs (b[i]);
+}
---- a/src/gcc/testsuite/gcc.target/aarch64/vect.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect.c
-@@ -0,0 +1,93 @@
-+
-+/* { dg-do run } */
-+/* { dg-options "-O3" } */
-+
-+#include "vect.x"
-+
-+extern void abort (void);
-+
-+void set_vector (int *a, int n)
-+{
-+ int i;
-+ for (i=0; i<16; i++)
-+ a[i] = n;
-+}
-+
-+void check_vector (pRINT c, pRINT result, char *str)
-+{
-+ int i;
-+ for (i=0; i<16 ; i++)
-+ if (c[i] != result[i])
-+ abort ();
-+}
-+
-+#define TEST(func, sign) set_vector (sign##c, 0); \
-+ func (sign##a, sign##b, sign##c); \
-+ check_vector (sign##c, func##_vector, #func);
-+
-+
-+#define TESTV(func, sign) \
-+ if (func (sign##a) != func##_value) \
-+ abort ();
-+
-+#define TESTVLL(func, sign) \
-+ if (func (ll##sign##a) != func##_value) \
-+ abort ();
-+
-+int main (void)
-+{
-+ int sa[16];
-+ int sb[16];
-+ int sc[16];
-+ unsigned int ua[16];
-+ unsigned int ub[16];
-+ unsigned int uc[16];
-+ long long llsa[16];
-+ unsigned long long llua[16];
-+ int i;
-+
-+ /* Table of standard values to compare against. */
-+ unsigned int test_bic_vector[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-+ unsigned int test_orn_vector[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
-+ int mla_vector[] = {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225};
-+ int mls_vector[] = {0, -1, -4, -9, -16, -25, -36, -49, -64, -81, -100, -121, -144, -169, -196, -225};
-+ int smax_vector[] = {0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15};
-+ int smin_vector[] = {0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15};
-+ unsigned int umax_vector[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
-+ unsigned int umin_vector[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
-+ int reduce_smax_value = 0;
-+ int reduce_smin_value = -15;
-+ unsigned int reduce_umax_value = 15;
-+ unsigned int reduce_umin_value = 0;
-+ unsigned int reduce_add_u32_value = 120;
-+ int reduce_add_s32_value = -120;
-+ long long reduce_add_s64_value = -120;
-+ unsigned long long reduce_add_u64_value = 120;
-+
-+ /* Set up input vectors. */
-+ for (i=0; i < 16; i++)
-+ {
-+ sa[i] = sb[i] = -i;
-+ llsa[i] = (long long)-i;
-+ ua[i] = ub[i] = i;
-+ llua[i] = (unsigned long long)i;
-+ }
-+
-+ TEST (test_bic, s);
-+ TEST (test_orn, s);
-+ TEST (mla, s);
-+ TEST (mls, s);
-+ TEST (smax, s);
-+ TEST (smin, s);
-+ TEST (umax, u);
-+ TEST (umin, u);
-+ TESTV (reduce_smax, s);
-+ TESTV (reduce_smin, s);
-+ TESTV (reduce_umax, u);
-+ TESTV (reduce_umin, u);
-+ TESTV (reduce_add_u32, u);
-+ TESTV (reduce_add_s32, s);
-+ TESTVLL (reduce_add_u64, u);
-+ TESTVLL (reduce_add_s64, s);
-+ return 0;
-+}
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-compile.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-compile.c
@@ -0,0 +1,20 @@
@@ -73067,6 +74707,16 @@
+/* { dg-final { scan-assembler "sminv" } } */
+/* { dg-final { scan-assembler-times "addv" 2} } */
+/* { dg-final { scan-assembler-times "addp" 2} } */
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect-faddv-compile.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect-faddv-compile.c
+@@ -0,0 +1,7 @@
++
++/* { dg-do compile } */
++/* { dg-options "-O3 -ffast-math" } */
++
++#include "vect-faddv.x"
++
++/* { dg-final { scan-assembler-times "faddp\\tv" 2} } */
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-faddv.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-faddv.c
@@ -0,0 +1,31 @@
@@ -73101,16 +74751,6 @@
+
+ return 0;
+}
---- a/src/gcc/testsuite/gcc.target/aarch64/vect-faddv-compile.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-faddv-compile.c
-@@ -0,0 +1,7 @@
-+
-+/* { dg-do compile } */
-+/* { dg-options "-O3 -ffast-math" } */
-+
-+#include "vect-faddv.x"
-+
-+/* { dg-final { scan-assembler-times "faddp\\tv" 2} } */
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-faddv.x
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-faddv.x
@@ -0,0 +1,23 @@
@@ -73316,6 +74956,16 @@
+ return 0;
+}
+
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin-compile.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin-compile.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -ffast-math" } */
++
++#include "vect-fmax-fmin.x"
++
++/* { dg-final { scan-assembler "fmaxnm\\tv" } } */
++/* { dg-final { scan-assembler "fminnm\\tv" } } */
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.c
@@ -0,0 +1,105 @@
@@ -73424,16 +75074,6 @@
+
+ return 0;
+}
---- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin-compile.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin-compile.c
-@@ -0,0 +1,7 @@
-+/* { dg-do compile } */
-+/* { dg-options "-O3 -ffast-math" } */
-+
-+#include "vect-fmax-fmin.x"
-+
-+/* { dg-final { scan-assembler "fmaxnm\\tv" } } */
-+/* { dg-final { scan-assembler "fminnm\\tv" } } */
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.x
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmax-fmin.x
@@ -0,0 +1,32 @@
@@ -73528,8 +75168,8 @@
+
+ return s;
+}
---- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmovd.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmovd.c
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmovd-zero.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmovd-zero.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -fdump-tree-vect-all" } */
@@ -73542,14 +75182,14 @@
+ int i = 0;
+ /* Vectorizable. */
+ for (i = 0; i < N; i++)
-+ output[i] = 4.25;
++ output[i] = 0.0;
+}
+
++/* { dg-final { scan-assembler "movi\\tv\[0-9\]+\\.2d, 0" } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
-+/* { dg-final { scan-assembler "fmov\\tv\[0-9\]+\\.2d, 4\\.25" } } */
+/* { dg-final { cleanup-tree-dump "vect" } } */
---- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmovd-zero.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmovd-zero.c
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmovd.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmovd.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -fdump-tree-vect-all" } */
@@ -73562,14 +75202,14 @@
+ int i = 0;
+ /* Vectorizable. */
+ for (i = 0; i < N; i++)
-+ output[i] = 0.0;
++ output[i] = 4.25;
+}
+
-+/* { dg-final { scan-assembler "movi\\tv\[0-9\]+\\.2d, 0" } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
++/* { dg-final { scan-assembler "fmov\\tv\[0-9\]+\\.2d, 4\\.25" } } */
+/* { dg-final { cleanup-tree-dump "vect" } } */
---- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmovf.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmovf.c
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmovf-zero.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmovf-zero.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -fdump-tree-vect-all" } */
@@ -73582,14 +75222,14 @@
+ int i = 0;
+ /* Vectorizable. */
+ for (i = 0; i < N; i++)
-+ output[i] = 4.25;
++ output[i] = 0.0;
+}
+
++/* { dg-final { scan-assembler "movi\\tv\[0-9\]+\\.\[24\]s, 0" } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
-+/* { dg-final { scan-assembler "fmov\\tv\[0-9\]+\\.\[24\]s, 4\\.25" } } */
+/* { dg-final { cleanup-tree-dump "vect" } } */
---- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmovf-zero.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmovf-zero.c
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect-fmovf.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fmovf.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -ftree-vectorize -fdump-tree-vect-all" } */
@@ -73602,12 +75242,28 @@
+ int i = 0;
+ /* Vectorizable. */
+ for (i = 0; i < N; i++)
-+ output[i] = 0.0;
++ output[i] = 4.25;
+}
+
-+/* { dg-final { scan-assembler "movi\\tv\[0-9\]+\\.\[24\]s, 0" } } */
+/* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
++/* { dg-final { scan-assembler "fmov\\tv\[0-9\]+\\.\[24\]s, 4\\.25" } } */
+/* { dg-final { cleanup-tree-dump "vect" } } */
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect-fp-compile.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fp-compile.c
+@@ -0,0 +1,13 @@
++
++
++/* { dg-do compile } */
++/* { dg-options "-O3" } */
++
++#include "vect-fp.x"
++
++/* { dg-final { scan-assembler "fadd\\tv" } } */
++/* { dg-final { scan-assembler "fsub\\tv" } } */
++/* { dg-final { scan-assembler "fmul\\tv" } } */
++/* { dg-final { scan-assembler "fdiv\\tv" } } */
++/* { dg-final { scan-assembler "fneg\\tv" } } */
++/* { dg-final { scan-assembler "fabs\\tv" } } */
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-fp.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fp.c
@@ -0,0 +1,137 @@
@@ -73748,22 +75404,6 @@
+
+ return 0;
+}
---- a/src/gcc/testsuite/gcc.target/aarch64/vect-fp-compile.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fp-compile.c
-@@ -0,0 +1,13 @@
-+
-+
-+/* { dg-do compile } */
-+/* { dg-options "-O3" } */
-+
-+#include "vect-fp.x"
-+
-+/* { dg-final { scan-assembler "fadd\\tv" } } */
-+/* { dg-final { scan-assembler "fsub\\tv" } } */
-+/* { dg-final { scan-assembler "fmul\\tv" } } */
-+/* { dg-final { scan-assembler "fdiv\\tv" } } */
-+/* { dg-final { scan-assembler "fneg\\tv" } } */
-+/* { dg-final { scan-assembler "fabs\\tv" } } */
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-fp.x
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-fp.x
@@ -0,0 +1,44 @@
@@ -73811,74 +75451,22 @@
+DEFN2 (neg, -)
+DEF2 (abs, F32, fabsf)
+DEF2 (abs, F64, fabs)
---- a/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r.c
-@@ -0,0 +1,65 @@
-+/* { dg-do run } */
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r-compile-fp.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r-compile-fp.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
-+extern void abort (void);
-+
+#include "stdint.h"
+#include "vect-ld1r.x"
+
-+DEF (int8_t)
-+DEF (int16_t)
-+DEF (int32_t)
-+DEF (int64_t)
-+
-+#define FOOD(TYPE) \
-+ foo_ ## TYPE ## _d (&a_ ## TYPE, output_ ## TYPE)
-+
-+#define FOOQ(TYPE) \
-+ foo_ ## TYPE ## _q (&a_ ## TYPE, output_ ## TYPE)
-+
-+#define CHECKD(TYPE) \
-+ for (i = 0; i < 8 / sizeof (TYPE); i++) \
-+ if (output_ ## TYPE[i] != a_ ## TYPE) \
-+ abort ()
-+
-+#define CHECKQ(TYPE) \
-+ for (i = 0; i < 32 / sizeof (TYPE); i++) \
-+ if (output_ ## TYPE[i] != a_ ## TYPE) \
-+ abort ()
-+
-+#define DECL(TYPE) \
-+ TYPE output_ ## TYPE[32]; \
-+ TYPE a_ ## TYPE = (TYPE)12
-+
-+int
-+main (void)
-+{
-+
-+ DECL(int8_t);
-+ DECL(int16_t);
-+ DECL(int32_t);
-+ DECL(int64_t);
-+ int i;
-+
-+ FOOD (int8_t);
-+ CHECKD (int8_t);
-+ FOOQ (int8_t);
-+ CHECKQ (int8_t);
-+
-+ FOOD (int16_t);
-+ CHECKD (int16_t);
-+ FOOQ (int16_t);
-+ CHECKQ (int16_t);
-+
-+ FOOD (int32_t);
-+ CHECKD (int32_t);
-+ FOOQ (int32_t);
-+ CHECKQ (int32_t);
++DEF (float)
++DEF (double)
+
-+ FOOD (int64_t);
-+ CHECKD (int64_t);
-+ FOOQ (int64_t);
-+ CHECKQ (int64_t);
++/* { dg-final { scan-assembler "ld1r\\t\{v\[0-9\]+\.4s"} } */
++/* { dg-final { scan-assembler "ldr\\t\d\[0-9\]+"} } */
++/* { dg-final { scan-assembler "ld1r\\t\{v\[0-9\]+\.2d"} } */
+
-+ return 0;
-+}
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r-compile.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r-compile.c
@@ -0,0 +1,18 @@
@@ -73900,22 +75488,6 @@
+/* { dg-final { scan-assembler "ld1r\\t\{v\[0-9\]+\.4s"} } */
+/* { dg-final { scan-assembler "ldr\\t\x\[0-9\]+"} } */
+/* { dg-final { scan-assembler "ld1r\\t\{v\[0-9\]+\.2d"} } */
---- a/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r-compile-fp.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r-compile-fp.c
-@@ -0,0 +1,13 @@
-+/* { dg-do compile } */
-+/* { dg-options "-O3" } */
-+
-+#include "stdint.h"
-+#include "vect-ld1r.x"
-+
-+DEF (float)
-+DEF (double)
-+
-+/* { dg-final { scan-assembler "ld1r\\t\{v\[0-9\]+\.4s"} } */
-+/* { dg-final { scan-assembler "ldr\\t\d\[0-9\]+"} } */
-+/* { dg-final { scan-assembler "ld1r\\t\{v\[0-9\]+\.2d"} } */
-+
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r-fp.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r-fp.c
@@ -0,0 +1,51 @@
@@ -73970,6 +75542,74 @@
+
+ return 0;
+}
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r.c
+@@ -0,0 +1,65 @@
++/* { dg-do run } */
++/* { dg-options "-O3" } */
++
++extern void abort (void);
++
++#include "stdint.h"
++#include "vect-ld1r.x"
++
++DEF (int8_t)
++DEF (int16_t)
++DEF (int32_t)
++DEF (int64_t)
++
++#define FOOD(TYPE) \
++ foo_ ## TYPE ## _d (&a_ ## TYPE, output_ ## TYPE)
++
++#define FOOQ(TYPE) \
++ foo_ ## TYPE ## _q (&a_ ## TYPE, output_ ## TYPE)
++
++#define CHECKD(TYPE) \
++ for (i = 0; i < 8 / sizeof (TYPE); i++) \
++ if (output_ ## TYPE[i] != a_ ## TYPE) \
++ abort ()
++
++#define CHECKQ(TYPE) \
++ for (i = 0; i < 32 / sizeof (TYPE); i++) \
++ if (output_ ## TYPE[i] != a_ ## TYPE) \
++ abort ()
++
++#define DECL(TYPE) \
++ TYPE output_ ## TYPE[32]; \
++ TYPE a_ ## TYPE = (TYPE)12
++
++int
++main (void)
++{
++
++ DECL(int8_t);
++ DECL(int16_t);
++ DECL(int32_t);
++ DECL(int64_t);
++ int i;
++
++ FOOD (int8_t);
++ CHECKD (int8_t);
++ FOOQ (int8_t);
++ CHECKQ (int8_t);
++
++ FOOD (int16_t);
++ CHECKD (int16_t);
++ FOOQ (int16_t);
++ CHECKQ (int16_t);
++
++ FOOD (int32_t);
++ CHECKD (int32_t);
++ FOOQ (int32_t);
++ CHECKQ (int32_t);
++
++ FOOD (int64_t);
++ CHECKD (int64_t);
++ FOOQ (int64_t);
++ CHECKQ (int64_t);
++
++ return 0;
++}
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r.x
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-ld1r.x
@@ -0,0 +1,15 @@
@@ -73988,6 +75628,33 @@
+ for (i = 0; i < 32 / sizeof (TYPE); i++) \
+ output[i] = *a; \
+ }
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect-mull-compile.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect-mull-compile.c
+@@ -0,0 +1,24 @@
++
++/* { dg-do compile } */
++/* { dg-options "-O3" } */
++
++#define N 16
++
++#include "vect-mull.x"
++
++DEF_MULL2 (DEF_MULLB)
++DEF_MULL2 (DEF_MULLH)
++DEF_MULL2 (DEF_MULLS)
++
++/* { dg-final { scan-assembler "smull\\tv\[0-9\]+\.8h"} } */
++/* { dg-final { scan-assembler "smull\\tv\[0-9\]+\.4s"} } */
++/* { dg-final { scan-assembler "smull\\tv\[0-9\]+\.2d"} } */
++/* { dg-final { scan-assembler "umull\\tv\[0-9\]+\.8h"} } */
++/* { dg-final { scan-assembler "umull\\tv\[0-9\]+\.4s"} } */
++/* { dg-final { scan-assembler "umull\\tv\[0-9\]+\.2d"} } */
++/* { dg-final { scan-assembler "smull2\\tv\[0-9\]+\.8h"} } */
++/* { dg-final { scan-assembler "smull2\\tv\[0-9\]+\.4s"} } */
++/* { dg-final { scan-assembler "smull2\\tv\[0-9\]+\.2d"} } */
++/* { dg-final { scan-assembler "umull2\\tv\[0-9\]+\.8h"} } */
++/* { dg-final { scan-assembler "umull2\\tv\[0-9\]+\.4s"} } */
++/* { dg-final { scan-assembler "umull2\\tv\[0-9\]+\.2d"} } */
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-mull.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-mull.c
@@ -0,0 +1,138 @@
@@ -74129,33 +75796,6 @@
+
+ return 0;
+}
---- a/src/gcc/testsuite/gcc.target/aarch64/vect-mull-compile.c
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-mull-compile.c
-@@ -0,0 +1,24 @@
-+
-+/* { dg-do compile } */
-+/* { dg-options "-O3" } */
-+
-+#define N 16
-+
-+#include "vect-mull.x"
-+
-+DEF_MULL2 (DEF_MULLB)
-+DEF_MULL2 (DEF_MULLH)
-+DEF_MULL2 (DEF_MULLS)
-+
-+/* { dg-final { scan-assembler "smull\\tv\[0-9\]+\.8h"} } */
-+/* { dg-final { scan-assembler "smull\\tv\[0-9\]+\.4s"} } */
-+/* { dg-final { scan-assembler "smull\\tv\[0-9\]+\.2d"} } */
-+/* { dg-final { scan-assembler "umull\\tv\[0-9\]+\.8h"} } */
-+/* { dg-final { scan-assembler "umull\\tv\[0-9\]+\.4s"} } */
-+/* { dg-final { scan-assembler "umull\\tv\[0-9\]+\.2d"} } */
-+/* { dg-final { scan-assembler "smull2\\tv\[0-9\]+\.8h"} } */
-+/* { dg-final { scan-assembler "smull2\\tv\[0-9\]+\.4s"} } */
-+/* { dg-final { scan-assembler "smull2\\tv\[0-9\]+\.2d"} } */
-+/* { dg-final { scan-assembler "umull2\\tv\[0-9\]+\.8h"} } */
-+/* { dg-final { scan-assembler "umull2\\tv\[0-9\]+\.4s"} } */
-+/* { dg-final { scan-assembler "umull2\\tv\[0-9\]+\.2d"} } */
--- a/src/gcc/testsuite/gcc.target/aarch64/vect-mull.x
+++ b/src/gcc/testsuite/gcc.target/aarch64/vect-mull.x
@@ -0,0 +1,49 @@
@@ -74208,6 +75848,245 @@
+
+#define DEF_MULL2(x) x (S) \
+ x (U)
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect.c
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect.c
+@@ -0,0 +1,93 @@
++
++/* { dg-do run } */
++/* { dg-options "-O3" } */
++
++#include "vect.x"
++
++extern void abort (void);
++
++void set_vector (int *a, int n)
++{
++ int i;
++ for (i=0; i<16; i++)
++ a[i] = n;
++}
++
++void check_vector (pRINT c, pRINT result, char *str)
++{
++ int i;
++ for (i=0; i<16 ; i++)
++ if (c[i] != result[i])
++ abort ();
++}
++
++#define TEST(func, sign) set_vector (sign##c, 0); \
++ func (sign##a, sign##b, sign##c); \
++ check_vector (sign##c, func##_vector, #func);
++
++
++#define TESTV(func, sign) \
++ if (func (sign##a) != func##_value) \
++ abort ();
++
++#define TESTVLL(func, sign) \
++ if (func (ll##sign##a) != func##_value) \
++ abort ();
++
++int main (void)
++{
++ int sa[16];
++ int sb[16];
++ int sc[16];
++ unsigned int ua[16];
++ unsigned int ub[16];
++ unsigned int uc[16];
++ long long llsa[16];
++ unsigned long long llua[16];
++ int i;
++
++ /* Table of standard values to compare against. */
++ unsigned int test_bic_vector[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
++ unsigned int test_orn_vector[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
++ int mla_vector[] = {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225};
++ int mls_vector[] = {0, -1, -4, -9, -16, -25, -36, -49, -64, -81, -100, -121, -144, -169, -196, -225};
++ int smax_vector[] = {0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15};
++ int smin_vector[] = {0, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15};
++ unsigned int umax_vector[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
++ unsigned int umin_vector[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
++ int reduce_smax_value = 0;
++ int reduce_smin_value = -15;
++ unsigned int reduce_umax_value = 15;
++ unsigned int reduce_umin_value = 0;
++ unsigned int reduce_add_u32_value = 120;
++ int reduce_add_s32_value = -120;
++ long long reduce_add_s64_value = -120;
++ unsigned long long reduce_add_u64_value = 120;
++
++ /* Set up input vectors. */
++ for (i=0; i < 16; i++)
++ {
++ sa[i] = sb[i] = -i;
++ llsa[i] = (long long)-i;
++ ua[i] = ub[i] = i;
++ llua[i] = (unsigned long long)i;
++ }
++
++ TEST (test_bic, s);
++ TEST (test_orn, s);
++ TEST (mla, s);
++ TEST (mls, s);
++ TEST (smax, s);
++ TEST (smin, s);
++ TEST (umax, u);
++ TEST (umin, u);
++ TESTV (reduce_smax, s);
++ TESTV (reduce_smin, s);
++ TESTV (reduce_umax, u);
++ TESTV (reduce_umin, u);
++ TESTV (reduce_add_u32, u);
++ TESTV (reduce_add_s32, s);
++ TESTVLL (reduce_add_u64, u);
++ TESTVLL (reduce_add_s64, s);
++ return 0;
++}
+--- a/src/gcc/testsuite/gcc.target/aarch64/vect.x
++++ b/src/gcc/testsuite/gcc.target/aarch64/vect.x
+@@ -0,0 +1,140 @@
++typedef int *__restrict__ pRINT;
++typedef unsigned int *__restrict__ pRUINT;
++typedef long long *__restrict__ pRINT64;
++typedef unsigned long long *__restrict__ pRUINT64;
++
++void test_orn (pRUINT a, pRUINT b, pRUINT c)
++{
++ int i;
++ for (i = 0; i < 16; i++)
++ c[i] = a[i] | (~b[i]);
++}
++
++void test_bic (pRUINT a, pRUINT b, pRUINT c)
++{
++ int i;
++ for (i = 0; i < 16; i++)
++ c[i] = a[i] & (~b[i]);
++}
++
++void mla (pRINT a, pRINT b, pRINT c)
++{
++ int i;
++ for (i=0;i<16;i++)
++ c[i] += a[i] * b[i];
++}
++
++void mls (pRINT a, pRINT b, pRINT c)
++{
++ int i;
++ for (i=0;i<16;i++)
++ c[i] -= a[i] * b[i];
++}
++
++void smax (pRINT a, pRINT b, pRINT c)
++{
++ int i;
++ for (i=0;i<16;i++)
++ c[i] = (a[i] > b[i] ? a[i] : b[i]);
++}
++
++void smin (pRINT a, pRINT b, pRINT c)
++{
++ int i;
++ for (i=0;i<16;i++)
++ c[i] = (a[i] < b[i] ? a[i] : b[i]);
++}
++
++void umax (pRUINT a, pRUINT b, pRUINT c)
++{
++ int i;
++ for (i=0;i<16;i++)
++ c[i] = (a[i] > b[i] ? a[i] : b[i]);
++}
++
++void umin (pRUINT a, pRUINT b, pRUINT c)
++{
++ int i;
++ for (i=0;i<16;i++)
++ c[i] = (a[i] < b[i] ? a[i] : b[i]);
++}
++
++unsigned int reduce_umax (pRUINT a)
++{
++ int i;
++ unsigned int s = a[0];
++ for (i = 1; i < 16; i++)
++ s = (s > a[i] ? s : a[i]);
++
++ return s;
++}
++
++unsigned int reduce_umin (pRUINT a)
++{
++ int i;
++ unsigned int s = a[0];
++ for (i = 1; i < 16; i++)
++ s = (s < a[i] ? s : a[i]);
++
++ return s;
++}
++
++int reduce_smax (pRINT a)
++{
++ int i;
++ int s = a[0];
++ for (i = 1; i < 16; i++)
++ s = (s > a[i] ? s : a[i]);
++
++ return s;
++}
++
++int reduce_smin (pRINT a)
++{
++ int i;
++ int s = a[0];
++ for (i = 1; i < 16; i++)
++ s = (s < a[i] ? s : a[i]);
++
++ return s;
++}
++
++unsigned int reduce_add_u32 (pRINT a)
++{
++ int i;
++ unsigned int s = 0;
++ for (i = 0; i < 16; i++)
++ s += a[i];
++
++ return s;
++}
++
++int reduce_add_s32 (pRINT a)
++{
++ int i;
++ int s = 0;
++ for (i = 0; i < 16; i++)
++ s += a[i];
++
++ return s;
++}
++
++unsigned long long reduce_add_u64 (pRUINT64 a)
++{
++ int i;
++ unsigned long long s = 0;
++ for (i = 0; i < 16; i++)
++ s += a[i];
++
++ return s;
++}
++
++long long reduce_add_s64 (pRINT64 a)
++{
++ int i;
++ long long s = 0;
++ for (i = 0; i < 16; i++)
++ s += a[i];
++
++ return s;
++}
--- a/src/gcc/testsuite/gcc.target/aarch64/vector_intrinsics.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/vector_intrinsics.c
@@ -0,0 +1,803 @@
@@ -75014,149 +76893,6 @@
+{
+ return vshll_high_n_u8 (__a, 8);
+}
---- a/src/gcc/testsuite/gcc.target/aarch64/vect.x
-+++ b/src/gcc/testsuite/gcc.target/aarch64/vect.x
-@@ -0,0 +1,140 @@
-+typedef int *__restrict__ pRINT;
-+typedef unsigned int *__restrict__ pRUINT;
-+typedef long long *__restrict__ pRINT64;
-+typedef unsigned long long *__restrict__ pRUINT64;
-+
-+void test_orn (pRUINT a, pRUINT b, pRUINT c)
-+{
-+ int i;
-+ for (i = 0; i < 16; i++)
-+ c[i] = a[i] | (~b[i]);
-+}
-+
-+void test_bic (pRUINT a, pRUINT b, pRUINT c)
-+{
-+ int i;
-+ for (i = 0; i < 16; i++)
-+ c[i] = a[i] & (~b[i]);
-+}
-+
-+void mla (pRINT a, pRINT b, pRINT c)
-+{
-+ int i;
-+ for (i=0;i<16;i++)
-+ c[i] += a[i] * b[i];
-+}
-+
-+void mls (pRINT a, pRINT b, pRINT c)
-+{
-+ int i;
-+ for (i=0;i<16;i++)
-+ c[i] -= a[i] * b[i];
-+}
-+
-+void smax (pRINT a, pRINT b, pRINT c)
-+{
-+ int i;
-+ for (i=0;i<16;i++)
-+ c[i] = (a[i] > b[i] ? a[i] : b[i]);
-+}
-+
-+void smin (pRINT a, pRINT b, pRINT c)
-+{
-+ int i;
-+ for (i=0;i<16;i++)
-+ c[i] = (a[i] < b[i] ? a[i] : b[i]);
-+}
-+
-+void umax (pRUINT a, pRUINT b, pRUINT c)
-+{
-+ int i;
-+ for (i=0;i<16;i++)
-+ c[i] = (a[i] > b[i] ? a[i] : b[i]);
-+}
-+
-+void umin (pRUINT a, pRUINT b, pRUINT c)
-+{
-+ int i;
-+ for (i=0;i<16;i++)
-+ c[i] = (a[i] < b[i] ? a[i] : b[i]);
-+}
-+
-+unsigned int reduce_umax (pRUINT a)
-+{
-+ int i;
-+ unsigned int s = a[0];
-+ for (i = 1; i < 16; i++)
-+ s = (s > a[i] ? s : a[i]);
-+
-+ return s;
-+}
-+
-+unsigned int reduce_umin (pRUINT a)
-+{
-+ int i;
-+ unsigned int s = a[0];
-+ for (i = 1; i < 16; i++)
-+ s = (s < a[i] ? s : a[i]);
-+
-+ return s;
-+}
-+
-+int reduce_smax (pRINT a)
-+{
-+ int i;
-+ int s = a[0];
-+ for (i = 1; i < 16; i++)
-+ s = (s > a[i] ? s : a[i]);
-+
-+ return s;
-+}
-+
-+int reduce_smin (pRINT a)
-+{
-+ int i;
-+ int s = a[0];
-+ for (i = 1; i < 16; i++)
-+ s = (s < a[i] ? s : a[i]);
-+
-+ return s;
-+}
-+
-+unsigned int reduce_add_u32 (pRINT a)
-+{
-+ int i;
-+ unsigned int s = 0;
-+ for (i = 0; i < 16; i++)
-+ s += a[i];
-+
-+ return s;
-+}
-+
-+int reduce_add_s32 (pRINT a)
-+{
-+ int i;
-+ int s = 0;
-+ for (i = 0; i < 16; i++)
-+ s += a[i];
-+
-+ return s;
-+}
-+
-+unsigned long long reduce_add_u64 (pRUINT64 a)
-+{
-+ int i;
-+ unsigned long long s = 0;
-+ for (i = 0; i < 16; i++)
-+ s += a[i];
-+
-+ return s;
-+}
-+
-+long long reduce_add_s64 (pRINT64 a)
-+{
-+ int i;
-+ long long s = 0;
-+ for (i = 0; i < 16; i++)
-+ s += a[i];
-+
-+ return s;
-+}
--- a/src/gcc/testsuite/gcc.target/aarch64/vfp-1.c
+++ b/src/gcc/testsuite/gcc.target/aarch64/vfp-1.c
@@ -0,0 +1,109 @@
@@ -75565,24 +77301,6 @@
+
+ return 0;
+}
---- a/src/gcc/testsuite/gcc.target/arm/builtin-bswap16-1.c
-+++ b/src/gcc/testsuite/gcc.target/arm/builtin-bswap16-1.c
-@@ -0,0 +1,15 @@
-+/* { dg-do compile } */
-+/* { dg-options "-O2" } */
-+/* { dg-require-effective-target arm_arch_v7a_ok } */
-+/* { dg-add-options arm_arch_v7a } */
-+/* { dg-final { scan-assembler-not "orr\[ \t\]" } } */
-+
-+unsigned short swapu16_1 (unsigned short x)
-+{
-+ return (x << 8) | (x >> 8);
-+}
-+
-+unsigned short swapu16_2 (unsigned short x)
-+{
-+ return (x >> 8) | (x << 8);
-+}
--- a/src/gcc/testsuite/gcc.target/arm/builtin-bswap-1.c
+++ b/src/gcc/testsuite/gcc.target/arm/builtin-bswap-1.c
@@ -0,0 +1,81 @@
@@ -75667,6 +77385,24 @@
+ z = __builtin_bswap32 (x);
+ return foou32 (z);
+}
+--- a/src/gcc/testsuite/gcc.target/arm/builtin-bswap16-1.c
++++ b/src/gcc/testsuite/gcc.target/arm/builtin-bswap16-1.c
+@@ -0,0 +1,15 @@
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++/* { dg-require-effective-target arm_arch_v7a_ok } */
++/* { dg-add-options arm_arch_v7a } */
++/* { dg-final { scan-assembler-not "orr\[ \t\]" } } */
++
++unsigned short swapu16_1 (unsigned short x)
++{
++ return (x << 8) | (x >> 8);
++}
++
++unsigned short swapu16_2 (unsigned short x)
++{
++ return (x >> 8) | (x << 8);
++}
--- a/src/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes64.c
+++ b/src/gcc/testsuite/gcc.target/arm/neon/vgetQ_lanes64.c
@@ -10,11 +10,11 @@
@@ -75898,6 +77634,26 @@
+ vshl with a negative amount in register. */
+/* { dg-final {scan-assembler-times "vshr" 6} } */
+/* { dg-final {scan-assembler-times "vshl" 2} } */
+--- a/src/gcc/testsuite/gcc.target/arm/neon-vdup-1.c
++++ b/src/gcc/testsuite/gcc.target/arm/neon-vdup-1.c
+@@ -0,0 +1,17 @@
++/* Test the optimization of `vdupq_n_f32' ARM Neon intrinsic. */
++
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_neon_ok } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_neon } */
++
++#include <arm_neon.h>
++
++float32x4_t out_float32x4_t;
++void test_vdupq_nf32 (void)
++{
++ out_float32x4_t = vdupq_n_f32 (0.0);
++}
++
++/* { dg-final { scan-assembler "vmov\.f32\[ \]+\[qQ\]\[0-9\]+, #0\.0\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
++/* { dg-final { cleanup-saved-temps } } */
--- a/src/gcc/testsuite/gcc.target/arm/neon-vdup-10.c
+++ b/src/gcc/testsuite/gcc.target/arm/neon-vdup-10.c
@@ -0,0 +1,17 @@
@@ -76098,26 +77854,6 @@
+
+/* { dg-final { scan-assembler "vmov\.i32\[ \]+\[qQ\]\[0-9\]+, #4293722112\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
---- a/src/gcc/testsuite/gcc.target/arm/neon-vdup-1.c
-+++ b/src/gcc/testsuite/gcc.target/arm/neon-vdup-1.c
-@@ -0,0 +1,17 @@
-+/* Test the optimization of `vdupq_n_f32' ARM Neon intrinsic. */
-+
-+/* { dg-do compile } */
-+/* { dg-require-effective-target arm_neon_ok } */
-+/* { dg-options "-O2" } */
-+/* { dg-add-options arm_neon } */
-+
-+#include <arm_neon.h>
-+
-+float32x4_t out_float32x4_t;
-+void test_vdupq_nf32 (void)
-+{
-+ out_float32x4_t = vdupq_n_f32 (0.0);
-+}
-+
-+/* { dg-final { scan-assembler "vmov\.f32\[ \]+\[qQ\]\[0-9\]+, #0\.0\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
-+/* { dg-final { cleanup-saved-temps } } */
--- a/src/gcc/testsuite/gcc.target/arm/neon-vdup-2.c
+++ b/src/gcc/testsuite/gcc.target/arm/neon-vdup-2.c
@@ -0,0 +1,17 @@
@@ -76278,124 +78014,6 @@
+
+/* { dg-final { scan-assembler "vmov\.i32\[ \]+\[qQ\]\[0-9\]+, #4293787647\(\[ \]+@\[a-zA-Z0-9 \]+\)?\n" } } */
+/* { dg-final { cleanup-saved-temps } } */
---- a/src/gcc/testsuite/gcc.target/arm/neon-vext.c
-+++ b/src/gcc/testsuite/gcc.target/arm/neon-vext.c
-@@ -0,0 +1,115 @@
-+/* { dg-do compile } */
-+/* { dg-require-effective-target arm_neon_ok } */
-+/* { dg-require-effective-target arm_little_endian } */
-+/* { dg-options "-O2" } */
-+/* { dg-add-options arm_neon } */
-+
-+#include <arm_neon.h>
-+
-+uint8x8_t
-+tst_vext_u8 (uint8x8_t __a, uint8x8_t __b)
-+{
-+ uint8x8_t __mask1 = {2, 3, 4, 5, 6, 7, 8, 9};
-+
-+ return __builtin_shuffle ( __a, __b, __mask1) ;
-+}
-+
-+uint8x8_t
-+tst_vext_u8_rotate (uint8x8_t __a)
-+{
-+ uint8x8_t __mask1 = {2, 3, 4, 5, 6, 7, 0, 1};
-+ return __builtin_shuffle ( __a, __mask1) ;
-+}
-+
-+uint16x4_t
-+tst_vext_u16 (uint16x4_t __a, uint16x4_t __b)
-+{
-+ uint16x4_t __mask1 = {2, 3, 4, 5};
-+ return __builtin_shuffle ( __a, __b, __mask1) ;
-+}
-+
-+uint16x4_t
-+tst_vext_u16_rotate (uint16x4_t __a)
-+{
-+ uint16x4_t __mask1 = {2, 3, 0, 1};
-+ return __builtin_shuffle ( __a, __mask1) ;
-+}
-+
-+uint32x2_t
-+tst_vext_u32 (uint32x2_t __a, uint32x2_t __b)
-+{
-+ uint32x2_t __mask1 = {1, 2};
-+ return __builtin_shuffle ( __a, __b, __mask1) ;
-+}
-+
-+/* This one is mapped into vrev64.32. */
-+uint32x2_t
-+tst_vext_u32_rotate (uint32x2_t __a)
-+{
-+ uint32x2_t __mask1 = {1, 0};
-+ return __builtin_shuffle ( __a, __mask1) ;
-+}
-+
-+uint8x16_t
-+tst_vextq_u8 (uint8x16_t __a, uint8x16_t __b)
-+{
-+ uint8x16_t __mask1 = {4, 5, 6, 7, 8, 9, 10, 11,
-+ 12, 13, 14, 15, 16, 17, 18, 19};
-+ return __builtin_shuffle ( __a, __b, __mask1) ;
-+}
-+
-+uint8x16_t
-+tst_vextq_u8_rotate (uint8x16_t __a)
-+{
-+ uint8x16_t __mask1 = {4, 5, 6, 7, 8, 9, 10, 11,
-+ 12, 13, 14, 15, 0, 1, 2, 3};
-+ return __builtin_shuffle ( __a, __mask1) ;
-+}
-+
-+uint16x8_t
-+tst_vextq_u16 (uint16x8_t __a, uint16x8_t __b)
-+{
-+ uint16x8_t __mask1 = {2, 3, 4, 5, 6, 7, 8, 9};
-+ return __builtin_shuffle ( __a, __b, __mask1) ;
-+}
-+
-+uint16x8_t
-+tst_vextq_u16_rotate (uint16x8_t __a)
-+{
-+ uint16x8_t __mask1 = {2, 3, 4, 5, 6, 7, 0, 1};
-+ return __builtin_shuffle ( __a, __mask1) ;
-+}
-+
-+uint32x4_t
-+tst_vextq_u32 (uint32x4_t __a, uint32x4_t __b)
-+{
-+ uint32x4_t __mask1 = {1, 2, 3, 4};
-+ return __builtin_shuffle ( __a, __b, __mask1) ;
-+}
-+
-+uint32x4_t
-+tst_vextq_u32_rotate (uint32x4_t __a)
-+{
-+ uint32x4_t __mask1 = {1, 2, 3, 0};
-+ return __builtin_shuffle ( __a, __mask1) ;
-+}
-+
-+uint64x2_t
-+tst_vextq_u64 (uint64x2_t __a, uint64x2_t __b)
-+{
-+ uint64x2_t __mask1 = {1, 2};
-+ return __builtin_shuffle ( __a, __b, __mask1) ;
-+}
-+
-+uint64x2_t
-+tst_vextq_u64_rotate (uint64x2_t __a)
-+{
-+ uint64x2_t __mask1 = {1, 0};
-+ return __builtin_shuffle ( __a, __mask1) ;
-+}
-+
-+/* { dg-final {scan-assembler-times "vext\.8\\t" 4} } */
-+/* { dg-final {scan-assembler-times "vext\.16\\t" 4} } */
-+/* { dg-final {scan-assembler-times "vext\.32\\t" 3} } */
-+/* { dg-final {scan-assembler-times "vrev64\.32\\t" 1} } */
-+/* { dg-final {scan-assembler-times "vext\.64\\t" 2} } */
--- a/src/gcc/testsuite/gcc.target/arm/neon-vext-execute.c
+++ b/src/gcc/testsuite/gcc.target/arm/neon-vext-execute.c
@@ -0,0 +1,340 @@
@@ -76739,6 +78357,124 @@
+
+ return 0;
+}
+--- a/src/gcc/testsuite/gcc.target/arm/neon-vext.c
++++ b/src/gcc/testsuite/gcc.target/arm/neon-vext.c
+@@ -0,0 +1,115 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target arm_neon_ok } */
++/* { dg-require-effective-target arm_little_endian } */
++/* { dg-options "-O2" } */
++/* { dg-add-options arm_neon } */
++
++#include <arm_neon.h>
++
++uint8x8_t
++tst_vext_u8 (uint8x8_t __a, uint8x8_t __b)
++{
++ uint8x8_t __mask1 = {2, 3, 4, 5, 6, 7, 8, 9};
++
++ return __builtin_shuffle ( __a, __b, __mask1) ;
++}
++
++uint8x8_t
++tst_vext_u8_rotate (uint8x8_t __a)
++{
++ uint8x8_t __mask1 = {2, 3, 4, 5, 6, 7, 0, 1};
++ return __builtin_shuffle ( __a, __mask1) ;
++}
++
++uint16x4_t
++tst_vext_u16 (uint16x4_t __a, uint16x4_t __b)
++{
++ uint16x4_t __mask1 = {2, 3, 4, 5};
++ return __builtin_shuffle ( __a, __b, __mask1) ;
++}
++
++uint16x4_t
++tst_vext_u16_rotate (uint16x4_t __a)
++{
++ uint16x4_t __mask1 = {2, 3, 0, 1};
++ return __builtin_shuffle ( __a, __mask1) ;
++}
++
++uint32x2_t
++tst_vext_u32 (uint32x2_t __a, uint32x2_t __b)
++{
++ uint32x2_t __mask1 = {1, 2};
++ return __builtin_shuffle ( __a, __b, __mask1) ;
++}
++
++/* This one is mapped into vrev64.32. */
++uint32x2_t
++tst_vext_u32_rotate (uint32x2_t __a)
++{
++ uint32x2_t __mask1 = {1, 0};
++ return __builtin_shuffle ( __a, __mask1) ;
++}
++
++uint8x16_t
++tst_vextq_u8 (uint8x16_t __a, uint8x16_t __b)
++{
++ uint8x16_t __mask1 = {4, 5, 6, 7, 8, 9, 10, 11,
++ 12, 13, 14, 15, 16, 17, 18, 19};
++ return __builtin_shuffle ( __a, __b, __mask1) ;
++}
++
++uint8x16_t
++tst_vextq_u8_rotate (uint8x16_t __a)
++{
++ uint8x16_t __mask1 = {4, 5, 6, 7, 8, 9, 10, 11,
++ 12, 13, 14, 15, 0, 1, 2, 3};
++ return __builtin_shuffle ( __a, __mask1) ;
++}
++
++uint16x8_t
++tst_vextq_u16 (uint16x8_t __a, uint16x8_t __b)
++{
++ uint16x8_t __mask1 = {2, 3, 4, 5, 6, 7, 8, 9};
++ return __builtin_shuffle ( __a, __b, __mask1) ;
++}
++
++uint16x8_t
++tst_vextq_u16_rotate (uint16x8_t __a)
++{
++ uint16x8_t __mask1 = {2, 3, 4, 5, 6, 7, 0, 1};
++ return __builtin_shuffle ( __a, __mask1) ;
++}
++
++uint32x4_t
++tst_vextq_u32 (uint32x4_t __a, uint32x4_t __b)
++{
++ uint32x4_t __mask1 = {1, 2, 3, 4};
++ return __builtin_shuffle ( __a, __b, __mask1) ;
++}
++
++uint32x4_t
++tst_vextq_u32_rotate (uint32x4_t __a)
++{
++ uint32x4_t __mask1 = {1, 2, 3, 0};
++ return __builtin_shuffle ( __a, __mask1) ;
++}
++
++uint64x2_t
++tst_vextq_u64 (uint64x2_t __a, uint64x2_t __b)
++{
++ uint64x2_t __mask1 = {1, 2};
++ return __builtin_shuffle ( __a, __b, __mask1) ;
++}
++
++uint64x2_t
++tst_vextq_u64_rotate (uint64x2_t __a)
++{
++ uint64x2_t __mask1 = {1, 0};
++ return __builtin_shuffle ( __a, __mask1) ;
++}
++
++/* { dg-final {scan-assembler-times "vext\.8\\t" 4} } */
++/* { dg-final {scan-assembler-times "vext\.16\\t" 4} } */
++/* { dg-final {scan-assembler-times "vext\.32\\t" 3} } */
++/* { dg-final {scan-assembler-times "vrev64\.32\\t" 1} } */
++/* { dg-final {scan-assembler-times "vext\.64\\t" 2} } */
--- a/src/gcc/testsuite/gcc.target/arm/pr52686.c
+++ b/src/gcc/testsuite/gcc.target/arm/pr52686.c
@@ -0,0 +1,19 @@
@@ -77323,434 +79059,6 @@
+
+
+ }
---- a/src/gcc/testsuite/g++.dg/abi/aarch64_guard1.C
-+++ b/src/gcc/testsuite/g++.dg/abi/aarch64_guard1.C
-@@ -0,0 +1,17 @@
-+// Check that the initialization guard variable is an 8-byte aligned,
-+// 8-byte doubleword and that only the least significant bit is used
-+// for initialization guard variables.
-+// { dg-do compile { target aarch64*-*-* } }
-+// { dg-options "-O -fdump-tree-original -fno-section-anchors" }
-+
-+int bar();
-+
-+int *foo ()
-+{
-+ static int x = bar ();
-+ return &x;
-+}
-+
-+// { dg-final { scan-assembler _ZGVZ3foovE1x,8,8 } }
-+// { dg-final { scan-tree-dump "_ZGVZ3foovE1x & 1" "original" } }
-+// { dg-final { cleanup-tree-dump "original" } }
---- a/src/gcc/testsuite/g++.dg/abi/arm_va_list.C
-+++ b/src/gcc/testsuite/g++.dg/abi/arm_va_list.C
-@@ -1,9 +1,10 @@
--// { dg-do compile }
-+// { dg-do compile { target { aarch64*-*-* arm*-*-* } } }
- // { dg-options "-Wno-abi" }
--// { dg-require-effective-target arm_eabi }
-+// { dg-require-effective-target arm_eabi { target arm*-*-* } }
-
- // AAPCS \S 7.1.4 requires that va_list be a typedef for "struct
- // __va_list". The mangling is as if it were "std::__va_list".
-+// AAPCS64 \S 7.1.4 has the same requirement for AArch64 targets.
- // #include <stdarg.h>
- typedef __builtin_va_list va_list;
-
---- a/src/gcc/testsuite/g++.dg/abi/mangle-neon-aarch64.C
-+++ b/src/gcc/testsuite/g++.dg/abi/mangle-neon-aarch64.C
-@@ -0,0 +1,55 @@
-+// Test that AArch64 AdvSIMD (NEON) vector types have their names mangled
-+// correctly.
-+
-+// { dg-do compile { target { aarch64*-*-* } } }
-+
-+#include <arm_neon.h>
-+
-+void f0 (int8x8_t a) {}
-+void f1 (int16x4_t a) {}
-+void f2 (int32x2_t a) {}
-+void f3 (uint8x8_t a) {}
-+void f4 (uint16x4_t a) {}
-+void f5 (uint32x2_t a) {}
-+void f6 (float32x2_t a) {}
-+void f7 (poly8x8_t a) {}
-+void f8 (poly16x4_t a) {}
-+
-+void f9 (int8x16_t a) {}
-+void f10 (int16x8_t a) {}
-+void f11 (int32x4_t a) {}
-+void f12 (int64x2_t a) {}
-+void f13 (uint8x16_t a) {}
-+void f14 (uint16x8_t a) {}
-+void f15 (uint32x4_t a) {}
-+void f16 (uint64x2_t a) {}
-+void f17 (float32x4_t a) {}
-+void f18 (float64x2_t a) {}
-+void f19 (poly8x16_t a) {}
-+void f20 (poly16x8_t a) {}
-+
-+void f21 (int8x16_t, int8x16_t) {}
-+
-+
-+// { dg-final { scan-assembler "_Z2f010__Int8x8_t:" } }
-+// { dg-final { scan-assembler "_Z2f111__Int16x4_t:" } }
-+// { dg-final { scan-assembler "_Z2f211__Int32x2_t:" } }
-+// { dg-final { scan-assembler "_Z2f311__Uint8x8_t:" } }
-+// { dg-final { scan-assembler "_Z2f412__Uint16x4_t:" } }
-+// { dg-final { scan-assembler "_Z2f512__Uint32x2_t:" } }
-+// { dg-final { scan-assembler "_Z2f613__Float32x2_t:" } }
-+// { dg-final { scan-assembler "_Z2f711__Poly8x8_t:" } }
-+// { dg-final { scan-assembler "_Z2f812__Poly16x4_t:" } }
-+// { dg-final { scan-assembler "_Z2f911__Int8x16_t:" } }
-+// { dg-final { scan-assembler "_Z3f1011__Int16x8_t:" } }
-+// { dg-final { scan-assembler "_Z3f1111__Int32x4_t:" } }
-+// { dg-final { scan-assembler "_Z3f1211__Int64x2_t:" } }
-+// { dg-final { scan-assembler "_Z3f1312__Uint8x16_t:" } }
-+// { dg-final { scan-assembler "_Z3f1412__Uint16x8_t:" } }
-+// { dg-final { scan-assembler "_Z3f1512__Uint32x4_t:" } }
-+// { dg-final { scan-assembler "_Z3f1612__Uint64x2_t:" } }
-+// { dg-final { scan-assembler "_Z3f1713__Float32x4_t:" } }
-+// { dg-final { scan-assembler "_Z3f1813__Float64x2_t:" } }
-+// { dg-final { scan-assembler "_Z3f1912__Poly8x16_t:" } }
-+// { dg-final { scan-assembler "_Z3f2012__Poly16x8_t:" } }
-+// { dg-final { scan-assembler "_Z3f2111__Int8x16_tS_:" } }
---- a/src/gcc/testsuite/g++.dg/cpp0x/constexpr-array-ptr8.C
-+++ b/src/gcc/testsuite/g++.dg/cpp0x/constexpr-array-ptr8.C
-@@ -0,0 +1,54 @@
-+// PR c++/57047
-+// { dg-require-effective-target c++11 }
-+
-+template <typename>
-+struct A;
-+template <typename T>
-+struct A <T &>
-+{
-+ typedef T type;
-+};
-+template <typename T>
-+constexpr T && foo (typename A <T>::type & __t) noexcept
-+{
-+ return static_cast <T &&>(__t);
-+}
-+template <class T1, class T2>
-+struct B
-+{
-+ T1 t1;
-+ T2 t2;
-+ template <class U>
-+ constexpr B (U && __x, const T2 & __y) : t1 (foo <U> (__x)), t2 (__y) {}
-+};
-+static inline constexpr bool
-+fn1 (const char c)
-+{
-+ return ('0' <= c) && (c <= '9');
-+}
-+static inline constexpr bool
-+fn2 (const char c)
-+{
-+ return (('A' <= c) && (c <= 'Z')) || (('a' <= c) && (c <= 'z'));
-+}
-+static constexpr bool
-+fn3 (const char *const x)
-+{
-+ return (x[1] == '\0' && x[0] == ']') ? true : (!fn1 (x[0])) ? false : fn3 (&x[1]);
-+}
-+static constexpr bool
-+fn4 (const char *const x)
-+{
-+ return (x[0] == '\0') ? fn3 (&x[1]) : fn4 (&x[1]);
-+}
-+static inline constexpr bool
-+fn5 (const char *const x)
-+{
-+ return fn2 (x[0]) ? fn4 (x) : false;
-+}
-+struct C final
-+{
-+ constexpr C (const char *const t1) : c (fn5 (t1) ? 199 : 69) {}
-+ unsigned c;
-+};
-+B <C, C> p ("a", "b");
---- a/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-eh3.C
-+++ b/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-eh3.C
-@@ -0,0 +1,14 @@
-+// PR c++/56388
-+// { dg-require-effective-target c++11 }
-+
-+int main()
-+{
-+ bool /*const*/ condition = false;
-+
-+ [&]{
-+ try{}
-+ catch(...){
-+ if(condition){}
-+ }
-+ }();
-+}
---- a/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nullptr.C
-+++ b/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-nullptr.C
-@@ -0,0 +1,47 @@
-+// PR c++/54170
-+// { dg-do run { target c++11 } }
-+
-+#include <cassert>
-+
-+struct A;
-+typedef A* ptr;
-+typedef int (A::*pmf) (int);
-+typedef int (A::*pdm);
-+
-+int total;
-+
-+void add(int n)
-+{
-+ total += n;
-+}
-+
-+template <typename RType, typename Callable>
-+RType Call(Callable native_func, int arg)
-+{
-+ return native_func(arg);
-+}
-+
-+template <typename RType>
-+RType do_test(int delta)
-+{
-+ return Call<RType>([=](int delta) { add(delta); return nullptr; }, delta);
-+}
-+
-+template <typename RType>
-+void test()
-+{
-+ total = 0;
-+ assert (!do_test<RType>(5));
-+ assert (total == 5);
-+ assert (!do_test<RType>(20));
-+ assert (total == 25);
-+ assert (!do_test<RType>(-256));
-+ assert (total == -231);
-+}
-+
-+int main()
-+{
-+ test<ptr>();
-+ test<pdm>();
-+ test<pmf>();
-+}
---- a/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-return1.C
-+++ b/src/gcc/testsuite/g++.dg/cpp0x/lambda/lambda-return1.C
-@@ -0,0 +1,26 @@
-+// PR c++/57437
-+// { dg-require-effective-target c++11 }
-+
-+struct A {
-+ int i;
-+
-+ A(): i(42) {}
-+ A(const A&) = default;
-+ A(A&& a): i(a.i) { a.i = 0; }
-+};
-+
-+int main()
-+{
-+ A x;
-+
-+ auto y = [x] () mutable {
-+ x.i++;
-+ return x;
-+ };
-+
-+ if (y().i != 43)
-+ __builtin_abort ();
-+
-+ if (y().i != 44)
-+ __builtin_abort ();
-+}
---- a/src/gcc/testsuite/g++.dg/debug/template2.C
-+++ b/src/gcc/testsuite/g++.dg/debug/template2.C
-@@ -0,0 +1,14 @@
-+// PR c++/57545
-+
-+template<typename T, long unsigned int N>
-+struct array {
-+ T data[N];
-+};
-+
-+template<typename T>
-+struct derived {
-+ typedef long unsigned int size_type;
-+ static const size_type n = 42;
-+
-+ array<int, n> a;
-+};
---- a/src/gcc/testsuite/g++.dg/expr/const1.C
-+++ b/src/gcc/testsuite/g++.dg/expr/const1.C
-@@ -0,0 +1,9 @@
-+// PR c++/57551
-+
-+extern unsigned long ADDR;
-+
-+unsigned long f(){
-+ const unsigned long* const var=&ADDR;
-+ const unsigned long retval=var[1];
-+ return retval;
-+}
---- a/src/gcc/testsuite/g++.dg/other/pr23205-2.C
-+++ b/src/gcc/testsuite/g++.dg/other/pr23205-2.C
-@@ -1,5 +1,5 @@
- /* { dg-do compile } */
--/* { dg-skip-if "No stabs" { mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* } { "*" } { "" } } */
-+/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* } { "*" } { "" } } */
- /* { dg-options "-gstabs+ -fno-eliminate-unused-debug-types -ftoplevel-reorder" } */
-
- const int foobar = 4;
---- a/src/gcc/testsuite/g++.dg/other/PR23205.C
-+++ b/src/gcc/testsuite/g++.dg/other/PR23205.C
-@@ -1,5 +1,5 @@
- /* { dg-do compile } */
--/* { dg-skip-if "No stabs" { mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* *-*-vxworks } { "*" } { "" } } */
-+/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* *-*-vxworks } { "*" } { "" } } */
- /* { dg-options "-gstabs+ -fno-eliminate-unused-debug-types" } */
-
- const int foobar = 4;
---- a/src/gcc/testsuite/g++.dg/template/array26.C
-+++ b/src/gcc/testsuite/g++.dg/template/array26.C
-@@ -0,0 +1,40 @@
-+// PR c++/57325
-+
-+class valarray { int _M_data; };
-+template < typename > struct SimpleJet { valarray partials; };
-+
-+template < class C > struct scoped_ptr_impl
-+{
-+ scoped_ptr_impl (C *):data_ () { }
-+ struct Data
-+ {
-+ C ptr;
-+ };
-+ Data data_;
-+};
-+
-+template < class, class = int >struct scoped_ptr;
-+template < class C, class D > struct scoped_ptr <C[], D >
-+{
-+ scoped_ptr ():impl_ (0) { }
-+ scoped_ptr_impl < C > impl_;
-+};
-+
-+template < typename JetsT > void
-+TestJets (JetsT *)
-+{
-+ typedef typename JetsT::JetType JetT;
-+ scoped_ptr < JetT[] > a;
-+}
-+
-+template < typename T > struct SimpleJets
-+{
-+ typedef SimpleJet < T > JetType;
-+ scoped_ptr < SimpleJet < T >[] > vars_;
-+};
-+
-+void fn ()
-+{
-+ SimpleJets < double >b;
-+ TestJets (&b);
-+}
---- a/src/gcc/testsuite/g++.dg/template/using23.C
-+++ b/src/gcc/testsuite/g++.dg/template/using23.C
-@@ -0,0 +1,15 @@
-+// PR c++/57831
-+
-+struct A {
-+ void f();
-+};
-+template <class T> struct B : T {
-+ typedef T base;
-+ using base::f; // If I write "using B<T>::f" it's ok
-+ void g( ) {
-+ B<T>::f(); // This is OK as expected
-+ (this->*&T::f)(); // This is also OK
-+ (this->*&B<T>::f)(); // This causes error
-+ }
-+};
-+template struct B< A >;
---- a/src/gcc/testsuite/g++.dg/torture/pr54684.C
-+++ b/src/gcc/testsuite/g++.dg/torture/pr54684.C
-@@ -0,0 +1,62 @@
-+// { dg-do compile }
-+
-+typedef union tree_node *tree;
-+typedef union gimple_statement_d *gimple;
-+struct vec_prefix { unsigned num_; };
-+template<typename T> struct vec_t {
-+ unsigned length (void) const;
-+ T &operator[] (unsigned);
-+ vec_prefix prefix_;
-+ T vec_[1];
-+};
-+template<typename T> inline unsigned vec_t<T>::length (void) const {
-+ return prefix_.num_;
-+}
-+template<typename T> T & vec_t<T>::operator[] (unsigned ix) {
-+ ((void)(__builtin_expect(!(ix < prefix_.num_), 0) ? __builtin_unreachable(), 0 : 0));
-+ return vec_[ix];
-+}
-+enum tree_code { PARM_DECL };
-+struct tree_base {
-+ enum tree_code code : 16;
-+ unsigned default_def_flag : 1;
-+};
-+union tree_node {
-+ struct tree_base base;
-+};
-+struct ipa_param_descriptor {
-+ tree decl;
-+ unsigned used : 1;
-+};
-+typedef struct ipa_param_descriptor ipa_param_descriptor_t;
-+struct ipa_node_params {
-+ vec_t<ipa_param_descriptor_t> *descriptors;
-+};
-+static inline int ipa_get_param_count (struct ipa_node_params *info) {
-+ return ((info->descriptors) ? (info->descriptors)->length () : 0);
-+}
-+static inline tree ipa_get_param (struct ipa_node_params *info, int i) {
-+ return ((*(info->descriptors))[i]).decl;
-+}
-+static inline void ipa_set_param_used (struct ipa_node_params *info, int i, bool val) {
-+ ((*(info->descriptors))[i]).used = val;
-+}
-+int ipa_get_param_decl_index (struct ipa_node_params *info, tree ptree)
-+{
-+ int i, count;
-+ count = ipa_get_param_count (info);
-+ for (i = 0; i < count; i++)
-+ if (ipa_get_param (info, i) == ptree) return i;
-+ return -1;
-+}
-+bool visit_ref_for_mod_analysis (gimple stmt __attribute__ ((__unused__)),
-+ tree op, void *data)
-+{
-+ struct ipa_node_params *info = (struct ipa_node_params *) data;
-+ if (op && ((enum tree_code) (op)->base.code) == PARM_DECL)
-+ {
-+ int index = ipa_get_param_decl_index (info, op);
-+ ((void)(__builtin_expect(!(index >= 0), 0) ? __builtin_unreachable(), 0 : 0));
-+ ipa_set_param_used (info, index, true);
-+ }
-+}
---- a/src/gcc/testsuite/g++.dg/tree-ssa/ivopts-2.C
-+++ b/src/gcc/testsuite/g++.dg/tree-ssa/ivopts-2.C
-@@ -7,5 +7,5 @@
- *p = 1;
- }
-
--/* { dg-final { scan-tree-dump-times "PHI <p" 1 "ivopts"} } */
-+/* { dg-final { scan-tree-dump-times "PHI <\[pb\]" 1 "ivopts"} } */
- /* { dg-final { cleanup-tree-dump "ivopts" } } */
--- a/src/gcc/testsuite/gfortran.dg/debug/pr35154-stabs.f
+++ b/src/gcc/testsuite/gfortran.dg/debug/pr35154-stabs.f
@@ -1,6 +1,6 @@
@@ -78196,6 +79504,146 @@
+end module
+
+! { dg-final { cleanup-modules "base_mod r_mod" } }
+--- a/src/gcc/testsuite/gnat.dg/in_out_parameter4.adb
++++ b/src/gcc/testsuite/gnat.dg/in_out_parameter4.adb
+@@ -0,0 +1,30 @@
++-- { dg-do run }
++-- { dg-options "-gnat12 -gnatVa" }
++
++procedure In_Out_Parameter4 is
++
++ type Enum is (E_Undetermined, E_Down, E_Up);
++ subtype Status_T is Enum range E_Down .. E_Up;
++
++ function Recurse (Val : in out Integer) return Status_T is
++
++ Result : Status_T;
++
++ procedure Dummy (I : in out Integer) is begin null; end;
++
++ begin
++ if Val > 500 then
++ Val := Val - 1;
++ Result := Recurse (Val);
++ return Result;
++ else
++ return E_UP;
++ end if;
++ end;
++
++ Val : Integer := 501;
++ S : Status_T;
++
++begin
++ S := Recurse (Val);
++end;
+--- a/src/gcc/testsuite/gnat.dg/loop_optimization16.adb
++++ b/src/gcc/testsuite/gnat.dg/loop_optimization16.adb
+@@ -0,0 +1,24 @@
++-- { dg-do run }
++
++with Loop_Optimization16_Pkg; use Loop_Optimization16_Pkg;
++
++procedure Loop_Optimization16 is
++
++ Counter : Natural := 0;
++
++ C : constant Natural := F;
++
++ subtype Index_T is Index_Base range 1 .. Index_Base (C);
++
++begin
++
++ for I in Index_T'First .. Index_T'Last loop
++ Counter := Counter + 1;
++ exit when Counter > 200;
++ end loop;
++
++ if Counter > 200 then
++ raise Program_Error;
++ end if;
++
++end Loop_Optimization16;
+--- a/src/gcc/testsuite/gnat.dg/loop_optimization16_pkg.adb
++++ b/src/gcc/testsuite/gnat.dg/loop_optimization16_pkg.adb
+@@ -0,0 +1,8 @@
++package body Loop_Optimization16_Pkg is
++
++ function F return Natural is
++ begin
++ return Natural (Index_Base'Last);
++ end;
++
++end Loop_Optimization16_Pkg;
+--- a/src/gcc/testsuite/gnat.dg/loop_optimization16_pkg.ads
++++ b/src/gcc/testsuite/gnat.dg/loop_optimization16_pkg.ads
+@@ -0,0 +1,7 @@
++package Loop_Optimization16_Pkg is
++
++ type Index_Base is range 0 .. 127;
++
++ function F return Natural;
++
++end Loop_Optimization16_Pkg;
+--- a/src/gcc/testsuite/gnat.dg/opt28.adb
++++ b/src/gcc/testsuite/gnat.dg/opt28.adb
+@@ -0,0 +1,31 @@
++with Opt28_Pkg; use Opt28_Pkg;
++
++package body Opt28 is
++
++ function Full_Filename (Filename : String) return String is
++ Path : constant String := "PATH";
++ Posix_Path : constant Posix_String := To_Posix (Path);
++ begin
++
++ declare
++ M : constant Posix_String := Value_Of (Posix_Path);
++ N : constant Posix_String (1 .. M'Length) := M;
++ Var : constant String := To_String (Str => N);
++ Start_Pos : Natural := 1;
++ End_Pos : Natural := 1;
++ begin
++ while Start_Pos <= Var'Length loop
++ End_Pos := Position (Var (Start_Pos .. Var'Length));
++
++ if Is_File (To_Posix (Var (Start_Pos .. End_Pos - 1) & Filename)) then
++ return Var (Start_Pos .. End_Pos - 1) & Filename;
++ else
++ Start_Pos := End_Pos + 1;
++ end if;
++ end loop;
++ end;
++
++ return "";
++ end;
++
++end Opt28;
+--- a/src/gcc/testsuite/gnat.dg/opt28.ads
++++ b/src/gcc/testsuite/gnat.dg/opt28.ads
+@@ -0,0 +1,8 @@
++-- { dg-do compile }
++-- { dg-options "-O2" }
++
++package Opt28 is
++
++ function Full_Filename (Filename : String) return String;
++
++end Opt28;
+--- a/src/gcc/testsuite/gnat.dg/opt28_pkg.ads
++++ b/src/gcc/testsuite/gnat.dg/opt28_pkg.ads
+@@ -0,0 +1,11 @@
++package Opt28_Pkg is
++
++ type Posix_String is array (Positive range <>) of aliased Character;
++
++ function To_Posix (Str : String) return Posix_String;
++ function To_String (Str : Posix_String) return String;
++ function Is_File (Str : Posix_String) return Boolean;
++ function Value_Of (Name : Posix_String) return Posix_String;
++ function Position (In_Line : String) return Natural;
++
++end Opt28_Pkg;
--- a/src/gcc/testsuite/gnat.dg/specs/last_bit.ads
+++ b/src/gcc/testsuite/gnat.dg/specs/last_bit.ads
@@ -0,0 +1,19 @@
@@ -78218,17 +79666,6 @@
+ List_Last_Bit : Integer := Null_Record.List'Last_Bit;
+
+end Last_Bit;
---- a/src/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C
-+++ b/src/gcc/testsuite/g++.old-deja/g++.abi/ptrmem.C
-@@ -7,7 +7,7 @@
- function. However, some platforms use all bits to encode a
- function pointer. Such platforms use the lowest bit of the delta,
- that is shifted left by one bit. */
--#if defined __MN10300__ || defined __SH5__ || defined __arm__ || defined __thumb__ || defined __mips__
-+#if defined __MN10300__ || defined __SH5__ || defined __arm__ || defined __thumb__ || defined __mips__ || defined __aarch64__
- #define ADJUST_PTRFN(func, virt) ((void (*)())(func))
- #define ADJUST_DELTA(delta, virt) (((delta) << 1) + !!(virt))
- #else
--- a/src/gcc/testsuite/lib/target-supports.exp
+++ b/src/gcc/testsuite/lib/target-supports.exp
@@ -493,6 +493,13 @@
@@ -79568,37 +81005,19 @@
+ exit (ret);
}
}
---- a/src/gcc/tree.c
-+++ b/src/gcc/tree.c
-@@ -9358,6 +9358,7 @@
- integer_ptr_type_node = build_pointer_type (integer_type_node);
-
- /* Fixed size integer types. */
-+ uint16_type_node = build_nonstandard_integer_type (16, true);
- uint32_type_node = build_nonstandard_integer_type (32, true);
- uint64_type_node = build_nonstandard_integer_type (64, true);
-
---- a/src/gcc/tree.h
-+++ b/src/gcc/tree.h
-@@ -3746,6 +3746,7 @@
- TI_UINTDI_TYPE,
- TI_UINTTI_TYPE,
-
-+ TI_UINT16_TYPE,
- TI_UINT32_TYPE,
- TI_UINT64_TYPE,
-
-@@ -3901,6 +3902,7 @@
- #define unsigned_intDI_type_node global_trees[TI_UINTDI_TYPE]
- #define unsigned_intTI_type_node global_trees[TI_UINTTI_TYPE]
-
-+#define uint16_type_node global_trees[TI_UINT16_TYPE]
- #define uint32_type_node global_trees[TI_UINT32_TYPE]
- #define uint64_type_node global_trees[TI_UINT64_TYPE]
-
--- a/src/gcc/tree-ssa-ccp.c
+++ b/src/gcc/tree-ssa-ccp.c
-@@ -2342,6 +2342,72 @@
+@@ -1744,6 +1744,9 @@
+ insert_clobber_before_stack_restore (gimple_phi_result (stmt), var,
+ visited);
+ }
++ else if (gimple_assign_ssa_name_copy_p (stmt))
++ insert_clobber_before_stack_restore (gimple_assign_lhs (stmt), var,
++ visited);
+ else
+ gcc_assert (is_gimple_debug (stmt));
+ }
+@@ -2342,6 +2345,72 @@
}
}
@@ -79671,7 +81090,7 @@
/* A simple pass that attempts to fold all builtin functions. This pass
is run after we've propagated as many constants as we can. */
-@@ -2403,6 +2469,11 @@
+@@ -2403,6 +2472,11 @@
gsi_next (&i);
continue;
@@ -79683,7 +81102,7 @@
case BUILT_IN_VA_START:
case BUILT_IN_VA_END:
case BUILT_IN_VA_COPY:
-@@ -2417,6 +2488,9 @@
+@@ -2417,6 +2491,9 @@
continue;
}
@@ -79945,6 +81364,52 @@
/* This has to happen before SCCVN runs because
loop_optimizer_init may create new phis, etc. */
+--- a/src/gcc/tree-streamer-in.c
++++ b/src/gcc/tree-streamer-in.c
+@@ -172,12 +172,11 @@
+ static void
+ unpack_ts_fixed_cst_value_fields (struct bitpack_d *bp, tree expr)
+ {
+- struct fixed_value fv;
+-
+- fv.mode = bp_unpack_enum (bp, machine_mode, MAX_MACHINE_MODE);
+- fv.data.low = bp_unpack_var_len_int (bp);
+- fv.data.high = bp_unpack_var_len_int (bp);
+- TREE_FIXED_CST (expr) = fv;
++ FIXED_VALUE_TYPE *fp = ggc_alloc_fixed_value ();
++ fp->mode = bp_unpack_enum (bp, machine_mode, MAX_MACHINE_MODE);
++ fp->data.low = bp_unpack_var_len_int (bp);
++ fp->data.high = bp_unpack_var_len_int (bp);
++ TREE_FIXED_CST_PTR (expr) = fp;
+ }
+
+
+--- a/src/gcc/tree-tailcall.c
++++ b/src/gcc/tree-tailcall.c
+@@ -329,8 +329,10 @@
+ case NEGATE_EXPR:
+ if (FLOAT_TYPE_P (TREE_TYPE (op0)))
+ *m = build_real (TREE_TYPE (op0), dconstm1);
+- else
++ else if (INTEGRAL_TYPE_P (TREE_TYPE (op0)))
+ *m = build_int_cst (TREE_TYPE (op0), -1);
++ else
++ return false;
+
+ *ass_var = dest;
+ return true;
+@@ -342,8 +344,10 @@
+ {
+ if (FLOAT_TYPE_P (TREE_TYPE (non_ass_var)))
+ *m = build_real (TREE_TYPE (non_ass_var), dconstm1);
+- else
++ else if (INTEGRAL_TYPE_P (TREE_TYPE (non_ass_var)))
+ *m = build_int_cst (TREE_TYPE (non_ass_var), -1);
++ else
++ return false;
+
+ *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var);
+ }
--- a/src/gcc/tree-vect-data-refs.c
+++ b/src/gcc/tree-vect-data-refs.c
@@ -111,6 +111,7 @@
@@ -80031,17 +81496,6 @@
/* Data-flow analysis to detect stmts that do not need to be vectorized. */
---- a/src/gcc/tree-vectorizer.h
-+++ b/src/gcc/tree-vectorizer.h
-@@ -941,7 +941,7 @@
- in the future. */
- typedef gimple (* vect_recog_func_ptr) (VEC (gimple, heap) **, tree *, tree *);
- #define NUM_PATTERNS 10
--void vect_pattern_recog (loop_vec_info);
-+void vect_pattern_recog (loop_vec_info, bb_vec_info);
-
- /* In tree-vectorizer.c. */
- unsigned vectorize_loops (void);
--- a/src/gcc/tree-vect-patterns.c
+++ b/src/gcc/tree-vect-patterns.c
@@ -84,15 +84,47 @@
@@ -80829,146 +82283,45 @@
&def, &dt[1]);
if (!ok)
---- a/src/INSTALL/binaries.html
-+++ b/src/INSTALL/binaries.html
-@@ -3,7 +3,7 @@
- <title>Installing GCC: Binaries</title>
- <meta http-equiv="Content-Type" content="text/html">
- <meta name="description" content="Installing GCC: Binaries">
--<meta name="generator" content="makeinfo 4.12">
-+<meta name="generator" content="makeinfo 4.13">
- <link title="Top" rel="top" href="#Top">
- <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
- <!--
---- a/src/INSTALL/build.html
-+++ b/src/INSTALL/build.html
-@@ -3,7 +3,7 @@
- <title>Installing GCC: Building</title>
- <meta http-equiv="Content-Type" content="text/html">
- <meta name="description" content="Installing GCC: Building">
--<meta name="generator" content="makeinfo 4.12">
-+<meta name="generator" content="makeinfo 4.13">
- <link title="Top" rel="top" href="#Top">
- <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
- <!--
---- a/src/INSTALL/configure.html
-+++ b/src/INSTALL/configure.html
-@@ -3,7 +3,7 @@
- <title>Installing GCC: Configuration</title>
- <meta http-equiv="Content-Type" content="text/html">
- <meta name="description" content="Installing GCC: Configuration">
--<meta name="generator" content="makeinfo 4.12">
-+<meta name="generator" content="makeinfo 4.13">
- <link title="Top" rel="top" href="#Top">
- <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
- <!--
-@@ -446,6 +446,14 @@
- conventions, etc. should not be built. The default is to build a
- predefined set of them.
+--- a/src/gcc/tree-vectorizer.h
++++ b/src/gcc/tree-vectorizer.h
+@@ -941,7 +941,7 @@
+ in the future. */
+ typedef gimple (* vect_recog_func_ptr) (VEC (gimple, heap) **, tree *, tree *);
+ #define NUM_PATTERNS 10
+-void vect_pattern_recog (loop_vec_info);
++void vect_pattern_recog (loop_vec_info, bb_vec_info);
+
+ /* In tree-vectorizer.c. */
+ unsigned vectorize_loops (void);
+--- a/src/gcc/tree.c
++++ b/src/gcc/tree.c
+@@ -9358,6 +9358,7 @@
+ integer_ptr_type_node = build_pointer_type (integer_type_node);
+
+ /* Fixed size integer types. */
++ uint16_type_node = build_nonstandard_integer_type (16, true);
+ uint32_type_node = build_nonstandard_integer_type (32, true);
+ uint64_type_node = build_nonstandard_integer_type (64, true);
+
+--- a/src/gcc/tree.h
++++ b/src/gcc/tree.h
+@@ -3746,6 +3746,7 @@
+ TI_UINTDI_TYPE,
+ TI_UINTTI_TYPE,
+
++ TI_UINT16_TYPE,
+ TI_UINT32_TYPE,
+ TI_UINT64_TYPE,
+
+@@ -3901,6 +3902,7 @@
+ #define unsigned_intDI_type_node global_trees[TI_UINTDI_TYPE]
+ #define unsigned_intTI_type_node global_trees[TI_UINTTI_TYPE]
+
++#define uint16_type_node global_trees[TI_UINT16_TYPE]
+ #define uint32_type_node global_trees[TI_UINT32_TYPE]
+ #define uint64_type_node global_trees[TI_UINT64_TYPE]
-+ <br><dt><code>--enable-multiarch</code><dd>Specify whether to enable or disable multiarch support. The default is
-+to check for glibc start files in a multiarch location, and enable it
-+if the files are found. The auto detection is enabled for native builds,
-+and for cross builds configured with <samp><span class="option">--with-sysroot</span></samp>, and without
-+<samp><span class="option">--with-native-system-header-dir</span></samp>.
-+More documentation about multiarch can be found at
-+<a href="http://wiki.debian.org/Multiarch">http://wiki.debian.org/Multiarch</a>.
-+
- <p>Some targets provide finer-grained control over which multilibs are built
- (e.g., <samp><span class="option">--disable-softfloat</span></samp>):
- <dl>
---- a/src/INSTALL/download.html
-+++ b/src/INSTALL/download.html
-@@ -3,7 +3,7 @@
- <title>Downloading GCC</title>
- <meta http-equiv="Content-Type" content="text/html">
- <meta name="description" content="Downloading GCC">
--<meta name="generator" content="makeinfo 4.12">
-+<meta name="generator" content="makeinfo 4.13">
- <link title="Top" rel="top" href="#Top">
- <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
- <!--
---- a/src/INSTALL/finalinstall.html
-+++ b/src/INSTALL/finalinstall.html
-@@ -3,7 +3,7 @@
- <title>Installing GCC: Final installation</title>
- <meta http-equiv="Content-Type" content="text/html">
- <meta name="description" content="Installing GCC: Final installation">
--<meta name="generator" content="makeinfo 4.12">
-+<meta name="generator" content="makeinfo 4.13">
- <link title="Top" rel="top" href="#Top">
- <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
- <!--
---- a/src/INSTALL/gfdl.html
-+++ b/src/INSTALL/gfdl.html
-@@ -3,7 +3,7 @@
- <title>Installing GCC: GNU Free Documentation License</title>
- <meta http-equiv="Content-Type" content="text/html">
- <meta name="description" content="Installing GCC: GNU Free Documentation License">
--<meta name="generator" content="makeinfo 4.12">
-+<meta name="generator" content="makeinfo 4.13">
- <link title="Top" rel="top" href="#Top">
- <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
- <!--
---- a/src/INSTALL/index.html
-+++ b/src/INSTALL/index.html
-@@ -3,7 +3,7 @@
- <title>Installing GCC</title>
- <meta http-equiv="Content-Type" content="text/html">
- <meta name="description" content="Installing GCC">
--<meta name="generator" content="makeinfo 4.12">
-+<meta name="generator" content="makeinfo 4.13">
- <link title="Top" rel="top" href="#Top">
- <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
- <!--
---- a/src/INSTALL/old.html
-+++ b/src/INSTALL/old.html
-@@ -3,7 +3,7 @@
- <title>Installing GCC: Old documentation</title>
- <meta http-equiv="Content-Type" content="text/html">
- <meta name="description" content="Installing GCC: Old documentation">
--<meta name="generator" content="makeinfo 4.12">
-+<meta name="generator" content="makeinfo 4.13">
- <link title="Top" rel="top" href="#Top">
- <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
- <!--
---- a/src/INSTALL/prerequisites.html
-+++ b/src/INSTALL/prerequisites.html
-@@ -3,7 +3,7 @@
- <title>Prerequisites for GCC</title>
- <meta http-equiv="Content-Type" content="text/html">
- <meta name="description" content="Prerequisites for GCC">
--<meta name="generator" content="makeinfo 4.12">
-+<meta name="generator" content="makeinfo 4.13">
- <link title="Top" rel="top" href="#Top">
- <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
- <!--
---- a/src/INSTALL/specific.html
-+++ b/src/INSTALL/specific.html
-@@ -3,7 +3,7 @@
- <title>Host/Target specific installation notes for GCC</title>
- <meta http-equiv="Content-Type" content="text/html">
- <meta name="description" content="Host/Target specific installation notes for GCC">
--<meta name="generator" content="makeinfo 4.12">
-+<meta name="generator" content="makeinfo 4.13">
- <link title="Top" rel="top" href="#Top">
- <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
- <!--
---- a/src/INSTALL/test.html
-+++ b/src/INSTALL/test.html
-@@ -3,7 +3,7 @@
- <title>Installing GCC: Testing</title>
- <meta http-equiv="Content-Type" content="text/html">
- <meta name="description" content="Installing GCC: Testing">
--<meta name="generator" content="makeinfo 4.12">
-+<meta name="generator" content="makeinfo 4.13">
- <link title="Top" rel="top" href="#Top">
- <link href="http://www.gnu.org/software/texinfo/" rel="generator-home" title="Texinfo Homepage">
- <!--
---- a/src/LAST_UPDATED
-+++ b/src/LAST_UPDATED
-@@ -1 +0,0 @@
--Obtained from SVN: tags/gcc_4_7_3_release revision 197739
--- a/src/libcpp/ChangeLog.aarch64
+++ b/src/libcpp/ChangeLog.aarch64
@@ -0,0 +1,13 @@
@@ -81077,26 +82430,26 @@
#else
/* We only have one accellerated alternative. Use a direct call so that
-Binary files gcc-4.7.3/libcpp/po/be.gmo and gcc-linaro-4.7-2013.08/libcpp/po/be.gmo differ
-Binary files gcc-4.7.3/libcpp/po/ca.gmo and gcc-linaro-4.7-2013.08/libcpp/po/ca.gmo differ
-Binary files gcc-4.7.3/libcpp/po/da.gmo and gcc-linaro-4.7-2013.08/libcpp/po/da.gmo differ
-Binary files gcc-4.7.3/libcpp/po/de.gmo and gcc-linaro-4.7-2013.08/libcpp/po/de.gmo differ
-Binary files gcc-4.7.3/libcpp/po/el.gmo and gcc-linaro-4.7-2013.08/libcpp/po/el.gmo differ
-Binary files gcc-4.7.3/libcpp/po/eo.gmo and gcc-linaro-4.7-2013.08/libcpp/po/eo.gmo differ
-Binary files gcc-4.7.3/libcpp/po/es.gmo and gcc-linaro-4.7-2013.08/libcpp/po/es.gmo differ
-Binary files gcc-4.7.3/libcpp/po/fi.gmo and gcc-linaro-4.7-2013.08/libcpp/po/fi.gmo differ
-Binary files gcc-4.7.3/libcpp/po/fr.gmo and gcc-linaro-4.7-2013.08/libcpp/po/fr.gmo differ
-Binary files gcc-4.7.3/libcpp/po/id.gmo and gcc-linaro-4.7-2013.08/libcpp/po/id.gmo differ
-Binary files gcc-4.7.3/libcpp/po/ja.gmo and gcc-linaro-4.7-2013.08/libcpp/po/ja.gmo differ
-Binary files gcc-4.7.3/libcpp/po/nl.gmo and gcc-linaro-4.7-2013.08/libcpp/po/nl.gmo differ
-Binary files gcc-4.7.3/libcpp/po/ru.gmo and gcc-linaro-4.7-2013.08/libcpp/po/ru.gmo differ
-Binary files gcc-4.7.3/libcpp/po/sr.gmo and gcc-linaro-4.7-2013.08/libcpp/po/sr.gmo differ
-Binary files gcc-4.7.3/libcpp/po/sv.gmo and gcc-linaro-4.7-2013.08/libcpp/po/sv.gmo differ
-Binary files gcc-4.7.3/libcpp/po/tr.gmo and gcc-linaro-4.7-2013.08/libcpp/po/tr.gmo differ
-Binary files gcc-4.7.3/libcpp/po/uk.gmo and gcc-linaro-4.7-2013.08/libcpp/po/uk.gmo differ
-Binary files gcc-4.7.3/libcpp/po/vi.gmo and gcc-linaro-4.7-2013.08/libcpp/po/vi.gmo differ
-Binary files gcc-4.7.3/libcpp/po/zh_CN.gmo and gcc-linaro-4.7-2013.08/libcpp/po/zh_CN.gmo differ
-Binary files gcc-4.7.3/libcpp/po/zh_TW.gmo and gcc-linaro-4.7-2013.08/libcpp/po/zh_TW.gmo differ
+Binary files gcc-4.7.3/libcpp/po/be.gmo and gcc-linaro-4.7-2013.10/libcpp/po/be.gmo differ
+Binary files gcc-4.7.3/libcpp/po/ca.gmo and gcc-linaro-4.7-2013.10/libcpp/po/ca.gmo differ
+Binary files gcc-4.7.3/libcpp/po/da.gmo and gcc-linaro-4.7-2013.10/libcpp/po/da.gmo differ
+Binary files gcc-4.7.3/libcpp/po/de.gmo and gcc-linaro-4.7-2013.10/libcpp/po/de.gmo differ
+Binary files gcc-4.7.3/libcpp/po/el.gmo and gcc-linaro-4.7-2013.10/libcpp/po/el.gmo differ
+Binary files gcc-4.7.3/libcpp/po/eo.gmo and gcc-linaro-4.7-2013.10/libcpp/po/eo.gmo differ
+Binary files gcc-4.7.3/libcpp/po/es.gmo and gcc-linaro-4.7-2013.10/libcpp/po/es.gmo differ
+Binary files gcc-4.7.3/libcpp/po/fi.gmo and gcc-linaro-4.7-2013.10/libcpp/po/fi.gmo differ
+Binary files gcc-4.7.3/libcpp/po/fr.gmo and gcc-linaro-4.7-2013.10/libcpp/po/fr.gmo differ
+Binary files gcc-4.7.3/libcpp/po/id.gmo and gcc-linaro-4.7-2013.10/libcpp/po/id.gmo differ
+Binary files gcc-4.7.3/libcpp/po/ja.gmo and gcc-linaro-4.7-2013.10/libcpp/po/ja.gmo differ
+Binary files gcc-4.7.3/libcpp/po/nl.gmo and gcc-linaro-4.7-2013.10/libcpp/po/nl.gmo differ
+Binary files gcc-4.7.3/libcpp/po/ru.gmo and gcc-linaro-4.7-2013.10/libcpp/po/ru.gmo differ
+Binary files gcc-4.7.3/libcpp/po/sr.gmo and gcc-linaro-4.7-2013.10/libcpp/po/sr.gmo differ
+Binary files gcc-4.7.3/libcpp/po/sv.gmo and gcc-linaro-4.7-2013.10/libcpp/po/sv.gmo differ
+Binary files gcc-4.7.3/libcpp/po/tr.gmo and gcc-linaro-4.7-2013.10/libcpp/po/tr.gmo differ
+Binary files gcc-4.7.3/libcpp/po/uk.gmo and gcc-linaro-4.7-2013.10/libcpp/po/uk.gmo differ
+Binary files gcc-4.7.3/libcpp/po/vi.gmo and gcc-linaro-4.7-2013.10/libcpp/po/vi.gmo differ
+Binary files gcc-4.7.3/libcpp/po/zh_CN.gmo and gcc-linaro-4.7-2013.10/libcpp/po/zh_CN.gmo differ
+Binary files gcc-4.7.3/libcpp/po/zh_TW.gmo and gcc-linaro-4.7-2013.10/libcpp/po/zh_TW.gmo differ
--- a/src/libgcc/ChangeLog
+++ b/src/libgcc/ChangeLog
@@ -1,3 +1,39 @@
@@ -81225,6 +82578,18 @@ Binary files gcc-4.7.3/libcpp/po/zh_TW.gmo and gcc-linaro-4.7-2013.08/libcpp/po/
+
+ * configure.ac: Enable AArch64.
+ * configure: Regenerate.
+--- a/src/libgcc/Makefile.in
++++ b/src/libgcc/Makefile.in
+@@ -121,7 +121,8 @@
+ .PHONY: all clean
+
+ clean:
+- -rm -f config.h libgcc_tm.h stamp-h stmp-ldirs libgcc.map
++ -rm -f config.h libgcc_tm.h libgcc.map
++ -rm -f libgcc_tm.stamp stamp-h stmp-ldirs
+ -rm -f *$(objext)
+ -rm -f *.dep
+ -rm -f *.a
--- a/src/libgcc/config/aarch64/crti.S
+++ b/src/libgcc/config/aarch64/crti.S
@@ -0,0 +1,68 @@
@@ -82333,18 +83698,6 @@ Binary files gcc-4.7.3/libcpp/po/zh_TW.gmo and gcc-linaro-4.7-2013.08/libcpp/po/
#define COUNT_LEADING_ZEROS_0 32
#endif
---- a/src/libgcc/Makefile.in
-+++ b/src/libgcc/Makefile.in
-@@ -121,7 +121,8 @@
- .PHONY: all clean
-
- clean:
-- -rm -f config.h libgcc_tm.h stamp-h stmp-ldirs libgcc.map
-+ -rm -f config.h libgcc_tm.h libgcc.map
-+ -rm -f libgcc_tm.stamp stamp-h stmp-ldirs
- -rm -f *$(objext)
- -rm -f *.dep
- -rm -f *.a
--- a/src/libgcc/soft-fp/op-common.h
+++ b/src/libgcc/soft-fp/op-common.h
@@ -1,5 +1,5 @@
@@ -82859,23 +84212,45 @@ Binary files gcc-4.7.3/libcpp/po/zh_TW.gmo and gcc-linaro-4.7-2013.08/libcpp/po/
alpha*-*-linux*)
config_path="linux/alpha linux posix"
;;
---- a/src/libstdc++-v3/acinclude.m4
-+++ b/src/libstdc++-v3/acinclude.m4
-@@ -1132,6 +1132,11 @@
- dnl --disable-libstdcxx-time
- dnl disables the checks completely
- dnl
-+dnl N.B. Darwin provides nanosleep but doesn't support the whole POSIX
-+dnl Timers option, so doesn't define _POSIX_TIMERS. Because the test
-+dnl below fails Darwin unconditionally defines _GLIBCXX_USE_NANOSLEEP in
-+dnl os_defines.h and also defines _GLIBCXX_USE_SCHED_YIELD.
-+dnl
- AC_DEFUN([GLIBCXX_ENABLE_LIBSTDCXX_TIME], [
-
- AC_MSG_CHECKING([for clock_gettime, nanosleep and sched_yield])
--- a/src/libstdc++-v3/ChangeLog
+++ b/src/libstdc++-v3/ChangeLog
-@@ -1,3 +1,31 @@
+@@ -1,3 +1,67 @@
++2013-09-30 Chris Jefferson <chris@bubblescope.net>
++
++ PR libstdc++/58437
++ * include/bits/stl_algo.h (__move_median_first): Rename to
++ __move_median_to_first, change to take an addition argument.
++ (__unguarded_partition_pivot): Adjust.
++ * testsuite/performance/25_algorithms/sort.cc: New.
++ * testsuite/performance/25_algorithms/sort_heap.cc: Likewise.
++ * testsuite/performance/25_algorithms/stable_sort.cc: Likewise.
++
++2013-09-26 Jonathan Wakely <jwakely.gcc@gmail.com>
++
++ Backport from mainline
++
++ 2013-01-19 Jonathan Wakely <jwakely.gcc@gmail.com>
++
++ PR libstdc++/55861
++ * include/std/future (_State_base::_S_check(const shared_ptr<T>&)):
++ Fix return type.
++
++2013-09-03 Paolo Carlini <paolo.carlini@oracle.com>
++
++ PR libstdc++/58302
++ * include/bits/random.tcc (negative_binomial_distribution<>::
++ operator()(_UniformRandomNumberGenerator&, const param_type&):
++ Fix typo in template argument.
++ * testsuite/26_numerics/random/negative_binomial_distribution/
++ operators/58302.cc: New.
++
++2013-08-17 Uros Bizjak <ubizjak@gmail.com>
++
++ * src/c++98/compatibility.cc (_ZTIe): Use
++ reinterpret_cast<const cast *> to avoid -Wcast-qual warnings.
++ (_ZTIPe): Ditto.
++ (ZTIPKe): Ditto.
++
+2013-05-15 Jonathan Wakely <jwakely.gcc@gmail.com>
+
+ * include/bits/basic_string.h (getline): Fix doxygen comments.
@@ -82914,6 +84289,20 @@ Binary files gcc-4.7.3/libcpp/po/zh_TW.gmo and gcc-linaro-4.7-2013.08/libcpp/po/
+
+ * config/cpu/aarch64/cxxabi_tweaks.h: New file.
+ * configure.host: Enable aarch64.
+--- a/src/libstdc++-v3/acinclude.m4
++++ b/src/libstdc++-v3/acinclude.m4
+@@ -1132,6 +1132,11 @@
+ dnl --disable-libstdcxx-time
+ dnl disables the checks completely
+ dnl
++dnl N.B. Darwin provides nanosleep but doesn't support the whole POSIX
++dnl Timers option, so doesn't define _POSIX_TIMERS. Because the test
++dnl below fails Darwin unconditionally defines _GLIBCXX_USE_NANOSLEEP in
++dnl os_defines.h and also defines _GLIBCXX_USE_SCHED_YIELD.
++dnl
+ AC_DEFUN([GLIBCXX_ENABLE_LIBSTDCXX_TIME], [
+
+ AC_MSG_CHECKING([for clock_gettime, nanosleep and sched_yield])
--- a/src/libstdc++-v3/config/cpu/aarch64/cxxabi_tweaks.h
+++ b/src/libstdc++-v3/config/cpu/aarch64/cxxabi_tweaks.h
@@ -0,0 +1,60 @@
@@ -83031,6 +84420,130 @@ Binary files gcc-4.7.3/libcpp/po/zh_TW.gmo and gcc-linaro-4.7-2013.08/libcpp/po/
*/
template<typename _CharT, typename _Traits, typename _Alloc>
inline basic_istream<_CharT, _Traits>&
+--- a/src/libstdc++-v3/include/bits/random.tcc
++++ b/src/libstdc++-v3/include/bits/random.tcc
+@@ -1125,7 +1125,7 @@
+ operator()(_UniformRandomNumberGenerator& __urng,
+ const param_type& __p)
+ {
+- typedef typename std::gamma_distribution<result_type>::param_type
++ typedef typename std::gamma_distribution<double>::param_type
+ param_type;
+
+ const double __y =
+--- a/src/libstdc++-v3/include/bits/stl_algo.h
++++ b/src/libstdc++-v3/include/bits/stl_algo.h
+@@ -74,10 +74,11 @@
+ {
+ _GLIBCXX_BEGIN_NAMESPACE_VERSION
+
+- /// Swaps the median value of *__a, *__b and *__c to *__a
++ /// Swaps the median value of *__a, *__b and *__c to *__result
+ template<typename _Iterator>
+ void
+- __move_median_first(_Iterator __a, _Iterator __b, _Iterator __c)
++ __move_median_to_first(_Iterator __result, _Iterator __a,
++ _Iterator __b, _Iterator __c)
+ {
+ // concept requirements
+ __glibcxx_function_requires(_LessThanComparableConcept<
+@@ -86,23 +87,26 @@
+ if (*__a < *__b)
+ {
+ if (*__b < *__c)
+- std::iter_swap(__a, __b);
++ std::iter_swap(__result, __b);
+ else if (*__a < *__c)
+- std::iter_swap(__a, __c);
++ std::iter_swap(__result, __c);
++ else
++ std::iter_swap(__result, __a);
+ }
+ else if (*__a < *__c)
+- return;
++ std::iter_swap(__result, __a);
+ else if (*__b < *__c)
+- std::iter_swap(__a, __c);
++ std::iter_swap(__result, __c);
+ else
+- std::iter_swap(__a, __b);
++ std::iter_swap(__result, __b);
+ }
+
+- /// Swaps the median value of *__a, *__b and *__c under __comp to *__a
++ /// Swaps the median value of *__a, *__b and *__c under __comp to *__result
+ template<typename _Iterator, typename _Compare>
+ void
+- __move_median_first(_Iterator __a, _Iterator __b, _Iterator __c,
+- _Compare __comp)
++ __move_median_to_first(_Iterator __result, _Iterator __a,
++ _Iterator __b, _Iterator __c,
++ _Compare __comp)
+ {
+ // concept requirements
+ __glibcxx_function_requires(_BinaryFunctionConcept<_Compare, bool,
+@@ -112,16 +116,18 @@
+ if (__comp(*__a, *__b))
+ {
+ if (__comp(*__b, *__c))
+- std::iter_swap(__a, __b);
++ std::iter_swap(__result, __b);
+ else if (__comp(*__a, *__c))
+- std::iter_swap(__a, __c);
++ std::iter_swap(__result, __c);
++ else
++ std::iter_swap(__result, __a);
+ }
+ else if (__comp(*__a, *__c))
+- return;
++ std::iter_swap(__result, __a);
+ else if (__comp(*__b, *__c))
+- std::iter_swap(__a, __c);
++ std::iter_swap(__result, __c);
+ else
+- std::iter_swap(__a, __b);
++ std::iter_swap(__result, __b);
+ }
+
+ // for_each
+@@ -2305,7 +2311,7 @@
+ _RandomAccessIterator __last)
+ {
+ _RandomAccessIterator __mid = __first + (__last - __first) / 2;
+- std::__move_median_first(__first, __mid, (__last - 1));
++ std::__move_median_to_first(__first, __first + 1, __mid, (__last - 2));
+ return std::__unguarded_partition(__first + 1, __last, *__first);
+ }
+
+@@ -2317,7 +2323,8 @@
+ _RandomAccessIterator __last, _Compare __comp)
+ {
+ _RandomAccessIterator __mid = __first + (__last - __first) / 2;
+- std::__move_median_first(__first, __mid, (__last - 1), __comp);
++ std::__move_median_to_first(__first, __first + 1, __mid, (__last - 2),
++ __comp);
+ return std::__unguarded_partition(__first + 1, __last, *__first, __comp);
+ }
+
+--- a/src/libstdc++-v3/include/std/future
++++ b/src/libstdc++-v3/include/std/future
+@@ -1,6 +1,6 @@
+ // <future> -*- C++ -*-
+
+-// Copyright (C) 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
++// Copyright (C) 2009, 2010, 2011, 2012, 2013 Free Software Foundation, Inc.
+ //
+ // This file is part of the GNU ISO C++ Library. This library is free
+ // software; you can redistribute it and/or modify it under the
+@@ -456,7 +456,7 @@
+ __setter(promise<void>* __prom);
+
+ template<typename _Tp>
+- static bool
++ static void
+ _S_check(const shared_ptr<_Tp>& __p)
+ {
+ if (!static_cast<bool>(__p))
--- a/src/libstdc++-v3/src/c++11/future.cc
+++ b/src/libstdc++-v3/src/c++11/future.cc
@@ -60,7 +60,7 @@
@@ -83055,6 +84568,73 @@ Binary files gcc-4.7.3/libcpp/po/zh_TW.gmo and gcc-linaro-4.7-2013.08/libcpp/po/
}
namespace std _GLIBCXX_VISIBILITY(default)
+--- a/src/libstdc++-v3/src/c++98/compatibility.cc
++++ b/src/libstdc++-v3/src/c++98/compatibility.cc
+@@ -518,14 +518,21 @@
+ extern __attribute__((used, weak)) const char _ZTSPe[3] = "Pe";
+ extern __attribute__((used, weak)) const char _ZTSPKe[4] = "PKe";
+ extern __attribute__((used, weak)) const void * const _ZTIe[2]
+- = { (void *) &_ZTVN10__cxxabiv123__fundamental_type_infoE[2],
+- (void *) _ZTSe };
++ = { reinterpret_cast<const void *>
++ (&_ZTVN10__cxxabiv123__fundamental_type_infoE[2]),
++ reinterpret_cast<const void *>(_ZTSe) };
+ extern __attribute__((used, weak)) const void * const _ZTIPe[4]
+- = { (void *) &_ZTVN10__cxxabiv119__pointer_type_infoE[2],
+- (void *) _ZTSPe, (void *) 0L, (void *) _ZTIe };
++ = { reinterpret_cast<const void *>
++ (&_ZTVN10__cxxabiv119__pointer_type_infoE[2]),
++ reinterpret_cast<const void *>(_ZTSPe),
++ reinterpret_cast<const void *>(0L),
++ reinterpret_cast<const void *>(_ZTIe) };
+ extern __attribute__((used, weak)) const void * const _ZTIPKe[4]
+- = { (void *) &_ZTVN10__cxxabiv119__pointer_type_infoE[2],
+- (void *) _ZTSPKe, (void *) 1L, (void *) _ZTIe };
++ = { reinterpret_cast<const void *>
++ (&_ZTVN10__cxxabiv119__pointer_type_infoE[2]),
++ reinterpret_cast<const void *>(_ZTSPKe),
++ reinterpret_cast<const void *>(1L),
++ reinterpret_cast<const void *>(_ZTIe) };
+ #endif // _GLIBCXX_LONG_DOUBLE_COMPAT
+
+ #ifdef _GLIBCXX_SYMVER_DARWIN
+--- a/src/libstdc++-v3/testsuite/26_numerics/random/negative_binomial_distribution/operators/58302.cc
++++ b/src/libstdc++-v3/testsuite/26_numerics/random/negative_binomial_distribution/operators/58302.cc
+@@ -0,0 +1,34 @@
++// { dg-do compile }
++// { dg-options "-std=gnu++11" }
++// { dg-require-cstdint "" }
++//
++// Copyright (C) 2013 Free Software Foundation, Inc.
++//
++// This file is part of the GNU ISO C++ Library. This library is free
++// software; you can redistribute it and/or modify it under the
++// terms of the GNU General Public License as published by the
++// Free Software Foundation; either version 3, or (at your option)
++// any later version.
++//
++// This library is distributed in the hope that it will be useful,
++// but WITHOUT ANY WARRANTY; without even the implied warranty of
++// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU General Public License along
++// with this library; see the file COPYING3. If not see
++// <http://www.gnu.org/licenses/>.
++
++#include <random>
++
++void test01()
++{
++ typedef std::negative_binomial_distribution<> dist_type;
++
++ std::default_random_engine engine;
++
++ dist_type dist;
++ dist_type::param_type param(3, 0.5);
++
++ dist(engine, param); // compile error!
++}
--- a/src/libstdc++-v3/testsuite/30_threads/condition_variable/members/53841.cc
+++ b/src/libstdc++-v3/testsuite/30_threads/condition_variable/members/53841.cc
@@ -1,5 +1,5 @@
@@ -83064,3 +84644,215 @@ Binary files gcc-4.7.3/libcpp/po/zh_TW.gmo and gcc-linaro-4.7-2013.08/libcpp/po/
// { dg-options " -std=gnu++0x -pthreads" { target *-*-solaris* } }
// { dg-options " -std=gnu++0x " { target *-*-cygwin *-*-darwin* } }
// { dg-require-cstdint "" }
+--- a/src/libstdc++-v3/testsuite/performance/25_algorithms/sort.cc
++++ b/src/libstdc++-v3/testsuite/performance/25_algorithms/sort.cc
+@@ -0,0 +1,65 @@
++// Copyright (C) 2013 Free Software Foundation, Inc.
++//
++// This file is part of the GNU ISO C++ Library. This library is free
++// software; you can redistribute it and/or modify it under the
++// terms of the GNU General Public License as published by the
++// Free Software Foundation; either version 3, or (at your option)
++// any later version.
++
++// This library is distributed in the hope that it will be useful,
++// but WITHOUT ANY WARRANTY; without even the implied warranty of
++// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++// GNU General Public License for more details.
++
++// You should have received a copy of the GNU General Public License along
++// with this library; see the file COPYING3. If not see
++// <http://www.gnu.org/licenses/>.
++
++#include <vector>
++#include <algorithm>
++#include <testsuite_performance.h>
++
++int main()
++{
++ using namespace __gnu_test;
++
++ time_counter time;
++ resource_counter resource;
++
++ const int max_size = 10000000;
++
++ std::vector<int> v(max_size);
++
++ for (int i = 0; i < max_size; ++i)
++ v[i] = -i;
++
++ start_counters(time, resource);
++ std::sort(v.begin(), v.end());
++ stop_counters(time, resource);
++
++ report_performance(__FILE__, "reverse", time, resource);
++ clear_counters(time, resource);
++
++ for (int i = 0; i < max_size; ++i)
++ v[i] = i;
++
++ start_counters(time, resource);
++ std::sort(v.begin(), v.end());
++ stop_counters(time, resource);
++
++ report_performance(__FILE__, "forwards", time, resource);
++ clear_counters(time, resource);
++
++ // a simple psuedo-random series which does not rely on rand() and friends
++ v[0] = 0;
++ for (int i = 1; i < max_size; ++i)
++ v[i] = (v[i-1] + 110211473) * 745988807;
++
++ start_counters(time, resource);
++ std::sort(v.begin(), v.end());
++ stop_counters(time, resource);
++
++ report_performance(__FILE__, "random", time, resource);
++
++ return 0;
++}
+--- a/src/libstdc++-v3/testsuite/performance/25_algorithms/sort_heap.cc
++++ b/src/libstdc++-v3/testsuite/performance/25_algorithms/sort_heap.cc
+@@ -0,0 +1,73 @@
++// Copyright (C) 2013 Free Software Foundation, Inc.
++//
++// This file is part of the GNU ISO C++ Library. This library is free
++// software; you can redistribute it and/or modify it under the
++// terms of the GNU General Public License as published by the
++// Free Software Foundation; either version 3, or (at your option)
++// any later version.
++
++// This library is distributed in the hope that it will be useful,
++// but WITHOUT ANY WARRANTY; without even the implied warranty of
++// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++// GNU General Public License for more details.
++
++// You should have received a copy of the GNU General Public License along
++// with this library; see the file COPYING3. If not see
++// <http://www.gnu.org/licenses/>.
++
++#include <vector>
++#include <algorithm>
++#include <testsuite_performance.h>
++
++int main()
++{
++ using namespace __gnu_test;
++
++ time_counter time;
++ resource_counter resource;
++
++ const int max_size = 10000000;
++
++ std::vector<int> v(max_size);
++
++ for (int i = 0; i < max_size; ++i)
++ v[i] = -i;
++
++ start_counters(time, resource);
++ std::make_heap(v.begin(), v.end());
++ stop_counters(time, resource);
++
++ report_performance(__FILE__, "make_heap_reverse", time, resource);
++ clear_counters(time, resource);
++
++ for (int i = 0; i < max_size; ++i)
++ v[i] = i;
++
++ start_counters(time, resource);
++ std::make_heap(v.begin(), v.end());
++ stop_counters(time, resource);
++
++ report_performance(__FILE__, "make_heap_forwards", time, resource);
++ clear_counters(time, resource);
++
++ // a simple psuedo-random series which does not rely on rand() and friends
++ v[0] = 0;
++ for (int i = 1; i < max_size; ++i)
++ v[i] = (v[i-1] + 110211473) * 745988807;
++
++ start_counters(time, resource);
++ std::make_heap(v.begin(), v.end());
++ stop_counters(time, resource);
++
++ report_performance(__FILE__, "make_heap_random", time, resource);
++
++
++ start_counters(time, resource);
++ std::sort_heap(v.begin(), v.end());
++ stop_counters(time, resource);
++
++ report_performance(__FILE__, "sort_heap", time, resource);
++ clear_counters(time, resource);
++
++ return 0;
++}
+--- a/src/libstdc++-v3/testsuite/performance/25_algorithms/stable_sort.cc
++++ b/src/libstdc++-v3/testsuite/performance/25_algorithms/stable_sort.cc
+@@ -0,0 +1,65 @@
++// Copyright (C) 2013 Free Software Foundation, Inc.
++//
++// This file is part of the GNU ISO C++ Library. This library is free
++// software; you can redistribute it and/or modify it under the
++// terms of the GNU General Public License as published by the
++// Free Software Foundation; either version 3, or (at your option)
++// any later version.
++
++// This library is distributed in the hope that it will be useful,
++// but WITHOUT ANY WARRANTY; without even the implied warranty of
++// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++// GNU General Public License for more details.
++
++// You should have received a copy of the GNU General Public License along
++// with this library; see the file COPYING3. If not see
++// <http://www.gnu.org/licenses/>.
++
++#include <vector>
++#include <algorithm>
++#include <testsuite_performance.h>
++
++int main()
++{
++ using namespace __gnu_test;
++
++ time_counter time;
++ resource_counter resource;
++
++ const int max_size = 10000000;
++
++ std::vector<int> v(max_size);
++
++ for (int i = 0; i < max_size; ++i)
++ v[i] = -i;
++
++ start_counters(time, resource);
++ std::stable_sort(v.begin(), v.end());
++ stop_counters(time, resource);
++
++ report_performance(__FILE__, "reverse", time, resource);
++ clear_counters(time, resource);
++
++ for (int i = 0; i < max_size; ++i)
++ v[i] = i;
++
++ start_counters(time, resource);
++ std::stable_sort(v.begin(), v.end());
++ stop_counters(time, resource);
++
++ report_performance(__FILE__, "forwards", time, resource);
++ clear_counters(time, resource);
++
++ // a simple psuedo-random series which does not rely on rand() and friends
++ v[0] = 0;
++ for (int i = 1; i < max_size; ++i)
++ v[i] = (v[i-1] + 110211473) * 745988807;
++
++ start_counters(time, resource);
++ std::stable_sort(v.begin(), v.end());
++ stop_counters(time, resource);
++
++ report_performance(__FILE__, "random", time, resource);
++
++ return 0;
++}
diff --git a/debian/patches/svn-updates-linaro.diff b/debian/patches/svn-updates-linaro.diff
index 741d683..eb76444 100644
--- a/debian/patches/svn-updates-linaro.diff
+++ b/debian/patches/svn-updates-linaro.diff
@@ -1,463 +1,249 @@
-# DP: updates from the 4.7 branch upto 20130821 (r201895).
+# DP: updates from the 4.7 branch upto 20131020 (r203880).
last_updated()
{
cat > ${dir}LAST_UPDATED <<EOF
-Wed Aug 21 11:15:52 CEST 2013
-Wed Aug 21 09:15:52 UTC 2013 (revision 201895)
+Sun Oct 20 22:56:37 CEST 2013
+Sun Oct 20 20:56:37 UTC 2013 (revision 203880)
EOF
}
-LANG=C svn diff svn://gcc.gnu.org/svn/gcc/branches/gcc-4_7-branch@201655 svn://gcc.gnu.org/svn/gcc/branches/gcc-4_7-branch \
+LANG=C svn diff svn://gcc.gnu.org/svn/gcc/branches/gcc-4_7-branch@203509 svn://gcc.gnu.org/svn/gcc/branches/gcc-4_7-branch \
| sed -r 's,^--- (\S+)\t(\S+)(.*)$,--- a/src/\1\t\2,;s,^\+\+\+ (\S+)\t(\S+)(.*)$,+++ b/src/\1\t\2,' \
| awk '/^Index:.*\.(class|texi)/ {skip=1; next} /^Index:/ { skip=0 } skip==0'
-Index: libstdc++-v3/src/c++98/compatibility.cc
-===================================================================
---- a/src/libstdc++-v3/src/c++98/compatibility.cc (revision
-+++ b/src/libstdc++-v3/src/c++98/compatibility.cc (revision
-@@ -518,14 +518,21 @@
- extern __attribute__((used, weak)) const char _ZTSPe[3] = "Pe";
- extern __attribute__((used, weak)) const char _ZTSPKe[4] = "PKe";
- extern __attribute__((used, weak)) const void * const _ZTIe[2]
-- = { (void *) &_ZTVN10__cxxabiv123__fundamental_type_infoE[2],
-- (void *) _ZTSe };
-+ = { reinterpret_cast<const void *>
-+ (&_ZTVN10__cxxabiv123__fundamental_type_infoE[2]),
-+ reinterpret_cast<const void *>(_ZTSe) };
- extern __attribute__((used, weak)) const void * const _ZTIPe[4]
-- = { (void *) &_ZTVN10__cxxabiv119__pointer_type_infoE[2],
-- (void *) _ZTSPe, (void *) 0L, (void *) _ZTIe };
-+ = { reinterpret_cast<const void *>
-+ (&_ZTVN10__cxxabiv119__pointer_type_infoE[2]),
-+ reinterpret_cast<const void *>(_ZTSPe),
-+ reinterpret_cast<const void *>(0L),
-+ reinterpret_cast<const void *>(_ZTIe) };
- extern __attribute__((used, weak)) const void * const _ZTIPKe[4]
-- = { (void *) &_ZTVN10__cxxabiv119__pointer_type_infoE[2],
-- (void *) _ZTSPKe, (void *) 1L, (void *) _ZTIe };
-+ = { reinterpret_cast<const void *>
-+ (&_ZTVN10__cxxabiv119__pointer_type_infoE[2]),
-+ reinterpret_cast<const void *>(_ZTSPKe),
-+ reinterpret_cast<const void *>(1L),
-+ reinterpret_cast<const void *>(_ZTIe) };
- #endif // _GLIBCXX_LONG_DOUBLE_COMPAT
-
- #ifdef _GLIBCXX_SYMVER_DARWIN
+Index: libstdc++-v3/include/bits/stl_algo.h
+===================================================================
+--- a/src/libstdc++-v3/include/bits/stl_algo.h (revision
++++ b/src/libstdc++-v3/include/bits/stl_algo.h (revision
+@@ -2311,7 +2311,7 @@
+ _RandomAccessIterator __last)
+ {
+ _RandomAccessIterator __mid = __first + (__last - __first) / 2;
+- std::__move_median_to_first(__first, __first + 1, __mid, (__last - 2));
++ std::__move_median_to_first(__first, __first + 1, __mid, __last - 1);
+ return std::__unguarded_partition(__first + 1, __last, *__first);
+ }
+
+@@ -2323,7 +2323,7 @@
+ _RandomAccessIterator __last, _Compare __comp)
+ {
+ _RandomAccessIterator __mid = __first + (__last - __first) / 2;
+- std::__move_median_to_first(__first, __first + 1, __mid, (__last - 2),
++ std::__move_median_to_first(__first, __first + 1, __mid, __last - 1,
+ __comp);
+ return std::__unguarded_partition(__first + 1, __last, *__first, __comp);
+ }
Index: libstdc++-v3/ChangeLog
===================================================================
--- a/src/libstdc++-v3/ChangeLog (revision
+++ b/src/libstdc++-v3/ChangeLog (revision
-@@ -1,3 +1,10 @@
-+2013-08-17 Uros Bizjak <ubizjak@gmail.com>
+@@ -1,3 +1,11 @@
++2013-10-20 Chris Jefferson <chris@bubblescope.net>
++ Paolo Carlini <paolo.carlini@oracle.com>
+
-+ * src/c++98/compatibility.cc (_ZTIe): Use
-+ reinterpret_cast<const cast *> to avoid -Wcast-qual warnings.
-+ (_ZTIPe): Ditto.
-+ (ZTIPKe): Ditto.
++ PR libstdc++/58800
++ * include/bits/stl_algo.h (__unguarded_partition_pivot): Change
++ __last - 2 to __last - 1.
++ * testsuite/25_algorithms/nth_element/58800.cc: New
+
- 2013-05-15 Jonathan Wakely <jwakely.gcc@gmail.com>
-
- * include/bits/basic_string.h (getline): Fix doxygen comments.
-Index: gcc/DATESTAMP
-===================================================================
---- a/src/gcc/DATESTAMP (revision
-+++ b/src/gcc/DATESTAMP (revision
-@@ -1 +1 @@
--20130812
-+20130821
-Index: gcc/tree-tailcall.c
-===================================================================
---- a/src/gcc/tree-tailcall.c (revision
-+++ b/src/gcc/tree-tailcall.c (revision
-@@ -329,8 +329,10 @@
- case NEGATE_EXPR:
- if (FLOAT_TYPE_P (TREE_TYPE (op0)))
- *m = build_real (TREE_TYPE (op0), dconstm1);
-+ else if (INTEGRAL_TYPE_P (TREE_TYPE (op0)))
-+ *m = build_int_cst (TREE_TYPE (op0), -1);
- else
-- *m = build_int_cst (TREE_TYPE (op0), -1);
-+ return false;
-
- *ass_var = dest;
- return true;
-@@ -342,8 +344,10 @@
- {
- if (FLOAT_TYPE_P (TREE_TYPE (non_ass_var)))
- *m = build_real (TREE_TYPE (non_ass_var), dconstm1);
-- else
-+ else if (INTEGRAL_TYPE_P (TREE_TYPE (non_ass_var)))
- *m = build_int_cst (TREE_TYPE (non_ass_var), -1);
-+ else
-+ return false;
+ 2013-09-30 Chris Jefferson <chris@bubblescope.net>
- *a = fold_build1 (NEGATE_EXPR, TREE_TYPE (non_ass_var), non_ass_var);
- }
-Index: gcc/ChangeLog
+ PR libstdc++/58437
+Index: libstdc++-v3/testsuite/25_algorithms/nth_element/58800.cc
===================================================================
---- a/src/gcc/ChangeLog (revision
-+++ b/src/gcc/ChangeLog (revision
-@@ -1,3 +1,30 @@
-+2013-08-17 Uros Bizjak <ubizjak@gmail.com>
-+
-+ Backport from mainline
-+ 2013-08-12 Perez Read <netfirewall@gmail.com>
+--- a/src/libstdc++-v3/testsuite/25_algorithms/nth_element/58800.cc (revision
++++ b/src/libstdc++-v3/testsuite/25_algorithms/nth_element/58800.cc (revision
+@@ -0,0 +1,52 @@
++// Copyright (C) 2013 Free Software Foundation, Inc.
++//
++// This file is part of the GNU ISO C++ Library. This library is free
++// software; you can redistribute it and/or modify it under the
++// terms of the GNU General Public License as published by the
++// Free Software Foundation; either version 3, or (at your option)
++// any later version.
+
-+ PR target/58132
-+ * config/i386/i386.md (*movabs<mode>_1): Add <ptrsize> PTR before
-+ operand 0 for intel asm alternative.
-+ (*movabs<mode>_2): Ditto for operand 1.
++// This library is distributed in the hope that it will be useful,
++// but WITHOUT ANY WARRANTY; without even the implied warranty of
++// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++// GNU General Public License for more details.
+
-+2013-08-13 Marek Polacek <polacek@redhat.com>
++// You should have received a copy of the GNU General Public License along
++// with this library; see the file COPYING3. If not see
++// <http://www.gnu.org/licenses/>.
+
-+ Backport from 4.8:
-+ 2013-0813 Marek Polacek <polacek@redhat.com>
-+ Jakub Jelinek <jakub@redhat.com>
++// 25.3.2 [lib.alg.nth.element]
+
-+ PR tree-optimization/57980
-+ * tree-tailcall.c (process_assignment): Return false
-+ when not dealing with integers or floats.
++// { dg-options "-std=gnu++11" }
+
-+2013-08-12 David Edelsohn <dje.gcc@gmail.com>
++#include <algorithm>
++#include <testsuite_hooks.h>
++#include <testsuite_iterators.h>
+
-+ Backport from mainline
-+ 2013-02-14 Steven Bosscher <steven@gcc.gnu.org>
++using __gnu_test::test_container;
++using __gnu_test::random_access_iterator_wrapper;
+
-+ * collect2-aix.h: Define F_LOADONLY.
-+
- 2013-08-02 Eric Botcazou <ebotcazou@adacore.com>
-
- * config/sparc/sparc.c (sparc_emit_membar_for_model) <SMM_TSO>: Add
-Index: gcc/testsuite/gnat.dg/loop_optimization16_pkg.adb
-===================================================================
---- a/src/gcc/testsuite/gnat.dg/loop_optimization16_pkg.adb (revision
-+++ b/src/gcc/testsuite/gnat.dg/loop_optimization16_pkg.adb (revision
-@@ -0,0 +1,8 @@
-+package body Loop_Optimization16_Pkg is
-+
-+ function F return Natural is
-+ begin
-+ return Natural (Index_Base'Last);
-+ end;
-+
-+end Loop_Optimization16_Pkg;
-Index: gcc/testsuite/gnat.dg/loop_optimization16_pkg.ads
-===================================================================
---- a/src/gcc/testsuite/gnat.dg/loop_optimization16_pkg.ads (revision
-+++ b/src/gcc/testsuite/gnat.dg/loop_optimization16_pkg.ads (revision
-@@ -0,0 +1,7 @@
-+package Loop_Optimization16_Pkg is
-+
-+ type Index_Base is range 0 .. 127;
-+
-+ function F return Natural;
-+
-+end Loop_Optimization16_Pkg;
-Index: gcc/testsuite/gnat.dg/loop_optimization16.adb
-===================================================================
---- a/src/gcc/testsuite/gnat.dg/loop_optimization16.adb (revision
-+++ b/src/gcc/testsuite/gnat.dg/loop_optimization16.adb (revision
-@@ -0,0 +1,24 @@
-+-- { dg-do run }
++typedef test_container<int, random_access_iterator_wrapper> Container;
+
-+with Loop_Optimization16_Pkg; use Loop_Optimization16_Pkg;
-+
-+procedure Loop_Optimization16 is
-+
-+ Counter : Natural := 0;
-+
-+ C : constant Natural := F;
-+
-+ subtype Index_T is Index_Base range 1 .. Index_Base (C);
-+
-+begin
-+
-+ for I in Index_T'First .. Index_T'Last loop
-+ Counter := Counter + 1;
-+ exit when Counter > 200;
-+ end loop;
-+
-+ if Counter > 200 then
-+ raise Program_Error;
-+ end if;
-+
-+end Loop_Optimization16;
-Index: gcc/testsuite/gcc.dg/pr57980.c
-===================================================================
---- a/src/gcc/testsuite/gcc.dg/pr57980.c (revision
-+++ b/src/gcc/testsuite/gcc.dg/pr57980.c (revision
-@@ -0,0 +1,19 @@
-+/* PR tree-optimization/57980 */
-+/* { dg-do compile } */
-+/* { dg-options "-O -foptimize-sibling-calls -w" } */
-+
-+typedef int V __attribute__ ((vector_size (2 * sizeof (int))));
-+extern V f (void);
-+
-+V
-+bar (void)
++void test01()
+{
-+ return -f ();
++ std::vector<int> v = {
++ 207089,
++ 202585,
++ 180067,
++ 157549,
++ 211592,
++ 216096,
++ 207089
++ };
++
++ Container con(v.data(), v.data() + 7);
++
++ std::nth_element(con.begin(), con.begin() + 3, con.end());
+}
+
-+V
-+foo (void)
++int main()
+{
-+ V v = { };
-+ return v - f ();
++ test01();
++ return 0;
+}
+Index: gcc/DATESTAMP
+===================================================================
+--- a/src/gcc/DATESTAMP (revision
++++ b/src/gcc/DATESTAMP (revision
+@@ -1 +1 @@
+-20131014
++20131020
Index: gcc/testsuite/ChangeLog
===================================================================
--- a/src/gcc/testsuite/ChangeLog (revision
+++ b/src/gcc/testsuite/ChangeLog (revision
-@@ -1,3 +1,16 @@
-+2013-08-13 Eric Botcazou <ebotcazou@adacore.com>
-+
-+ * gnat.dg/loop_optimization16.adb: New test.
-+ * gnat.dg/loop_optimization16_pkg.ad[sb]: New helper.
-+
-+2013-08-13 Marek Polacek <polacek@redhat.com>
-+
-+ Backport from 4.8:
-+ 2013-08-13 Marek Polacek <polacek@redhat.com>
-+
-+ PR tree-optimization/57980
-+ * gcc.dg/pr57980.c: New test.
-+
- 2013-08-11 Janus Weil <janus@gcc.gnu.org>
-
- Backport from trunk:
-Index: gcc/testsuite/g++.dg/template/delete2.C
+@@ -1,3 +1,9 @@
++2013-10-16 Paolo Carlini <paolo.carlini@oracle.com>
++
++ PR c++/58633
++ * g++.dg/cpp0x/decltype57.C: New.
++ * g++.dg/cpp0x/enum18.C: Revert r174385 changes.
++
+ 2013-09-23 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/opt28.ad[sb]: New test.
+Index: gcc/testsuite/g++.dg/cpp0x/enum18.C
+===================================================================
+--- a/src/gcc/testsuite/g++.dg/cpp0x/enum18.C (revision
++++ b/src/gcc/testsuite/g++.dg/cpp0x/enum18.C (revision
+@@ -4,5 +4,5 @@
+ int main(void) {
+ enum e {};
+ e ev;
+- ev.e::~e_u(); // { dg-error "e_u. has not been declared" }
++ ev.e::~e_u(); // { dg-error "" }
+ }
+Index: gcc/testsuite/g++.dg/cpp0x/decltype57.C
===================================================================
---- a/src/gcc/testsuite/g++.dg/template/delete2.C (revision
-+++ b/src/gcc/testsuite/g++.dg/template/delete2.C (revision
-@@ -0,0 +1,26 @@
-+// PR c++/58119
-+
-+template <class T>
-+struct A
-+{
-+ operator T*();
-+ template <class U>
-+ operator A<U>();
-+};
-+
-+template <class T>
-+struct B
-+{
-+ operator T*();
-+ template <class U>
-+ operator A<U>*();
-+};
+--- a/src/gcc/testsuite/g++.dg/cpp0x/decltype57.C (revision
++++ b/src/gcc/testsuite/g++.dg/cpp0x/decltype57.C (revision
+@@ -0,0 +1,8 @@
++// PR c++/58633
++// { dg-do compile { target c++11 } }
+
-+int main()
++void foo(int i)
+{
-+ A<int> a;
-+ delete a;
-+
-+ B<int> b;
-+ delete b; // { dg-error "template|delete" }
++ typedef int I;
++ decltype(i.I::~I())* p;
+}
Index: gcc/cp/ChangeLog
===================================================================
--- a/src/gcc/cp/ChangeLog (revision
+++ b/src/gcc/cp/ChangeLog (revision
-@@ -1,3 +1,11 @@
-+2013-08-20 Jason Merrill <jason@redhat.com>
+@@ -1,3 +1,8 @@
++2013-10-16 Paolo Carlini <paolo.carlini@oracle.com>
+
-+ PR c++/58119
-+ * cp-tree.h (WILDCARD_TYPE_P): Split out from...
-+ (MAYBE_CLASS_TYPE_P): ...here.
-+ * cvt.c (build_expr_type_conversion): Don't complain about a
-+ template that can't match the desired type category.
++ PR c++/58633
++ * parser.c (cp_parser_pseudo_destructor_name): Revert r174385 changes.
+
- 2012-12-03 Paolo Carlini <paolo.carlini@oracle.com>
+ 2013-09-13 Jason Merrill <jason@redhat.com>
- PR c++/54170
-Index: gcc/cp/cvt.c
+ PR c++/58273
+Index: gcc/cp/parser.c
===================================================================
---- a/src/gcc/cp/cvt.c (revision
-+++ b/src/gcc/cp/cvt.c (revision
-@@ -1539,17 +1539,6 @@
- if (DECL_NONCONVERTING_P (cand))
- continue;
+--- a/src/gcc/cp/parser.c (revision
++++ b/src/gcc/cp/parser.c (revision
+@@ -6317,10 +6317,6 @@
+ /* Look for the `~'. */
+ cp_parser_require (parser, CPP_COMPL, RT_COMPL);
-- if (TREE_CODE (cand) == TEMPLATE_DECL)
-- {
-- if (complain)
-- {
-- error ("ambiguous default type conversion from %qT",
-- basetype);
-- error (" candidate conversions include %qD", cand);
-- }
-- return error_mark_node;
-- }
+- /* Once we see the ~, this has to be a pseudo-destructor. */
+- if (!processing_template_decl && !cp_parser_error_occurred (parser))
+- cp_parser_commit_to_tentative_parse (parser);
-
- candidate = non_reference (TREE_TYPE (TREE_TYPE (cand)));
-
- switch (TREE_CODE (candidate))
-@@ -1583,11 +1572,23 @@
- break;
-
- default:
-+ /* A wildcard could be instantiated to match any desired
-+ type, but we can't deduce the template argument. */
-+ if (WILDCARD_TYPE_P (candidate))
-+ win = true;
- break;
- }
-
- if (win)
- {
-+ if (TREE_CODE (cand) == TEMPLATE_DECL)
-+ {
-+ if (complain)
-+ error ("default type conversion can't deduce template"
-+ " argument for %qD", cand);
-+ return error_mark_node;
-+ }
-+
- if (winner)
- {
- if (complain)
-Index: gcc/cp/cp-tree.h
-===================================================================
---- a/src/gcc/cp/cp-tree.h (revision
-+++ b/src/gcc/cp/cp-tree.h (revision
-@@ -1191,18 +1191,21 @@
- /* The _DECL for this _TYPE. */
- #define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
-
--/* Nonzero if T is a class (or struct or union) type. Also nonzero
-- for template type parameters, typename types, and instantiated
-- template template parameters. Keep these checks in ascending code
-- order. */
--#define MAYBE_CLASS_TYPE_P(T) \
-+/* Nonzero if T is a type that could resolve to any kind of concrete type
-+ at instantiation time. */
-+#define WILDCARD_TYPE_P(T) \
- (TREE_CODE (T) == TEMPLATE_TYPE_PARM \
- || TREE_CODE (T) == TYPENAME_TYPE \
- || TREE_CODE (T) == TYPEOF_TYPE \
- || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
-- || TREE_CODE (T) == DECLTYPE_TYPE \
-- || CLASS_TYPE_P (T))
-+ || TREE_CODE (T) == DECLTYPE_TYPE)
-
-+/* Nonzero if T is a class (or struct or union) type. Also nonzero
-+ for template type parameters, typename types, and instantiated
-+ template template parameters. Keep these checks in ascending code
-+ order. */
-+#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
-+
- /* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or
- union type. */
- #define SET_CLASS_TYPE_P(T, VAL) \
+ /* Look for the type-name again. We are not responsible for
+ checking that it matches the first type-name. */
+ *type = cp_parser_nonclass_name (parser);
Index: gcc/ada/ChangeLog
===================================================================
--- a/src/gcc/ada/ChangeLog (revision
+++ b/src/gcc/ada/ChangeLog (revision
@@ -1,3 +1,8 @@
-+2013-08-13 Eric Botcazou <ebotcazou@adacore.com>
++2013-10-19 Eric Botcazou <ebotcazou@adacore.com>
+
-+ * gcc-interface/trans.c (can_equal_min_or_max_val_p): Be prepared for
-+ values outside of the range of the type.
++ * gcc-interface/utils.c (gnat_set_type_context): New function.
++ (gnat_pushdecl): Use it to set the context of the type.
+
- 2013-05-26 Eric Botcazou <ebotcazou@adacore.com>
+ 2013-09-18 Eric Botcazou <ebotcazou@adacore.com>
- * gcc-interface/trans.c (Attribute_to_gnu) <Attr_Last_Bit>: Add kludge
-Index: gcc/ada/gcc-interface/trans.c
+ * gcc-interface/trans.c (Subprogram_Body_to_gnu): Pop the stack of
+Index: gcc/ada/gcc-interface/utils.c
===================================================================
---- a/src/gcc/ada/gcc-interface/trans.c (revision
-+++ b/src/gcc/ada/gcc-interface/trans.c (revision
-@@ -2232,7 +2232,10 @@
- if (TREE_CODE (val) != INTEGER_CST)
- return true;
-
-- return tree_int_cst_equal (val, min_or_max_val) == 1;
-+ if (max)
-+ return tree_int_cst_lt (val, min_or_max_val) == 0;
-+ else
-+ return tree_int_cst_lt (min_or_max_val, val) == 0;
+--- a/src/gcc/ada/gcc-interface/utils.c (revision
++++ b/src/gcc/ada/gcc-interface/utils.c (revision
+@@ -500,6 +500,22 @@
+ free_binding_level = level;
+ }
+
++/* Set the context of TYPE and its parallel types (if any) to CONTEXT. */
++
++static void
++gnat_set_type_context (tree type, tree context)
++{
++ tree decl = TYPE_STUB_DECL (type);
++
++ TYPE_CONTEXT (type) = context;
++
++ while (decl && DECL_PARALLEL_TYPE (decl))
++ {
++ TYPE_CONTEXT (DECL_PARALLEL_TYPE (decl)) = context;
++ decl = TYPE_STUB_DECL (DECL_PARALLEL_TYPE (decl));
++ }
++}
++
+ /* Record DECL as belonging to the current lexical scope and use GNAT_NODE
+ for location information and flag propagation. */
+
+@@ -581,7 +597,7 @@
+ if (TREE_CODE (t) == POINTER_TYPE)
+ TYPE_NEXT_PTR_TO (t) = tt;
+ TYPE_NAME (tt) = DECL_NAME (decl);
+- TYPE_CONTEXT (tt) = DECL_CONTEXT (decl);
++ gnat_set_type_context (tt, DECL_CONTEXT (decl));
+ TYPE_STUB_DECL (tt) = TYPE_STUB_DECL (t);
+ DECL_ORIGINAL_TYPE (decl) = tt;
+ }
+@@ -591,7 +607,7 @@
+ /* We need a variant for the placeholder machinery to work. */
+ tree tt = build_variant_type_copy (t);
+ TYPE_NAME (tt) = decl;
+- TYPE_CONTEXT (tt) = DECL_CONTEXT (decl);
++ gnat_set_type_context (tt, DECL_CONTEXT (decl));
+ TREE_USED (tt) = TREE_USED (t);
+ TREE_TYPE (decl) = tt;
+ if (DECL_ORIGINAL_TYPE (TYPE_NAME (t)))
+@@ -613,7 +629,7 @@
+ if (!(TYPE_NAME (t) && TREE_CODE (TYPE_NAME (t)) == TYPE_DECL))
+ {
+ TYPE_NAME (t) = decl;
+- TYPE_CONTEXT (t) = DECL_CONTEXT (decl);
++ gnat_set_type_context (t, DECL_CONTEXT (decl));
+ }
+ }
}
-
- /* Return true if VAL (of type TYPE) can equal the minimum value of TYPE.
-Index: gcc/collect2-aix.h
-===================================================================
---- a/src/gcc/collect2-aix.h (revision
-+++ b/src/gcc/collect2-aix.h (revision
-@@ -1,5 +1,5 @@
- /* AIX cross support for collect2.
-- Copyright (C) 2009 Free Software Foundation, Inc.
-+ Copyright (C) 2009-2013 Free Software Foundation, Inc.
-
- This file is part of GCC.
-
-@@ -29,7 +29,7 @@
- Definitions adapted from bfd. (Fairly heavily adapted in some cases.)
- ------------------------------------------------------------------------- */
-
--/* Compatiblity types for bfd. */
-+/* Compatibility types for bfd. */
- typedef unsigned HOST_WIDE_INT bfd_vma;
-
- /* The size of an archive's fl_magic field. */
-@@ -135,7 +135,7 @@
- /* The number of entries in the symbol table. */
- char f_nsyms[4];
-
-- /* The size of the auxillary header. */
-+ /* The size of the auxiliary header. */
- char f_opthdr[2];
-
- /* Flags. */
-@@ -157,7 +157,7 @@
- /* The offset of the symbol table from the start of the file. */
- char f_symptr[8];
-
-- /* The size of the auxillary header. */
-+ /* The size of the auxiliary header. */
- char f_opthdr[2];
-
- /* Flags. */
-@@ -222,14 +222,15 @@
- /* The class of symbol (a C_* value). */
- char n_sclass[1];
-
-- /* The number of auxillary symbols attached to this entry. */
-+ /* The number of auxiliary symbols attached to this entry. */
- char n_numaux[1];
- };
-
- /* Definitions required by collect2. */
- #define C_EXT 2
-
--#define F_SHROBJ 0x2000
-+#define F_SHROBJ 0x2000
-+#define F_LOADONLY 0x4000
-
- #define N_UNDEF ((short) 0)
- #define N_TMASK 060
-Index: gcc/config/i386/i386.md
-===================================================================
---- a/src/gcc/config/i386/i386.md (revision
-+++ b/src/gcc/config/i386/i386.md (revision
-@@ -2327,7 +2327,7 @@
- "TARGET_LP64 && ix86_check_movabs (insn, 0)"
- "@
- movabs{<imodesuffix>}\t{%1, %P0|[%P0], %1}
-- mov{<imodesuffix>}\t{%1, %a0|%a0, %1}"
-+ mov{<imodesuffix>}\t{%1, %a0|<iptrsize> PTR %a0, %1}"
- [(set_attr "type" "imov")
- (set_attr "modrm" "0,*")
- (set_attr "length_address" "8,0")
-@@ -2341,7 +2341,7 @@
- "TARGET_LP64 && ix86_check_movabs (insn, 1)"
- "@
- movabs{<imodesuffix>}\t{%P1, %0|%0, [%P1]}
-- mov{<imodesuffix>}\t{%a1, %0|%0, %a1}"
-+ mov{<imodesuffix>}\t{%a1, %0|%0, <iptrsize> PTR %a1}"
- [(set_attr "type" "imov")
- (set_attr "modrm" "0,*")
- (set_attr "length_address" "8,0")
diff --git a/debian/rules.patch b/debian/rules.patch
index 036938c..302004c 100644
--- a/debian/rules.patch
+++ b/debian/rules.patch
@@ -38,7 +38,6 @@ debian_patches += \
# boehm-gc-nocheck: seems to work on the buildds \
debian_patches += \
- $(if $(with_linaro_branch),aarch64-bootstrap) \
$(if $(with_linaro_branch),aarch64-multiarch) \
$(if $(with_linaro_branch),aarch64-hash-style-gnu) \
aarch64-libffi \