LCOV - code coverage report
Current view: top level - gcc - tree-vect-data-refs.c (source / functions)
Test: gcc.info
Date: 2020-03-28 11:57:23
Legend: Lines: hit | not hit    Branches: + taken | - not taken | # not executed

Coverage summary:
                Hit     Total    Coverage
    Lines:      2454    2777     88.4 %
    Functions:  71      74       95.9 %
    Branches:   0       0        -

           Branch data     Line data    Source code
       1                 :            : /* Data References Analysis and Manipulation Utilities for Vectorization.
       2                 :            :    Copyright (C) 2003-2020 Free Software Foundation, Inc.
       3                 :            :    Contributed by Dorit Naishlos <dorit@il.ibm.com>
       4                 :            :    and Ira Rosen <irar@il.ibm.com>
       5                 :            : 
       6                 :            : This file is part of GCC.
       7                 :            : 
       8                 :            : GCC is free software; you can redistribute it and/or modify it under
       9                 :            : the terms of the GNU General Public License as published by the Free
      10                 :            : Software Foundation; either version 3, or (at your option) any later
      11                 :            : version.
      12                 :            : 
      13                 :            : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
      14                 :            : WARRANTY; without even the implied warranty of MERCHANTABILITY or
      15                 :            : FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      16                 :            : for more details.
      17                 :            : 
      18                 :            : You should have received a copy of the GNU General Public License
      19                 :            : along with GCC; see the file COPYING3.  If not see
      20                 :            : <http://www.gnu.org/licenses/>.  */
      21                 :            : 
      22                 :            : #include "config.h"
      23                 :            : #include "system.h"
      24                 :            : #include "coretypes.h"
      25                 :            : #include "backend.h"
      26                 :            : #include "target.h"
      27                 :            : #include "rtl.h"
      28                 :            : #include "tree.h"
      29                 :            : #include "gimple.h"
      30                 :            : #include "predict.h"
      31                 :            : #include "memmodel.h"
      32                 :            : #include "tm_p.h"
      33                 :            : #include "ssa.h"
      34                 :            : #include "optabs-tree.h"
      35                 :            : #include "cgraph.h"
      36                 :            : #include "dumpfile.h"
      37                 :            : #include "alias.h"
      38                 :            : #include "fold-const.h"
      39                 :            : #include "stor-layout.h"
      40                 :            : #include "tree-eh.h"
      41                 :            : #include "gimplify.h"
      42                 :            : #include "gimple-iterator.h"
      43                 :            : #include "gimplify-me.h"
      44                 :            : #include "tree-ssa-loop-ivopts.h"
      45                 :            : #include "tree-ssa-loop-manip.h"
      46                 :            : #include "tree-ssa-loop.h"
      47                 :            : #include "cfgloop.h"
      48                 :            : #include "tree-scalar-evolution.h"
      49                 :            : #include "tree-vectorizer.h"
      50                 :            : #include "expr.h"
      51                 :            : #include "builtins.h"
      52                 :            : #include "tree-cfg.h"
      53                 :            : #include "tree-hash-traits.h"
      54                 :            : #include "vec-perm-indices.h"
      55                 :            : #include "internal-fn.h"
      56                 :            : 
      57                 :            : /* Return true if load- or store-lanes optab OPTAB is implemented for
      58                 :            :    COUNT vectors of type VECTYPE.  NAME is the name of OPTAB.  */
      59                 :            : 
      60                 :            : static bool
      61                 :      14616 : vect_lanes_optab_supported_p (const char *name, convert_optab optab,
      62                 :            :                               tree vectype, unsigned HOST_WIDE_INT count)
      63                 :            : {
      64                 :      14616 :   machine_mode mode, array_mode;
      65                 :      14616 :   bool limit_p;
      66                 :            : 
      67                 :      14616 :   mode = TYPE_MODE (vectype);
      68                 :      14616 :   if (!targetm.array_mode (mode, count).exists (&array_mode))
      69                 :            :     {
      70                 :      29232 :       poly_uint64 bits = count * GET_MODE_BITSIZE (mode);
      71                 :      14616 :       limit_p = !targetm.array_mode_supported_p (mode, count);
      72                 :      29487 :       if (!int_mode_for_size (bits, limit_p).exists (&array_mode))
      73                 :            :         {
      74                 :      14361 :           if (dump_enabled_p ())
      75                 :       3941 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
      76                 :            :                              "no array mode for %s[%wu]\n",
      77                 :       3941 :                              GET_MODE_NAME (mode), count);
      78                 :      14361 :           return false;
      79                 :            :         }
      80                 :            :     }
      81                 :            : 
      82                 :        255 :   if (convert_optab_handler (optab, array_mode, mode) == CODE_FOR_nothing)
      83                 :            :     {
      84                 :        255 :       if (dump_enabled_p ())
      85                 :        102 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
      86                 :            :                          "cannot use %s<%s><%s>\n", name,
      87                 :        102 :                          GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
      88                 :        255 :       return false;
      89                 :            :     }
      90                 :            : 
      91                 :          0 :   if (dump_enabled_p ())
      92                 :          0 :     dump_printf_loc (MSG_NOTE, vect_location,
      93                 :          0 :                      "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
      94                 :          0 :                      GET_MODE_NAME (mode));
      95                 :            : 
      96                 :            :   return true;
      97                 :            : }
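
For context, the load- and store-lanes optabs queried above correspond to interleaved-access instructions such as AArch64's ld2/st2. The following purely illustrative C loop (the name scale_pairs is hypothetical) contains the kind of stride-2 access group for which the vectorizer would probe this optab on a lanes-capable target:

    /* Illustrative example only: a stride-2 interleaved access group.  On a
       target that implements the load/store-lanes optabs (e.g. AArch64
       ld2/st2) the vectorizer may cover both lanes with one lanes insn.  */
    void
    scale_pairs (float *restrict out, const float *restrict in, int n)
    {
      for (int i = 0; i < n; i++)
        {
          out[2 * i]     = in[2 * i] * 2.0f;     /* lane 0 */
          out[2 * i + 1] = in[2 * i + 1] * 0.5f; /* lane 1 */
        }
    }
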
      98                 :            : 
      99                 :            : 
     100                 :            : /* Return the smallest scalar part of STMT_INFO.
     101                 :            :    This is used to determine the vectype of the stmt.  We generally set the
     102                 :            :    vectype according to the type of the result (lhs).  For stmts whose
     103                 :            :    result-type is different than the type of the arguments (e.g., demotion,
     104                 :            :    promotion), vectype will be reset appropriately (later).  Note that we have
     105                 :            :    to visit the smallest datatype in this function, because that determines the
     106                 :            :    VF.  If the smallest datatype in the loop is present only as the rhs of a
     107                 :            :    promotion operation - we'd miss it.
     108                 :            :    Such a case, where a variable of this datatype does not appear in the lhs
     109                 :            :    anywhere in the loop, can only occur if it's an invariant: e.g.:
     110                 :            :    'int_x = (int) short_inv', which we'd expect to have been optimized away by
     111                 :            :    invariant motion.  However, we cannot rely on invariant motion to always
     112                 :            :    take invariants out of the loop, and so in the case of promotion we also
     113                 :            :    have to check the rhs.
     114                 :            :    LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
     115                 :            :    types.  */
     116                 :            : 
     117                 :            : tree
     118                 :     841102 : vect_get_smallest_scalar_type (stmt_vec_info stmt_info,
     119                 :            :                                HOST_WIDE_INT *lhs_size_unit,
     120                 :            :                                HOST_WIDE_INT *rhs_size_unit)
     121                 :            : {
     122                 :     841102 :   tree scalar_type = gimple_expr_type (stmt_info->stmt);
     123                 :     841102 :   HOST_WIDE_INT lhs, rhs;
     124                 :            : 
     125                 :            :   /* During the analysis phase, this function is called on arbitrary
     126                 :            :      statements that might not have scalar results.  */
     127                 :     841102 :   if (!tree_fits_uhwi_p (TYPE_SIZE_UNIT (scalar_type)))
     128                 :            :     return scalar_type;
     129                 :            : 
     130                 :     841102 :   lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
     131                 :            : 
     132                 :     841102 :   gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
     133                 :     838395 :   if (assign
     134                 :     838395 :       && (gimple_assign_cast_p (assign)
     135                 :     781656 :           || gimple_assign_rhs_code (assign) == DOT_PROD_EXPR
     136                 :     781623 :           || gimple_assign_rhs_code (assign) == WIDEN_SUM_EXPR
     137                 :     781623 :           || gimple_assign_rhs_code (assign) == WIDEN_MULT_EXPR
     138                 :     780925 :           || gimple_assign_rhs_code (assign) == WIDEN_LSHIFT_EXPR
     139                 :     780925 :           || gimple_assign_rhs_code (assign) == FLOAT_EXPR))
     140                 :            :     {
     141                 :      61101 :       tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
     142                 :            : 
     143                 :      61101 :       rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
     144                 :      61101 :       if (rhs < lhs)
     145                 :      13696 :         scalar_type = rhs_type;
     146                 :            :     }
     147                 :     780001 :   else if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
     148                 :            :     {
     149                 :       2707 :       unsigned int i = 0;
     150                 :       2707 :       if (gimple_call_internal_p (call))
     151                 :            :         {
     152                 :       1366 :           internal_fn ifn = gimple_call_internal_fn (call);
     153                 :       1366 :           if (internal_load_fn_p (ifn) || internal_store_fn_p (ifn))
     154                 :            :             /* gimple_expr_type already picked the type of the loaded
     155                 :            :                or stored data.  */
     156                 :            :             i = ~0U;
     157                 :        643 :           else if (internal_fn_mask_index (ifn) == 0)
     158                 :          0 :             i = 1;
     159                 :            :         }
     160                 :       2707 :       if (i < gimple_call_num_args (call))
     161                 :            :         {
     162                 :       1979 :           tree rhs_type = TREE_TYPE (gimple_call_arg (call, i));
     163                 :       1979 :           if (tree_fits_uhwi_p (TYPE_SIZE_UNIT (rhs_type)))
     164                 :            :             {
     165                 :       1979 :               rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
     166                 :       1979 :               if (rhs < lhs)
     167                 :         56 :                 scalar_type = rhs_type;
     168                 :            :             }
     169                 :            :         }
     170                 :            :     }
     171                 :            : 
     172                 :     841102 :   *lhs_size_unit = lhs;
     173                 :     841102 :   *rhs_size_unit = rhs;
     174                 :     841102 :   return scalar_type;
     175                 :            : }
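
The comment above can be made concrete with a small illustrative loop (the name widen is hypothetical): because of the promotion, the smallest scalar type in the loop is short, and it is that type rather than the int result that should determine the vectorization factor.

    /* Illustrative example only: the rhs type 'short' (2 bytes) is smaller
       than the lhs type 'int' (4 bytes), so vect_get_smallest_scalar_type
       must look at the rhs of the conversion to get the VF right.  */
    void
    widen (int *restrict dst, const short *restrict src, int n)
    {
      for (int i = 0; i < n; i++)
        dst[i] = (int) src[i];  /* promotion: smallest type only on the rhs */
    }
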
     176                 :            : 
     177                 :            : 
     178                 :            : /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
     179                 :            :    tested at run-time.  Return TRUE if DDR was successfully inserted.
     180                 :            :    Return false if versioning is not supported.  */
     181                 :            : 
     182                 :            : static opt_result
     183                 :      29166 : vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
     184                 :            : {
     185                 :      29166 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
     186                 :            : 
     187                 :      29166 :   if ((unsigned) param_vect_max_version_for_alias_checks == 0)
     188                 :         23 :     return opt_result::failure_at (vect_location,
     189                 :            :                                    "will not create alias checks, as"
     190                 :            :                                    " --param vect-max-version-for-alias-checks"
     191                 :         23 :                                    " == 0\n");
     192                 :            : 
     193                 :      29143 :   opt_result res
     194                 :            :     = runtime_alias_check_p (ddr, loop,
     195                 :      29143 :                              optimize_loop_nest_for_speed_p (loop));
     196                 :      29143 :   if (!res)
     197                 :         34 :     return res;
     198                 :            : 
     199                 :      29109 :   LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
     200                 :      29109 :   return opt_result::success ();
     201                 :            : }
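
A hedged illustration of when this is used (the function name axpy is made up): without restrict the compiler cannot prove that the two pointers are disjoint, so the loop is only vectorizable if a runtime alias check is versioned in, which is what gets recorded here unless --param vect-max-version-for-alias-checks=0 forbids it.

    /* Illustrative example only: 'a' and 'b' may overlap as far as the
       compiler knows, so vectorization needs a runtime alias check between
       the two data references, recorded by vect_mark_for_runtime_alias_test.  */
    void
    axpy (float *a, const float *b, float s, int n)
    {
      for (int i = 0; i < n; i++)
        a[i] += s * b[i];
    }
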
     202                 :            : 
     203                 :            : /* Record that loop LOOP_VINFO needs to check that VALUE is nonzero.  */
     204                 :            : 
     205                 :            : static void
     206                 :        967 : vect_check_nonzero_value (loop_vec_info loop_vinfo, tree value)
     207                 :            : {
     208                 :        967 :   vec<tree> checks = LOOP_VINFO_CHECK_NONZERO (loop_vinfo);
     209                 :       2635 :   for (unsigned int i = 0; i < checks.length(); ++i)
     210                 :        656 :     if (checks[i] == value)
     211                 :        967 :       return;
     212                 :            : 
     213                 :        959 :   if (dump_enabled_p ())
     214                 :        370 :     dump_printf_loc (MSG_NOTE, vect_location,
     215                 :            :                      "need run-time check that %T is nonzero\n",
     216                 :            :                      value);
     217                 :        959 :   LOOP_VINFO_CHECK_NONZERO (loop_vinfo).safe_push (value);
     218                 :            : }
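
Roughly the kind of access that leads to this check (illustrative only, the name bump_strided is invented): if stride happened to be zero at run time every iteration would read and write the same element, so the vectorized loop is guarded by a check that the step is nonzero.

    /* Illustrative example only: the step of a[i * stride] is 'stride'
       elements and is not a compile-time constant; the vectorizer records a
       runtime nonzero check before treating the iterations as independent.  */
    void
    bump_strided (int *a, int stride, int n)
    {
      for (int i = 0; i < n; i++)
        a[i * stride] += 1;
    }
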
     219                 :            : 
     220                 :            : /* Return true if we know that the order of vectorized DR_INFO_A and
     221                 :            :    vectorized DR_INFO_B will be the same as the order of DR_INFO_A and
     222                 :            :    DR_INFO_B.  At least one of the accesses is a write.  */
     223                 :            : 
     224                 :            : static bool
     225                 :      42116 : vect_preserves_scalar_order_p (dr_vec_info *dr_info_a, dr_vec_info *dr_info_b)
     226                 :            : {
     227                 :      42116 :   stmt_vec_info stmtinfo_a = dr_info_a->stmt;
     228                 :      42116 :   stmt_vec_info stmtinfo_b = dr_info_b->stmt;
     229                 :            : 
     230                 :            :   /* Single statements are always kept in their original order.  */
     231                 :      42116 :   if (!STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
     232                 :      62599 :       && !STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
     233                 :            :     return true;
     234                 :            : 
     235                 :            :   /* STMT_A and STMT_B belong to overlapping groups.  All loads in a
     236                 :            :      SLP group are emitted at the position of the last scalar load and
     237                 :            :      all loads in an interleaving group are emitted at the position
     238                 :            :      of the first scalar load.
     239                 :            :      Stores in a group are emitted at the position of the last scalar store.
     240                 :            :      Compute that position and check whether the resulting order matches
     241                 :            :      the current one.
     242                 :            :      We have not yet decided between SLP and interleaving so we have
     243                 :            :      to conservatively assume both.  */
     244                 :      22662 :   stmt_vec_info il_a;
     245                 :      22662 :   stmt_vec_info last_a = il_a = DR_GROUP_FIRST_ELEMENT (stmtinfo_a);
     246                 :      22662 :   if (last_a)
     247                 :            :     {
     248                 :     164342 :       for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (last_a); s;
     249                 :     142709 :            s = DR_GROUP_NEXT_ELEMENT (s))
     250                 :     285418 :         last_a = get_later_stmt (last_a, s);
     251                 :      21633 :       if (!DR_IS_WRITE (STMT_VINFO_DATA_REF (stmtinfo_a)))
     252                 :            :         {
     253                 :      25636 :           for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_a); s;
     254                 :      18696 :                s = DR_GROUP_NEXT_ELEMENT (s))
     255                 :      37392 :             if (get_later_stmt (il_a, s) == il_a)
     256                 :         42 :               il_a = s;
     257                 :            :         }
     258                 :            :       else
     259                 :            :         il_a = last_a;
     260                 :            :     }
     261                 :            :   else
     262                 :            :     last_a = il_a = stmtinfo_a;
     263                 :      22662 :   stmt_vec_info il_b;
     264                 :      22662 :   stmt_vec_info last_b = il_b = DR_GROUP_FIRST_ELEMENT (stmtinfo_b);
     265                 :      22662 :   if (last_b)
     266                 :            :     {
     267                 :     165697 :       for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (last_b); s;
     268                 :     143812 :            s = DR_GROUP_NEXT_ELEMENT (s))
     269                 :     287624 :         last_b = get_later_stmt (last_b, s);
     270                 :      21885 :       if (!DR_IS_WRITE (STMT_VINFO_DATA_REF (stmtinfo_b)))
     271                 :            :         {
     272                 :      11109 :           for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_b); s;
     273                 :       7850 :                s = DR_GROUP_NEXT_ELEMENT (s))
     274                 :      15700 :             if (get_later_stmt (il_b, s) == il_b)
     275                 :          0 :               il_b = s;
     276                 :            :         }
     277                 :            :       else
     278                 :            :         il_b = last_b;
     279                 :            :     }
     280                 :            :   else
     281                 :            :     last_b = il_b = stmtinfo_b;
     282                 :      22662 :   bool a_after_b = (get_later_stmt (stmtinfo_a, stmtinfo_b) == stmtinfo_a);
     283                 :      22662 :   return (/* SLP */
     284                 :      22662 :           (get_later_stmt (last_a, last_b) == last_a) == a_after_b
     285                 :            :           /* Interleaving */
     286                 :      16855 :           && (get_later_stmt (il_a, il_b) == il_a) == a_after_b
     287                 :            :           /* Mixed */
     288                 :      16796 :           && (get_later_stmt (il_a, last_b) == il_a) == a_after_b
     289                 :      39458 :           && (get_later_stmt (last_a, il_b) == last_a) == a_after_b);
     290                 :            : }
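
An illustrative loop where this matters (the name swap_pairs is hypothetical): the loads and stores form two groups over the same locations, and the function checks that emitting the vector loads at the first/last scalar load and the vector stores at the last scalar store preserves the original order.

    /* Illustrative example only: the load group { a[2*i], a[2*i+1] } and the
       store group write the same addresses in each iteration; the vectorized
       loads must still end up before the vectorized stores.  */
    void
    swap_pairs (int *a, int n)
    {
      for (int i = 0; i < n; i++)
        {
          int x = a[2 * i];
          int y = a[2 * i + 1];
          a[2 * i]     = y;
          a[2 * i + 1] = x;
        }
    }
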
     291                 :            : 
     292                 :            : /* A subroutine of vect_analyze_data_ref_dependence.  Handle
     293                 :            :    DDR_COULD_BE_INDEPENDENT_P ddr DDR that has a known set of dependence
     294                 :            :    distances.  These distances are conservatively correct but they don't
     295                 :            :    reflect a guaranteed dependence.
     296                 :            : 
     297                 :            :    Return true if this function does all the work necessary to avoid
     298                 :            :    an alias or false if the caller should use the dependence distances
     299                 :            :    to limit the vectorization factor in the usual way.  LOOP_DEPTH is
     300                 :            :    the depth of the loop described by LOOP_VINFO and the other arguments
     301                 :            :    are as for vect_analyze_data_ref_dependence.  */
     302                 :            : 
     303                 :            : static bool
     304                 :        131 : vect_analyze_possibly_independent_ddr (data_dependence_relation *ddr,
     305                 :            :                                        loop_vec_info loop_vinfo,
     306                 :            :                                        int loop_depth, unsigned int *max_vf)
     307                 :            : {
     308                 :        131 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
     309                 :        131 :   lambda_vector dist_v;
     310                 :        131 :   unsigned int i;
     311                 :        292 :   FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
     312                 :            :     {
     313                 :        222 :       int dist = dist_v[loop_depth];
     314                 :        222 :       if (dist != 0 && !(dist > 0 && DDR_REVERSED_P (ddr)))
     315                 :            :         {
     316                 :            :           /* If the user asserted safelen >= DIST consecutive iterations
     317                 :            :              can be executed concurrently, assume independence.
     318                 :            : 
     319                 :            :              ??? An alternative would be to add the alias check even
     320                 :            :              in this case, and vectorize the fallback loop with the
     321                 :            :              maximum VF set to safelen.  However, if the user has
     322                 :            :              explicitly given a length, it's less likely that that
     323                 :            :              would be a win.  */
     324                 :        103 :           if (loop->safelen >= 2 && abs_hwi (dist) <= loop->safelen)
     325                 :            :             {
     326                 :         42 :               if ((unsigned int) loop->safelen < *max_vf)
     327                 :          2 :                 *max_vf = loop->safelen;
     328                 :         42 :               LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
     329                 :         42 :               continue;
     330                 :            :             }
     331                 :            : 
     332                 :            :           /* For dependence distances of 2 or more, we have the option
     333                 :            :              of limiting VF or checking for an alias at runtime.
     334                 :            :              Prefer to check at runtime if we can, to avoid limiting
     335                 :            :              the VF unnecessarily when the bases are in fact independent.
     336                 :            : 
     337                 :            :              Note that the alias checks will be removed if the VF ends up
     338                 :            :              being small enough.  */
     339                 :         61 :           dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (DDR_A (ddr));
     340                 :         61 :           dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (DDR_B (ddr));
     341                 :         61 :           return (!STMT_VINFO_GATHER_SCATTER_P (dr_info_a->stmt)
     342                 :         61 :                   && !STMT_VINFO_GATHER_SCATTER_P (dr_info_b->stmt)
     343                 :        128 :                   && vect_mark_for_runtime_alias_test (ddr, loop_vinfo));
     344                 :            :         }
     345                 :            :     }
     346                 :            :   return true;
     347                 :            : }
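
A sketch of the safelen case handled above (compiled with -fopenmp-simd or -fopenmp; the name shift_add is invented): the pragma asserts that up to 8 consecutive iterations may execute concurrently, so independence can be assumed up to a vectorization factor of 8 even though p[i] and p[i + off] might overlap.

    /* Illustrative example only: 'safelen(8)' lets the possible dependence
       between p[i] and p[i + off] be ignored for any VF <= 8.  */
    void
    shift_add (float *p, int off, int n)
    {
    #pragma omp simd safelen(8)
      for (int i = 0; i < n; i++)
        p[i] += p[i + off];
    }
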
     348                 :            : 
     349                 :            : 
     350                 :            : /* Function vect_analyze_data_ref_dependence.
     351                 :            : 
     352                 :            :    FIXME: I needed to change the sense of the returned flag.
     353                 :            : 
     354                 :            :    Return FALSE if there (might) exist a dependence between a memory-reference
     355                 :            :    DRA and a memory-reference DRB.  When versioning for alias may check a
     356                 :            :    dependence at run-time, return TRUE.  Adjust *MAX_VF according to
     357                 :            :    the data dependence.  */
     358                 :            : 
     359                 :            : static opt_result
     360                 :     619570 : vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
     361                 :            :                                   loop_vec_info loop_vinfo,
     362                 :            :                                   unsigned int *max_vf)
     363                 :            : {
     364                 :     619570 :   unsigned int i;
     365                 :     619570 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
     366                 :     619570 :   struct data_reference *dra = DDR_A (ddr);
     367                 :     619570 :   struct data_reference *drb = DDR_B (ddr);
     368                 :     619570 :   dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (dra);
     369                 :     619570 :   dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (drb);
     370                 :     619570 :   stmt_vec_info stmtinfo_a = dr_info_a->stmt;
     371                 :     619570 :   stmt_vec_info stmtinfo_b = dr_info_b->stmt;
     372                 :     619570 :   lambda_vector dist_v;
     373                 :     619570 :   unsigned int loop_depth;
     374                 :            : 
     375                 :            :   /* In loop analysis all data references should be vectorizable.  */
     376                 :     619570 :   if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
     377                 :     619570 :       || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
     378                 :          0 :     gcc_unreachable ();
     379                 :            : 
     380                 :            :   /* Independent data accesses.  */
     381                 :     619570 :   if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
     382                 :     439248 :     return opt_result::success ();
     383                 :            : 
     384                 :     180322 :   if (dra == drb
     385                 :      84683 :       || (DR_IS_READ (dra) && DR_IS_READ (drb)))
     386                 :     129633 :     return opt_result::success ();
     387                 :            : 
     388                 :            :   /* We do not have to consider dependences between accesses that belong
     389                 :            :      to the same group, unless the stride could be smaller than the
     390                 :            :      group size.  */
     391                 :      50689 :   if (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
     392                 :      22366 :       && (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
     393                 :      22366 :           == DR_GROUP_FIRST_ELEMENT (stmtinfo_b))
     394                 :      55260 :       && !STMT_VINFO_STRIDED_P (stmtinfo_a))
     395                 :          0 :     return opt_result::success ();
     396                 :            : 
     397                 :            :   /* Even if we have an anti-dependence then, as the vectorized loop covers at
     398                 :            :      least two scalar iterations, there is always also a true dependence.
     399                 :            :      As the vectorizer does not re-order loads and stores we can ignore
     400                 :            :      the anti-dependence if TBAA can disambiguate both DRs similar to the
     401                 :            :      case with known negative distance anti-dependences (positive
     402                 :            :      distance anti-dependences would violate TBAA constraints).  */
     403                 :      22401 :   if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
     404                 :      28288 :        || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
     405                 :      79146 :       && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
     406                 :            :                                  get_alias_set (DR_REF (drb))))
     407                 :        595 :     return opt_result::success ();
     408                 :            : 
     409                 :            :   /* Unknown data dependence.  */
     410                 :      50094 :   if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
     411                 :            :     {
     412                 :            :       /* If user asserted safelen consecutive iterations can be
     413                 :            :          executed concurrently, assume independence.  */
     414                 :      34877 :       if (loop->safelen >= 2)
     415                 :            :         {
     416                 :       5823 :           if ((unsigned int) loop->safelen < *max_vf)
     417                 :        975 :             *max_vf = loop->safelen;
     418                 :       5823 :           LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
     419                 :       5823 :           return opt_result::success ();
     420                 :            :         }
     421                 :            : 
     422                 :      29054 :       if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
     423                 :      29050 :           || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
     424                 :          4 :         return opt_result::failure_at
     425                 :          4 :           (stmtinfo_a->stmt,
     426                 :            :            "versioning for alias not supported for: "
     427                 :            :            "can't determine dependence between %T and %T\n",
     428                 :          4 :            DR_REF (dra), DR_REF (drb));
     429                 :            : 
     430                 :      29050 :       if (dump_enabled_p ())
     431                 :       4839 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmtinfo_a->stmt,
     432                 :            :                          "versioning for alias required: "
     433                 :            :                          "can't determine dependence between %T and %T\n",
     434                 :            :                          DR_REF (dra), DR_REF (drb));
     435                 :            : 
     436                 :            :       /* Add to list of ddrs that need to be tested at run-time.  */
     437                 :      29050 :       return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
     438                 :            :     }
     439                 :            : 
     440                 :            :   /* Known data dependence.  */
     441                 :      15217 :   if (DDR_NUM_DIST_VECTS (ddr) == 0)
     442                 :            :     {
     443                 :            :       /* If user asserted safelen consecutive iterations can be
     444                 :            :          executed concurrently, assume independence.  */
     445                 :         55 :       if (loop->safelen >= 2)
     446                 :            :         {
     447                 :          0 :           if ((unsigned int) loop->safelen < *max_vf)
     448                 :          0 :             *max_vf = loop->safelen;
     449                 :          0 :           LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
     450                 :          0 :           return opt_result::success ();
     451                 :            :         }
     452                 :            : 
     453                 :         55 :       if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
     454                 :         55 :           || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
     455                 :          0 :         return opt_result::failure_at
     456                 :          0 :           (stmtinfo_a->stmt,
     457                 :            :            "versioning for alias not supported for: "
     458                 :            :            "bad dist vector for %T and %T\n",
     459                 :          0 :            DR_REF (dra), DR_REF (drb));
     460                 :            : 
     461                 :         55 :       if (dump_enabled_p ())
     462                 :         34 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmtinfo_a->stmt,
     463                 :            :                          "versioning for alias required: "
     464                 :            :                          "bad dist vector for %T and %T\n",
     465                 :            :                          DR_REF (dra), DR_REF (drb));
     466                 :            :       /* Add to list of ddrs that need to be tested at run-time.  */
     467                 :         55 :       return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
     468                 :            :     }
     469                 :            : 
     470                 :      15162 :   loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
     471                 :            : 
     472                 :      15162 :   if (DDR_COULD_BE_INDEPENDENT_P (ddr)
     473                 :      15162 :       && vect_analyze_possibly_independent_ddr (ddr, loop_vinfo,
     474                 :            :                                                 loop_depth, max_vf))
     475                 :        125 :     return opt_result::success ();
     476                 :            : 
     477                 :      28876 :   FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
     478                 :            :     {
     479                 :      15038 :       int dist = dist_v[loop_depth];
     480                 :            : 
     481                 :      15038 :       if (dump_enabled_p ())
     482                 :       1686 :         dump_printf_loc (MSG_NOTE, vect_location,
     483                 :            :                          "dependence distance  = %d.\n", dist);
     484                 :            : 
     485                 :      15038 :       if (dist == 0)
     486                 :            :         {
     487                 :      13686 :           if (dump_enabled_p ())
     488                 :       1506 :             dump_printf_loc (MSG_NOTE, vect_location,
     489                 :            :                              "dependence distance == 0 between %T and %T\n",
     490                 :            :                              DR_REF (dra), DR_REF (drb));
     491                 :            : 
     492                 :            :           /* When we perform grouped accesses and perform implicit CSE
     493                 :            :              by detecting equal accesses and doing disambiguation with
     494                 :            :              runtime alias tests like for
     495                 :            :                 .. = a[i];
     496                 :            :                 .. = a[i+1];
     497                 :            :                 a[i] = ..;
     498                 :            :                 a[i+1] = ..;
     499                 :            :                 *p = ..;
     500                 :            :                 .. = a[i];
     501                 :            :                 .. = a[i+1];
     502                 :            :              where we will end up loading { a[i], a[i+1] } once, make
     503                 :            :              sure that inserting group loads before the first load and
     504                 :            :              stores after the last store will do the right thing.
     505                 :            :              Similar for groups like
     506                 :            :                 a[i] = ...;
     507                 :            :                 ... = a[i];
     508                 :            :                 a[i+1] = ...;
     509                 :            :              where loads from the group interleave with the store.  */
     510                 :      13686 :           if (!vect_preserves_scalar_order_p (dr_info_a, dr_info_b))
     511                 :         34 :             return opt_result::failure_at (stmtinfo_a->stmt,
     512                 :            :                                            "READ_WRITE dependence"
     513                 :         34 :                                            " in interleaving.\n");
     514                 :            : 
     515                 :      13652 :           if (loop->safelen < 2)
     516                 :            :             {
     517                 :      12030 :               tree indicator = dr_zero_step_indicator (dra);
     518                 :      12030 :               if (!indicator || integer_zerop (indicator))
     519                 :          0 :                 return opt_result::failure_at (stmtinfo_a->stmt,
     520                 :          0 :                                                "access also has a zero step\n");
     521                 :      12030 :               else if (TREE_CODE (indicator) != INTEGER_CST)
     522                 :        967 :                 vect_check_nonzero_value (loop_vinfo, indicator);
     523                 :            :             }
     524                 :      13652 :           continue;
     525                 :            :         }
     526                 :            : 
     527                 :       1352 :       if (dist > 0 && DDR_REVERSED_P (ddr))
     528                 :            :         {
     529                 :            :           /* If DDR_REVERSED_P the order of the data-refs in DDR was
     530                 :            :              reversed (to make distance vector positive), and the actual
     531                 :            :              distance is negative.  */
     532                 :        101 :           if (dump_enabled_p ())
     533                 :         51 :             dump_printf_loc (MSG_NOTE, vect_location,
     534                 :            :                              "dependence distance negative.\n");
     535                 :            :           /* When doing outer loop vectorization, we need to check if there is
     536                 :            :              a backward dependence at the inner loop level if the dependence
     537                 :            :              at the outer loop is reversed.  See PR81740.  */
     538                 :        101 :           if (nested_in_vect_loop_p (loop, stmtinfo_a)
     539                 :         92 :               || nested_in_vect_loop_p (loop, stmtinfo_b))
     540                 :            :             {
     541                 :          9 :               unsigned inner_depth = index_in_loop_nest (loop->inner->num,
     542                 :          9 :                                                          DDR_LOOP_NEST (ddr));
     543                 :          9 :               if (dist_v[inner_depth] < 0)
     544                 :          6 :                 return opt_result::failure_at (stmtinfo_a->stmt,
     545                 :            :                                                "not vectorized, dependence "
     546                 :            :                                                "between data-refs %T and %T\n",
     547                 :          6 :                                                DR_REF (dra), DR_REF (drb));
     548                 :            :             }
     549                 :            :           /* Record a negative dependence distance to later limit the
     550                 :            :              amount of stmt copying / unrolling we can perform.
     551                 :            :              Only need to handle read-after-write dependence.  */
     552                 :         95 :           if (DR_IS_READ (drb)
     553                 :         21 :               && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
     554                 :          4 :                   || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
     555                 :         21 :             STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
     556                 :         95 :           continue;
     557                 :            :         }
     558                 :            : 
     559                 :       1251 :       unsigned int abs_dist = abs (dist);
     560                 :       1251 :       if (abs_dist >= 2 && abs_dist < *max_vf)
     561                 :            :         {
     562                 :            :           /* The dependence distance requires reduction of the maximal
     563                 :            :              vectorization factor.  */
     564                 :         64 :           *max_vf = abs_dist;
     565                 :         64 :           if (dump_enabled_p ())
     566                 :         44 :             dump_printf_loc (MSG_NOTE, vect_location,
     567                 :            :                              "adjusting maximal vectorization factor to %i\n",
     568                 :            :                              *max_vf);
     569                 :            :         }
     570                 :            : 
     571                 :       1251 :       if (abs_dist >= *max_vf)
     572                 :            :         {
     573                 :            :           /* Dependence distance does not create dependence, as far as
     574                 :            :              vectorization is concerned, in this case.  */
     575                 :         92 :           if (dump_enabled_p ())
     576                 :         62 :             dump_printf_loc (MSG_NOTE, vect_location,
     577                 :            :                              "dependence distance >= VF.\n");
     578                 :         92 :           continue;
     579                 :            :         }
     580                 :            : 
     581                 :       1159 :       return opt_result::failure_at (stmtinfo_a->stmt,
     582                 :            :                                      "not vectorized, possible dependence "
     583                 :            :                                      "between data-refs %T and %T\n",
     584                 :       1159 :                                      DR_REF (dra), DR_REF (drb));
     585                 :            :     }
     586                 :            : 
     587                 :      13838 :   return opt_result::success ();
     588                 :            : }
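
A worked example of the known-distance path above (illustrative only, the name shift3 is hypothetical): the write of a[i + 3] and the read of a[i] have dependence distance 3, so instead of rejecting the loop the analysis caps *max_vf at 3.

    /* Illustrative example only: dependence distance 3 between the read of
       a[i] and the write of a[i + 3]; the loop remains vectorizable, but the
       maximal vectorization factor is reduced to 3.  */
    void
    shift3 (int *a, int n)
    {
      for (int i = 0; i + 3 < n; i++)
        a[i + 3] = a[i] + 1;
    }
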
     589                 :            : 
     590                 :            : /* Function vect_analyze_data_ref_dependences.
     591                 :            : 
     592                 :            :    Examine all the data references in the loop, and make sure there do not
     593                 :            :    exist any data dependences between them.  Set *MAX_VF according to
     594                 :            :    the maximum vectorization factor the data dependences allow.  */
     595                 :            : 
     596                 :            : opt_result
     597                 :      36380 : vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
     598                 :            :                                    unsigned int *max_vf)
     599                 :            : {
     600                 :      36380 :   unsigned int i;
     601                 :      36380 :   struct data_dependence_relation *ddr;
     602                 :            : 
     603                 :      36380 :   DUMP_VECT_SCOPE ("vect_analyze_data_ref_dependences");
     604                 :            : 
     605                 :      36380 :   if (!LOOP_VINFO_DDRS (loop_vinfo).exists ())
     606                 :            :     {
     607                 :      24355 :       LOOP_VINFO_DDRS (loop_vinfo)
     608                 :      46727 :         .create (LOOP_VINFO_DATAREFS (loop_vinfo).length ()
     609                 :      24355 :                  * LOOP_VINFO_DATAREFS (loop_vinfo).length ());
     610                 :            :       /* We need read-read dependences to compute
     611                 :            :          STMT_VINFO_SAME_ALIGN_REFS.  */
     612                 :      48710 :       bool res = compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
     613                 :            :                                           &LOOP_VINFO_DDRS (loop_vinfo),
     614                 :      24355 :                                           LOOP_VINFO_LOOP_NEST (loop_vinfo),
     615                 :            :                                           true);
     616                 :      24355 :       gcc_assert (res);
     617                 :            :     }
     618                 :            : 
     619                 :      36380 :   LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
     620                 :            : 
     621                 :            :   /* For epilogues we either have no aliases or alias versioning
     622                 :            :      was applied to original loop.  Therefore we may just get max_vf
     623                 :            :      using VF of original loop.  */
     624                 :      36380 :   if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
     625                 :       6362 :     *max_vf = LOOP_VINFO_ORIG_MAX_VECT_FACTOR (loop_vinfo);
     626                 :            :   else
     627                 :     648334 :     FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
     628                 :            :       {
     629                 :     619570 :         opt_result res
     630                 :     619570 :           = vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf);
     631                 :     619570 :         if (!res)
     632                 :       1254 :           return res;
     633                 :            :       }
     634                 :            : 
     635                 :      35126 :   return opt_result::success ();
     636                 :            : }
     637                 :            : 
     638                 :            : 
     639                 :            : /* Function vect_slp_analyze_data_ref_dependence.
     640                 :            : 
     641                 :            :    Return TRUE if there (might) exist a dependence between a memory-reference
     642                 :            :    DRA and a memory-reference DRB for VINFO.  When versioning for alias
     643                 :            :    may check a dependence at run-time, return FALSE.  Adjust *MAX_VF
     644                 :            :    according to the data dependence.  */
     645                 :            : 
     646                 :            : static bool
     647                 :     741052 : vect_slp_analyze_data_ref_dependence (vec_info *vinfo,
     648                 :            :                                       struct data_dependence_relation *ddr)
     649                 :            : {
     650                 :     741052 :   struct data_reference *dra = DDR_A (ddr);
     651                 :     741052 :   struct data_reference *drb = DDR_B (ddr);
     652                 :     741052 :   dr_vec_info *dr_info_a = vinfo->lookup_dr (dra);
     653                 :     741052 :   dr_vec_info *dr_info_b = vinfo->lookup_dr (drb);
     654                 :            : 
     655                 :            :   /* We need to check dependences of statements marked as unvectorizable
     656                 :            :      as well, they still can prohibit vectorization.  */
     657                 :            : 
     658                 :            :   /* Independent data accesses.  */
     659                 :     741052 :   if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
     660                 :            :     return false;
     661                 :            : 
     662                 :     203216 :   if (dra == drb)
     663                 :            :     return false;
     664                 :            : 
     665                 :            :   /* Read-read is OK.  */
     666                 :      81628 :   if (DR_IS_READ (dra) && DR_IS_READ (drb))
     667                 :            :     return false;
     668                 :            : 
     669                 :            :   /* If dra and drb are part of the same interleaving chain consider
     670                 :            :      them independent.  */
     671                 :      81628 :   if (STMT_VINFO_GROUPED_ACCESS (dr_info_a->stmt)
     672                 :      81628 :       && (DR_GROUP_FIRST_ELEMENT (dr_info_a->stmt)
     673                 :      81628 :           == DR_GROUP_FIRST_ELEMENT (dr_info_b->stmt)))
     674                 :            :     return false;
     675                 :            : 
     676                 :            :   /* Unknown data dependence.  */
     677                 :       4253 :   if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
     678                 :            :     {
     679                 :       4253 :       if  (dump_enabled_p ())
     680                 :          4 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
     681                 :            :                          "can't determine dependence between %T and %T\n",
     682                 :            :                          DR_REF (dra), DR_REF (drb));
     683                 :            :     }
     684                 :          0 :   else if (dump_enabled_p ())
     685                 :          0 :     dump_printf_loc (MSG_NOTE, vect_location,
     686                 :            :                      "determined dependence between %T and %T\n",
     687                 :            :                      DR_REF (dra), DR_REF (drb));
     688                 :            : 
     689                 :            :   return true;
     690                 :            : }
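
For the straight-line (SLP) case, an illustrative example (the name copy4 is hypothetical): without restrict the four loads through q may alias the four stores through p, and this function is what decides whether that possible dependence blocks vectorizing the group.

    /* Illustrative example only: a basic-block SLP group of four stores fed
       by four loads; p and q may alias, which is exactly the dependence this
       function has to judge.  */
    void
    copy4 (int *p, const int *q)
    {
      p[0] = q[0] + 1;
      p[1] = q[1] + 1;
      p[2] = q[2] + 1;
      p[3] = q[3] + 1;
    }
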
     691                 :            : 
     692                 :            : 
     693                 :            : /* Analyze dependences involved in the transform of SLP NODE.  STORES
     694                 :            :    contain the vector of scalar stores of this instance if we are
     695                 :            :    disambiguating the loads.  */
     696                 :            : 
     697                 :            : static bool
     698                 :      69838 : vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
     699                 :            :                                    vec<stmt_vec_info> stores,
     700                 :            :                                    stmt_vec_info last_store_info)
     701                 :            : {
     702                 :            :   /* This walks over all stmts involved in the SLP load/store done
     703                 :            :      in NODE verifying we can sink them up to the last stmt in the
     704                 :            :      group.  */
     705                 :      69838 :   stmt_vec_info last_access_info = vect_find_last_scalar_stmt_in_slp (node);
     706                 :      69838 :   vec_info *vinfo = last_access_info->vinfo;
     707                 :     259860 :   for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
     708                 :            :     {
     709                 :     194277 :       stmt_vec_info access_info = SLP_TREE_SCALAR_STMTS (node)[k];
     710                 :     194277 :       if (access_info == last_access_info)
     711                 :      66036 :         continue;
     712                 :     128241 :       data_reference *dr_a = STMT_VINFO_DATA_REF (access_info);
     713                 :     128241 :       ao_ref ref;
     714                 :     128241 :       bool ref_initialized_p = false;
     715                 :     128241 :       for (gimple_stmt_iterator gsi = gsi_for_stmt (access_info->stmt);
     716                 :    1197800 :            gsi_stmt (gsi) != last_access_info->stmt; gsi_next (&gsi))
     717                 :            :         {
     718                 :    1073810 :           gimple *stmt = gsi_stmt (gsi);
     719                 :    1073810 :           if (! gimple_vuse (stmt)
     720                 :    1014940 :               || (DR_IS_READ (dr_a) && ! gimple_vdef (stmt)))
     721                 :     311212 :             continue;
     722                 :            : 
     723                 :            :           /* If we couldn't record a (single) data reference for this
     724                 :            :              stmt we have to resort to the alias oracle.  */
     725                 :     762598 :           stmt_vec_info stmt_info = vinfo->lookup_stmt (stmt);
     726                 :     762598 :           data_reference *dr_b = STMT_VINFO_DATA_REF (stmt_info);
     727                 :     762598 :           if (!dr_b)
     728                 :            :             {
     729                 :            :               /* We are moving a store or sinking a load - this means
     730                 :            :                  we cannot use TBAA for disambiguation.  */
     731                 :        229 :               if (!ref_initialized_p)
     732                 :        229 :                 ao_ref_init (&ref, DR_REF (dr_a));
     733                 :        229 :               if (stmt_may_clobber_ref_p_1 (stmt, &ref, false)
     734                 :        229 :                   || ref_maybe_used_by_stmt_p (stmt, &ref, false))
     735                 :       4255 :                 return false;
     736                 :        227 :               continue;
     737                 :            :             }
     738                 :            : 
     739                 :     762369 :           bool dependent = false;
     740                 :            :           /* If we run into a store of this same instance (we've just
     741                 :            :              marked those) then delay dependence checking until we run
     742                 :            :              into the last store, because that is where it will have
     743                 :            :              been sunk to (and we verify whether we can do that as well).  */
     744                 :     762369 :           if (gimple_visited_p (stmt))
     745                 :            :             {
     746                 :      21317 :               if (stmt_info != last_store_info)
     747                 :      21317 :                 continue;
     748                 :            :               unsigned i;
     749                 :            :               stmt_vec_info store_info;
     750                 :          0 :               FOR_EACH_VEC_ELT (stores, i, store_info)
     751                 :            :                 {
     752                 :          0 :                   data_reference *store_dr = STMT_VINFO_DATA_REF (store_info);
     753                 :          0 :                   ddr_p ddr = initialize_data_dependence_relation
     754                 :          0 :                                 (dr_a, store_dr, vNULL);
     755                 :          0 :                   dependent
     756                 :          0 :                     = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
     757                 :          0 :                   free_dependence_relation (ddr);
     758                 :          0 :                   if (dependent)
     759                 :            :                     break;
     760                 :            :                 }
     761                 :            :             }
     762                 :            :           else
     763                 :            :             {
     764                 :     741052 :               ddr_p ddr = initialize_data_dependence_relation (dr_a,
     765                 :            :                                                                dr_b, vNULL);
     766                 :     741052 :               dependent = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
     767                 :     741052 :               free_dependence_relation (ddr);
     768                 :            :             }
     769                 :     741052 :           if (dependent)
     770                 :            :             return false;
     771                 :            :         }
     772                 :            :     }
     773                 :            :   return true;
     774                 :            : }
     775                 :            : 
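                         :            : /* [Editor's illustrative sketch -- not part of tree-vect-data-refs.c.
                         :            :    A minimal, self-contained model of the walk performed above: every
                         :            :    scalar access of the SLP node must be sinkable past all statements
                         :            :    up to the last access of the group, so we scan that range and give
                         :            :    up on the first statement that might conflict.  The `stmt_model'
                         :            :    type and `may_conflict' predicate are hypothetical stand-ins for
                         :            :    gimple statements and the alias-oracle queries used by GCC.  */
                         :            : 
                         :            : #include <vector>
                         :            : #include <cstddef>
                         :            : 
                         :            : struct stmt_model { bool reads_memory; bool writes_memory; };
                         :            : 
                         :            : /* A read access only conflicts with statements that may write memory;
                         :            :    a store being sunk conflicts with any statement touching memory.  */
                         :            : static bool
                         :            : may_conflict (const stmt_model &s, bool access_is_read)
                         :            : {
                         :            :   return s.writes_memory || (!access_is_read && s.reads_memory);
                         :            : }
                         :            : 
                         :            : /* Return true if the access at index ACCESS can be sunk to index LAST.  */
                         :            : static bool
                         :            : can_sink_to (const std::vector<stmt_model> &stmts, std::size_t access,
                         :            :              std::size_t last, bool access_is_read)
                         :            : {
                         :            :   for (std::size_t i = access + 1; i < last; ++i)
                         :            :     if (may_conflict (stmts[i], access_is_read))
                         :            :       return false;
                         :            :   return true;
                         :            : }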
     776                 :            : 
     777                 :            : /* Function vect_slp_analyze_instance_dependence.
     778                 :            : 
     779                 :            :    Examine all the data references in the SLP instance, and make sure
     780                 :            :    there do not exist any data dependences between them.  Return true
     781                 :            :    if so, false otherwise.  */
     782                 :            : 
     783                 :            : bool
     784                 :      65013 : vect_slp_analyze_instance_dependence (slp_instance instance)
     785                 :            : {
     786                 :      65013 :   DUMP_VECT_SCOPE ("vect_slp_analyze_instance_dependence");
     787                 :            : 
     788                 :            :   /* The stores of this instance are at the root of the SLP tree.  */
     789                 :      65013 :   slp_tree store = SLP_INSTANCE_TREE (instance);
     790                 :      65013 :   if (! STMT_VINFO_DATA_REF (SLP_TREE_SCALAR_STMTS (store)[0]))
     791                 :            :     store = NULL;
     792                 :            : 
     793                 :            :   /* Verify we can sink stores to the vectorized stmt insert location.  */
     794                 :      64869 :   stmt_vec_info last_store_info = NULL;
     795                 :      64869 :   if (store)
     796                 :            :     {
     797                 :      64869 :       if (! vect_slp_analyze_node_dependences (instance, store, vNULL, NULL))
     798                 :            :         return false;
     799                 :            : 
     800                 :            :       /* Mark stores in this instance and remember the last one.  */
     801                 :      60680 :       last_store_info = vect_find_last_scalar_stmt_in_slp (store);
     802                 :     239003 :       for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
     803                 :     178323 :         gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, true);
     804                 :            :     }
     805                 :            : 
     806                 :      60824 :   bool res = true;
     807                 :            : 
     808                 :            :   /* Verify we can sink loads to the vectorized stmt insert location,
     809                 :            :      special-casing stores of this instance.  */
     810                 :      60824 :   slp_tree load;
     811                 :      60824 :   unsigned int i;
     812                 :      65727 :   FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, load)
     813                 :       4969 :     if (! vect_slp_analyze_node_dependences (instance, load,
     814                 :            :                                              store
     815                 :            :                                              ? SLP_TREE_SCALAR_STMTS (store)
     816                 :            :                                              : vNULL, last_store_info))
     817                 :            :       {
     818                 :            :         res = false;
     819                 :            :         break;
     820                 :            :       }
     821                 :            : 
     822                 :            :   /* Unset the visited flag.  */
     823                 :      60824 :   if (store)
     824                 :     239003 :     for (unsigned k = 0; k < SLP_INSTANCE_GROUP_SIZE (instance); ++k)
     825                 :     178323 :       gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, false);
     826                 :            : 
     827                 :            :   return res;
     828                 :            : }
     829                 :            : 
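                         :            : /* [Editor's illustrative sketch -- not part of tree-vect-data-refs.c.
                         :            :    The function above marks the instance's stores "visited", analyzes
                         :            :    the loads (deferring dependences on those stores to the last store),
                         :            :    and then clears the flags again.  GCC performs the set/unset
                         :            :    explicitly; the hypothetical RAII guard below shows the same
                         :            :    mark/analyze/unmark discipline in a self-contained form.  */
                         :            : 
                         :            : #include <vector>
                         :            : 
                         :            : struct store_stmt { bool visited = false; };
                         :            : 
                         :            : struct visited_guard
                         :            : {
                         :            :   std::vector<store_stmt *> &stmts;
                         :            : 
                         :            :   explicit visited_guard (std::vector<store_stmt *> &s) : stmts (s)
                         :            :   {
                         :            :     for (store_stmt *x : stmts)
                         :            :       x->visited = true;          /* mark all stores of the instance */
                         :            :   }
                         :            : 
                         :            :   ~visited_guard ()
                         :            :   {
                         :            :     for (store_stmt *x : stmts)
                         :            :       x->visited = false;         /* always unmark, even on early exit */
                         :            :   }
                         :            : };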
     830                 :            : /* Record the base alignment guarantee given by DRB, which occurs
     831                 :            :    in STMT_INFO.  */
     832                 :            : 
     833                 :            : static void
     834                 :     428566 : vect_record_base_alignment (stmt_vec_info stmt_info,
     835                 :            :                             innermost_loop_behavior *drb)
     836                 :            : {
     837                 :     428566 :   vec_info *vinfo = stmt_info->vinfo;
     838                 :     428566 :   bool existed;
     839                 :     428566 :   innermost_loop_behavior *&entry
     840                 :     428566 :     = vinfo->base_alignments.get_or_insert (drb->base_address, &existed);
     841                 :     428566 :   if (!existed || entry->base_alignment < drb->base_alignment)
     842                 :            :     {
     843                 :     129781 :       entry = drb;
     844                 :     129781 :       if (dump_enabled_p ())
     845                 :      20383 :         dump_printf_loc (MSG_NOTE, vect_location,
     846                 :            :                          "recording new base alignment for %T\n"
     847                 :            :                          "  alignment:    %d\n"
     848                 :            :                          "  misalignment: %d\n"
     849                 :            :                          "  based on:     %G",
     850                 :            :                          drb->base_address,
     851                 :            :                          drb->base_alignment,
     852                 :            :                          drb->base_misalignment,
     853                 :            :                          stmt_info->stmt);
     854                 :            :     }
     855                 :     428566 : }
     856                 :            : 
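                         :            : /* [Editor's illustrative sketch -- not part of tree-vect-data-refs.c.
                         :            :    The pooling above keeps, per base address, the entry with the
                         :            :    strongest known base alignment.  A self-contained equivalent using a
                         :            :    std::unordered_map in place of vinfo->base_alignments; the key and
                         :            :    value types are simplified stand-ins.  */
                         :            : 
                         :            : #include <unordered_map>
                         :            : 
                         :            : struct base_behavior
                         :            : {
                         :            :   unsigned base_alignment;      /* guaranteed alignment in bytes */
                         :            :   unsigned base_misalignment;   /* misalignment wrt that alignment */
                         :            : };
                         :            : 
                         :            : /* Record B for BASE unless an at-least-as-strong guarantee exists.  */
                         :            : static void
                         :            : record_base_alignment (std::unordered_map<const void *, base_behavior> &pool,
                         :            :                        const void *base, const base_behavior &b)
                         :            : {
                         :            :   auto it = pool.find (base);
                         :            :   if (it == pool.end () || it->second.base_alignment < b.base_alignment)
                         :            :     pool[base] = b;
                         :            : }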
     857                 :            : /* If the region we're going to vectorize is reached, all unconditional
     858                 :            :    data references occur at least once.  We can therefore pool the base
     859                 :            :    alignment guarantees from each unconditional reference.  Do this by
     860                 :            :    going through all the data references in VINFO and checking whether
     861                 :            :    the containing statement makes the reference unconditionally.  If so,
     862                 :            :    record the alignment of the base address in VINFO so that it can be
     863                 :            :    used for all other references with the same base.  */
     864                 :            : 
     865                 :            : void
     866                 :      76183 : vect_record_base_alignments (vec_info *vinfo)
     867                 :            : {
     868                 :      76183 :   loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
     869                 :      33894 :   class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
     870                 :      76183 :   data_reference *dr;
     871                 :      76183 :   unsigned int i;
     872                 :     653504 :   FOR_EACH_VEC_ELT (vinfo->shared->datarefs, i, dr)
     873                 :            :     {
     874                 :     577321 :       dr_vec_info *dr_info = vinfo->lookup_dr (dr);
     875                 :     577321 :       stmt_vec_info stmt_info = dr_info->stmt;
     876                 :     577321 :       if (!DR_IS_CONDITIONAL_IN_STMT (dr)
     877                 :     576602 :           && STMT_VINFO_VECTORIZABLE (stmt_info)
     878                 :     428277 :           && !STMT_VINFO_GATHER_SCATTER_P (stmt_info))
     879                 :            :         {
     880                 :     428121 :           vect_record_base_alignment (stmt_info, &DR_INNERMOST (dr));
     881                 :            : 
     882                 :            :           /* If DR is nested in the loop that is being vectorized, we can also
     883                 :            :              record the alignment of the base wrt the outer loop.  */
     884                 :     428121 :           if (loop && nested_in_vect_loop_p (loop, stmt_info))
     885                 :        445 :             vect_record_base_alignment
     886                 :        445 :               (stmt_info, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
     887                 :            :         }
     888                 :            :     }
     889                 :      76183 : }
     890                 :            : 
     891                 :            : /* Return the target alignment for the vectorized form of DR_INFO.  */
     892                 :            : 
     893                 :            : static poly_uint64
     894                 :     222190 : vect_calculate_target_alignment (dr_vec_info *dr_info)
     895                 :            : {
     896                 :     222190 :   tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
     897                 :          0 :   return targetm.vectorize.preferred_vector_alignment (vectype);
     898                 :            : }
     899                 :            : 
     900                 :            : /* Function vect_compute_data_ref_alignment
     901                 :            : 
     902                 :            :    Compute the misalignment of the data reference DR_INFO.
     903                 :            : 
     904                 :            :    Output:
     905                 :            :    1. DR_MISALIGNMENT (DR_INFO) is defined.
     906                 :            : 
     907                 :            :    FOR NOW: No analysis is actually performed. Misalignment is calculated
     908                 :            :    only for trivial cases. TODO.  */
     909                 :            : 
     910                 :            : static void
     911                 :     189899 : vect_compute_data_ref_alignment (dr_vec_info *dr_info)
     912                 :            : {
     913                 :     189899 :   stmt_vec_info stmt_info = dr_info->stmt;
     914                 :     189899 :   vec_base_alignments *base_alignments = &stmt_info->vinfo->base_alignments;
     915                 :     189899 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
     916                 :     189899 :   class loop *loop = NULL;
     917                 :     189899 :   tree ref = DR_REF (dr_info->dr);
     918                 :     189899 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
     919                 :            : 
     920                 :     189899 :   if (dump_enabled_p ())
     921                 :      37239 :     dump_printf_loc (MSG_NOTE, vect_location,
     922                 :            :                      "vect_compute_data_ref_alignment:\n");
     923                 :            : 
     924                 :     189899 :   if (loop_vinfo)
     925                 :     114035 :     loop = LOOP_VINFO_LOOP (loop_vinfo);
     926                 :            : 
     927                 :            :   /* Initialize misalignment to unknown.  */
     928                 :     189899 :   SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
     929                 :            : 
     930                 :     189899 :   if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
     931                 :            :     return;
     932                 :            : 
     933                 :     189556 :   innermost_loop_behavior *drb = vect_dr_behavior (dr_info);
     934                 :     189556 :   bool step_preserves_misalignment_p;
     935                 :            : 
     936                 :     189556 :   poly_uint64 vector_alignment
     937                 :     189556 :     = exact_div (vect_calculate_target_alignment (dr_info), BITS_PER_UNIT);
     938                 :     189556 :   DR_TARGET_ALIGNMENT (dr_info) = vector_alignment;
     939                 :            : 
     940                 :            :   /* If the main loop has peeled for alignment we have no way of knowing
     941                 :            :      whether the data accesses in the epilogues are aligned.  We cannot
     942                 :            :      answer at compile time whether the main loop has been entered.
     943                 :            :      Fixes PR 92351.  */
     944                 :     189556 :   if (loop_vinfo)
     945                 :            :     {
     946                 :     113692 :       loop_vec_info orig_loop_vinfo = LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo);
     947                 :     113692 :       if (orig_loop_vinfo
     948                 :      19674 :           && LOOP_VINFO_PEELING_FOR_ALIGNMENT (orig_loop_vinfo) != 0)
     949                 :            :         return;
     950                 :            :     }
     951                 :            : 
     952                 :     189390 :   unsigned HOST_WIDE_INT vect_align_c;
     953                 :     189390 :   if (!vector_alignment.is_constant (&vect_align_c))
     954                 :            :     return;
     955                 :            : 
     956                 :            :   /* No step for BB vectorization.  */
     957                 :     189390 :   if (!loop)
     958                 :            :     {
     959                 :      75864 :       gcc_assert (integer_zerop (drb->step));
     960                 :            :       step_preserves_misalignment_p = true;
     961                 :            :     }
     962                 :            : 
     963                 :            :   /* In case the dataref is in an inner-loop of the loop that is being
     964                 :            :      vectorized (LOOP), we use the base and misalignment information
     965                 :            :      relative to the outer-loop (LOOP).  This is ok only if the misalignment
     966                 :            :      stays the same throughout the execution of the inner-loop, which is why
     967                 :            :      we have to check that the stride of the dataref in the inner-loop is
     968                 :            :      evenly divisible by the vector alignment.  */
     969                 :     113526 :   else if (nested_in_vect_loop_p (loop, stmt_info))
     970                 :            :     {
     971                 :        447 :       step_preserves_misalignment_p
     972                 :        447 :         = (DR_STEP_ALIGNMENT (dr_info->dr) % vect_align_c) == 0;
     973                 :            : 
     974                 :        447 :       if (dump_enabled_p ())
     975                 :            :         {
     976                 :        399 :           if (step_preserves_misalignment_p)
     977                 :        292 :             dump_printf_loc (MSG_NOTE, vect_location,
     978                 :            :                              "inner step divides the vector alignment.\n");
     979                 :            :           else
     980                 :        107 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
     981                 :            :                              "inner step doesn't divide the vector"
     982                 :            :                              " alignment.\n");
     983                 :            :         }
     984                 :            :     }
     985                 :            : 
     986                 :            :   /* Similarly we can only use base and misalignment information relative to
     987                 :            :      an innermost loop if the misalignment stays the same throughout the
     988                 :            :      execution of the loop.  As above, this is the case if the stride of
     989                 :            :      the dataref is evenly divisible by the alignment.  */
     990                 :            :   else
     991                 :            :     {
     992                 :     113079 :       poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
     993                 :     113079 :       step_preserves_misalignment_p
     994                 :     113079 :         = multiple_p (DR_STEP_ALIGNMENT (dr_info->dr) * vf, vect_align_c);
     995                 :            : 
     996                 :     113079 :       if (!step_preserves_misalignment_p && dump_enabled_p ())
     997                 :        946 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
     998                 :            :                          "step doesn't divide the vector alignment.\n");
     999                 :            :     }
    1000                 :            : 
    1001                 :     189390 :   unsigned int base_alignment = drb->base_alignment;
    1002                 :     189390 :   unsigned int base_misalignment = drb->base_misalignment;
    1003                 :            : 
    1004                 :            :   /* Calculate the maximum of the pooled base address alignment and the
    1005                 :            :      alignment that we can compute for DR itself.  */
    1006                 :     189390 :   innermost_loop_behavior **entry = base_alignments->get (drb->base_address);
    1007                 :     189390 :   if (entry && base_alignment < (*entry)->base_alignment)
    1008                 :            :     {
    1009                 :        106 :       base_alignment = (*entry)->base_alignment;
    1010                 :        106 :       base_misalignment = (*entry)->base_misalignment;
    1011                 :            :     }
    1012                 :            : 
    1013                 :     189390 :   if (drb->offset_alignment < vect_align_c
    1014                 :     177161 :       || !step_preserves_misalignment_p
    1015                 :            :       /* We need to know whether the step wrt the vectorized loop is
    1016                 :            :          negative when computing the starting misalignment below.  */
    1017                 :     175289 :       || TREE_CODE (drb->step) != INTEGER_CST)
    1018                 :            :     {
    1019                 :      20018 :       if (dump_enabled_p ())
    1020                 :       3403 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1021                 :            :                          "Unknown alignment for access: %T\n", ref);
    1022                 :      20018 :       return;
    1023                 :            :     }
    1024                 :            : 
    1025                 :     169372 :   if (base_alignment < vect_align_c)
    1026                 :            :     {
    1027                 :     102635 :       unsigned int max_alignment;
    1028                 :     102635 :       tree base = get_base_for_alignment (drb->base_address, &max_alignment);
    1029                 :     102635 :       if (max_alignment < vect_align_c
    1030                 :     102635 :           || !vect_can_force_dr_alignment_p (base,
    1031                 :     102633 :                                              vect_align_c * BITS_PER_UNIT))
    1032                 :            :         {
    1033                 :      60928 :           if (dump_enabled_p ())
    1034                 :      10228 :             dump_printf_loc (MSG_NOTE, vect_location,
    1035                 :            :                              "can't force alignment of ref: %T\n", ref);
    1036                 :      60928 :           return;
    1037                 :            :         }
    1038                 :            : 
    1039                 :            :       /* Force the alignment of the decl.
    1040                 :            :          NOTE: This is the only change to the code we make during
    1041                 :            :          the analysis phase, before deciding to vectorize the loop.  */
    1042                 :      41707 :       if (dump_enabled_p ())
    1043                 :      10566 :         dump_printf_loc (MSG_NOTE, vect_location,
    1044                 :            :                          "force alignment of %T\n", ref);
    1045                 :            : 
    1046                 :      41707 :       dr_info->base_decl = base;
    1047                 :      41707 :       dr_info->base_misaligned = true;
    1048                 :      41707 :       base_misalignment = 0;
    1049                 :            :     }
    1050                 :     108444 :   poly_int64 misalignment
    1051                 :     108444 :     = base_misalignment + wi::to_poly_offset (drb->init).force_shwi ();
    1052                 :            : 
    1053                 :            :   /* If this is a backward running DR then the first access in the larger
    1054                 :            :      vectype actually is N-1 elements before the address in the DR.
    1055                 :            :      Adjust the misalignment accordingly.  */
    1056                 :     108444 :   if (tree_int_cst_sgn (drb->step) < 0)
    1057                 :            :     /* PLUS because STEP is negative.  */
    1058                 :        341 :     misalignment += ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
    1059                 :        341 :                      * TREE_INT_CST_LOW (drb->step));
    1060                 :            : 
    1061                 :     108444 :   unsigned int const_misalignment;
    1062                 :     108444 :   if (!known_misalignment (misalignment, vect_align_c, &const_misalignment))
    1063                 :            :     {
    1064                 :            :       if (dump_enabled_p ())
    1065                 :            :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1066                 :            :                          "Non-constant misalignment for access: %T\n", ref);
    1067                 :            :       return;
    1068                 :            :     }
    1069                 :            : 
    1070                 :     108444 :   SET_DR_MISALIGNMENT (dr_info, const_misalignment);
    1071                 :            : 
    1072                 :     108444 :   if (dump_enabled_p ())
    1073                 :      23322 :     dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1074                 :            :                      "misalign = %d bytes of ref %T\n",
    1075                 :            :                      DR_MISALIGNMENT (dr_info), ref);
    1076                 :            : 
    1077                 :            :   return;
    1078                 :            : }
    1079                 :            : 
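                         :            : /* [Editor's illustrative sketch -- not part of tree-vect-data-refs.c.
                         :            :    The tail of the function above boils down to the arithmetic below:
                         :            :    the misalignment of the first vector access is the base misalignment
                         :            :    plus the constant offset INIT, adjusted for backward (negative-step)
                         :            :    accesses, reduced modulo the target alignment.  All parameters are
                         :            :    plain integers standing in for GCC's poly_int values.  */
                         :            : 
                         :            : #include <cstdint>
                         :            : 
                         :            : static unsigned
                         :            : first_access_misalignment (unsigned base_misalignment, int64_t init,
                         :            :                            int64_t step, unsigned nelements,
                         :            :                            unsigned target_align)
                         :            : {
                         :            :   int64_t misalignment = (int64_t) base_misalignment + init;
                         :            : 
                         :            :   /* A backward running access starts NELEMENTS - 1 scalar steps before
                         :            :      the address recorded in the data reference; STEP is negative, so
                         :            :      this moves the misalignment down.  */
                         :            :   if (step < 0)
                         :            :     misalignment += (int64_t) (nelements - 1) * step;
                         :            : 
                         :            :   /* Reduce into [0, TARGET_ALIGN), also for negative values.  */
                         :            :   int64_t rem = misalignment % (int64_t) target_align;
                         :            :   return (unsigned) (rem < 0 ? rem + (int64_t) target_align : rem);
                         :            : }
                         :            : 
                         :            : /* Example: base misalignment 8, init 4, positive step, 4 elements and a
                         :            :    16-byte target alignment give misalignment 12.  */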
    1080                 :            : /* Function vect_update_misalignment_for_peel.
    1081                 :            :    Sets DR_INFO's misalignment
    1082                 :            :    - to 0 if it has the same alignment as DR_PEEL_INFO,
    1083                 :            :    - to the misalignment computed using NPEEL if DR_INFO's misalignment is known,
    1084                 :            :    - to -1 (unknown) otherwise.
    1085                 :            : 
    1086                 :            :    DR_INFO - the data reference whose misalignment is to be adjusted.
    1087                 :            :    DR_PEEL_INFO - the data reference whose misalignment is being made
    1088                 :            :                   zero in the vector loop by the peel.
    1089                 :            :    NPEEL - the number of iterations in the peel loop if the misalignment
    1090                 :            :            of DR_PEEL_INFO is known at compile time.  */
    1091                 :            : 
    1092                 :            : static void
    1093                 :      31723 : vect_update_misalignment_for_peel (dr_vec_info *dr_info,
    1094                 :            :                                    dr_vec_info *dr_peel_info, int npeel)
    1095                 :            : {
    1096                 :      31723 :   unsigned int i;
    1097                 :      31723 :   vec<dr_p> same_aligned_drs;
    1098                 :      31723 :   struct data_reference *current_dr;
    1099                 :      31723 :   stmt_vec_info peel_stmt_info = dr_peel_info->stmt;
    1100                 :            : 
    1101                 :            :   /* It can be assumed that if dr_info has the same alignment as dr_peel,
    1102                 :            :      it is aligned in the vector loop.  */
    1103                 :      31723 :   same_aligned_drs = STMT_VINFO_SAME_ALIGN_REFS (peel_stmt_info);
    1104                 :      39380 :   FOR_EACH_VEC_ELT (same_aligned_drs, i, current_dr)
    1105                 :            :     {
    1106                 :      13115 :       if (current_dr != dr_info->dr)
    1107                 :       7657 :         continue;
    1108                 :       5577 :       gcc_assert (!known_alignment_for_access_p (dr_info)
    1109                 :            :                   || !known_alignment_for_access_p (dr_peel_info)
    1110                 :            :                   || (DR_MISALIGNMENT (dr_info)
    1111                 :            :                       == DR_MISALIGNMENT (dr_peel_info)));
    1112                 :       5458 :       SET_DR_MISALIGNMENT (dr_info, 0);
    1113                 :      31723 :       return;
    1114                 :            :     }
    1115                 :            : 
    1116                 :      26265 :   unsigned HOST_WIDE_INT alignment;
    1117                 :      26265 :   if (DR_TARGET_ALIGNMENT (dr_info).is_constant (&alignment)
    1118                 :      26265 :       && known_alignment_for_access_p (dr_info)
    1119                 :       9327 :       && known_alignment_for_access_p (dr_peel_info))
    1120                 :            :     {
    1121                 :       3034 :       int misal = DR_MISALIGNMENT (dr_info);
    1122                 :       3034 :       misal += npeel * TREE_INT_CST_LOW (DR_STEP (dr_info->dr));
    1123                 :       3034 :       misal &= alignment - 1;
    1124                 :       3034 :       SET_DR_MISALIGNMENT (dr_info, misal);
    1125                 :       3034 :       return;
    1126                 :            :     }
    1127                 :            : 
    1128                 :      23231 :   if (dump_enabled_p ())
    1129                 :       9488 :     dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment " \
    1130                 :            :                      "to unknown (-1).\n");
    1131                 :      23231 :   SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
    1132                 :            : }
    1133                 :            : 
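                         :            : /* [Editor's illustrative sketch -- not part of tree-vect-data-refs.c.
                         :            :    When both misalignments are known, the update above is just modular
                         :            :    arithmetic: every peeled scalar iteration advances the access by STEP
                         :            :    bytes, and the result is reduced modulo the power-of-two target
                         :            :    alignment via the `& (alignment - 1)' mask.  */
                         :            : 
                         :            : static int
                         :            : misalignment_after_peel (int misalign, int npeel, int step,
                         :            :                          unsigned alignment /* power of two */)
                         :            : {
                         :            :   misalign += npeel * step;
                         :            :   return misalign & (int) (alignment - 1);
                         :            : }
                         :            : 
                         :            : /* Example: misalignment 8, 2 peeled iterations of a 4-byte step and a
                         :            :    16-byte target alignment give (8 + 8) & 15 == 0, i.e. aligned.  */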
    1134                 :            : 
    1135                 :            : /* Function verify_data_ref_alignment
    1136                 :            : 
    1137                 :            :    Return TRUE if DR_INFO can be handled with respect to alignment.  */
    1138                 :            : 
    1139                 :            : static opt_result
    1140                 :     153046 : verify_data_ref_alignment (dr_vec_info *dr_info)
    1141                 :            : {
    1142                 :     153046 :   enum dr_alignment_support supportable_dr_alignment
    1143                 :     153046 :     = vect_supportable_dr_alignment (dr_info, false);
    1144                 :     153046 :   if (!supportable_dr_alignment)
    1145                 :       4981 :     return opt_result::failure_at
    1146                 :       4981 :       (dr_info->stmt->stmt,
    1147                 :       4981 :        DR_IS_READ (dr_info->dr)
    1148                 :            :         ? "not vectorized: unsupported unaligned load: %T\n"
    1149                 :            :         : "not vectorized: unsupported unaligned store: %T\n",
    1150                 :       9697 :        DR_REF (dr_info->dr));
    1151                 :            : 
    1152                 :     148065 :   if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
    1153                 :       8519 :     dump_printf_loc (MSG_NOTE, vect_location,
    1154                 :            :                      "Vectorizing an unaligned access.\n");
    1155                 :            : 
    1156                 :     148065 :   return opt_result::success ();
    1157                 :            : }
    1158                 :            : 
    1159                 :            : /* Function vect_verify_datarefs_alignment
    1160                 :            : 
    1161                 :            :    Return TRUE if all data references in the loop can be
    1162                 :            :    handled with respect to alignment.  */
    1163                 :            : 
    1164                 :            : opt_result
    1165                 :      33664 : vect_verify_datarefs_alignment (loop_vec_info vinfo)
    1166                 :            : {
    1167                 :      33664 :   vec<data_reference_p> datarefs = vinfo->shared->datarefs;
    1168                 :      33664 :   struct data_reference *dr;
    1169                 :      33664 :   unsigned int i;
    1170                 :            : 
    1171                 :     145327 :   FOR_EACH_VEC_ELT (datarefs, i, dr)
    1172                 :            :     {
    1173                 :     111663 :       dr_vec_info *dr_info = vinfo->lookup_dr (dr);
    1174                 :     111663 :       stmt_vec_info stmt_info = dr_info->stmt;
    1175                 :            : 
    1176                 :     111663 :       if (!STMT_VINFO_RELEVANT_P (stmt_info))
    1177                 :            :       /* Strided accesses perform only component accesses; alignment is
    1178                 :            :          irrelevant for them.  */
    1179                 :            :       /* For interleaving, only the alignment of the first access matters.   */
    1180                 :     111651 :       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
    1181                 :     151455 :           && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
    1182                 :      27944 :         continue;
    1183                 :            : 
    1184                 :            :       /* Strided accesses perform only component accesses, alignment is
    1185                 :            :          irrelevant for them.  */
    1186                 :      89883 :       if (STMT_VINFO_STRIDED_P (stmt_info)
    1187                 :      83707 :           && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    1188                 :       6176 :         continue;
    1189                 :            : 
    1190                 :      77531 :       opt_result res = verify_data_ref_alignment (dr_info);
    1191                 :      77531 :       if (!res)
    1192                 :          0 :         return res;
    1193                 :            :     }
    1194                 :            : 
    1195                 :      33664 :   return opt_result::success ();
    1196                 :            : }
    1197                 :            : 
    1198                 :            : /* Given a memory reference EXP, return whether its alignment is less
    1199                 :            :    than its size.  */
    1200                 :            : 
    1201                 :            : static bool
    1202                 :     255926 : not_size_aligned (tree exp)
    1203                 :            : {
    1204                 :     255926 :   if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
    1205                 :            :     return true;
    1206                 :            : 
    1207                 :     255926 :   return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
    1208                 :     255926 :           > get_object_alignment (exp));
    1209                 :            : }
    1210                 :            : 
    1211                 :            : /* Function vector_alignment_reachable_p
    1212                 :            : 
    1213                 :            :    Return true if vector alignment for DR_INFO is reachable by peeling
    1214                 :            :    a few loop iterations.  Return false otherwise.  */
    1215                 :            : 
    1216                 :            : static bool
    1217                 :      56279 : vector_alignment_reachable_p (dr_vec_info *dr_info)
    1218                 :            : {
    1219                 :      56279 :   stmt_vec_info stmt_info = dr_info->stmt;
    1220                 :      56279 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    1221                 :            : 
    1222                 :      56279 :   if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    1223                 :            :     {
    1224                 :            :       /* For interleaved access we peel only if the number of iterations
    1225                 :            :          in the prolog loop (VF - misalignment) is a multiple of the
    1226                 :            :          number of interleaved accesses (the group size).  */
    1227                 :       5814 :       int elem_size, mis_in_elements;
    1228                 :            : 
    1229                 :            :       /* FORNOW: handle only known alignment.  */
    1230                 :       5814 :       if (!known_alignment_for_access_p (dr_info))
    1231                 :       4440 :         return false;
    1232                 :            : 
    1233                 :       3015 :       poly_uint64 nelements = TYPE_VECTOR_SUBPARTS (vectype);
    1234                 :       6030 :       poly_uint64 vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
    1235                 :       3015 :       elem_size = vector_element_size (vector_size, nelements);
    1236                 :       3015 :       mis_in_elements = DR_MISALIGNMENT (dr_info) / elem_size;
    1237                 :            : 
    1238                 :       4389 :       if (!multiple_p (nelements - mis_in_elements, DR_GROUP_SIZE (stmt_info)))
    1239                 :            :         return false;
    1240                 :            :     }
    1241                 :            : 
    1242                 :            :   /* If misalignment is known at compile time then allow peeling
    1243                 :            :      only if natural alignment is reachable through peeling.  */
    1244                 :      51839 :   if (known_alignment_for_access_p (dr_info) && !aligned_access_p (dr_info))
    1245                 :            :     {
    1246                 :       1429 :       HOST_WIDE_INT elmsize =
    1247                 :       1429 :                 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
    1248                 :       1429 :       if (dump_enabled_p ())
    1249                 :            :         {
    1250                 :        447 :           dump_printf_loc (MSG_NOTE, vect_location,
    1251                 :            :                            "data size = %wd. misalignment = %d.\n", elmsize,
    1252                 :            :                            DR_MISALIGNMENT (dr_info));
    1253                 :            :         }
    1254                 :       1429 :       if (DR_MISALIGNMENT (dr_info) % elmsize)
    1255                 :            :         {
    1256                 :          7 :           if (dump_enabled_p ())
    1257                 :          7 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1258                 :            :                              "data size does not divide the misalignment.\n");
    1259                 :          7 :           return false;
    1260                 :            :         }
    1261                 :            :     }
    1262                 :            : 
    1263                 :      51832 :   if (!known_alignment_for_access_p (dr_info))
    1264                 :            :     {
    1265                 :      22762 :       tree type = TREE_TYPE (DR_REF (dr_info->dr));
    1266                 :      22762 :       bool is_packed = not_size_aligned (DR_REF (dr_info->dr));
    1267                 :      22762 :       if (dump_enabled_p ())
    1268                 :       8978 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1269                 :            :                          "Unknown misalignment, %snaturally aligned\n",
    1270                 :            :                          is_packed ? "not " : "");
    1271                 :      22762 :       return targetm.vectorize.vector_alignment_reachable (type, is_packed);
    1272                 :            :     }
    1273                 :            : 
    1274                 :            :   return true;
    1275                 :            : }
    1276                 :            : 
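                         :            : /* [Editor's illustrative sketch -- not part of tree-vect-data-refs.c.
                         :            :    For a grouped access the two reachability tests above combine to the
                         :            :    check below: the known byte misalignment must correspond to a whole
                         :            :    number of scalar elements, and the resulting number of prologue
                         :            :    iterations, NELEMENTS - MIS_IN_ELEMENTS, must be a multiple of the
                         :            :    group size so that peeling keeps whole groups together.  */
                         :            : 
                         :            : static bool
                         :            : grouped_alignment_reachable_p (unsigned nelements, unsigned misalign_bytes,
                         :            :                                unsigned elem_size, unsigned group_size)
                         :            : {
                         :            :   if (misalign_bytes % elem_size != 0)
                         :            :     return false;
                         :            : 
                         :            :   unsigned mis_in_elements = misalign_bytes / elem_size;
                         :            :   return (nelements - mis_in_elements) % group_size == 0;
                         :            : }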
    1277                 :            : 
    1278                 :            : /* Calculate the cost of the memory access represented by DR_INFO.  */
    1279                 :            : 
    1280                 :            : static void
    1281                 :      79483 : vect_get_data_access_cost (dr_vec_info *dr_info,
    1282                 :            :                            unsigned int *inside_cost,
    1283                 :            :                            unsigned int *outside_cost,
    1284                 :            :                            stmt_vector_for_cost *body_cost_vec,
    1285                 :            :                            stmt_vector_for_cost *prologue_cost_vec)
    1286                 :            : {
    1287                 :      79483 :   stmt_vec_info stmt_info = dr_info->stmt;
    1288                 :      79483 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    1289                 :      79483 :   int ncopies;
    1290                 :            : 
    1291                 :      79483 :   if (PURE_SLP_STMT (stmt_info))
    1292                 :            :     ncopies = 1;
    1293                 :            :   else
    1294                 :      79311 :     ncopies = vect_get_num_copies (loop_vinfo, STMT_VINFO_VECTYPE (stmt_info));
    1295                 :            : 
    1296                 :      79483 :   if (DR_IS_READ (dr_info->dr))
    1297                 :      47306 :     vect_get_load_cost (stmt_info, ncopies, true, inside_cost, outside_cost,
    1298                 :            :                         prologue_cost_vec, body_cost_vec, false);
    1299                 :            :   else
    1300                 :      32177 :     vect_get_store_cost (stmt_info, ncopies, inside_cost, body_cost_vec);
    1301                 :            : 
    1302                 :      79483 :   if (dump_enabled_p ())
    1303                 :      17338 :     dump_printf_loc (MSG_NOTE, vect_location,
    1304                 :            :                      "vect_get_data_access_cost: inside_cost = %d, "
    1305                 :            :                      "outside_cost = %d.\n", *inside_cost, *outside_cost);
    1306                 :      79483 : }
    1307                 :            : 
    1308                 :            : 
    1309                 :            : typedef struct _vect_peel_info
    1310                 :            : {
    1311                 :            :   dr_vec_info *dr_info;
    1312                 :            :   int npeel;
    1313                 :            :   unsigned int count;
    1314                 :            : } *vect_peel_info;
    1315                 :            : 
    1316                 :            : typedef struct _vect_peel_extended_info
    1317                 :            : {
    1318                 :            :   struct _vect_peel_info peel_info;
    1319                 :            :   unsigned int inside_cost;
    1320                 :            :   unsigned int outside_cost;
    1321                 :            : } *vect_peel_extended_info;
    1322                 :            : 
    1323                 :            : 
    1324                 :            : /* Peeling hashtable helpers.  */
    1325                 :            : 
    1326                 :            : struct peel_info_hasher : free_ptr_hash <_vect_peel_info>
    1327                 :            : {
    1328                 :            :   static inline hashval_t hash (const _vect_peel_info *);
    1329                 :            :   static inline bool equal (const _vect_peel_info *, const _vect_peel_info *);
    1330                 :            : };
    1331                 :            : 
    1332                 :            : inline hashval_t
    1333                 :      97164 : peel_info_hasher::hash (const _vect_peel_info *peel_info)
    1334                 :            : {
    1335                 :      97164 :   return (hashval_t) peel_info->npeel;
    1336                 :            : }
    1337                 :            : 
    1338                 :            : inline bool
    1339                 :      53124 : peel_info_hasher::equal (const _vect_peel_info *a, const _vect_peel_info *b)
    1340                 :            : {
    1341                 :      53124 :   return (a->npeel == b->npeel);
    1342                 :            : }
    1343                 :            : 
    1344                 :            : 
    1345                 :            : /* Insert DR_INFO into the peeling hash table with NPEEL as the key.  */
    1346                 :            : 
    1347                 :            : static void
    1348                 :      55522 : vect_peeling_hash_insert (hash_table<peel_info_hasher> *peeling_htab,
    1349                 :            :                           loop_vec_info loop_vinfo, dr_vec_info *dr_info,
    1350                 :            :                           int npeel)
    1351                 :            : {
    1352                 :      55522 :   struct _vect_peel_info elem, *slot;
    1353                 :      55522 :   _vect_peel_info **new_slot;
    1354                 :      55522 :   bool supportable_dr_alignment
    1355                 :      55522 :     = vect_supportable_dr_alignment (dr_info, true);
    1356                 :            : 
    1357                 :      55522 :   elem.npeel = npeel;
    1358                 :      55522 :   slot = peeling_htab->find (&elem);
    1359                 :      55522 :   if (slot)
    1360                 :      33460 :     slot->count++;
    1361                 :            :   else
    1362                 :            :     {
    1363                 :      22062 :       slot = XNEW (struct _vect_peel_info);
    1364                 :      22062 :       slot->npeel = npeel;
    1365                 :      22062 :       slot->dr_info = dr_info;
    1366                 :      22062 :       slot->count = 1;
    1367                 :      22062 :       new_slot = peeling_htab->find_slot (slot, INSERT);
    1368                 :      22062 :       *new_slot = slot;
    1369                 :            :     }
    1370                 :            : 
    1371                 :      55522 :   if (!supportable_dr_alignment
    1372                 :      55528 :       && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    1373                 :          6 :     slot->count += VECT_MAX_COST;
    1374                 :      55522 : }
    1375                 :            : 
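                         :            : /* [Editor's illustrative sketch -- not part of tree-vect-data-refs.c.
                         :            :    The peeling hash table is essentially a histogram: every data
                         :            :    reference votes for the peel amount NPEEL that would make it aligned.
                         :            :    A std::unordered_map stands in for the hash_table here; the extra
                         :            :    VECT_MAX_COST weighting of unsupported-alignment refs is omitted.  */
                         :            : 
                         :            : #include <unordered_map>
                         :            : 
                         :            : static void
                         :            : record_peel_candidate (std::unordered_map<int, unsigned> &votes, int npeel)
                         :            : {
                         :            :   /* operator[] default-constructs the counter to zero on first use.  */
                         :            :   ++votes[npeel];
                         :            : }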
    1376                 :            : 
    1377                 :            : /* Traverse the peeling hash table to find the peeling option that aligns
    1378                 :            :    the maximum number of data accesses.  */
    1379                 :            : 
    1380                 :            : int
    1381                 :      16361 : vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
    1382                 :            :                                      _vect_peel_extended_info *max)
    1383                 :            : {
    1384                 :      16361 :   vect_peel_info elem = *slot;
    1385                 :            : 
    1386                 :      16361 :   if (elem->count > max->peel_info.count
    1387                 :       9692 :       || (elem->count == max->peel_info.count
    1388                 :       7084 :           && max->peel_info.npeel > elem->npeel))
    1389                 :            :     {
    1390                 :       6673 :       max->peel_info.npeel = elem->npeel;
    1391                 :       6673 :       max->peel_info.count = elem->count;
    1392                 :       6673 :       max->peel_info.dr_info = elem->dr_info;
    1393                 :            :     }
    1394                 :            : 
    1395                 :      16361 :   return 1;
    1396                 :            : }
    1397                 :            : 
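                         :            : /* [Editor's illustrative sketch -- not part of tree-vect-data-refs.c.
                         :            :    Selecting the most frequent peel amount from such a histogram, with
                         :            :    ties broken in favour of the smaller NPEEL, mirrors the traversal
                         :            :    callback above.  */
                         :            : 
                         :            : #include <unordered_map>
                         :            : #include <utility>
                         :            : 
                         :            : static std::pair<int, unsigned>
                         :            : most_frequent_peel (const std::unordered_map<int, unsigned> &votes)
                         :            : {
                         :            :   int best_npeel = 0;
                         :            :   unsigned best_count = 0;
                         :            :   for (const auto &v : votes)
                         :            :     if (v.second > best_count
                         :            :         || (v.second == best_count && v.first < best_npeel))
                         :            :       {
                         :            :         best_npeel = v.first;
                         :            :         best_count = v.second;
                         :            :       }
                         :            :   return { best_npeel, best_count };
                         :            : }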
    1398                 :            : /* Get the costs of peeling NPEEL iterations for LOOP_VINFO, checking
    1399                 :            :    data access costs for all data refs.  If UNKNOWN_MISALIGNMENT is true,
    1400                 :            :    we assume DR0_INFO's misalignment will be zero after peeling.  */
    1401                 :            : 
    1402                 :            : static void
    1403                 :      29986 : vect_get_peeling_costs_all_drs (loop_vec_info loop_vinfo,
    1404                 :            :                                 dr_vec_info *dr0_info,
    1405                 :            :                                 unsigned int *inside_cost,
    1406                 :            :                                 unsigned int *outside_cost,
    1407                 :            :                                 stmt_vector_for_cost *body_cost_vec,
    1408                 :            :                                 stmt_vector_for_cost *prologue_cost_vec,
    1409                 :            :                                 unsigned int npeel,
    1410                 :            :                                 bool unknown_misalignment)
    1411                 :            : {
    1412                 :      29986 :   vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
    1413                 :      29986 :   unsigned i;
    1414                 :      29986 :   data_reference *dr;
    1415                 :            : 
    1416                 :     112481 :   FOR_EACH_VEC_ELT (datarefs, i, dr)
    1417                 :            :     {
    1418                 :      82495 :       dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
    1419                 :      82495 :       stmt_vec_info stmt_info = dr_info->stmt;
    1420                 :      82495 :       if (!STMT_VINFO_RELEVANT_P (stmt_info))
    1421                 :         24 :         continue;
    1422                 :            : 
    1423                 :            :       /* For interleaving, only the alignment of the first access
    1424                 :            :          matters.  */
    1425                 :      82471 :       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
    1426                 :      84082 :           && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
    1427                 :       1085 :         continue;
    1428                 :            : 
    1429                 :            :       /* Strided accesses perform only component accesses; alignment is
    1430                 :            :          irrelevant for them.  */
    1431                 :      83289 :       if (STMT_VINFO_STRIDED_P (stmt_info)
    1432                 :      81386 :           && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    1433                 :       1903 :         continue;
    1434                 :            : 
    1435                 :      79483 :       int save_misalignment;
    1436                 :      79483 :       save_misalignment = DR_MISALIGNMENT (dr_info);
    1437                 :      79483 :       if (npeel == 0)
    1438                 :            :         ;
    1439                 :      47604 :       else if (unknown_misalignment && dr_info == dr0_info)
    1440                 :      16334 :         SET_DR_MISALIGNMENT (dr_info, 0);
    1441                 :            :       else
    1442                 :      31270 :         vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
    1443                 :      79483 :       vect_get_data_access_cost (dr_info, inside_cost, outside_cost,
    1444                 :            :                                  body_cost_vec, prologue_cost_vec);
    1445                 :      82495 :       SET_DR_MISALIGNMENT (dr_info, save_misalignment);
    1446                 :            :     }
    1447                 :      29986 : }
    1448                 :            : 
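                         :            : /* [Editor's illustrative sketch -- not part of tree-vect-data-refs.c.
                         :            :    Costing a peeling candidate without committing to it follows the
                         :            :    save/modify/cost/restore pattern of the loop above: each data
                         :            :    reference's recorded misalignment is temporarily replaced by the
                         :            :    value it would have after peeling NPEEL iterations (zero for the
                         :            :    reference driving the peel), the access cost is accumulated, and the
                         :            :    original value is put back.  The toy access_cost() below merely makes
                         :            :    aligned accesses cheaper; it is not GCC's cost model.  */
                         :            : 
                         :            : #include <vector>
                         :            : #include <cstddef>
                         :            : 
                         :            : struct dr_model { int misalignment; int step; };
                         :            : 
                         :            : static unsigned
                         :            : access_cost (const dr_model &dr)
                         :            : {
                         :            :   return dr.misalignment == 0 ? 1 : 2;   /* toy: aligned is cheaper */
                         :            : }
                         :            : 
                         :            : static unsigned
                         :            : peeling_cost_all_drs (std::vector<dr_model> &drs, std::size_t dr0,
                         :            :                       int npeel, unsigned alignment)
                         :            : {
                         :            :   unsigned cost = 0;
                         :            :   for (std::size_t i = 0; i < drs.size (); ++i)
                         :            :     {
                         :            :       int saved = drs[i].misalignment;
                         :            :       if (npeel != 0)
                         :            :         drs[i].misalignment
                         :            :           = (i == dr0
                         :            :              ? 0
                         :            :              : (saved + npeel * drs[i].step) & (int) (alignment - 1));
                         :            :       cost += access_cost (drs[i]);
                         :            :       drs[i].misalignment = saved;  /* restore before the next candidate */
                         :            :     }
                         :            :   return cost;
                         :            : }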
    1449                 :            : /* Traverse the peeling hash table and calculate the cost of each peeling
    1450                 :            :    option.  Find the one with the lowest cost.  */
    1451                 :            : 
    1452                 :            : int
    1453                 :       4748 : vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
    1454                 :            :                                    _vect_peel_extended_info *min)
    1455                 :            : {
    1456                 :       4748 :   vect_peel_info elem = *slot;
    1457                 :       4748 :   int dummy;
    1458                 :       4748 :   unsigned int inside_cost = 0, outside_cost = 0;
    1459                 :       4748 :   stmt_vec_info stmt_info = elem->dr_info->stmt;
    1460                 :       4748 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    1461                 :       4748 :   stmt_vector_for_cost prologue_cost_vec, body_cost_vec,
    1462                 :            :                        epilogue_cost_vec;
    1463                 :            : 
    1464                 :       4748 :   prologue_cost_vec.create (2);
    1465                 :       4748 :   body_cost_vec.create (2);
    1466                 :       4748 :   epilogue_cost_vec.create (2);
    1467                 :            : 
    1468                 :       4748 :   vect_get_peeling_costs_all_drs (loop_vinfo, elem->dr_info, &inside_cost,
    1469                 :            :                                   &outside_cost, &body_cost_vec,
    1470                 :       4748 :                                   &prologue_cost_vec, elem->npeel, false);
    1471                 :            : 
    1472                 :       4748 :   body_cost_vec.release ();
    1473                 :            : 
    1474                 :       9496 :   outside_cost += vect_get_known_peeling_cost
    1475                 :       4748 :     (loop_vinfo, elem->npeel, &dummy,
    1476                 :            :      &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
    1477                 :            :      &prologue_cost_vec, &epilogue_cost_vec);
    1478                 :            : 
    1479                 :            :   /* Prologue and epilogue costs are added to the target model later.
    1480                 :            :      These costs depend only on the scalar iteration cost, the
    1481                 :            :      number of peeling iterations finally chosen, and the number of
    1482                 :            :      misaligned statements.  So discard the information found here.  */
    1483                 :       4748 :   prologue_cost_vec.release ();
    1484                 :       4748 :   epilogue_cost_vec.release ();
    1485                 :            : 
    1486                 :       4748 :   if (inside_cost < min->inside_cost
    1487                 :        223 :       || (inside_cost == min->inside_cost
    1488                 :        221 :           && outside_cost < min->outside_cost))
    1489                 :            :     {
    1490                 :       4526 :       min->inside_cost = inside_cost;
    1491                 :       4526 :       min->outside_cost = outside_cost;
    1492                 :       4526 :       min->peel_info.dr_info = elem->dr_info;
    1493                 :       4526 :       min->peel_info.npeel = elem->npeel;
    1494                 :       4526 :       min->peel_info.count = elem->count;
    1495                 :            :     }
    1496                 :            : 
    1497                 :       4748 :   return 1;
    1498                 :            : }
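
/* Editorial note (not part of the original source): hash_table::traverse
   keeps walking slots while the callback returns nonzero and stops as soon
   as the callback returns 0, so the unconditional `return 1' above ensures
   that every recorded peeling option is costed before the minimum is
   picked.  */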
    1499                 :            : 
    1500                 :            : 
    1501                 :            : /* Choose the best peeling option by traversing the peeling hash table and
    1502                 :            :    either choosing the option with the lowest cost (if the cost model is
    1503                 :            :    enabled) or the option that aligns as many accesses as possible.  */
    1504                 :            : 
    1505                 :            : static struct _vect_peel_extended_info
    1506                 :      11159 : vect_peeling_hash_choose_best_peeling (hash_table<peel_info_hasher> *peeling_htab,
    1507                 :            :                                        loop_vec_info loop_vinfo)
    1508                 :            : {
    1509                 :      11159 :    struct _vect_peel_extended_info res;
    1510                 :            : 
    1511                 :      11159 :    res.peel_info.dr_info = NULL;
    1512                 :            : 
    1513                 :      22318 :    if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    1514                 :            :      {
    1515                 :       4525 :        res.inside_cost = INT_MAX;
    1516                 :       4525 :        res.outside_cost = INT_MAX;
    1517                 :       4525 :        peeling_htab->traverse <_vect_peel_extended_info *,
    1518                 :       4525 :                                vect_peeling_hash_get_lowest_cost> (&res);
    1519                 :            :      }
    1520                 :            :    else
    1521                 :            :      {
    1522                 :       6634 :        res.peel_info.count = 0;
    1523                 :       6634 :        peeling_htab->traverse <_vect_peel_extended_info *,
    1524                 :       6634 :                                vect_peeling_hash_get_most_frequent> (&res);
    1525                 :       6634 :        res.inside_cost = 0;
    1526                 :       6634 :        res.outside_cost = 0;
    1527                 :            :      }
    1528                 :            : 
    1529                 :      11159 :    return res;
    1530                 :            : }
    1531                 :            : 
    1532                 :            : /* Return true if the new peeling amount NPEEL is supported.  */
    1533                 :            : 
    1534                 :            : static bool
    1535                 :        212 : vect_peeling_supportable (loop_vec_info loop_vinfo, dr_vec_info *dr0_info,
    1536                 :            :                           unsigned npeel)
    1537                 :            : {
    1538                 :        212 :   unsigned i;
    1539                 :        212 :   struct data_reference *dr = NULL;
    1540                 :        212 :   vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
    1541                 :        212 :   enum dr_alignment_support supportable_dr_alignment;
    1542                 :            : 
    1543                 :            :   /* Ensure that all data refs can be vectorized after the peel.  */
    1544                 :        668 :   FOR_EACH_VEC_ELT (datarefs, i, dr)
    1545                 :            :     {
    1546                 :        456 :       int save_misalignment;
    1547                 :            : 
    1548                 :        456 :       if (dr == dr0_info->dr)
    1549                 :        212 :         continue;
    1550                 :            : 
    1551                 :        244 :       dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
    1552                 :        244 :       stmt_vec_info stmt_info = dr_info->stmt;
    1553                 :            :       /* For interleaving, only the alignment of the first access
    1554                 :            :          matters.  */
    1555                 :        244 :       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
    1556                 :        263 :           && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
    1557                 :         19 :         continue;
    1558                 :            : 
    1559                 :            :       /* Strided accesses perform only component accesses, so alignment
    1560                 :            :          is irrelevant for them.  */
    1561                 :        228 :       if (STMT_VINFO_STRIDED_P (stmt_info)
    1562                 :        225 :           && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    1563                 :          3 :         continue;
    1564                 :            : 
    1565                 :        222 :       save_misalignment = DR_MISALIGNMENT (dr_info);
    1566                 :        222 :       vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
    1567                 :        222 :       supportable_dr_alignment
    1568                 :        222 :         = vect_supportable_dr_alignment (dr_info, false);
    1569                 :        222 :       SET_DR_MISALIGNMENT (dr_info, save_misalignment);
    1570                 :            : 
    1571                 :        222 :       if (!supportable_dr_alignment)
    1572                 :            :         return false;
    1573                 :            :     }
    1574                 :            : 
    1575                 :            :   return true;
    1576                 :            : }
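
/* Editorial sketch, not part of the original file: a minimal, self-contained
   illustration of the peeling arithmetic used by the code below.  Given a
   compile-time misalignment MIS in bytes (relative to a power-of-two
   TARGET_ALIGN) and a scalar element size DR_SIZE, the number of scalar
   iterations to peel so that the access becomes aligned is the byte
   distance to the next alignment boundary divided by the element size.
   The function name and parameters are hypothetical.  For example,
   target_align = 16, dr_size = 4 and mis = 12 give
   ((-12) & 15) / 4 = 1 peeled iteration.  */

static unsigned int
example_npeel_for_known_misalignment (unsigned int mis,
                                      unsigned int target_align,
                                      unsigned int dr_size)
{
  /* Bytes remaining to the next TARGET_ALIGN boundary, reduced to a count
     of scalar elements; this mirrors the
     "(mis & (target_align - 1)) / dr_size" computation used below, where
     MIS is the negated DR_MISALIGNMENT for a positive step.  */
  return ((0U - mis) & (target_align - 1)) / dr_size;
}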
    1577                 :            : 
    1578                 :            : /* Function vect_enhance_data_refs_alignment
    1579                 :            : 
    1580                 :            :    This pass will use loop versioning and loop peeling in order to enhance
    1581                 :            :    the alignment of data references in the loop.
    1582                 :            : 
    1583                 :            :    FOR NOW: we assume that whatever versioning/peeling takes place, only the
    1584                 :            :    original loop is to be vectorized.  Any other loops that are created by
    1585                 :            :    the transformations performed in this pass are not supposed to be
    1586                 :            :    vectorized.  This restriction will be relaxed.
    1587                 :            : 
    1588                 :            :    This pass will require a cost model to guide whether to apply peeling or
    1589                 :            :    versioning or a combination of the two.  For example, the scheme that
    1590                 :            :    Intel uses when given a loop with several memory accesses is as follows:
    1591                 :            :    choose one memory access ('p') whose alignment you want to force by
    1592                 :            :    peeling.  Then, either (1) generate a loop in which 'p' is aligned and all
    1593                 :            :    other accesses are not necessarily aligned, or (2) use loop versioning to
    1594                 :            :    generate one loop in which all accesses are aligned, and another loop in
    1595                 :            :    which only 'p' is necessarily aligned.
    1596                 :            : 
    1597                 :            :    ("Automatic Intra-Register Vectorization for the Intel Architecture",
    1598                 :            :    Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
    1599                 :            :    Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
    1600                 :            : 
    1601                 :            :    Devising a cost model is the most critical aspect of this work.  It will
    1602                 :            :    guide us on which access to peel for, whether to use loop versioning, how
    1603                 :            :    many versions to create, etc.  The cost model will probably consist of
    1604                 :            :    generic considerations as well as target specific considerations (on
    1605                 :            :    powerpc for example, misaligned stores are more painful than misaligned
    1606                 :            :    loads).
    1607                 :            : 
    1608                 :            :    Here are the general steps involved in alignment enhancements:
    1609                 :            : 
    1610                 :            :      -- original loop, before alignment analysis:
    1611                 :            :         for (i=0; i<N; i++){
    1612                 :            :           x = q[i];                     # DR_MISALIGNMENT(q) = unknown
    1613                 :            :           p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
    1614                 :            :         }
    1615                 :            : 
    1616                 :            :      -- After vect_compute_data_refs_alignment:
    1617                 :            :         for (i=0; i<N; i++){
    1618                 :            :           x = q[i];                     # DR_MISALIGNMENT(q) = 3
    1619                 :            :           p[i] = y;                     # DR_MISALIGNMENT(p) = unknown
    1620                 :            :         }
    1621                 :            : 
    1622                 :            :      -- Possibility 1: we do loop versioning:
    1623                 :            :      if (p is aligned) {
    1624                 :            :         for (i=0; i<N; i++){ # loop 1A
    1625                 :            :           x = q[i];                     # DR_MISALIGNMENT(q) = 3
    1626                 :            :           p[i] = y;                     # DR_MISALIGNMENT(p) = 0
    1627                 :            :         }
    1628                 :            :      }
    1629                 :            :      else {
    1630                 :            :         for (i=0; i<N; i++){ # loop 1B
    1631                 :            :           x = q[i];                     # DR_MISALIGNMENT(q) = 3
    1632                 :            :           p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
    1633                 :            :         }
    1634                 :            :      }
    1635                 :            : 
    1636                 :            :      -- Possibility 2: we do loop peeling:
    1637                 :            :      for (i = 0; i < 3; i++){        # (scalar loop, not to be vectorized).
    1638                 :            :         x = q[i];
    1639                 :            :         p[i] = y;
    1640                 :            :      }
    1641                 :            :      for (i = 3; i < N; i++){        # loop 2A
    1642                 :            :         x = q[i];                       # DR_MISALIGNMENT(q) = 0
    1643                 :            :         p[i] = y;                       # DR_MISALIGNMENT(p) = unknown
    1644                 :            :      }
    1645                 :            : 
    1646                 :            :      -- Possibility 3: combination of loop peeling and versioning:
    1647                 :            :      for (i = 0; i < 3; i++){        # (scalar loop, not to be vectorized).
    1648                 :            :         x = q[i];
    1649                 :            :         p[i] = y;
    1650                 :            :      }
    1651                 :            :      if (p is aligned) {
    1652                 :            :         for (i = 3; i<N; i++){       # loop 3A
    1653                 :            :           x = q[i];                     # DR_MISALIGNMENT(q) = 0
    1654                 :            :           p[i] = y;                     # DR_MISALIGNMENT(p) = 0
    1655                 :            :         }
    1656                 :            :      }
    1657                 :            :      else {
    1658                 :            :         for (i = 3; i<N; i++){       # loop 3B
    1659                 :            :           x = q[i];                     # DR_MISALIGNMENT(q) = 0
    1660                 :            :           p[i] = y;                     # DR_MISALIGNMENT(p) = unaligned
    1661                 :            :         }
    1662                 :            :      }
    1663                 :            : 
    1664                 :            :      These loops are later passed to loop_transform to be vectorized.  The
    1665                 :            :      vectorizer will use the alignment information to guide the transformation
    1666                 :            :      (whether to generate regular loads/stores, or with special handling for
    1667                 :            :      misalignment).  */
    1668                 :            : 
    1669                 :            : opt_result
    1670                 :      27309 : vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
    1671                 :            : {
    1672                 :      27309 :   vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
    1673                 :      27309 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    1674                 :      27309 :   enum dr_alignment_support supportable_dr_alignment;
    1675                 :      27309 :   dr_vec_info *first_store = NULL;
    1676                 :      27309 :   dr_vec_info *dr0_info = NULL;
    1677                 :      27309 :   struct data_reference *dr;
    1678                 :      27309 :   unsigned int i, j;
    1679                 :      27309 :   bool do_peeling = false;
    1680                 :      27309 :   bool do_versioning = false;
    1681                 :      27309 :   unsigned int npeel = 0;
    1682                 :      27309 :   bool one_misalignment_known = false;
    1683                 :      27309 :   bool one_misalignment_unknown = false;
    1684                 :      27309 :   bool one_dr_unsupportable = false;
    1685                 :      27309 :   dr_vec_info *unsupportable_dr_info = NULL;
    1686                 :      27309 :   poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    1687                 :      27309 :   unsigned possible_npeel_number = 1;
    1688                 :      27309 :   tree vectype;
    1689                 :      27309 :   unsigned int mis, same_align_drs_max = 0;
    1690                 :      54618 :   hash_table<peel_info_hasher> peeling_htab (1);
    1691                 :            : 
    1692                 :      27309 :   DUMP_VECT_SCOPE ("vect_enhance_data_refs_alignment");
    1693                 :            : 
    1694                 :            :   /* Reset data so we can safely be called multiple times.  */
    1695                 :      27309 :   LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
    1696                 :      27309 :   LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = 0;
    1697                 :            : 
    1698                 :            :   /* While cost model enhancements are expected in the future, the high-level
    1699                 :            :      view of the code at this time is as follows:
    1700                 :            : 
    1701                 :            :      A) If there is a misaligned access then see if peeling to align
    1702                 :            :         this access can make all data references satisfy
    1703                 :            :         vect_supportable_dr_alignment.  If so, update data structures
    1704                 :            :         as needed and return true.
    1705                 :            : 
    1706                 :            :      B) If peeling wasn't possible and there is a data reference with an
    1707                 :            :         unknown misalignment that does not satisfy vect_supportable_dr_alignment
    1708                 :            :         then see if loop versioning checks can be used to make all data
    1709                 :            :         references satisfy vect_supportable_dr_alignment.  If so, update
    1710                 :            :         data structures as needed and return true.
    1711                 :            : 
    1712                 :            :      C) If neither peeling nor versioning was successful then return false if
    1713                 :            :         any data reference does not satisfy vect_supportable_dr_alignment.
    1714                 :            : 
    1715                 :            :      D) Return true (all data references satisfy vect_supportable_dr_alignment).
    1716                 :            : 
    1717                 :            :      Note: possibility 3 above (peeling and versioning together) is not
    1718                 :            :      being done at this time.  */
    1719                 :            : 
    1720                 :            :   /* (1) Peeling to force alignment.  */
    1721                 :            : 
    1722                 :            :   /* (1.1) Decide whether to perform peeling, and how many iterations to peel:
    1723                 :            :      Considerations:
    1724                 :            :      + How many accesses will become aligned due to the peeling
    1725                 :            :      - How many accesses will become unaligned due to the peeling,
    1726                 :            :        and the cost of misaligned accesses.
    1727                 :            :      - The cost of peeling (the extra runtime checks, the increase
    1728                 :            :        in code size).  */
    1729                 :            : 
    1730                 :      97885 :   FOR_EACH_VEC_ELT (datarefs, i, dr)
    1731                 :            :     {
    1732                 :      73761 :       dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
    1733                 :      73761 :       stmt_vec_info stmt_info = dr_info->stmt;
    1734                 :            : 
    1735                 :      73761 :       if (!STMT_VINFO_RELEVANT_P (stmt_info))
    1736                 :         12 :         continue;
    1737                 :            : 
    1738                 :            :       /* For interleaving, only the alignment of the first access
    1739                 :            :          matters.  */
    1740                 :      73749 :       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
    1741                 :      90968 :           && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
    1742                 :      11405 :         continue;
    1743                 :            : 
    1744                 :            :       /* For scatter-gather or invariant accesses there is nothing
    1745                 :            :          to enhance.  */
    1746                 :      63206 :       if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)
    1747                 :      62344 :           || integer_zerop (DR_STEP (dr)))
    1748                 :        862 :         continue;
    1749                 :            : 
    1750                 :            :       /* Strided accesses perform only component accesses, so alignment
    1751                 :            :          is irrelevant for them.  */
    1752                 :      66685 :       if (STMT_VINFO_STRIDED_P (stmt_info)
    1753                 :      61482 :           && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    1754                 :       5203 :         continue;
    1755                 :            : 
    1756                 :      56279 :       supportable_dr_alignment = vect_supportable_dr_alignment (dr_info, true);
    1757                 :      56279 :       do_peeling = vector_alignment_reachable_p (dr_info);
    1758                 :      56279 :       if (do_peeling)
    1759                 :            :         {
    1760                 :      51628 :           if (known_alignment_for_access_p (dr_info))
    1761                 :            :             {
    1762                 :      29070 :               unsigned int npeel_tmp = 0;
    1763                 :      29070 :               bool negative = tree_int_cst_compare (DR_STEP (dr),
    1764                 :      29070 :                                                     size_zero_node) < 0;
    1765                 :            : 
    1766                 :      29070 :               vectype = STMT_VINFO_VECTYPE (stmt_info);
    1767                 :            :               /* If known_alignment_for_access_p then we have set
    1768                 :            :                  DR_MISALIGNMENT, which is only done if we know it at compile
    1769                 :            :                  time, so it is safe to assume the target alignment is
    1770                 :            :                  constant.  */
    1771                 :      29070 :               unsigned int target_align =
    1772                 :      29070 :                 DR_TARGET_ALIGNMENT (dr_info).to_constant ();
    1773                 :      29070 :               unsigned int dr_size = vect_get_scalar_dr_size (dr_info);
    1774                 :      58140 :               mis = (negative
    1775                 :      29070 :                      ? DR_MISALIGNMENT (dr_info)
    1776                 :      28854 :                      : -DR_MISALIGNMENT (dr_info));
    1777                 :      29070 :               if (DR_MISALIGNMENT (dr_info) != 0)
    1778                 :       1422 :                 npeel_tmp = (mis & (target_align - 1)) / dr_size;
    1779                 :            : 
    1780                 :            :               /* For multiple types, it is possible that the bigger type access
    1781                 :            :                  will have more than one peeling option.  E.g., a loop with two
    1782                 :            :                  types: one of size (vector size / 4), and the other one of
    1783                 :            :                  size (vector size / 8).  The vectorization factor will be 8.  If both
    1784                 :            :                  accesses are misaligned by 3, the first one needs one scalar
    1785                 :            :                  iteration to be aligned, and the second one needs 5.  But the
    1786                 :            :                  first one will be aligned also by peeling 5 scalar
    1787                 :            :                  iterations, and in that case both accesses will be aligned.
    1788                 :            :                  Hence, except for the immediate peeling amount, we also want
    1789                 :            :                  to try to add the full vector size, while not exceeding the
    1790                 :            :                  vectorization factor.
    1791                 :            :                  We do this automatically for the cost model, since we
    1792                 :            :                  calculate the cost of every peeling option.  */
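              /* Editorial illustration with hypothetical numbers: for a
                 4-byte access with a 16-byte target alignment that is
                 misaligned by 12 bytes, NPEEL_TMP starts at 1; when
                 POSSIBLE_NPEEL_NUMBER is 2 (two vectors fit in the
                 vectorization factor), the hash table also receives an
                 entry for 1 + 16/4 = 5, so a larger peel that additionally
                 aligns a wider access in the same loop can still win on
                 cost.  */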
    1793                 :      58140 :               if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    1794                 :            :                 {
    1795                 :      18831 :                   poly_uint64 nscalars = (STMT_SLP_TYPE (stmt_info)
    1796                 :      18831 :                                           ? vf * DR_GROUP_SIZE (stmt_info) : vf);
    1797                 :      18831 :                   possible_npeel_number
    1798                 :      18831 :                     = vect_get_num_vectors (nscalars, vectype);
    1799                 :            : 
    1800                 :            :                   /* NPEEL_TMP is 0 when there is no misalignment; also allow
    1801                 :            :                      peeling a full vector's worth of elements (NELEMENTS).  */
    1802                 :      18831 :                   if (DR_MISALIGNMENT (dr_info) == 0)
    1803                 :      18389 :                     possible_npeel_number++;
    1804                 :            :                 }
    1805                 :            : 
    1806                 :            :               /* Save info about DR in the hash table.  Also include peeling
    1807                 :            :                  amounts according to the explanation above.  */
    1808                 :      84592 :               for (j = 0; j < possible_npeel_number; j++)
    1809                 :            :                 {
    1810                 :      55522 :                   vect_peeling_hash_insert (&peeling_htab, loop_vinfo,
    1811                 :            :                                             dr_info, npeel_tmp);
    1812                 :      55522 :                   npeel_tmp += target_align / dr_size;
    1813                 :            :                 }
    1814                 :            : 
    1815                 :            :               one_misalignment_known = true;
    1816                 :            :             }
    1817                 :            :           else
    1818                 :            :             {
    1819                 :            :               /* If we don't know any misalignment values, we prefer
    1820                 :            :                  peeling for the data-ref that has the maximum number of
    1821                 :            :                  data-refs with the same alignment, unless the target
    1822                 :            :                  prefers to align stores over loads.  */
    1823                 :      22558 :               unsigned same_align_drs
    1824                 :      22558 :                 = STMT_VINFO_SAME_ALIGN_REFS (stmt_info).length ();
    1825                 :      22558 :               if (!dr0_info
    1826                 :      22558 :                   || same_align_drs_max < same_align_drs)
    1827                 :            :                 {
    1828                 :            :                   same_align_drs_max = same_align_drs;
    1829                 :            :                   dr0_info = dr_info;
    1830                 :            :                 }
    1831                 :            :               /* For data-refs with the same number of related
    1832                 :            :                  accesses, prefer the one whose misalignment
    1833                 :            :                  computation will be invariant in the outermost loop.  */
    1834                 :      12584 :               else if (same_align_drs_max == same_align_drs)
    1835                 :            :                 {
    1836                 :      12000 :                   class loop *ivloop0, *ivloop;
    1837                 :      12000 :                   ivloop0 = outermost_invariant_loop_for_expr
    1838                 :      12000 :                     (loop, DR_BASE_ADDRESS (dr0_info->dr));
    1839                 :      12000 :                   ivloop = outermost_invariant_loop_for_expr
    1840                 :      12000 :                     (loop, DR_BASE_ADDRESS (dr));
    1841                 :      12000 :                   if ((ivloop && !ivloop0)
    1842                 :      12000 :                       || (ivloop && ivloop0
    1843                 :      12000 :                           && flow_loop_nested_p (ivloop, ivloop0)))
    1844                 :            :                     dr0_info = dr_info;
    1845                 :            :                 }
    1846                 :            : 
    1847                 :      22558 :               one_misalignment_unknown = true;
    1848                 :            : 
    1849                 :            :               /* Check for data refs with unsupportable alignment that
    1850                 :            :                  can be peeled.  */
    1851                 :      22558 :               if (!supportable_dr_alignment)
    1852                 :            :               {
    1853                 :          0 :                 one_dr_unsupportable = true;
    1854                 :          0 :                 unsupportable_dr_info = dr_info;
    1855                 :            :               }
    1856                 :            : 
    1857                 :      22558 :               if (!first_store && DR_IS_WRITE (dr))
    1858                 :       6774 :                 first_store = dr_info;
    1859                 :            :             }
    1860                 :            :         }
    1861                 :            :       else
    1862                 :            :         {
    1863                 :       4651 :           if (!aligned_access_p (dr_info))
    1864                 :            :             {
    1865                 :       3185 :               if (dump_enabled_p ())
    1866                 :        809 :                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1867                 :            :                                  "vector alignment may not be reachable\n");
    1868                 :            :               break;
    1869                 :            :             }
    1870                 :            :         }
    1871                 :            :     }
    1872                 :            : 
    1873                 :            :   /* Check if we can possibly peel the loop.  */
    1874                 :      27309 :   if (!vect_can_advance_ivs_p (loop_vinfo)
    1875                 :      27233 :       || !slpeel_can_duplicate_loop_p (loop, single_exit (loop))
    1876                 :      54542 :       || loop->inner)
    1877                 :            :     do_peeling = false;
    1878                 :            : 
    1879                 :      27309 :   struct _vect_peel_extended_info peel_for_known_alignment;
    1880                 :      27309 :   struct _vect_peel_extended_info peel_for_unknown_alignment;
    1881                 :      27309 :   struct _vect_peel_extended_info best_peel;
    1882                 :            : 
    1883                 :            :       /* Check whether the target prefers to align stores over loads, i.e.
    1884                 :            :          whether misaligned stores are more expensive than misaligned loads
    1885                 :            :          (taking drs with the same alignment into account).  */
    1886                 :            : 
    1887                 :      27309 :   if (do_peeling
    1888                 :      27309 :       && one_misalignment_unknown)
    1889                 :            :     {
    1890                 :            :       /* Check if the target requires to prefer stores over loads, i.e., if
    1891                 :            :          misaligned stores are more expensive than misaligned loads (taking
    1892                 :            :          drs with same alignment into account).  */
    1893                 :       9624 :       unsigned int load_inside_cost = 0;
    1894                 :       9624 :       unsigned int load_outside_cost = 0;
    1895                 :       9624 :       unsigned int store_inside_cost = 0;
    1896                 :       9624 :       unsigned int store_outside_cost = 0;
    1897                 :       9624 :       unsigned int estimated_npeels = vect_vf_for_cost (loop_vinfo) / 2;
    1898                 :            : 
    1899                 :       9624 :       stmt_vector_for_cost dummy;
    1900                 :       9624 :       dummy.create (2);
    1901                 :       9624 :       vect_get_peeling_costs_all_drs (loop_vinfo, dr0_info,
    1902                 :            :                                       &load_inside_cost,
    1903                 :            :                                       &load_outside_cost,
    1904                 :            :                                       &dummy, &dummy, estimated_npeels, true);
    1905                 :       9624 :       dummy.release ();
    1906                 :            : 
    1907                 :       9624 :       if (first_store)
    1908                 :            :         {
    1909                 :       6710 :           dummy.create (2);
    1910                 :       6710 :           vect_get_peeling_costs_all_drs (loop_vinfo, first_store,
    1911                 :            :                                           &store_inside_cost,
    1912                 :            :                                           &store_outside_cost,
    1913                 :            :                                           &dummy, &dummy,
    1914                 :            :                                           estimated_npeels, true);
    1915                 :       6710 :           dummy.release ();
    1916                 :            :         }
    1917                 :            :       else
    1918                 :            :         {
    1919                 :       2914 :           store_inside_cost = INT_MAX;
    1920                 :       2914 :           store_outside_cost = INT_MAX;
    1921                 :            :         }
    1922                 :            : 
    1923                 :       9624 :       if (load_inside_cost > store_inside_cost
    1924                 :       9624 :           || (load_inside_cost == store_inside_cost
    1925                 :       6707 :               && load_outside_cost > store_outside_cost))
    1926                 :            :         {
    1927                 :       9624 :           dr0_info = first_store;
    1928                 :       9624 :           peel_for_unknown_alignment.inside_cost = store_inside_cost;
    1929                 :       9624 :           peel_for_unknown_alignment.outside_cost = store_outside_cost;
    1930                 :            :         }
    1931                 :            :       else
    1932                 :            :         {
    1933                 :       9624 :           peel_for_unknown_alignment.inside_cost = load_inside_cost;
    1934                 :       9624 :           peel_for_unknown_alignment.outside_cost = load_outside_cost;
    1935                 :            :         }
    1936                 :            : 
    1937                 :       9624 :       stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
    1938                 :       9624 :       prologue_cost_vec.create (2);
    1939                 :       9624 :       epilogue_cost_vec.create (2);
    1940                 :            : 
    1941                 :       9624 :       int dummy2;
    1942                 :      19248 :       peel_for_unknown_alignment.outside_cost += vect_get_known_peeling_cost
    1943                 :       9624 :         (loop_vinfo, estimated_npeels, &dummy2,
    1944                 :            :          &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
    1945                 :            :          &prologue_cost_vec, &epilogue_cost_vec);
    1946                 :            : 
    1947                 :       9624 :       prologue_cost_vec.release ();
    1948                 :       9624 :       epilogue_cost_vec.release ();
    1949                 :            : 
    1950                 :       9624 :       peel_for_unknown_alignment.peel_info.count = 1
    1951                 :       9624 :         + STMT_VINFO_SAME_ALIGN_REFS (dr0_info->stmt).length ();
    1952                 :            :     }
    1953                 :            : 
    1954                 :      27309 :   peel_for_unknown_alignment.peel_info.npeel = 0;
    1955                 :      27309 :   peel_for_unknown_alignment.peel_info.dr_info = dr0_info;
    1956                 :            : 
    1957                 :      27309 :   best_peel = peel_for_unknown_alignment;
    1958                 :            : 
    1959                 :      27309 :   peel_for_known_alignment.inside_cost = INT_MAX;
    1960                 :      27309 :   peel_for_known_alignment.outside_cost = INT_MAX;
    1961                 :      27309 :   peel_for_known_alignment.peel_info.count = 0;
    1962                 :      27309 :   peel_for_known_alignment.peel_info.dr_info = NULL;
    1963                 :            : 
    1964                 :      27309 :   if (do_peeling && one_misalignment_known)
    1965                 :            :     {
    1966                 :            :       /* Peeling is possible, but no data access requires alignment in order
    1967                 :            :          to be supported.  So we try to choose the best possible peeling from
    1968                 :            :          the hash table.  */
    1969                 :      11159 :       peel_for_known_alignment = vect_peeling_hash_choose_best_peeling
    1970                 :      11159 :         (&peeling_htab, loop_vinfo);
    1971                 :            :     }
    1972                 :            : 
    1973                 :            :   /* Compare costs of peeling for known and unknown alignment. */
    1974                 :      27309 :   if (peel_for_known_alignment.peel_info.dr_info != NULL
    1975                 :      11159 :       && peel_for_unknown_alignment.inside_cost
    1976                 :            :       >= peel_for_known_alignment.inside_cost)
    1977                 :            :     {
    1978                 :      11159 :       best_peel = peel_for_known_alignment;
    1979                 :            : 
    1980                 :            :       /* If the best peeling for known alignment has NPEEL == 0, perform no
    1981                 :            :          peeling at all unless there is an unsupportable dr that we can
    1982                 :            :          align.  */
    1983                 :      11159 :       if (best_peel.peel_info.npeel == 0 && !one_dr_unsupportable)
    1984                 :      10820 :         do_peeling = false;
    1985                 :            :     }
    1986                 :            : 
    1987                 :            :   /* If there is an unsupportable data ref, prefer this over all choices so
    1988                 :            :      far since we'd have to discard a chosen peeling unless it happened to
    1989                 :            :      align the unsupportable data ref.  */
    1990                 :      27309 :   if (one_dr_unsupportable)
    1991                 :            :     dr0_info = unsupportable_dr_info;
    1992                 :      27309 :   else if (do_peeling)
    1993                 :            :     {
    1994                 :            :       /* Calculate the penalty for no peeling, i.e. leaving everything as-is.
    1995                 :            :          TODO: Use nopeel_outside_cost or get rid of it?  */
    1996                 :       8904 :       unsigned nopeel_inside_cost = 0;
    1997                 :       8904 :       unsigned nopeel_outside_cost = 0;
    1998                 :            : 
    1999                 :       8904 :       stmt_vector_for_cost dummy;
    2000                 :       8904 :       dummy.create (2);
    2001                 :       8904 :       vect_get_peeling_costs_all_drs (loop_vinfo, NULL, &nopeel_inside_cost,
    2002                 :            :                                       &nopeel_outside_cost, &dummy, &dummy,
    2003                 :            :                                       0, false);
    2004                 :       8904 :       dummy.release ();
    2005                 :            : 
    2006                 :            :       /* Add epilogue costs.  As we do not peel for alignment here, no prologue
    2007                 :            :          costs will be recorded.  */
    2008                 :       8904 :       stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
    2009                 :       8904 :       prologue_cost_vec.create (2);
    2010                 :       8904 :       epilogue_cost_vec.create (2);
    2011                 :            : 
    2012                 :       8904 :       int dummy2;
    2013                 :      17808 :       nopeel_outside_cost += vect_get_known_peeling_cost
    2014                 :       8904 :         (loop_vinfo, 0, &dummy2,
    2015                 :            :          &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
    2016                 :            :          &prologue_cost_vec, &epilogue_cost_vec);
    2017                 :            : 
    2018                 :       8904 :       prologue_cost_vec.release ();
    2019                 :       8904 :       epilogue_cost_vec.release ();
    2020                 :            : 
    2021                 :       8904 :       npeel = best_peel.peel_info.npeel;
    2022                 :       8904 :       dr0_info = best_peel.peel_info.dr_info;
    2023                 :            : 
    2024                 :            :       /* If not peeling is no more expensive than the best peeling found
    2025                 :            :          so far, don't perform any peeling.  */
    2026                 :       8904 :       if (nopeel_inside_cost <= best_peel.inside_cost)
    2027                 :       8692 :         do_peeling = false;
    2028                 :            :     }
    2029                 :            : 
    2030                 :      27309 :   if (do_peeling)
    2031                 :            :     {
    2032                 :        212 :       stmt_vec_info stmt_info = dr0_info->stmt;
    2033                 :        212 :       vectype = STMT_VINFO_VECTYPE (stmt_info);
    2034                 :            : 
    2035                 :        212 :       if (known_alignment_for_access_p (dr0_info))
    2036                 :            :         {
    2037                 :        191 :           bool negative = tree_int_cst_compare (DR_STEP (dr0_info->dr),
    2038                 :        191 :                                                 size_zero_node) < 0;
    2039                 :        191 :           if (!npeel)
    2040                 :            :             {
    2041                 :            :               /* Since it's known at compile time, compute the number of
    2042                 :            :                  iterations in the peeled loop (the peeling factor) for use
    2043                 :            :                  in updating DR_MISALIGNMENT values.  The peeling factor is
    2044                 :            :                  the target alignment minus the misalignment, expressed as
    2045                 :            :                  an element count.  */
    2046                 :          0 :               mis = (negative
    2047                 :          0 :                      ? DR_MISALIGNMENT (dr0_info)
    2048                 :          0 :                      : -DR_MISALIGNMENT (dr0_info));
    2049                 :            :               /* If known_alignment_for_access_p then we have set
    2050                 :            :                  DR_MISALIGNMENT, which is only done if we know it at compile
    2051                 :            :                  time, so it is safe to assume the target alignment is
    2052                 :            :                  constant.  */
    2053                 :          0 :               unsigned int target_align =
    2054                 :          0 :                 DR_TARGET_ALIGNMENT (dr0_info).to_constant ();
    2055                 :          0 :               npeel = ((mis & (target_align - 1))
    2056                 :          0 :                        / vect_get_scalar_dr_size (dr0_info));
    2057                 :            :             }
    2058                 :            : 
    2059                 :            :           /* For interleaved data access every iteration accesses all the
    2060                 :            :              members of the group, therefore we divide the number of iterations
    2061                 :            :              by the group size.  */
    2062                 :        191 :           if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    2063                 :         17 :             npeel /= DR_GROUP_SIZE (stmt_info);
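          /* Editorial illustration with hypothetical numbers: for an
             interleaved group of 4 elements where NPEEL was computed as 8
             elements, each loop iteration steps over the whole group, so
             only 8 / 4 = 2 iterations actually need to be peeled.  */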
    2064                 :            : 
    2065                 :        191 :           if (dump_enabled_p ())
    2066                 :        189 :             dump_printf_loc (MSG_NOTE, vect_location,
    2067                 :            :                              "Try peeling by %d\n", npeel);
    2068                 :            :         }
    2069                 :            : 
    2070                 :            :       /* Ensure that all datarefs can be vectorized after the peel.  */
    2071                 :        212 :       if (!vect_peeling_supportable (loop_vinfo, dr0_info, npeel))
    2072                 :            :         do_peeling = false;
    2073                 :            : 
    2074                 :            :       /* Check if all datarefs are supportable and log.  */
    2075                 :        212 :       if (do_peeling && known_alignment_for_access_p (dr0_info) && npeel == 0)
    2076                 :            :         {
    2077                 :          0 :           opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
    2078                 :          0 :           if (!stat)
    2079                 :          0 :             do_peeling = false;
    2080                 :            :           else
    2081                 :          0 :             return stat;
    2082                 :            :         }
    2083                 :            : 
    2084                 :            :       /* Cost model #1 - honor --param vect-max-peeling-for-alignment.  */
    2085                 :        212 :       if (do_peeling)
    2086                 :            :         {
    2087                 :        212 :           unsigned max_allowed_peel
    2088                 :        212 :             = param_vect_max_peeling_for_alignment;
    2089                 :        212 :           if (flag_vect_cost_model == VECT_COST_MODEL_CHEAP)
    2090                 :            :             max_allowed_peel = 0;
    2091                 :        209 :           if (max_allowed_peel != (unsigned)-1)
    2092                 :            :             {
    2093                 :          6 :               unsigned max_peel = npeel;
    2094                 :          6 :               if (max_peel == 0)
    2095                 :            :                 {
    2096                 :          1 :                   poly_uint64 target_align = DR_TARGET_ALIGNMENT (dr0_info);
    2097                 :          1 :                   unsigned HOST_WIDE_INT target_align_c;
    2098                 :          1 :                   if (target_align.is_constant (&target_align_c))
    2099                 :          2 :                     max_peel =
    2100                 :          1 :                       target_align_c / vect_get_scalar_dr_size (dr0_info) - 1;
    2101                 :            :                   else
    2102                 :            :                     {
    2103                 :            :                       do_peeling = false;
    2104                 :            :                       if (dump_enabled_p ())
    2105                 :            :                         dump_printf_loc (MSG_NOTE, vect_location,
    2106                 :            :                           "Disable peeling, max peels set and vector"
    2107                 :            :                           " alignment unknown\n");
    2108                 :            :                     }
    2109                 :            :                 }
    2110                 :          6 :               if (max_peel > max_allowed_peel)
    2111                 :            :                 {
    2112                 :          6 :                   do_peeling = false;
    2113                 :          6 :                   if (dump_enabled_p ())
    2114                 :          3 :                     dump_printf_loc (MSG_NOTE, vect_location,
    2115                 :            :                         "Disable peeling, max peels reached: %d\n", max_peel);
    2116                 :            :                 }
    2117                 :            :             }
    2118                 :            :         }
    2119                 :            : 
    2120                 :            :       /* Cost model #2 - if peeling may result in a remaining loop not
    2121                 :            :          iterating enough to be vectorized then do not peel.  Since this
    2122                 :            :          is a cost heuristic rather than a correctness decision, use the
    2123                 :            :          most likely runtime value for variable vectorization factors.  */
    2124                 :        209 :       if (do_peeling
    2125                 :        206 :           && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    2126                 :            :         {
    2127                 :        159 :           unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
    2128                 :        159 :           unsigned int max_peel = npeel == 0 ? assumed_vf - 1 : npeel;
    2129                 :        159 :           if ((unsigned HOST_WIDE_INT) LOOP_VINFO_INT_NITERS (loop_vinfo)
    2130                 :        159 :               < assumed_vf + max_peel)
    2131                 :            :             do_peeling = false;
    2132                 :            :         }
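      /* Editorial illustration with hypothetical numbers: with
         ASSUMED_VF = 8 and NPEEL = 0 (so MAX_PEEL = 7), a loop known to run
         10 iterations fails the check above because 10 < 8 + 7; peeling up
         to 7 iterations could leave as few as 3, not enough for even one
         vector iteration.  */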
    2133                 :            : 
    2134                 :        204 :       if (do_peeling)
    2135                 :            :         {
    2136                 :            :           /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
    2137                 :            :              If the misalignment of DR_i is identical to that of dr0 then set
    2138                 :            :              DR_MISALIGNMENT (DR_i) to zero.  If the misalignment of DR_i and
    2139                 :            :              dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
    2140                 :            :              by the peeling factor times the element size of DR_i (MOD the
    2141                 :            :              vectorization factor times the size).  Otherwise, the
    2142                 :            :              misalignment of DR_i must be set to unknown.  */
    2143                 :        630 :           FOR_EACH_VEC_ELT (datarefs, i, dr)
    2144                 :        432 :             if (dr != dr0_info->dr)
    2145                 :            :               {
    2146                 :            :                 /* Strided accesses perform only component accesses, so
    2147                 :            :                    alignment is irrelevant for them.  */
    2148                 :        234 :                 dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
    2149                 :        234 :                 stmt_info = dr_info->stmt;
    2150                 :        237 :                 if (STMT_VINFO_STRIDED_P (stmt_info)
    2151                 :        234 :                     && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
    2152                 :          3 :                   continue;
    2153                 :            : 
    2154                 :        231 :                 vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
    2155                 :            :               }
    2156                 :            : 
    2157                 :        198 :           LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0_info;
    2158                 :        198 :           if (npeel)
    2159                 :        179 :             LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
    2160                 :            :           else
    2161                 :         19 :             LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
    2162                 :         19 :               = DR_MISALIGNMENT (dr0_info);
    2163                 :        198 :           SET_DR_MISALIGNMENT (dr0_info, 0);
    2164                 :        198 :           if (dump_enabled_p ())
    2165                 :            :             {
    2166                 :        182 :               dump_printf_loc (MSG_NOTE, vect_location,
    2167                 :            :                                "Alignment of access forced using peeling.\n");
    2168                 :        182 :               dump_printf_loc (MSG_NOTE, vect_location,
    2169                 :            :                                "Peeling for alignment will be applied.\n");
    2170                 :            :             }
    2171                 :            : 
    2172                 :            :           /* The inside-loop cost will be accounted for in vectorizable_load
    2173                 :            :              and vectorizable_store correctly with adjusted alignments.
    2174                 :            :              Drop the body_cost_vec on the floor here.  */
    2175                 :        198 :           opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
    2176                 :        198 :           gcc_assert (stat);
    2177                 :        198 :           return stat;
    2178                 :            :         }
    2179                 :            :     }
    2180                 :            : 
    2181                 :            :   /* (2) Versioning to force alignment.  */
    2182                 :            : 
    2183                 :            :   /* Try versioning if:
    2184                 :            :      1) the loop is optimized for speed and the cost model is not cheap,
    2185                 :            :      2) there is at least one unsupported misaligned data ref with an unknown
    2186                 :            :         misalignment, and
    2187                 :            :      3) all misaligned data refs with a known misalignment are supported, and
    2188                 :            :      4) the number of runtime alignment checks is within reason.  */
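  /* Editorial sketch of the guard that versioning for alignment conceptually
     creates (simplified; the code below records the data refs to check in
     LOOP_VINFO_MAY_MISALIGN_STMTS, and the generated test ORs the low
     address bits of all of them into a single mask):

       if (((uintptr_t) p & (target_align - 1)) == 0)
         ... vectorized loop, with 'p' treated as aligned ...
       else
         ... original scalar loop ...

     This corresponds to possibility 1 in the overview comment above.  */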
    2189                 :            : 
    2190                 :      27111 :   do_versioning
    2191                 :      27111 :     = (optimize_loop_nest_for_speed_p (loop)
    2192                 :      26892 :        && !loop->inner /* FORNOW */
    2193                 :      53590 :        && flag_vect_cost_model != VECT_COST_MODEL_CHEAP);
    2194                 :            : 
    2195                 :            :   if (do_versioning)
    2196                 :            :     {
    2197                 :      86595 :       FOR_EACH_VEC_ELT (datarefs, i, dr)
    2198                 :            :         {
    2199                 :      66125 :           dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
    2200                 :      66125 :           stmt_vec_info stmt_info = dr_info->stmt;
    2201                 :            : 
    2202                 :            :           /* For interleaving, only the alignment of the first access
    2203                 :            :              matters.  */
    2204                 :      66125 :           if (aligned_access_p (dr_info)
    2205                 :      66125 :               || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
    2206                 :      19012 :                   && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info))
    2207                 :      44940 :             continue;
    2208                 :            : 
    2209                 :      23548 :           if (STMT_VINFO_STRIDED_P (stmt_info))
    2210                 :            :             {
    2211                 :            :               /* Strided loads perform only component accesses; alignment is
    2212                 :            :                  irrelevant for them.  */
    2213                 :       2826 :               if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
    2214                 :       2363 :                 continue;
    2215                 :            :               do_versioning = false;
    2216                 :        463 :               break;
    2217                 :            :             }
    2218                 :            : 
    2219                 :      20722 :           supportable_dr_alignment
    2220                 :      20722 :             = vect_supportable_dr_alignment (dr_info, false);
    2221                 :            : 
    2222                 :      20722 :           if (!supportable_dr_alignment)
    2223                 :            :             {
    2224                 :          0 :               int mask;
    2225                 :          0 :               tree vectype;
    2226                 :            : 
    2227                 :          0 :               if (known_alignment_for_access_p (dr_info)
    2228                 :          0 :                   || LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
    2229                 :          0 :                   >= (unsigned) param_vect_max_version_for_alignment_checks)
    2230                 :            :                 {
    2231                 :            :                   do_versioning = false;
    2232                 :        463 :                   break;
    2233                 :            :                 }
    2234                 :            : 
    2235                 :          0 :               vectype = STMT_VINFO_VECTYPE (stmt_info);
    2236                 :          0 :               gcc_assert (vectype);
    2237                 :            : 
    2238                 :            :               /* At present we don't support versioning for alignment
    2239                 :            :                  with variable VF, since there's no guarantee that the
    2240                 :            :                  VF is a power of two.  We could relax this if we added
    2241                 :            :                  a way of enforcing a power-of-two size.  */
    2242                 :          0 :               unsigned HOST_WIDE_INT size;
    2243                 :          0 :               if (!GET_MODE_SIZE (TYPE_MODE (vectype)).is_constant (&size))
    2244                 :            :                 {
    2245                 :            :                   do_versioning = false;
    2246                 :            :                   break;
    2247                 :            :                 }
    2248                 :            : 
    2249                 :            :               /* Forcing alignment in the first iteration is no good if
    2250                 :            :                  we don't keep it across iterations.  For now, just disable
    2251                 :            :                  versioning in this case.
    2252                 :            :                  ?? We could actually unroll the loop to achieve the required
    2253                 :            :                  overall step alignment, and forcing the alignment could be
    2254                 :            :                  done by doing some iterations of the non-vectorized loop.  */
    2255                 :        463 :               if (!multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
    2256                 :          0 :                                * DR_STEP_ALIGNMENT (dr),
    2257                 :            :                                DR_TARGET_ALIGNMENT (dr_info)))
    2258                 :            :                 {
    2259                 :            :                   do_versioning = false;
    2260                 :            :                   break;
    2261                 :            :                 }
    2262                 :            : 
    2263                 :            :               /* The rightmost bits of an aligned address must be zeros.
    2264                 :            :                  Construct the mask needed for this test.  For example,
    2265                 :            :                  GET_MODE_SIZE for the vector mode V4SI is 16 bytes so the
    2266                 :            :                  mask must be 15 = 0xf. */
    2267                 :          0 :               mask = size - 1;
    2268                 :            : 
    2269                 :            :               /* FORNOW: use the same mask to test all potentially unaligned
    2270                 :            :                  references in the loop.  */
    2271                 :          0 :               if (LOOP_VINFO_PTR_MASK (loop_vinfo)
    2272                 :          0 :                   && LOOP_VINFO_PTR_MASK (loop_vinfo) != mask)
    2273                 :            :                 {
    2274                 :            :                   do_versioning = false;
    2275                 :            :                   break;
    2276                 :            :                 }
    2277                 :            : 
    2278                 :          0 :               LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
    2279                 :          0 :               LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (stmt_info);
    2280                 :            :             }
    2281                 :            :         }
    2282                 :            : 
    2283                 :            :       /* Versioning requires at least one misaligned data reference.  */
    2284                 :      20933 :       if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
    2285                 :            :         do_versioning = false;
    2286                 :          0 :       else if (!do_versioning)
    2287                 :          0 :         LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
    2288                 :            :     }
    2289                 :            : 
    2290                 :       6178 :   if (do_versioning)
    2291                 :            :     {
    2292                 :          0 :       vec<stmt_vec_info> may_misalign_stmts
    2293                 :            :         = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
    2294                 :          0 :       stmt_vec_info stmt_info;
    2295                 :            : 
    2296                 :            :       /* It can now be assumed that the data references in the statements
    2297                 :            :          in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
    2298                 :            :          of the loop being vectorized.  */
    2299                 :          0 :       FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt_info)
    2300                 :            :         {
    2301                 :          0 :           dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
    2302                 :          0 :           SET_DR_MISALIGNMENT (dr_info, 0);
    2303                 :          0 :           if (dump_enabled_p ())
    2304                 :          0 :             dump_printf_loc (MSG_NOTE, vect_location,
    2305                 :            :                              "Alignment of access forced using versioning.\n");
    2306                 :            :         }
    2307                 :            : 
    2308                 :          0 :       if (dump_enabled_p ())
    2309                 :          0 :         dump_printf_loc (MSG_NOTE, vect_location,
    2310                 :            :                          "Versioning for alignment will be applied.\n");
    2311                 :            : 
    2312                 :            :       /* Peeling and versioning can't be done together at this time.  */
    2313                 :          0 :       gcc_assert (! (do_peeling && do_versioning));
    2314                 :            : 
    2315                 :          0 :       opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
    2316                 :          0 :       gcc_assert (stat);
    2317                 :          0 :       return stat;
    2318                 :            :     }
    2319                 :            : 
    2320                 :            :   /* This point is reached if neither peeling nor versioning is being done.  */
    2321                 :      27111 :   gcc_assert (! (do_peeling || do_versioning));
    2322                 :            : 
    2323                 :      27111 :   opt_result stat = vect_verify_datarefs_alignment (loop_vinfo);
    2324                 :      27111 :   return stat;
    2325                 :            : }
    2326                 :            : 
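
(Editorial aside, not part of tree-vect-data-refs.c: the runtime test that versioning
for alignment prepares above reduces to a single bitwise check against the mask
computed as size - 1.  The sketch below uses invented names (all_aligned_p) and
assumes a 16-byte vector size.)

#include <cstdint>
#include <cstdio>

/* Illustrative only: true iff both addresses have the rightmost bits
   required for VECTOR_SIZE-byte alignment cleared -- the kind of test the
   versioned loop guard performs with LOOP_VINFO_PTR_MASK.  */
static bool
all_aligned_p (const void *a, const void *b, uintptr_t vector_size)
{
  uintptr_t mask = vector_size - 1;   /* e.g. 16-byte V4SI -> 15 = 0xf.  */
  return (((uintptr_t) a | (uintptr_t) b) & mask) == 0;
}

int
main ()
{
  alignas (16) static float x[64], y[64];
  /* The vectorized loop version runs only when the guard holds;
     otherwise the scalar copy of the loop executes.  */
  printf ("%s\n", all_aligned_p (x, y + 1, 16) ? "vectorized" : "scalar");
  return 0;
}
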
    2327                 :            : 
    2328                 :            : /* Function vect_find_same_alignment_drs.
    2329                 :            : 
    2330                 :            :    If the two data references in DDR are guaranteed to have the same
    2331                 :            :    alignment for the chosen vectorization factor, record that in VINFO.  */
    2332                 :            : 
    2333                 :            : static void
    2334                 :     720517 : vect_find_same_alignment_drs (vec_info *vinfo, data_dependence_relation *ddr)
    2335                 :            : {
    2336                 :     720517 :   struct data_reference *dra = DDR_A (ddr);
    2337                 :     720517 :   struct data_reference *drb = DDR_B (ddr);
    2338                 :     720517 :   dr_vec_info *dr_info_a = vinfo->lookup_dr (dra);
    2339                 :     720517 :   dr_vec_info *dr_info_b = vinfo->lookup_dr (drb);
    2340                 :     720517 :   stmt_vec_info stmtinfo_a = dr_info_a->stmt;
    2341                 :     720517 :   stmt_vec_info stmtinfo_b = dr_info_b->stmt;
    2342                 :            : 
    2343                 :     720517 :   if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
    2344                 :     691935 :     return;
    2345                 :            : 
    2346                 :     214720 :   if (dra == drb)
    2347                 :            :     return;
    2348                 :            : 
    2349                 :     100697 :   if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
    2350                 :     100684 :       || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
    2351                 :            :     return;
    2352                 :            : 
    2353                 :     100632 :   if (!operand_equal_p (DR_BASE_ADDRESS (dra), DR_BASE_ADDRESS (drb), 0)
    2354                 :      53195 :       || !operand_equal_p (DR_OFFSET (dra), DR_OFFSET (drb), 0)
    2355                 :     142434 :       || !operand_equal_p (DR_STEP (dra), DR_STEP (drb), 0))
    2356                 :      59131 :     return;
    2357                 :            : 
    2358                 :            :   /* Two references with distance zero have the same alignment.  */
    2359                 :      41501 :   poly_offset_int diff = (wi::to_poly_offset (DR_INIT (dra))
    2360                 :      41501 :                           - wi::to_poly_offset (DR_INIT (drb)));
    2361                 :      41501 :   if (maybe_ne (diff, 0))
    2362                 :            :     {
    2363                 :            :       /* Get the wider of the two alignments.  */
    2364                 :      16317 :       poly_uint64 align_a =
    2365                 :      32634 :         exact_div (vect_calculate_target_alignment (dr_info_a),
    2366                 :      16317 :                    BITS_PER_UNIT);
    2367                 :      16317 :       poly_uint64 align_b =
    2368                 :      32634 :         exact_div (vect_calculate_target_alignment (dr_info_b),
    2369                 :      16317 :                    BITS_PER_UNIT);
    2370                 :      16317 :       unsigned HOST_WIDE_INT align_a_c, align_b_c;
    2371                 :      16317 :       if (!align_a.is_constant (&align_a_c)
    2372                 :      16317 :           || !align_b.is_constant (&align_b_c))
    2373                 :      12919 :         return;
    2374                 :            : 
    2375                 :      16317 :       unsigned HOST_WIDE_INT max_align = MAX (align_a_c, align_b_c);
    2376                 :            : 
    2377                 :            :       /* Require the gap to be a multiple of the larger vector alignment.  */
    2378                 :      16317 :       if (!multiple_p (diff, max_align))
    2379                 :      12919 :         return;
    2380                 :            :     }
    2381                 :            : 
    2382                 :      28582 :   STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_a).safe_push (drb);
    2383                 :      28582 :   STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
    2384                 :      28582 :   if (dump_enabled_p ())
    2385                 :       8270 :     dump_printf_loc (MSG_NOTE, vect_location,
    2386                 :            :                      "accesses have the same alignment: %T and %T\n",
    2387                 :            :                      DR_REF (dra), DR_REF (drb));
    2388                 :            : }
    2389                 :            : 
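
(Editorial sketch, not GCC code: the distance test above modelled on plain integers.
Two references with equal base, offset and step are recorded as having the same
alignment when the difference of their constant offsets is zero or a multiple of the
wider target alignment; same_alignment_p is an invented name.)

#include <cassert>
#include <cstdint>

/* DIFF models DR_INIT (dra) - DR_INIT (drb) in bytes; ALIGN_A and ALIGN_B
   model the two target alignments in bytes.  */
static bool
same_alignment_p (int64_t diff, int64_t align_a, int64_t align_b)
{
  if (diff == 0)
    return true;   /* Distance zero: trivially the same alignment.  */
  int64_t max_align = align_a > align_b ? align_a : align_b;
  return diff % max_align == 0;
}

int
main ()
{
  /* a[i] and a[i + 4] with 4-byte elements and 16-byte target alignment:
     the 16-byte gap keeps both references equally (mis)aligned.  */
  assert (same_alignment_p (16, 16, 16));
  /* An 8-byte gap gives no such guarantee.  */
  assert (!same_alignment_p (8, 16, 16));
  return 0;
}
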
    2390                 :            : 
    2391                 :            : /* Function vect_analyze_data_refs_alignment
    2392                 :            : 
    2393                 :            :    Analyze the alignment of the data-references in the loop.
    2394                 :            :    Return a failure result if a data reference is found that cannot be vectorized.  */
    2395                 :            : 
    2396                 :            : opt_result
    2397                 :      33894 : vect_analyze_data_refs_alignment (loop_vec_info vinfo)
    2398                 :            : {
    2399                 :      33894 :   DUMP_VECT_SCOPE ("vect_analyze_data_refs_alignment");
    2400                 :            : 
    2401                 :            :   /* Mark groups of data references with same alignment using
    2402                 :            :      data dependence information.  */
    2403                 :      33894 :   vec<ddr_p> ddrs = vinfo->shared->ddrs;
    2404                 :      33894 :   struct data_dependence_relation *ddr;
    2405                 :      33894 :   unsigned int i;
    2406                 :            : 
    2407                 :     754411 :   FOR_EACH_VEC_ELT (ddrs, i, ddr)
    2408                 :     720517 :     vect_find_same_alignment_drs (vinfo, ddr);
    2409                 :            : 
    2410                 :      33894 :   vec<data_reference_p> datarefs = vinfo->shared->datarefs;
    2411                 :      33894 :   struct data_reference *dr;
    2412                 :            : 
    2413                 :      33894 :   vect_record_base_alignments (vinfo);
    2414                 :     147929 :   FOR_EACH_VEC_ELT (datarefs, i, dr)
    2415                 :            :     {
    2416                 :     114035 :       dr_vec_info *dr_info = vinfo->lookup_dr (dr);
    2417                 :     114035 :       if (STMT_VINFO_VECTORIZABLE (dr_info->stmt))
    2418                 :     114035 :         vect_compute_data_ref_alignment (dr_info);
    2419                 :            :     }
    2420                 :            : 
    2421                 :      33894 :   return opt_result::success ();
    2422                 :            : }
    2423                 :            : 
    2424                 :            : 
    2425                 :            : /* Analyze alignment of DRs of stmts in NODE.  */
    2426                 :            : 
    2427                 :            : static bool
    2428                 :      75515 : vect_slp_analyze_and_verify_node_alignment (slp_tree node)
    2429                 :            : {
    2430                 :            :   /* We vectorize from the first scalar stmt in the node unless
    2431                 :            :      the node is permuted, in which case we start from the first
    2432                 :            :      element in the group.  */
    2433                 :      75515 :   stmt_vec_info first_stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
    2434                 :      75515 :   dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
    2435                 :      75515 :   if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
    2436                 :        479 :     first_stmt_info = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
    2437                 :            : 
    2438                 :      75515 :   dr_vec_info *dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
    2439                 :      75515 :   vect_compute_data_ref_alignment (dr_info);
    2440                 :            :   /* For creating the data-ref pointer we need alignment of the
    2441                 :            :      first element anyway.  */
    2442                 :      75515 :   if (dr_info != first_dr_info)
    2443                 :        349 :     vect_compute_data_ref_alignment (first_dr_info);
    2444                 :      75515 :   if (! verify_data_ref_alignment (dr_info))
    2445                 :            :     {
    2446                 :       4981 :       if (dump_enabled_p ())
    2447                 :          3 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    2448                 :            :                          "not vectorized: bad data alignment in basic "
    2449                 :            :                          "block.\n");
    2450                 :       4981 :       return false;
    2451                 :            :     }
    2452                 :            : 
    2453                 :            :   return true;
    2454                 :            : }
    2455                 :            : 
    2456                 :            : /* Function vect_slp_analyze_and_verify_instance_alignment
    2457                 :            : 
    2458                 :            :    Analyze the alignment of the data-references in the SLP instance.
    2459                 :            :    Return FALSE if a data reference is found that cannot be vectorized.  */
    2460                 :            : 
    2461                 :            : bool
    2462                 :      69994 : vect_slp_analyze_and_verify_instance_alignment (slp_instance instance)
    2463                 :            : {
    2464                 :      69994 :   DUMP_VECT_SCOPE ("vect_slp_analyze_and_verify_instance_alignment");
    2465                 :            : 
    2466                 :      69994 :   slp_tree node;
    2467                 :      69994 :   unsigned i;
    2468                 :      75659 :   FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, node)
    2469                 :       5930 :     if (! vect_slp_analyze_and_verify_node_alignment (node))
    2470                 :            :       return false;
    2471                 :            : 
    2472                 :      69729 :   node = SLP_INSTANCE_TREE (instance);
    2473                 :      69729 :   if (STMT_VINFO_DATA_REF (SLP_TREE_SCALAR_STMTS (node)[0])
    2474                 :     139314 :       && ! vect_slp_analyze_and_verify_node_alignment
    2475                 :      69585 :              (SLP_INSTANCE_TREE (instance)))
    2476                 :       4716 :     return false;
    2477                 :            : 
    2478                 :            :   return true;
    2479                 :            : }
    2480                 :            : 
    2481                 :            : 
    2482                 :            : /* Analyze groups of accesses: check that DR_INFO belongs to a group of
    2483                 :            :    accesses of legal size, step, etc.  Detect gaps, single element
    2484                 :            :    interleaving, and other special cases. Set grouped access info.
    2485                 :            :    Collect groups of strided stores for further use in SLP analysis.
    2486                 :            :    Worker for vect_analyze_group_access.  */
    2487                 :            : 
    2488                 :            : static bool
    2489                 :     782176 : vect_analyze_group_access_1 (dr_vec_info *dr_info)
    2490                 :            : {
    2491                 :     782176 :   data_reference *dr = dr_info->dr;
    2492                 :     782176 :   tree step = DR_STEP (dr);
    2493                 :     782176 :   tree scalar_type = TREE_TYPE (DR_REF (dr));
    2494                 :     782176 :   HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
    2495                 :     782176 :   stmt_vec_info stmt_info = dr_info->stmt;
    2496                 :     782176 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    2497                 :     782176 :   bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
    2498                 :     782176 :   HOST_WIDE_INT dr_step = -1;
    2499                 :     782176 :   HOST_WIDE_INT groupsize, last_accessed_element = 1;
    2500                 :     782176 :   bool slp_impossible = false;
    2501                 :            : 
    2502                 :            :   /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
    2503                 :            :      size of the interleaving group (including gaps).  */
    2504                 :     782176 :   if (tree_fits_shwi_p (step))
    2505                 :            :     {
    2506                 :     777762 :       dr_step = tree_to_shwi (step);
    2507                 :            :       /* Check that STEP is a multiple of type size.  Otherwise there is
    2508                 :            :          a non-element-sized gap at the end of the group which we
    2509                 :            :          cannot represent in DR_GROUP_GAP or DR_GROUP_SIZE.
    2510                 :            :          ???  As we can handle non-constant step fine here, we should
    2511                 :            :          simply remove uses of DR_GROUP_GAP between the last and first
    2512                 :            :          element and instead rely on DR_STEP.  DR_GROUP_SIZE then would
    2513                 :            :          simply not include that gap.  */
    2514                 :     777762 :       if ((dr_step % type_size) != 0)
    2515                 :            :         {
    2516                 :         47 :           if (dump_enabled_p ())
    2517                 :         35 :             dump_printf_loc (MSG_NOTE, vect_location,
    2518                 :            :                              "Step %T is not a multiple of the element size"
    2519                 :            :                              " for %T\n",
    2520                 :            :                              step, DR_REF (dr));
    2521                 :         47 :           return false;
    2522                 :            :         }
    2523                 :    1555430 :       groupsize = absu_hwi (dr_step) / type_size;
    2524                 :            :     }
    2525                 :            :   else
    2526                 :            :     groupsize = 0;
    2527                 :            : 
    2528                 :            :   /* A non-consecutive access is possible only if it is part of an interleaving group.  */
    2529                 :     782129 :   if (!DR_GROUP_FIRST_ELEMENT (stmt_info))
    2530                 :            :     {
    2531                 :            :       /* Check whether this DR is part of an interleaving group and is the
    2532                 :            :          single element of the group that is accessed in the loop.  */
    2533                 :            : 
    2534                 :            :       /* Gaps are supported only for loads. STEP must be a multiple of the type
    2535                 :            :          size.  */
    2536                 :     364020 :       if (DR_IS_READ (dr)
    2537                 :     175058 :           && (dr_step % type_size) == 0
    2538                 :     175058 :           && groupsize > 0)
    2539                 :            :         {
    2540                 :       2625 :           DR_GROUP_FIRST_ELEMENT (stmt_info) = stmt_info;
    2541                 :       2625 :           DR_GROUP_SIZE (stmt_info) = groupsize;
    2542                 :       2625 :           DR_GROUP_GAP (stmt_info) = groupsize - 1;
    2543                 :       2625 :           if (dump_enabled_p ())
    2544                 :        407 :             dump_printf_loc (MSG_NOTE, vect_location,
    2545                 :            :                              "Detected single element interleaving %T"
    2546                 :            :                              " step %T\n",
    2547                 :            :                              DR_REF (dr), step);
    2548                 :            : 
    2549                 :       2625 :           return true;
    2550                 :            :         }
    2551                 :            : 
    2552                 :     361395 :       if (dump_enabled_p ())
    2553                 :       1219 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    2554                 :            :                          "not consecutive access %G", stmt_info->stmt);
    2555                 :            : 
    2556                 :     361395 :       if (bb_vinfo)
    2557                 :            :         {
    2558                 :            :           /* Mark the statement as unvectorizable.  */
    2559                 :     359198 :           STMT_VINFO_VECTORIZABLE (stmt_info) = false;
    2560                 :     359198 :           return true;
    2561                 :            :         }
    2562                 :            : 
    2563                 :       2197 :       if (dump_enabled_p ())
    2564                 :        146 :         dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n");
    2565                 :       2197 :       STMT_VINFO_STRIDED_P (stmt_info) = true;
    2566                 :       2197 :       return true;
    2567                 :            :     }
    2568                 :            : 
    2569                 :     418109 :   if (DR_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info)
    2570                 :            :     {
    2571                 :            :       /* First stmt in the interleaving chain. Check the chain.  */
    2572                 :     123503 :       stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
    2573                 :     123503 :       struct data_reference *data_ref = dr;
    2574                 :     123503 :       unsigned int count = 1;
    2575                 :     123503 :       tree prev_init = DR_INIT (data_ref);
    2576                 :     123503 :       HOST_WIDE_INT diff, gaps = 0;
    2577                 :            : 
    2578                 :            :       /* By construction, all group members have INTEGER_CST DR_INITs.  */
    2579                 :     418313 :       while (next)
    2580                 :            :         {
    2581                 :            :           /* We never have the same DR multiple times.  */
    2582                 :     294923 :           gcc_assert (tree_int_cst_compare (DR_INIT (data_ref),
    2583                 :            :                                 DR_INIT (STMT_VINFO_DATA_REF (next))) != 0);
    2584                 :            : 
    2585                 :     294923 :           data_ref = STMT_VINFO_DATA_REF (next);
    2586                 :            : 
    2587                 :            :           /* All group members have the same STEP by construction.  */
    2588                 :     294923 :           gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
    2589                 :            : 
    2590                 :            :           /* Check that the distance between two accesses is equal to the type
    2591                 :            :              size. Otherwise, we have gaps.  */
    2592                 :     294923 :           diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
    2593                 :     294923 :                   - TREE_INT_CST_LOW (prev_init)) / type_size;
    2594                 :     294923 :           if (diff != 1)
    2595                 :            :             {
    2596                 :            :               /* FORNOW: SLP of accesses with gaps is not supported.  */
    2597                 :      10722 :               slp_impossible = true;
    2598                 :      10722 :               if (DR_IS_WRITE (data_ref))
    2599                 :            :                 {
    2600                 :        113 :                   if (dump_enabled_p ())
    2601                 :          0 :                     dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    2602                 :            :                                      "interleaved store with gaps\n");
    2603                 :        113 :                   return false;
    2604                 :            :                 }
    2605                 :            : 
    2606                 :      10609 :               gaps += diff - 1;
    2607                 :            :             }
    2608                 :            : 
    2609                 :     294810 :           last_accessed_element += diff;
    2610                 :            : 
    2611                 :            :           /* Store the gap from the previous member of the group. If there is no
    2612                 :            :              gap in the access, DR_GROUP_GAP is always 1.  */
    2613                 :     294810 :           DR_GROUP_GAP (next) = diff;
    2614                 :            : 
    2615                 :     294810 :           prev_init = DR_INIT (data_ref);
    2616                 :     294810 :           next = DR_GROUP_NEXT_ELEMENT (next);
    2617                 :            :           /* Count the number of data-refs in the chain.  */
    2618                 :     294810 :           count++;
    2619                 :            :         }
    2620                 :            : 
    2621                 :     123390 :       if (groupsize == 0)
    2622                 :     114109 :         groupsize = count + gaps;
    2623                 :            : 
    2624                 :            :       /* This could be UINT_MAX, but as we generate code for such groups in a
    2625                 :            :          very inefficient way we have to cap it earlier.  See PR78699 for an example.  */
    2626                 :     123390 :       if (groupsize > 4096)
    2627                 :            :         {
    2628                 :         30 :           if (dump_enabled_p ())
    2629                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    2630                 :            :                              "group is too large\n");
    2631                 :         30 :           return false;
    2632                 :            :         }
    2633                 :            : 
    2634                 :            :       /* Check that the size of the interleaving is equal to count for stores,
    2635                 :            :          i.e., that there are no gaps.  */
    2636                 :     123360 :       if (groupsize != count
    2637                 :       7515 :           && !DR_IS_READ (dr))
    2638                 :            :         {
    2639                 :       1031 :           groupsize = count;
    2640                 :       1031 :           STMT_VINFO_STRIDED_P (stmt_info) = true;
    2641                 :            :         }
    2642                 :            : 
    2643                 :            :       /* If there is a gap after the last load in the group it is the
    2644                 :            :          difference between the groupsize and the last accessed
    2645                 :            :          element.
    2646                 :            :          When there is no gap, this difference should be 0.  */
    2647                 :     123360 :       DR_GROUP_GAP (stmt_info) = groupsize - last_accessed_element;
    2648                 :            : 
    2649                 :     123360 :       DR_GROUP_SIZE (stmt_info) = groupsize;
    2650                 :     123360 :       if (dump_enabled_p ())
    2651                 :            :         {
    2652                 :       3744 :           dump_printf_loc (MSG_NOTE, vect_location,
    2653                 :            :                            "Detected interleaving ");
    2654                 :       3744 :           if (DR_IS_READ (dr))
    2655                 :       1967 :             dump_printf (MSG_NOTE, "load ");
    2656                 :       1777 :           else if (STMT_VINFO_STRIDED_P (stmt_info))
    2657                 :        237 :             dump_printf (MSG_NOTE, "strided store ");
    2658                 :            :           else
    2659                 :       1540 :             dump_printf (MSG_NOTE, "store ");
    2660                 :       3744 :           dump_printf (MSG_NOTE, "of size %u\n",
    2661                 :            :                        (unsigned)groupsize);
    2662                 :       3744 :           dump_printf_loc (MSG_NOTE, vect_location, "\t%G", stmt_info->stmt);
    2663                 :       3744 :           next = DR_GROUP_NEXT_ELEMENT (stmt_info);
    2664                 :      18089 :           while (next)
    2665                 :            :             {
    2666                 :      14345 :               if (DR_GROUP_GAP (next) != 1)
    2667                 :        172 :                 dump_printf_loc (MSG_NOTE, vect_location,
    2668                 :            :                                  "\t<gap of %d elements>\n",
    2669                 :        172 :                                  DR_GROUP_GAP (next) - 1);
    2670                 :      14345 :               dump_printf_loc (MSG_NOTE, vect_location, "\t%G", next->stmt);
    2671                 :      14345 :               next = DR_GROUP_NEXT_ELEMENT (next);
    2672                 :            :             }
    2673                 :       3744 :           if (DR_GROUP_GAP (stmt_info) != 0)
    2674                 :        187 :             dump_printf_loc (MSG_NOTE, vect_location,
    2675                 :            :                              "\t<gap of %d elements>\n",
    2676                 :        187 :                              DR_GROUP_GAP (stmt_info));
    2677                 :            :         }
    2678                 :            : 
    2679                 :            :       /* SLP: create an SLP data structure for every interleaving group of
    2680                 :            :          stores for further analysis in vect_analyze_slp.  */
    2681                 :     123360 :       if (DR_IS_WRITE (dr) && !slp_impossible)
    2682                 :            :         {
    2683                 :      81949 :           if (loop_vinfo)
    2684                 :       4090 :             LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt_info);
    2685                 :      81949 :           if (bb_vinfo)
    2686                 :      77859 :             BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt_info);
    2687                 :            :         }
    2688                 :            :     }
    2689                 :            : 
    2690                 :            :   return true;
    2691                 :            : }
    2692                 :            : 
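
(Editorial model, not GCC code: the DR_GROUP_SIZE / DR_GROUP_GAP bookkeeping above
for a load group, written out on plain integers.  group_info and analyze_group are
invented names; inits plays the role of the sorted constant DR_INIT offsets of the
group members.)

#include <cstdio>
#include <cstdlib>
#include <vector>

struct group_info
{
  long size;                 /* DR_GROUP_SIZE: elements covered by one step.  */
  std::vector<long> gaps;    /* DR_GROUP_GAP of each member after the first.  */
  long trailing_gap;         /* DR_GROUP_GAP recorded on the first element.  */
};

static group_info
analyze_group (long type_size, long step, const std::vector<long> &inits)
{
  group_info g;
  long last_accessed = 1, gaps = 0;
  for (size_t i = 1; i < inits.size (); ++i)
    {
      long diff = (inits[i] - inits[i - 1]) / type_size;
      g.gaps.push_back (diff);   /* 1 means no gap before this member.  */
      gaps += diff - 1;
      last_accessed += diff;
    }
  /* With a known step the group size is the step in elements; otherwise
     it is the member count plus the gaps between members.  */
  g.size = step ? labs (step) / type_size : (long) inits.size () + gaps;
  g.trailing_gap = g.size - last_accessed;
  return g;
}

int
main ()
{
  /* Loads of a[4*i], a[4*i + 2] and a[4*i + 3] with 4-byte elements and a
     16-byte step: group size 4, a one-element gap before the second member
     and no gap at the end of the group.  */
  group_info g = analyze_group (4, 16, {0, 8, 12});
  printf ("size=%ld trailing=%ld\n", g.size, g.trailing_gap);
  return 0;
}
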
    2693                 :            : /* Analyze groups of accesses: check that DR_INFO belongs to a group of
    2694                 :            :    accesses of legal size, step, etc.  Detect gaps, single element
    2695                 :            :    interleaving, and other special cases. Set grouped access info.
    2696                 :            :    Collect groups of strided stores for further use in SLP analysis.  */
    2697                 :            : 
    2698                 :            : static bool
    2699                 :     782176 : vect_analyze_group_access (dr_vec_info *dr_info)
    2700                 :            : {
    2701                 :     782176 :   if (!vect_analyze_group_access_1 (dr_info))
    2702                 :            :     {
    2703                 :            :       /* Dissolve the group if present.  */
    2704                 :        190 :       stmt_vec_info stmt_info = DR_GROUP_FIRST_ELEMENT (dr_info->stmt);
    2705                 :        928 :       while (stmt_info)
    2706                 :            :         {
    2707                 :        738 :           stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
    2708                 :        738 :           DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
    2709                 :        738 :           DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
    2710                 :        738 :           stmt_info = next;
    2711                 :            :         }
    2712                 :            :       return false;
    2713                 :            :     }
    2714                 :            :   return true;
    2715                 :            : }
    2716                 :            : 
    2717                 :            : /* Analyze the access pattern of the data-reference DR_INFO.
    2718                 :            :    In case of non-consecutive accesses call vect_analyze_group_access() to
    2719                 :            :    analyze groups of accesses.  */
    2720                 :            : 
    2721                 :            : static bool
    2722                 :     861740 : vect_analyze_data_ref_access (dr_vec_info *dr_info)
    2723                 :            : {
    2724                 :     861740 :   data_reference *dr = dr_info->dr;
    2725                 :     861740 :   tree step = DR_STEP (dr);
    2726                 :     861740 :   tree scalar_type = TREE_TYPE (DR_REF (dr));
    2727                 :     861740 :   stmt_vec_info stmt_info = dr_info->stmt;
    2728                 :     861740 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    2729                 :     861740 :   class loop *loop = NULL;
    2730                 :            : 
    2731                 :     861740 :   if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    2732                 :            :     return true;
    2733                 :            : 
    2734                 :     861393 :   if (loop_vinfo)
    2735                 :     123759 :     loop = LOOP_VINFO_LOOP (loop_vinfo);
    2736                 :            : 
    2737                 :     861393 :   if (loop_vinfo && !step)
    2738                 :            :     {
    2739                 :          0 :       if (dump_enabled_p ())
    2740                 :          0 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    2741                 :            :                          "bad data-ref access in loop\n");
    2742                 :          0 :       return false;
    2743                 :            :     }
    2744                 :            : 
    2745                 :            :   /* Allow loads with zero step in inner-loop vectorization.  */
    2746                 :     861393 :   if (loop_vinfo && integer_zerop (step))
    2747                 :            :     {
    2748                 :       1356 :       DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
    2749                 :       1356 :       if (!nested_in_vect_loop_p (loop, stmt_info))
    2750                 :       1279 :         return DR_IS_READ (dr);
    2751                 :            :       /* Allow references with zero step for outer loops marked
    2752                 :            :          with pragma omp simd only - it guarantees absence of
    2753                 :            :          loop-carried dependencies between inner loop iterations.  */
    2754                 :         77 :       if (loop->safelen < 2)
    2755                 :            :         {
    2756                 :         41 :           if (dump_enabled_p ())
    2757                 :          9 :             dump_printf_loc (MSG_NOTE, vect_location,
    2758                 :            :                              "zero step in inner loop of nest\n");
    2759                 :         41 :           return false;
    2760                 :            :         }
    2761                 :            :     }
    2762                 :            : 
    2763                 :     860073 :   if (loop && nested_in_vect_loop_p (loop, stmt_info))
    2764                 :            :     {
    2765                 :            :       /* Interleaved accesses are not yet supported within outer-loop
    2766                 :            :         vectorization for references in the inner-loop.  */
    2767                 :       1679 :       DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
    2768                 :            : 
    2769                 :            :       /* For the rest of the analysis we use the outer-loop step.  */
    2770                 :       1679 :       step = STMT_VINFO_DR_STEP (stmt_info);
    2771                 :       1679 :       if (integer_zerop (step))
    2772                 :            :         {
    2773                 :        699 :           if (dump_enabled_p ())
    2774                 :        192 :             dump_printf_loc (MSG_NOTE, vect_location,
    2775                 :            :                              "zero step in outer loop.\n");
    2776                 :        699 :           return DR_IS_READ (dr);
    2777                 :            :         }
    2778                 :            :     }
    2779                 :            : 
    2780                 :            :   /* Consecutive?  */
    2781                 :     859374 :   if (TREE_CODE (step) == INTEGER_CST)
    2782                 :            :     {
    2783                 :     849713 :       HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
    2784                 :     849713 :       if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
    2785                 :     849713 :           || (dr_step < 0
    2786                 :       2061 :               && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
    2787                 :            :         {
    2788                 :            :           /* Mark that it is not interleaving.  */
    2789                 :      71626 :           DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
    2790                 :      71626 :           return true;
    2791                 :            :         }
    2792                 :            :     }
    2793                 :            : 
    2794                 :     787748 :   if (loop && nested_in_vect_loop_p (loop, stmt_info))
    2795                 :            :     {
    2796                 :        583 :       if (dump_enabled_p ())
    2797                 :        123 :         dump_printf_loc (MSG_NOTE, vect_location,
    2798                 :            :                          "grouped access in outer loop.\n");
    2799                 :        583 :       return false;
    2800                 :            :     }
    2801                 :            : 
    2802                 :            : 
    2803                 :            :   /* Assume this is a DR handled by non-constant strided load case.  */
    2804                 :     787165 :   if (TREE_CODE (step) != INTEGER_CST)
    2805                 :       9403 :     return (STMT_VINFO_STRIDED_P (stmt_info)
    2806                 :       9403 :             && (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
    2807                 :       4414 :                 || vect_analyze_group_access (dr_info)));
    2808                 :            : 
    2809                 :            :   /* Not consecutive access - check if it's a part of interleaving group.  */
    2810                 :     777762 :   return vect_analyze_group_access (dr_info);
    2811                 :            : }
    2812                 :            : 
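
(Editorial toy, not GCC code: the classification made above for a data reference with
element size TYPE_SIZE and constant byte step STEP.  classify_access and access_kind
are invented names, and the real code distinguishes reversed, strided, invariant and
nested-loop cases in more detail than this sketch does.)

#include <cstdio>
#include <cstdlib>

enum access_kind { CONSECUTIVE, INVARIANT, GROUPED_OR_STRIDED };

static access_kind
classify_access (long type_size, long step)
{
  if (step == 0)
    return INVARIANT;              /* Zero step: loop-invariant address.  */
  if (labs (step) == type_size)
    return CONSECUTIVE;            /* Step of one element, possibly reversed.  */
  return GROUPED_OR_STRIDED;       /* Needs group or strided analysis.  */
}

int
main ()
{
  printf ("%d %d %d\n",
          classify_access (4, 4),    /* a[i]   -> CONSECUTIVE  */
          classify_access (4, 0),    /* a[0]   -> INVARIANT  */
          classify_access (4, 16));  /* a[4*i] -> GROUPED_OR_STRIDED  */
  return 0;
}
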
    2813                 :            : /* Compare two data-references DRA and DRB, ordering them so that
    2814                 :            :    references which may belong to the same group end up adjacent.  */
    2815                 :            : 
    2816                 :            : static int
    2817                 :   14328000 : dr_group_sort_cmp (const void *dra_, const void *drb_)
    2818                 :            : {
    2819                 :   14328000 :   data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
    2820                 :   14328000 :   data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
    2821                 :   14328000 :   int cmp;
    2822                 :            : 
    2823                 :            :   /* Stabilize sort.  */
    2824                 :   14328000 :   if (dra == drb)
    2825                 :            :     return 0;
    2826                 :            : 
    2827                 :            :   /* DRs in different loops never belong to the same group.  */
    2828                 :   14328000 :   loop_p loopa = gimple_bb (DR_STMT (dra))->loop_father;
    2829                 :   14328000 :   loop_p loopb = gimple_bb (DR_STMT (drb))->loop_father;
    2830                 :   14328000 :   if (loopa != loopb)
    2831                 :       5656 :     return loopa->num < loopb->num ? -1 : 1;
    2832                 :            : 
    2833                 :            :   /* Ordering of DRs according to base.  */
    2834                 :   14324100 :   cmp = data_ref_compare_tree (DR_BASE_ADDRESS (dra),
    2835                 :            :                                DR_BASE_ADDRESS (drb));
    2836                 :   14324100 :   if (cmp != 0)
    2837                 :            :     return cmp;
    2838                 :            : 
    2839                 :            :   /* And according to DR_OFFSET.  */
    2840                 :    8214160 :   cmp = data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
    2841                 :    8214160 :   if (cmp != 0)
    2842                 :            :     return cmp;
    2843                 :            : 
    2844                 :            :   /* Put reads before writes.  */
    2845                 :    7959610 :   if (DR_IS_READ (dra) != DR_IS_READ (drb))
    2846                 :     708284 :     return DR_IS_READ (dra) ? -1 : 1;
    2847                 :            : 
    2848                 :            :   /* Then sort after access size.  */
    2849                 :            :   /* Then sort by access size.  */
    2850                 :    7474240 :                                TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
    2851                 :    7474240 :   if (cmp != 0)
    2852                 :            :     return cmp;
    2853                 :            : 
    2854                 :            :   /* And by step.  */
    2855                 :    6439820 :   cmp = data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb));
    2856                 :    6439820 :   if (cmp != 0)
    2857                 :            :     return cmp;
    2858                 :            : 
    2859                 :            :   /* Then sort by DR_INIT.  In case of identical DRs, sort by stmt UID.  */
    2860                 :    6438260 :   cmp = data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb));
    2861                 :    6438260 :   if (cmp == 0)
    2862                 :     141671 :     return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
    2863                 :            :   return cmp;
    2864                 :            : }
    2865                 :            : 
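
(Editorial illustration, not the comparator GCC uses: the same ordering rules written
as a strict-weak-ordering predicate over a plain struct.  dref and dref_less are
invented names.)

#include <algorithm>
#include <string>
#include <vector>

struct dref
{
  std::string base, offset;   /* stand-ins for DR_BASE_ADDRESS / DR_OFFSET */
  bool is_read;
  long size, step, init;
  unsigned uid;               /* stand-in for gimple_uid (DR_STMT (...)) */
};

static bool
dref_less (const dref &a, const dref &b)
{
  if (a.base != b.base)       return a.base < b.base;
  if (a.offset != b.offset)   return a.offset < b.offset;
  if (a.is_read != b.is_read) return a.is_read;          /* reads first */
  if (a.size != b.size)       return a.size < b.size;
  if (a.step != b.step)       return a.step < b.step;
  if (a.init != b.init)       return a.init < b.init;
  return a.uid < b.uid;                                  /* stabilize */
}

int
main ()
{
  std::vector<dref> v = { { "a", "", false, 4, 16, 4, 2 },
                          { "a", "", true,  4, 16, 0, 1 } };
  std::sort (v.begin (), v.end (), dref_less);   /* the read sorts first */
  return 0;
}
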
    2866                 :            : /* If OP is the result of a conversion, return the unconverted value,
    2867                 :            :    otherwise return null.  */
    2868                 :            : 
    2869                 :            : static tree
    2870                 :         51 : strip_conversion (tree op)
    2871                 :            : {
    2872                 :         51 :   if (TREE_CODE (op) != SSA_NAME)
    2873                 :            :     return NULL_TREE;
    2874                 :         51 :   gimple *stmt = SSA_NAME_DEF_STMT (op);
    2875                 :         51 :   if (!is_gimple_assign (stmt)
    2876                 :         51 :       || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt)))
    2877                 :            :     return NULL_TREE;
    2878                 :         48 :   return gimple_assign_rhs1 (stmt);
    2879                 :            : }
    2880                 :            : 
    2881                 :            : /* Return true if vectorizable_* routines can handle statements STMT1_INFO
    2882                 :            :    and STMT2_INFO being in a single group.  When ALLOW_SLP_P, masked loads can
    2883                 :            :    be grouped in SLP mode.  */
    2884                 :            : 
    2885                 :            : static bool
    2886                 :     754068 : can_group_stmts_p (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info,
    2887                 :            :                    bool allow_slp_p)
    2888                 :            : {
    2889                 :     754068 :   if (gimple_assign_single_p (stmt1_info->stmt))
    2890                 :     754033 :     return gimple_assign_single_p (stmt2_info->stmt);
    2891                 :            : 
    2892                 :         35 :   gcall *call1 = dyn_cast <gcall *> (stmt1_info->stmt);
    2893                 :         35 :   if (call1 && gimple_call_internal_p (call1))
    2894                 :            :     {
    2895                 :            :       /* Check for two masked loads or two masked stores.  */
    2896                 :         35 :       gcall *call2 = dyn_cast <gcall *> (stmt2_info->stmt);
    2897                 :         35 :       if (!call2 || !gimple_call_internal_p (call2))
    2898                 :            :         return false;
    2899                 :         35 :       internal_fn ifn = gimple_call_internal_fn (call1);
    2900                 :         35 :       if (ifn != IFN_MASK_LOAD && ifn != IFN_MASK_STORE)
    2901                 :            :         return false;
    2902                 :         35 :       if (ifn != gimple_call_internal_fn (call2))
    2903                 :            :         return false;
    2904                 :            : 
    2905                 :            :       /* Check that the masks are the same.  Cope with casts of masks,
    2906                 :            :          like those created by build_mask_conversion.  */
    2907                 :         35 :       tree mask1 = gimple_call_arg (call1, 2);
    2908                 :         35 :       tree mask2 = gimple_call_arg (call2, 2);
    2909                 :         35 :       if (!operand_equal_p (mask1, mask2, 0)
    2910                 :         35 :           && (ifn == IFN_MASK_STORE || !allow_slp_p))
    2911                 :            :         {
    2912                 :         27 :           mask1 = strip_conversion (mask1);
    2913                 :         27 :           if (!mask1)
    2914                 :            :             return false;
    2915                 :         24 :           mask2 = strip_conversion (mask2);
    2916                 :         24 :           if (!mask2)
    2917                 :            :             return false;
    2918                 :         24 :           if (!operand_equal_p (mask1, mask2, 0))
    2919                 :            :             return false;
    2920                 :            :         }
    2921                 :         32 :       return true;
    2922                 :            :     }
    2923                 :            : 
    2924                 :            :   return false;
    2925                 :            : }
    2926                 :            : 
    2927                 :            : /* Function vect_analyze_data_ref_accesses.
    2928                 :            : 
    2929                 :            :    Analyze the access pattern of all the data references in the loop.
    2930                 :            : 
    2931                 :            :    FORNOW: the only access pattern that is considered vectorizable is a
    2932                 :            :            simple step 1 (consecutive) access.
    2933                 :            : 
    2934                 :            :    FORNOW: handle only arrays and pointer accesses.  */
    2935                 :            : 
    2936                 :            : opt_result
    2937                 :     198691 : vect_analyze_data_ref_accesses (vec_info *vinfo)
    2938                 :            : {
    2939                 :     198691 :   unsigned int i;
    2940                 :     198691 :   vec<data_reference_p> datarefs = vinfo->shared->datarefs;
    2941                 :     198691 :   struct data_reference *dr;
    2942                 :            : 
    2943                 :     198691 :   DUMP_VECT_SCOPE ("vect_analyze_data_ref_accesses");
    2944                 :            : 
    2945                 :     198691 :   if (datarefs.is_empty ())
    2946                 :       2235 :     return opt_result::success ();
    2947                 :            : 
    2948                 :            :   /* Sort the array of datarefs to make building the interleaving chains
    2949                 :            :      linear.  Don't modify the original vector's order; it is needed for
    2950                 :            :      determining what dependencies are reversed.  */
    2951                 :     196456 :   vec<data_reference_p> datarefs_copy = datarefs.copy ();
    2952                 :     196456 :   datarefs_copy.qsort (dr_group_sort_cmp);
    2953                 :     395147 :   hash_set<stmt_vec_info> to_fixup;
    2954                 :            : 
    2955                 :            :   /* Build the interleaving chains.  */
    2956                 :    1488080 :   for (i = 0; i < datarefs_copy.length () - 1;)
    2957                 :            :     {
    2958                 :     547583 :       data_reference_p dra = datarefs_copy[i];
    2959                 :     547583 :       dr_vec_info *dr_info_a = vinfo->lookup_dr (dra);
    2960                 :     547583 :       stmt_vec_info stmtinfo_a = dr_info_a->stmt;
    2961                 :     547583 :       stmt_vec_info lastinfo = NULL;
    2962                 :     547583 :       if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
    2963                 :     423178 :           || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a))
    2964                 :            :         {
    2965                 :     124751 :           ++i;
    2966                 :     124751 :           continue;
    2967                 :            :         }
    2968                 :     726824 :       for (i = i + 1; i < datarefs_copy.length (); ++i)
    2969                 :            :         {
    2970                 :     688432 :           data_reference_p drb = datarefs_copy[i];
    2971                 :     688432 :           dr_vec_info *dr_info_b = vinfo->lookup_dr (drb);
    2972                 :     688432 :           stmt_vec_info stmtinfo_b = dr_info_b->stmt;
    2973                 :     688432 :           if (!STMT_VINFO_VECTORIZABLE (stmtinfo_b)
    2974                 :     682427 :               || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
    2975                 :            :             break;
    2976                 :            : 
    2977                 :            :           /* ???  Imperfect sorting (non-compatible types, non-modulo
    2978                 :            :              accesses, same accesses) can lead to a group being artificially
    2979                 :            :              split here as we don't just skip over those.  If it really
    2980                 :            :              matters we can push those to a worklist and re-iterate
    2981                 :            :              over them.  Then we can just skip ahead to the next DR here.  */
    2982                 :            : 
    2983                 :            :           /* DRs in a different loop should not be put into the same
    2984                 :            :              interleaving group.  */
    2985                 :     682427 :           if (gimple_bb (DR_STMT (dra))->loop_father
    2986                 :     682427 :               != gimple_bb (DR_STMT (drb))->loop_father)
    2987                 :            :             break;
    2988                 :            : 
    2989                 :            :           /* Check that the data-refs have the same first location (except
    2990                 :            :              init) and that they are both either stores or loads (not one
    2991                 :            :              load and one store, and with matching masks if masked).  */
    2992                 :     682144 :           if (DR_IS_READ (dra) != DR_IS_READ (drb)
    2993                 :     592108 :               || data_ref_compare_tree (DR_BASE_ADDRESS (dra),
    2994                 :            :                                         DR_BASE_ADDRESS (drb)) != 0
    2995                 :     456488 :               || data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb)) != 0
    2996                 :    1132220 :               || !can_group_stmts_p (stmtinfo_a, stmtinfo_b, true))
    2997                 :            :             break;
    2998                 :            : 
    2999                 :            :           /* Check that the data-refs have the same constant size.  */
    3000                 :     450068 :           tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
    3001                 :     450068 :           tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
    3002                 :     450068 :           if (!tree_fits_uhwi_p (sza)
    3003                 :     450068 :               || !tree_fits_uhwi_p (szb)
    3004                 :     900136 :               || !tree_int_cst_equal (sza, szb))
    3005                 :            :             break;
    3006                 :            : 
    3007                 :            :           /* Check that the data-refs have the same step.  */
    3008                 :     416862 :           if (data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb)) != 0)
    3009                 :            :             break;
    3010                 :            : 
    3011                 :            :           /* Check the types are compatible.
    3012                 :            :              ???  We don't distinguish this during sorting.  */
    3013                 :     416781 :           if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
    3014                 :     416781 :                                    TREE_TYPE (DR_REF (drb))))
    3015                 :            :             break;
    3016                 :            : 
    3017                 :            :           /* Check that the DR_INITs are compile-time constants.  */
    3018                 :     348096 :           if (TREE_CODE (DR_INIT (dra)) != INTEGER_CST
    3019                 :     348096 :               || TREE_CODE (DR_INIT (drb)) != INTEGER_CST)
    3020                 :            :             break;
    3021                 :            : 
    3022                 :            :           /* Different .GOMP_SIMD_LANE calls still give the same lane,
    3023                 :            :              just hold extra information.  */
    3024                 :     348096 :           if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmtinfo_a)
    3025                 :       1324 :               && STMT_VINFO_SIMD_LANE_ACCESS_P (stmtinfo_b)
    3026                 :     349420 :               && data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb)) == 0)
    3027                 :            :             break;
    3028                 :            : 
    3029                 :            :           /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb).  */
    3030                 :     346772 :           HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
    3031                 :     346772 :           HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
    3032                 :     346772 :           HOST_WIDE_INT init_prev
    3033                 :     346772 :             = TREE_INT_CST_LOW (DR_INIT (datarefs_copy[i-1]));
    3034                 :     346772 :           gcc_assert (init_a <= init_b
    3035                 :            :                       && init_a <= init_prev
    3036                 :            :                       && init_prev <= init_b);
    3037                 :            : 
    3038                 :            :           /* Do not place the same access in the interleaving chain twice.  */
    3039                 :     346772 :           if (init_b == init_prev)
    3040                 :            :             {
    3041                 :      10430 :               gcc_assert (gimple_uid (DR_STMT (datarefs_copy[i-1]))
    3042                 :            :                           < gimple_uid (DR_STMT (drb)));
    3043                 :            :               /* Simply link in duplicates and fix up the chain below.  */
    3044                 :            :             }
    3045                 :            :           else
    3046                 :            :             {
    3047                 :            :               /* If init_b == init_a + the size of the type * k, we have an
    3048                 :            :                  interleaving, and DRA is accessed before DRB.  */
    3049                 :     336342 :               HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
    3050                 :     336342 :               if (type_size_a == 0
    3051                 :     336342 :                   || (init_b - init_a) % type_size_a != 0)
    3052                 :            :                 break;
    3053                 :            : 
    3054                 :            :               /* If we have a store, the accesses are adjacent.  This splits
    3055                 :            :                  groups into chunks we support (we don't support vectorization
    3056                 :            :                  of stores with gaps).  */
    3057                 :     336240 :               if (!DR_IS_READ (dra) && init_b - init_prev != type_size_a)
    3058                 :            :                 break;
    3059                 :            : 
    3060                 :            :               /* If the step (when constant and nonzero) is not greater than
    3061                 :            :                  the difference between the data-refs' inits, this splits the
    3062                 :            :                  group into suitable sizes.  */
    3063                 :     296755 :               if (tree_fits_shwi_p (DR_STEP (dra)))
    3064                 :            :                 {
    3065                 :     293753 :                   HOST_WIDE_INT step = tree_to_shwi (DR_STEP (dra));
    3066                 :     293753 :                   if (step != 0 && step <= (init_b - init_a))
    3067                 :            :                     break;
    3068                 :            :                 }
    3069                 :            :             }
    3070                 :            : 
    3071                 :     303992 :           if (dump_enabled_p ())
    3072                 :      15050 :             dump_printf_loc (MSG_NOTE, vect_location,
    3073                 :      15050 :                              DR_IS_READ (dra)
    3074                 :            :                              ? "Detected interleaving load %T and %T\n"
    3075                 :            :                              : "Detected interleaving store %T and %T\n",
    3076                 :            :                              DR_REF (dra), DR_REF (drb));
    3077                 :            : 
    3078                 :            :           /* Link the found element into the group list.  */
    3079                 :     303992 :           if (!DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
    3080                 :            :             {
    3081                 :     117726 :               DR_GROUP_FIRST_ELEMENT (stmtinfo_a) = stmtinfo_a;
    3082                 :     117726 :               lastinfo = stmtinfo_a;
    3083                 :            :             }
    3084                 :     303992 :           DR_GROUP_FIRST_ELEMENT (stmtinfo_b) = stmtinfo_a;
    3085                 :     303992 :           DR_GROUP_NEXT_ELEMENT (lastinfo) = stmtinfo_b;
    3086                 :     303992 :           lastinfo = stmtinfo_b;
    3087                 :            : 
    3088                 :     303992 :           STMT_VINFO_SLP_VECT_ONLY (stmtinfo_a)
    3089                 :     303992 :             = !can_group_stmts_p (stmtinfo_a, stmtinfo_b, false);
    3090                 :            : 
    3091                 :     303992 :           if (dump_enabled_p () && STMT_VINFO_SLP_VECT_ONLY (stmtinfo_a))
    3092                 :          0 :             dump_printf_loc (MSG_NOTE, vect_location,
    3093                 :            :                              "Load suitable for SLP vectorization only.\n");
    3094                 :            : 
    3095                 :     303992 :           if (init_b == init_prev
    3096                 :      10430 :               && !to_fixup.add (DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
    3097                 :     306883 :               && dump_enabled_p ())
    3098                 :         91 :             dump_printf_loc (MSG_NOTE, vect_location,
    3099                 :            :                              "Queuing group with duplicate access for fixup\n");
    3100                 :            :         }
    3101                 :            :     }
    3102                 :            : 
    3103                 :            :   /* Fixup groups with duplicate entries by splitting them.  */
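                         :            :   /* Editor's illustration (not part of the original source): if a group,
                         :            :      ordered by DR_INIT, is S1:a[0], S2:a[1], S3:a[1], S4:a[2] and S3 is
                         :            :      the duplicate with the smallest statement UID, then every member after
                         :            :      the group leader whose UID is at least S3's is moved into a new group
                         :            :      headed by the first such member, and that new group is queued again
                         :            :      in case it still contains duplicates.  */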
    3104                 :     206212 :   while (1)
    3105                 :            :     {
    3106                 :     206212 :       hash_set<stmt_vec_info>::iterator it = to_fixup.begin ();
    3107                 :     206212 :       if (!(it != to_fixup.end ()))
    3108                 :            :         break;
    3109                 :       9756 :       stmt_vec_info grp = *it;
    3110                 :       9756 :       to_fixup.remove (grp);
    3111                 :            : 
    3112                 :            :       /* Find the earliest duplicate group member.  */
    3113                 :       9756 :       unsigned first_duplicate = -1u;
    3114                 :       9756 :       stmt_vec_info next, g = grp;
    3115                 :      68810 :       while ((next = DR_GROUP_NEXT_ELEMENT (g)))
    3116                 :            :         {
    3117                 :      59054 :           if (tree_int_cst_equal (DR_INIT (STMT_VINFO_DR_INFO (next)->dr),
    3118                 :      59054 :                                   DR_INIT (STMT_VINFO_DR_INFO (g)->dr))
    3119                 :      59054 :               && gimple_uid (STMT_VINFO_STMT (next)) < first_duplicate)
    3120                 :            :             first_duplicate = gimple_uid (STMT_VINFO_STMT (next));
    3121                 :            :           g = next;
    3122                 :            :         }
    3123                 :       9756 :       if (first_duplicate == -1U)
    3124                 :       2891 :         continue;
    3125                 :            : 
    3126                 :            :       /* Then move all stmts after the first duplicate to a new group.
    3127                 :            :          Note this is a heuristic but one with the property that *it
    3128                 :            :          is fixed up completely.  */
    3129                 :       6865 :       g = grp;
    3130                 :       6865 :       stmt_vec_info newgroup = NULL, ng = grp;
    3131                 :      64351 :       while ((next = DR_GROUP_NEXT_ELEMENT (g)))
    3132                 :            :         {
    3133                 :      57486 :           if (gimple_uid (STMT_VINFO_STMT (next)) >= first_duplicate)
    3134                 :            :             {
    3135                 :      51760 :               DR_GROUP_NEXT_ELEMENT (g) = DR_GROUP_NEXT_ELEMENT (next);
    3136                 :      51760 :               if (!newgroup)
    3137                 :       6865 :                 newgroup = next;
    3138                 :            :               else
    3139                 :      44895 :                 DR_GROUP_NEXT_ELEMENT (ng) = next;
    3140                 :      51760 :               ng = next;
    3141                 :      51760 :               DR_GROUP_FIRST_ELEMENT (ng) = newgroup;
    3142                 :            :             }
    3143                 :            :           else
    3144                 :            :             g = DR_GROUP_NEXT_ELEMENT (g);
    3145                 :            :         }
    3146                 :       6865 :       DR_GROUP_NEXT_ELEMENT (ng) = NULL;
    3147                 :            : 
    3148                 :            :       /* Fixup the new group which still may contain duplicates.  */
    3149                 :       6865 :       to_fixup.add (newgroup);
    3150                 :            :     }
    3151                 :            : 
    3152                 :    1202550 :   FOR_EACH_VEC_ELT (datarefs_copy, i, dr)
    3153                 :            :     {
    3154                 :    1007080 :       dr_vec_info *dr_info = vinfo->lookup_dr (dr);
    3155                 :    1007080 :       if (STMT_VINFO_VECTORIZABLE (dr_info->stmt)
    3156                 :    1007080 :           && !vect_analyze_data_ref_access (dr_info))
    3157                 :            :         {
    3158                 :       1127 :           if (dump_enabled_p ())
    3159                 :        226 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    3160                 :            :                              "not vectorized: complicated access pattern.\n");
    3161                 :            : 
    3162                 :       1127 :           if (is_a <bb_vec_info> (vinfo))
    3163                 :            :             {
    3164                 :            :               /* Mark the statement as not vectorizable.  */
    3165                 :        141 :               STMT_VINFO_VECTORIZABLE (dr_info->stmt) = false;
    3166                 :        141 :               continue;
    3167                 :            :             }
    3168                 :            :           else
    3169                 :            :             {
    3170                 :        986 :               datarefs_copy.release ();
    3171                 :        986 :               return opt_result::failure_at (dr_info->stmt->stmt,
    3172                 :            :                                              "not vectorized:"
    3173                 :        986 :                                              " complicated access pattern.\n");
    3174                 :            :             }
    3175                 :            :         }
    3176                 :            :     }
    3177                 :            : 
    3178                 :     195470 :   datarefs_copy.release ();
    3179                 :     195470 :   return opt_result::success ();
    3180                 :            : }
    3181                 :            : 
    3182                 :            : /* Function vect_vfa_segment_size.
    3183                 :            : 
    3184                 :            :    Input:
    3185                 :            :      DR_INFO: The data reference.
    3186                 :            :      LENGTH_FACTOR: segment length to consider.
    3187                 :            : 
    3188                 :            :    Return a value suitable for the dr_with_seg_len::seg_len field.
    3189                 :            :    This is the "distance travelled" by the pointer from the first
    3190                 :            :    iteration in the segment to the last.  Note that it does not include
    3191                 :            :    the size of the access; in effect it only describes the first byte.  */
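                         :            : 
                         :            : /* Editor's worked example (not part of the original source): for a data
                         :            :    reference with DR_STEP 4 and LENGTH_FACTOR equal to a vectorization
                         :            :    factor of 8, the segment length is 4 * (8 - 1) = 28 bytes: the pointer
                         :            :    starts at the first access of the segment and has travelled 28 bytes by
                         :            :    the time it reaches the first byte of the last access.  */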
    3192                 :            : 
    3193                 :            : static tree
    3194                 :      39550 : vect_vfa_segment_size (dr_vec_info *dr_info, tree length_factor)
    3195                 :            : {
    3196                 :      39550 :   length_factor = size_binop (MINUS_EXPR,
    3197                 :            :                               fold_convert (sizetype, length_factor),
    3198                 :            :                               size_one_node);
    3199                 :      39550 :   return size_binop (MULT_EXPR, fold_convert (sizetype, DR_STEP (dr_info->dr)),
    3200                 :            :                      length_factor);
    3201                 :            : }
    3202                 :            : 
    3203                 :            : /* Return a value that, when added to abs (vect_vfa_segment_size (DR_INFO)),
    3204                 :            :    gives the worst-case number of bytes covered by the segment.  */
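                         :            : 
                         :            : /* Editor's worked example (not part of the original source): for the
                         :            :    leading 4-byte access of a group with DR_GROUP_SIZE 4 and DR_GROUP_GAP 1,
                         :            :    the result is 4 * (4 - 1) = 12 bytes; if the access would be vectorized
                         :            :    with dr_explicit_realign_optimized, a further full vector minus the
                         :            :    4-byte reference size is added on top of that.  */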
    3205                 :            : 
    3206                 :            : static unsigned HOST_WIDE_INT
    3207                 :      39780 : vect_vfa_access_size (dr_vec_info *dr_info)
    3208                 :            : {
    3209                 :      39780 :   stmt_vec_info stmt_vinfo = dr_info->stmt;
    3210                 :      39780 :   tree ref_type = TREE_TYPE (DR_REF (dr_info->dr));
    3211                 :      39780 :   unsigned HOST_WIDE_INT ref_size = tree_to_uhwi (TYPE_SIZE_UNIT (ref_type));
    3212                 :      39780 :   unsigned HOST_WIDE_INT access_size = ref_size;
    3213                 :      39780 :   if (DR_GROUP_FIRST_ELEMENT (stmt_vinfo))
    3214                 :            :     {
    3215                 :      10333 :       gcc_assert (DR_GROUP_FIRST_ELEMENT (stmt_vinfo) == stmt_vinfo);
    3216                 :      10333 :       access_size *= DR_GROUP_SIZE (stmt_vinfo) - DR_GROUP_GAP (stmt_vinfo);
    3217                 :            :     }
    3218                 :      39780 :   if (STMT_VINFO_VEC_STMT (stmt_vinfo)
    3219                 :      39780 :       && (vect_supportable_dr_alignment (dr_info, false)
    3220                 :            :           == dr_explicit_realign_optimized))
    3221                 :            :     {
    3222                 :            :       /* We might access a full vector's worth.  */
    3223                 :          0 :       tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
    3224                 :          0 :       access_size += tree_to_uhwi (TYPE_SIZE_UNIT (vectype)) - ref_size;
    3225                 :            :     }
    3226                 :      39780 :   return access_size;
    3227                 :            : }
    3228                 :            : 
    3229                 :            : /* Get the minimum alignment for all the scalar accesses that DR_INFO
    3230                 :            :    describes.  */
    3231                 :            : 
    3232                 :            : static unsigned int
    3233                 :      39780 : vect_vfa_align (dr_vec_info *dr_info)
    3234                 :            : {
    3235                 :      39780 :   return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
    3236                 :            : }
    3237                 :            : 
    3238                 :            : /* Function vect_compile_time_alias.
    3239                 :            : 
    3240                 :            :    Given data references A and B with equal base and offset, see whether
    3241                 :            :    the alias relation can be decided at compilation time.  Return 1 if
    3242                 :            :    it can and the references alias, 0 if it can and the references do
    3243                 :            :    not alias, and -1 if we cannot decide at compile time.  SEGMENT_LENGTH_A,
    3244                 :            :    SEGMENT_LENGTH_B, ACCESS_SIZE_A and ACCESS_SIZE_B are the equivalent
    3245                 :            :    of dr_with_seg_len::{seg_len,access_size} for A and B.  */
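                         :            : 
                         :            : /* Editor's worked example (not part of the original source): with positive
                         :            :    steps, DR_INITs 0 and 32, segment lengths of 28 bytes and access sizes
                         :            :    of 4 bytes, A covers [0, 32) and B covers [32, 64), so the function
                         :            :    returns 0 (no alias); if B's DR_INIT were 16 instead, the ranges would
                         :            :    be known to overlap and the function would return 1.  */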
    3246                 :            : 
    3247                 :            : static int
    3248                 :        314 : vect_compile_time_alias (dr_vec_info *a, dr_vec_info *b,
    3249                 :            :                          tree segment_length_a, tree segment_length_b,
    3250                 :            :                          unsigned HOST_WIDE_INT access_size_a,
    3251                 :            :                          unsigned HOST_WIDE_INT access_size_b)
    3252                 :            : {
    3253                 :        314 :   poly_offset_int offset_a = wi::to_poly_offset (DR_INIT (a->dr));
    3254                 :        314 :   poly_offset_int offset_b = wi::to_poly_offset (DR_INIT (b->dr));
    3255                 :        314 :   poly_uint64 const_length_a;
    3256                 :        314 :   poly_uint64 const_length_b;
    3257                 :            : 
    3258                 :            :   /* For negative step, we need to adjust address range by TYPE_SIZE_UNIT
    3259                 :            :      bytes, e.g., int a[3] -> a[1] range is [a+4, a+16) instead of
    3260                 :            :      [a, a+12) */
    3261                 :        314 :   if (tree_int_cst_compare (DR_STEP (a->dr), size_zero_node) < 0)
    3262                 :            :     {
    3263                 :         32 :       const_length_a = (-wi::to_poly_wide (segment_length_a)).force_uhwi ();
    3264                 :         16 :       offset_a -= const_length_a;
    3265                 :            :     }
    3266                 :            :   else
    3267                 :        298 :     const_length_a = tree_to_poly_uint64 (segment_length_a);
    3268                 :        314 :   if (tree_int_cst_compare (DR_STEP (b->dr), size_zero_node) < 0)
    3269                 :            :     {
    3270                 :         56 :       const_length_b = (-wi::to_poly_wide (segment_length_b)).force_uhwi ();
    3271                 :         28 :       offset_b -= const_length_b;
    3272                 :            :     }
    3273                 :            :   else
    3274                 :        286 :     const_length_b = tree_to_poly_uint64 (segment_length_b);
    3275                 :            : 
    3276                 :        314 :   const_length_a += access_size_a;
    3277                 :        314 :   const_length_b += access_size_b;
    3278                 :            : 
    3279                 :        314 :   if (ranges_known_overlap_p (offset_a, const_length_a,
    3280                 :            :                               offset_b, const_length_b))
    3281                 :            :     return 1;
    3282                 :            : 
    3283                 :        186 :   if (!ranges_maybe_overlap_p (offset_a, const_length_a,
    3284                 :            :                                offset_b, const_length_b))
    3285                 :        186 :     return 0;
    3286                 :            : 
    3287                 :            :   return -1;
    3288                 :            : }
    3289                 :            : 
    3290                 :            : /* Return true if the minimum nonzero dependence distance for loop LOOP_DEPTH
    3291                 :            :    in DDR is >= VF.  */
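                         :            : 
                         :            : /* Editor's illustration (not part of the original source): if the only
                         :            :    dependence recorded for the pair has distance 8 at LOOP_DEPTH and the
                         :            :    chosen VF is 4, every distance is >= VF and the caller below can drop
                         :            :    the runtime alias check for that pair.  */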
    3292                 :            : 
    3293                 :            : static bool
    3294                 :      28452 : dependence_distance_ge_vf (data_dependence_relation *ddr,
    3295                 :            :                            unsigned int loop_depth, poly_uint64 vf)
    3296                 :            : {
    3297                 :      28452 :   if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE
    3298                 :      28452 :       || DDR_NUM_DIST_VECTS (ddr) == 0)
    3299                 :            :     return false;
    3300                 :            : 
    3301                 :            :   /* If the dependence is exact, we should have limited the VF instead.  */
    3302                 :         55 :   gcc_checking_assert (DDR_COULD_BE_INDEPENDENT_P (ddr));
    3303                 :            : 
    3304                 :            :   unsigned int i;
    3305                 :            :   lambda_vector dist_v;
    3306                 :        124 :   FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
    3307                 :            :     {
    3308                 :        106 :       HOST_WIDE_INT dist = dist_v[loop_depth];
    3309                 :        106 :       if (dist != 0
    3310                 :         55 :           && !(dist > 0 && DDR_REVERSED_P (ddr))
    3311                 :        161 :           && maybe_lt ((unsigned HOST_WIDE_INT) abs_hwi (dist), vf))
    3312                 :            :         return false;
    3313                 :            :     }
    3314                 :            : 
    3315                 :         18 :   if (dump_enabled_p ())
    3316                 :         18 :     dump_printf_loc (MSG_NOTE, vect_location,
    3317                 :            :                      "dependence distance between %T and %T is >= VF\n",
    3318                 :         18 :                      DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr)));
    3319                 :            : 
    3320                 :            :   return true;
    3321                 :            : }
    3322                 :            : 
    3323                 :            : /* Dump LOWER_BOUND using flags DUMP_KIND.  Dumps are known to be enabled.  */
    3324                 :            : 
    3325                 :            : static void
    3326                 :        345 : dump_lower_bound (dump_flags_t dump_kind, const vec_lower_bound &lower_bound)
    3327                 :            : {
    3328                 :        345 :   dump_printf (dump_kind, "%s (%T) >= ",
    3329                 :        345 :                lower_bound.unsigned_p ? "unsigned" : "abs",
    3330                 :        345 :                lower_bound.expr);
    3331                 :        345 :   dump_dec (dump_kind, lower_bound.min_value);
    3332                 :        345 : }
    3333                 :            : 
    3334                 :            : /* Record that the vectorized loop requires the vec_lower_bound described
    3335                 :            :    by EXPR, UNSIGNED_P and MIN_VALUE.  */
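                         :            : 
                         :            : /* Editor's illustration (not part of the original source): if an entry
                         :            :    abs (step) >= 8 already exists for EXPR and a new requirement
                         :            :    unsigned (step) >= 16 comes in, the entry is merged to abs (step) >= 16:
                         :            :    the bound becomes the maximum of the two and the check stays unsigned
                         :            :    only if both requests were unsigned.  */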
    3336                 :            : 
    3337                 :            : static void
    3338                 :       3926 : vect_check_lower_bound (loop_vec_info loop_vinfo, tree expr, bool unsigned_p,
    3339                 :            :                         poly_uint64 min_value)
    3340                 :            : {
    3341                 :       3926 :   vec<vec_lower_bound> lower_bounds = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo);
    3342                 :       9150 :   for (unsigned int i = 0; i < lower_bounds.length (); ++i)
    3343                 :       4198 :     if (operand_equal_p (lower_bounds[i].expr, expr, 0))
    3344                 :            :       {
    3345                 :       3226 :         unsigned_p &= lower_bounds[i].unsigned_p;
    3346                 :       3226 :         min_value = upper_bound (lower_bounds[i].min_value, min_value);
    3347                 :       3226 :         if (lower_bounds[i].unsigned_p != unsigned_p
    3348                 :       3226 :             || maybe_lt (lower_bounds[i].min_value, min_value))
    3349                 :            :           {
    3350                 :        494 :             lower_bounds[i].unsigned_p = unsigned_p;
    3351                 :        494 :             lower_bounds[i].min_value = min_value;
    3352                 :        494 :             if (dump_enabled_p ())
    3353                 :            :               {
    3354                 :        198 :                 dump_printf_loc (MSG_NOTE, vect_location,
    3355                 :            :                                  "updating run-time check to ");
    3356                 :        198 :                 dump_lower_bound (MSG_NOTE, lower_bounds[i]);
    3357                 :        198 :                 dump_printf (MSG_NOTE, "\n");
    3358                 :            :               }
    3359                 :            :           }
    3360                 :       3226 :         return;
    3361                 :            :       }
    3362                 :            : 
    3363                 :        700 :   vec_lower_bound lower_bound (expr, unsigned_p, min_value);
    3364                 :        700 :   if (dump_enabled_p ())
    3365                 :            :     {
    3366                 :        147 :       dump_printf_loc (MSG_NOTE, vect_location, "need a run-time check that ");
    3367                 :        147 :       dump_lower_bound (MSG_NOTE, lower_bound);
    3368                 :        147 :       dump_printf (MSG_NOTE, "\n");
    3369                 :            :     }
    3370                 :        700 :   LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).safe_push (lower_bound);
    3371                 :            : }
    3372                 :            : 
    3373                 :            : /* Return true if it's unlikely that the step of the vectorized form of DR_INFO
    3374                 :            :    will span fewer than GAP bytes.  */
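                         :            : 
                         :            : /* Editor's worked example (not part of the original source): with an
                         :            :    estimated vectorization factor of 4, a group of size 2 and 4-byte scalar
                         :            :    accesses, one vector iteration is expected to span about 4 * 2 * 4 = 32
                         :            :    bytes, so any GAP of at most 32 bytes is treated as small.  */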
    3375                 :            : 
    3376                 :            : static bool
    3377                 :       3075 : vect_small_gap_p (loop_vec_info loop_vinfo, dr_vec_info *dr_info,
    3378                 :            :                   poly_int64 gap)
    3379                 :            : {
    3380                 :       3075 :   stmt_vec_info stmt_info = dr_info->stmt;
    3381                 :       3075 :   HOST_WIDE_INT count
    3382                 :       3075 :     = estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
    3383                 :       3075 :   if (DR_GROUP_FIRST_ELEMENT (stmt_info))
    3384                 :       3059 :     count *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
    3385                 :       3075 :   return (estimated_poly_value (gap)
    3386                 :       3075 :           <= count * vect_get_scalar_dr_size (dr_info));
    3387                 :            : }
    3388                 :            : 
    3389                 :            : /* Return true if we know that there is no alias between DR_INFO_A and
    3390                 :            :    DR_INFO_B when abs (DR_STEP (DR_INFO_A->dr)) >= N for some N.
    3391                 :            :    When returning true, set *LOWER_BOUND_OUT to this N.  */
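                         :            : 
                         :            : /* Editor's worked example (not part of the original source): for two
                         :            :    4-byte accesses with the same base, offset and step and DR_INITs 0 and
                         :            :    12, *LOWER_BOUND_OUT becomes 12 + 4 - 0 = 16, i.e. the accesses cannot
                         :            :    alias whenever abs (DR_STEP) is at least 16 bytes.  */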
    3392                 :            : 
    3393                 :            : static bool
    3394                 :      11640 : vectorizable_with_step_bound_p (dr_vec_info *dr_info_a, dr_vec_info *dr_info_b,
    3395                 :            :                                 poly_uint64 *lower_bound_out)
    3396                 :            : {
    3397                 :            :   /* Check that there is a constant gap of known sign between DR_A
    3398                 :            :      and DR_B.  */
    3399                 :      11640 :   data_reference *dr_a = dr_info_a->dr;
    3400                 :      11640 :   data_reference *dr_b = dr_info_b->dr;
    3401                 :      11640 :   poly_int64 init_a, init_b;
    3402                 :      11640 :   if (!operand_equal_p (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b), 0)
    3403                 :      10793 :       || !operand_equal_p (DR_OFFSET (dr_a), DR_OFFSET (dr_b), 0)
    3404                 :       5657 :       || !operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0)
    3405                 :       5655 :       || !poly_int_tree_p (DR_INIT (dr_a), &init_a)
    3406                 :       5655 :       || !poly_int_tree_p (DR_INIT (dr_b), &init_b)
    3407                 :      11640 :       || !ordered_p (init_a, init_b))
    3408                 :       5985 :     return false;
    3409                 :            : 
    3410                 :            :   /* Sort DR_A and DR_B by the address they access.  */
    3411                 :       5655 :   if (maybe_lt (init_b, init_a))
    3412                 :            :     {
    3413                 :          0 :       std::swap (init_a, init_b);
    3414                 :          0 :       std::swap (dr_info_a, dr_info_b);
    3415                 :          0 :       std::swap (dr_a, dr_b);
    3416                 :            :     }
    3417                 :            : 
    3418                 :            :   /* If the two accesses could be dependent within a scalar iteration,
    3419                 :            :      make sure that we'd retain their order.  */
    3420                 :       5655 :   if (maybe_gt (init_a + vect_get_scalar_dr_size (dr_info_a), init_b)
    3421                 :       5655 :       && !vect_preserves_scalar_order_p (dr_info_a, dr_info_b))
    3422                 :            :     return false;
    3423                 :            : 
    3424                 :            :   /* There is no alias if abs (DR_STEP) is greater than or equal to
    3425                 :            :      the bytes spanned by the combination of the two accesses.  */
    3426                 :       5655 :   *lower_bound_out = init_b + vect_get_scalar_dr_size (dr_info_b) - init_a;
    3427                 :       5655 :   return true;
    3428                 :            : }
    3429                 :            : 
    3430                 :            : /* Function vect_prune_runtime_alias_test_list.
    3431                 :            : 
    3432                 :            :    Prune a list of ddrs to be tested at run-time by versioning for alias.
    3433                 :            :    Merge several alias checks into one if possible.
    3434                 :            :    Return FALSE if the resulting list of ddrs is longer than allowed by
    3435                 :            :    PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE.  */
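                         :            : 
                         :            : /* Editor's illustration (not part of the original source): for a loop like
                         :            : 
                         :            :        void f (int *a, int *b, int n)
                         :            :        {
                         :            :          for (int i = 0; i < n; i++)
                         :            :            a[i] = b[i] + 1;
                         :            :        }
                         :            : 
                         :            :    nothing is known about the relationship of a and b, so the (a[i], b[i])
                         :            :    pair stays in LOOP_VINFO_MAY_ALIAS_DDRS and, roughly speaking, ends up
                         :            :    as a single segment-overlap entry in comp_alias_ddrs that is later
                         :            :    emitted as a runtime check guarding the vectorized loop version.  The
                         :            :    function f is made up for the example.  */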
    3436                 :            : 
    3437                 :            : opt_result
    3438                 :      33894 : vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
    3439                 :            : {
    3440                 :      33894 :   typedef pair_hash <tree_operand_hash, tree_operand_hash> tree_pair_hash;
    3441                 :      33894 :   hash_set <tree_pair_hash> compared_objects;
    3442                 :            : 
    3443                 :      33894 :   vec<ddr_p> may_alias_ddrs = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
    3444                 :      33894 :   vec<dr_with_seg_len_pair_t> &comp_alias_ddrs
    3445                 :            :     = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
    3446                 :      33894 :   vec<vec_object_pair> &check_unequal_addrs
    3447                 :            :     = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo);
    3448                 :      33894 :   poly_uint64 vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    3449                 :      33894 :   tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
    3450                 :            : 
    3451                 :      33894 :   ddr_p ddr;
    3452                 :      33894 :   unsigned int i;
    3453                 :      33894 :   tree length_factor;
    3454                 :            : 
    3455                 :      67788 :   DUMP_VECT_SCOPE ("vect_prune_runtime_alias_test_list");
    3456                 :            : 
    3457                 :            :   /* Step values are irrelevant for aliasing if the number of vector
    3458                 :            :      iterations is equal to the number of scalar iterations (which can
    3459                 :            :      happen for fully-SLP loops).  */
    3460                 :      33894 :   bool ignore_step_p = known_eq (LOOP_VINFO_VECT_FACTOR (loop_vinfo), 1U);
    3461                 :            : 
    3462                 :      33894 :   if (!ignore_step_p)
    3463                 :            :     {
    3464                 :            :       /* Convert the checks for nonzero steps into bound tests.  */
    3465                 :            :       tree value;
    3466                 :      34048 :       FOR_EACH_VEC_ELT (LOOP_VINFO_CHECK_NONZERO (loop_vinfo), i, value)
    3467                 :        867 :         vect_check_lower_bound (loop_vinfo, value, true, 1);
    3468                 :            :     }
    3469                 :            : 
    3470                 :      33894 :   if (may_alias_ddrs.is_empty ())
    3471                 :      30449 :     return opt_result::success ();
    3472                 :            : 
    3473                 :       3445 :   comp_alias_ddrs.create (may_alias_ddrs.length ());
    3474                 :            : 
    3475                 :       3445 :   unsigned int loop_depth
    3476                 :       3445 :     = index_in_loop_nest (LOOP_VINFO_LOOP (loop_vinfo)->num,
    3477                 :       3445 :                           LOOP_VINFO_LOOP_NEST (loop_vinfo));
    3478                 :            : 
    3479                 :            :   /* First, we collect all data ref pairs for aliasing checks.  */
    3480                 :      31769 :   FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
    3481                 :            :     {
    3482                 :      28452 :       poly_uint64 lower_bound;
    3483                 :      28452 :       tree segment_length_a, segment_length_b;
    3484                 :      28452 :       unsigned HOST_WIDE_INT access_size_a, access_size_b;
    3485                 :      28452 :       unsigned int align_a, align_b;
    3486                 :            : 
    3487                 :            :       /* Ignore the alias if the VF we chose ended up being no greater
    3488                 :            :          than the dependence distance.  */
    3489                 :      28452 :       if (dependence_distance_ge_vf (ddr, loop_depth, vect_factor))
    3490                 :       8748 :         continue;
    3491                 :            : 
    3492                 :      28434 :       if (DDR_OBJECT_A (ddr))
    3493                 :            :         {
    3494                 :          4 :           vec_object_pair new_pair (DDR_OBJECT_A (ddr), DDR_OBJECT_B (ddr));
    3495                 :          4 :           if (!compared_objects.add (new_pair))
    3496                 :            :             {
    3497                 :          4 :               if (dump_enabled_p ())
    3498                 :          4 :                 dump_printf_loc (MSG_NOTE, vect_location,
    3499                 :            :                                  "checking that %T and %T"
    3500                 :            :                                  " have different addresses\n",
    3501                 :            :                                  new_pair.first, new_pair.second);
    3502                 :          4 :               LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).safe_push (new_pair);
    3503                 :            :             }
    3504                 :          4 :           continue;
    3505                 :            :         }
    3506                 :            : 
    3507                 :      28430 :       dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (DDR_A (ddr));
    3508                 :      28430 :       stmt_vec_info stmt_info_a = dr_info_a->stmt;
    3509                 :            : 
    3510                 :      28430 :       dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (DDR_B (ddr));
    3511                 :      28430 :       stmt_vec_info stmt_info_b = dr_info_b->stmt;
    3512                 :            : 
    3513                 :      28430 :       bool preserves_scalar_order_p
    3514                 :      28430 :         = vect_preserves_scalar_order_p (dr_info_a, dr_info_b);
    3515                 :            : 
    3516                 :            :       /* Skip the pair if inter-iteration dependencies are irrelevant
    3517                 :            :          and intra-iteration dependencies are guaranteed to be honored.  */
    3518                 :      33911 :       if (ignore_step_p
    3519                 :      28430 :           && (preserves_scalar_order_p
    3520                 :       2703 :               || vectorizable_with_step_bound_p (dr_info_a, dr_info_b,
    3521                 :            :                                                  &lower_bound)))
    3522                 :            :         {
    3523                 :       5481 :           if (dump_enabled_p ())
    3524                 :       1607 :             dump_printf_loc (MSG_NOTE, vect_location,
    3525                 :            :                              "no need for alias check between "
    3526                 :            :                              "%T and %T when VF is 1\n",
    3527                 :       1607 :                              DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
    3528                 :       5481 :           continue;
    3529                 :            :         }
    3530                 :            : 
    3531                 :            :       /* See whether we can handle the alias using a bounds check on
    3532                 :            :          the step, and whether that's likely to be the best approach.
    3533                 :            :          (It might not be, for example, if the minimum step is much larger
    3534                 :            :          than the number of bytes handled by one vector iteration.)  */
    3535                 :      22949 :       if (!ignore_step_p
    3536                 :      22834 :           && TREE_CODE (DR_STEP (dr_info_a->dr)) != INTEGER_CST
    3537                 :       8937 :           && vectorizable_with_step_bound_p (dr_info_a, dr_info_b,
    3538                 :            :                                              &lower_bound)
    3539                 :      26016 :           && (vect_small_gap_p (loop_vinfo, dr_info_a, lower_bound)
    3540                 :          8 :               || vect_small_gap_p (loop_vinfo, dr_info_b, lower_bound)))
    3541                 :            :         {
    3542                 :       3059 :           bool unsigned_p = dr_known_forward_stride_p (dr_info_a->dr);
    3543                 :       3059 :           if (dump_enabled_p ())
    3544                 :            :             {
    3545                 :       2886 :               dump_printf_loc (MSG_NOTE, vect_location, "no alias between "
    3546                 :            :                                "%T and %T when the step %T is outside ",
    3547                 :            :                                DR_REF (dr_info_a->dr),
    3548                 :       1443 :                                DR_REF (dr_info_b->dr),
    3549                 :       1443 :                                DR_STEP (dr_info_a->dr));
    3550                 :       1443 :               if (unsigned_p)
    3551                 :        540 :                 dump_printf (MSG_NOTE, "[0");
    3552                 :            :               else
    3553                 :            :                 {
    3554                 :        903 :                   dump_printf (MSG_NOTE, "(");
    3555                 :        903 :                   dump_dec (MSG_NOTE, poly_int64 (-lower_bound));
    3556                 :            :                 }
    3557                 :       1443 :               dump_printf (MSG_NOTE, ", ");
    3558                 :       1443 :               dump_dec (MSG_NOTE, lower_bound);
    3559                 :       1443 :               dump_printf (MSG_NOTE, ")\n");
    3560                 :            :             }
    3561                 :       3059 :           vect_check_lower_bound (loop_vinfo, DR_STEP (dr_info_a->dr),
    3562                 :            :                                   unsigned_p, lower_bound);
    3563                 :       3059 :           continue;
    3564                 :            :         }
    3565                 :            : 
    3566                 :      19890 :       stmt_vec_info dr_group_first_a = DR_GROUP_FIRST_ELEMENT (stmt_info_a);
    3567                 :      19890 :       if (dr_group_first_a)
    3568                 :            :         {
    3569                 :       4954 :           stmt_info_a = dr_group_first_a;
    3570                 :       4954 :           dr_info_a = STMT_VINFO_DR_INFO (stmt_info_a);
    3571                 :            :         }
    3572                 :            : 
    3573                 :      19890 :       stmt_vec_info dr_group_first_b = DR_GROUP_FIRST_ELEMENT (stmt_info_b);
    3574                 :      19890 :       if (dr_group_first_b)
    3575                 :            :         {
    3576                 :       5379 :           stmt_info_b = dr_group_first_b;
    3577                 :       5379 :           dr_info_b = STMT_VINFO_DR_INFO (stmt_info_b);
    3578                 :            :         }
    3579                 :            : 
    3580                 :      19890 :       if (ignore_step_p)
    3581                 :            :         {
    3582                 :        115 :           segment_length_a = size_zero_node;
    3583                 :        115 :           segment_length_b = size_zero_node;
    3584                 :            :         }
    3585                 :            :       else
    3586                 :            :         {
    3587                 :      19775 :           if (!operand_equal_p (DR_STEP (dr_info_a->dr),
    3588                 :      19775 :                                 DR_STEP (dr_info_b->dr), 0))
    3589                 :            :             length_factor = scalar_loop_iters;
    3590                 :            :           else
    3591                 :      16012 :             length_factor = size_int (vect_factor);
    3592                 :      19775 :           segment_length_a = vect_vfa_segment_size (dr_info_a, length_factor);
    3593                 :      19775 :           segment_length_b = vect_vfa_segment_size (dr_info_b, length_factor);
    3594                 :            :         }
    3595                 :      19890 :       access_size_a = vect_vfa_access_size (dr_info_a);
    3596                 :      19890 :       access_size_b = vect_vfa_access_size (dr_info_b);
    3597                 :      19890 :       align_a = vect_vfa_align (dr_info_a);
    3598                 :      19890 :       align_b = vect_vfa_align (dr_info_b);
    3599                 :            : 
    3600                 :            :       /* See whether the alias is known at compilation time.  */
    3601                 :      19890 :       if (operand_equal_p (DR_BASE_ADDRESS (dr_info_a->dr),
    3602                 :      19890 :                            DR_BASE_ADDRESS (dr_info_b->dr), 0)
    3603                 :      10478 :           && operand_equal_p (DR_OFFSET (dr_info_a->dr),
    3604                 :      10478 :                               DR_OFFSET (dr_info_b->dr), 0)
    3605                 :        330 :           && TREE_CODE (DR_STEP (dr_info_a->dr)) == INTEGER_CST
    3606                 :        320 :           && TREE_CODE (DR_STEP (dr_info_b->dr)) == INTEGER_CST
    3607                 :        320 :           && poly_int_tree_p (segment_length_a)
    3608                 :      20209 :           && poly_int_tree_p (segment_length_b))
    3609                 :            :         {
    3610                 :        314 :           int res = vect_compile_time_alias (dr_info_a, dr_info_b,
    3611                 :            :                                              segment_length_a,
    3612                 :            :                                              segment_length_b,
    3613                 :            :                                              access_size_a,
    3614                 :            :                                              access_size_b);
    3615                 :        314 :           if (res >= 0 && dump_enabled_p ())
    3616                 :            :             {
    3617                 :        190 :               dump_printf_loc (MSG_NOTE, vect_location,
    3618                 :            :                                "can tell at compile time that %T and %T",
    3619                 :         95 :                                DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
    3620                 :         95 :               if (res == 0)
    3621                 :         46 :                 dump_printf (MSG_NOTE, " do not alias\n");
    3622                 :            :               else
    3623                 :         49 :                 dump_printf (MSG_NOTE, " alias\n");
    3624                 :            :             }
    3625                 :            : 
    3626                 :        314 :           if (res == 0)
    3627                 :        186 :             continue;
    3628                 :            : 
    3629                 :        128 :           if (res == 1)
    3630                 :        128 :             return opt_result::failure_at (stmt_info_b->stmt,
    3631                 :            :                                            "not vectorized:"
    3632                 :            :                                            " compilation time alias: %G%G",
    3633                 :            :                                            stmt_info_a->stmt,
    3634                 :        128 :                                            stmt_info_b->stmt);
    3635                 :            :         }
    3636                 :            : 
    3637                 :      19576 :       dr_with_seg_len dr_a (dr_info_a->dr, segment_length_a,
    3638                 :      19576 :                             access_size_a, align_a);
    3639                 :      19576 :       dr_with_seg_len dr_b (dr_info_b->dr, segment_length_b,
    3640                 :      19576 :                             access_size_b, align_b);
    3641                 :            :       /* Canonicalize the order to be the one that's needed for accurate
    3642                 :            :          RAW, WAR and WAW flags, in cases where the data references are
    3643                 :            :          well-ordered.  The order doesn't really matter otherwise,
    3644                 :            :          but we might as well be consistent.  */
    3645                 :      39152 :       if (get_later_stmt (stmt_info_a, stmt_info_b) == stmt_info_a)
    3646                 :        561 :         std::swap (dr_a, dr_b);
    3647                 :            : 
    3648                 :      19576 :       dr_with_seg_len_pair_t dr_with_seg_len_pair
    3649                 :            :         (dr_a, dr_b, (preserves_scalar_order_p
    3650                 :            :                       ? dr_with_seg_len_pair_t::WELL_ORDERED
    3651                 :      20192 :                       : dr_with_seg_len_pair_t::REORDERED));
    3652                 :            : 
    3653                 :      19576 :       comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
    3654                 :            :     }
    3655                 :            : 
    3656                 :       3317 :   prune_runtime_alias_test_list (&comp_alias_ddrs, vect_factor);
    3657                 :            : 
    3658                 :       3317 :   unsigned int count = (comp_alias_ddrs.length ()
    3659                 :       3317 :                         + check_unequal_addrs.length ());
    3660                 :            : 
    3661                 :       3317 :   if (dump_enabled_p ())
    3662                 :        687 :     dump_printf_loc (MSG_NOTE, vect_location,
    3663                 :            :                      "improved number of alias checks from %d to %d\n",
    3664                 :            :                      may_alias_ddrs.length (), count);
    3665                 :       3317 :   unsigned limit = param_vect_max_version_for_alias_checks;
    3666                 :       3317 :   if (flag_simd_cost_model == VECT_COST_MODEL_CHEAP)
    3667                 :          0 :     limit = param_vect_max_version_for_alias_checks * 6 / 10;
    3668                 :       3317 :   if (count > limit)
    3669                 :        102 :     return opt_result::failure_at
    3670                 :            :       (vect_location,
    3671                 :            :        "number of versioning for alias run-time tests exceeds %d "
    3672                 :        102 :        "(--param vect-max-version-for-alias-checks)\n", limit);
    3673                 :            : 
    3674                 :       3215 :   return opt_result::success ();
    3675                 :            : }
    3676                 :            : 
    3677                 :            : /* Check whether we can use an internal function for a gather load
    3678                 :            :    or scatter store.  READ_P is true for loads and false for stores.
    3679                 :            :    MASKED_P is true if the load or store is conditional.  MEMORY_TYPE is
    3680                 :            :    the type of the memory elements being loaded or stored.  OFFSET_TYPE
    3681                 :            :    is the type of the offset that is being applied to the invariant
    3682                 :            :    base address.  SCALE is the amount by which the offset should
    3683                 :            :    be multiplied *after* it has been converted to address width.
    3684                 :            : 
    3685                 :            :    Return true if the function is supported, storing the function id in
    3686                 :            :    *IFN_OUT and the vector type for the offset in *OFFSET_VECTYPE_OUT.  */
    3687                 :            : 
    3688                 :            : bool
    3689                 :       6488 : vect_gather_scatter_fn_p (vec_info *vinfo, bool read_p, bool masked_p,
    3690                 :            :                           tree vectype, tree memory_type, tree offset_type,
    3691                 :            :                           int scale, internal_fn *ifn_out,
    3692                 :            :                           tree *offset_vectype_out)
    3693                 :            : {
    3694                 :       6488 :   unsigned int memory_bits = tree_to_uhwi (TYPE_SIZE (memory_type));
    3695                 :       6488 :   unsigned int element_bits = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (vectype)));
    3696                 :       6488 :   if (element_bits != memory_bits)
    3697                 :            :     /* For now the vector elements must be the same width as the
    3698                 :            :        memory elements.  */
    3699                 :            :     return false;
    3700                 :            : 
    3701                 :            :   /* Work out which function we need.  */
    3702                 :       6488 :   internal_fn ifn;
    3703                 :       6488 :   if (read_p)
    3704                 :       2058 :     ifn = masked_p ? IFN_MASK_GATHER_LOAD : IFN_GATHER_LOAD;
    3705                 :            :   else
    3706                 :      10918 :     ifn = masked_p ? IFN_MASK_SCATTER_STORE : IFN_SCATTER_STORE;
    3707                 :            : 
    3708                 :      41428 :   for (;;)
    3709                 :            :     {
    3710                 :      23958 :       tree offset_vectype = get_vectype_for_scalar_type (vinfo, offset_type);
    3711                 :      23958 :       if (!offset_vectype)
    3712                 :            :         return false;
    3713                 :            : 
    3714                 :            :       /* Test whether the target supports this combination.  */
    3715                 :      23958 :       if (internal_gather_scatter_fn_supported_p (ifn, vectype, memory_type,
    3716                 :            :                                                   offset_vectype, scale))
    3717                 :            :         {
    3718                 :          0 :           *ifn_out = ifn;
    3719                 :          0 :           *offset_vectype_out = offset_vectype;
    3720                 :          0 :           return true;
    3721                 :            :         }
    3722                 :            : 
    3723                 :      23958 :       if (TYPE_PRECISION (offset_type) >= POINTER_SIZE
    3724                 :      23958 :           && TYPE_PRECISION (offset_type) >= element_bits)
    3725                 :            :         return false;
    3726                 :            : 
    3727                 :      17470 :       offset_type = build_nonstandard_integer_type
    3728                 :      17470 :         (TYPE_PRECISION (offset_type) * 2, TYPE_UNSIGNED (offset_type));
    3729                 :      17470 :     }
    3730                 :            : }
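/* [Editor's sketch -- not part of tree-vect-data-refs.c]  The loop above keeps
   doubling the precision of OFFSET_TYPE until either the target accepts the
   gather/scatter combination or the offset is at least pointer-sized and
   element-sized.  A minimal stand-alone illustration of that search, using
   plain integers in place of trees and a hypothetical TARGET_ACCEPTS predicate
   standing in for internal_gather_scatter_fn_supported_p:

     static int
     find_offset_precision (int start_prec, int element_bits, int pointer_bits,
                            int (*target_accepts) (int prec))
     {
       for (int prec = start_prec; ; prec *= 2)
         {
           if (target_accepts (prec))
             return prec;        /* Analogue of setting *IFN_OUT etc.  */
           if (prec >= pointer_bits && prec >= element_bits)
             return 0;           /* No supported offset type; give up.  */
         }
     }
*/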
    3731                 :            : 
    3732                 :            : /* STMT_INFO is a call to an internal gather load or scatter store function.
    3733                 :            :    Describe the operation in INFO.  */
    3734                 :            : 
    3735                 :            : static void
    3736                 :          0 : vect_describe_gather_scatter_call (stmt_vec_info stmt_info,
    3737                 :            :                                    gather_scatter_info *info)
    3738                 :            : {
    3739                 :          0 :   gcall *call = as_a <gcall *> (stmt_info->stmt);
    3740                 :          0 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    3741                 :          0 :   data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
    3742                 :            : 
    3743                 :          0 :   info->ifn = gimple_call_internal_fn (call);
    3744                 :          0 :   info->decl = NULL_TREE;
    3745                 :          0 :   info->base = gimple_call_arg (call, 0);
    3746                 :          0 :   info->offset = gimple_call_arg (call, 1);
    3747                 :          0 :   info->offset_dt = vect_unknown_def_type;
    3748                 :          0 :   info->offset_vectype = NULL_TREE;
    3749                 :          0 :   info->scale = TREE_INT_CST_LOW (gimple_call_arg (call, 2));
    3750                 :          0 :   info->element_type = TREE_TYPE (vectype);
    3751                 :          0 :   info->memory_type = TREE_TYPE (DR_REF (dr));
    3752                 :          0 : }
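/* [Editor's note -- an assumption, not from the original file]  Given the
   accessors above, the first three arguments of such an internal call are the
   invariant base address, the offset and the scale, i.e. a gimple dump of
   roughly the shape

       _5 = .MASK_GATHER_LOAD (base_1, off_2, 4, ...);

   Any further arguments (such as the mask of the conditional variants) are not
   inspected by this function.  */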
    3753                 :            : 
    3754                 :            : /* Return true if a non-affine read or write in STMT_INFO is suitable for a
    3755                 :            :    gather load or scatter store.  Describe the operation in *INFO if so.  */
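/* [Editor's sketch -- an assumption, not part of the original file]  A caller
   typically fills a gather_scatter_info and then uses either the internal
   function or the target builtin it reports, roughly:

     gather_scatter_info gs_info;
     if (vect_check_gather_scatter (stmt_info, loop_vinfo, &gs_info))
       {
         /* gs_info.ifn != IFN_LAST means an internal function can be used;
            otherwise gs_info.decl holds the target builtin.  gs_info.base,
            gs_info.offset and gs_info.scale describe the address.  */
       }

   vect_analyze_data_refs further down calls it in just this way to decide
   whether a non-affine access is vectorizable at all.  */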
    3756                 :            : 
    3757                 :            : bool
    3758                 :      14780 : vect_check_gather_scatter (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
    3759                 :            :                            gather_scatter_info *info)
    3760                 :            : {
    3761                 :      14780 :   HOST_WIDE_INT scale = 1;
    3762                 :      14780 :   poly_int64 pbitpos, pbitsize;
    3763                 :      14780 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    3764                 :      14780 :   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
    3765                 :      14780 :   tree offtype = NULL_TREE;
    3766                 :      14780 :   tree decl = NULL_TREE, base, off;
    3767                 :      14780 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    3768                 :      14780 :   tree memory_type = TREE_TYPE (DR_REF (dr));
    3769                 :      14780 :   machine_mode pmode;
    3770                 :      14780 :   int punsignedp, reversep, pvolatilep = 0;
    3771                 :      14780 :   internal_fn ifn;
    3772                 :      14780 :   tree offset_vectype;
    3773                 :      14780 :   bool masked_p = false;
    3774                 :            : 
    3775                 :            :   /* See whether this is already a call to a gather/scatter internal function.
    3776                 :            :      If not, see whether it's a masked load or store.  */
    3777                 :      14780 :   gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
    3778                 :       1096 :   if (call && gimple_call_internal_p (call))
    3779                 :            :     {
    3780                 :       1096 :       ifn = gimple_call_internal_fn (call);
    3781                 :       1096 :       if (internal_gather_scatter_fn_p (ifn))
    3782                 :            :         {
    3783                 :          0 :           vect_describe_gather_scatter_call (stmt_info, info);
    3784                 :          0 :           return true;
    3785                 :            :         }
    3786                 :       1096 :       masked_p = (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE);
    3787                 :            :     }
    3788                 :            : 
    3789                 :            :   /* True if we should aim to use internal functions rather than
    3790                 :            :      built-in functions.  */
    3791                 :      14780 :   bool use_ifn_p = (DR_IS_READ (dr)
    3792                 :      14780 :                     ? supports_vec_gather_load_p ()
    3793                 :       4586 :                     : supports_vec_scatter_store_p ());
    3794                 :            : 
    3795                 :      14780 :   base = DR_REF (dr);
    3796                 :            :   /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
    3797                 :            :      see if we can use the def stmt of the address.  */
    3798                 :      14780 :   if (masked_p
    3799                 :       1096 :       && TREE_CODE (base) == MEM_REF
    3800                 :       1096 :       && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
    3801                 :       1096 :       && integer_zerop (TREE_OPERAND (base, 1))
    3802                 :      15876 :       && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
    3803                 :            :     {
    3804                 :       1096 :       gimple *def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
    3805                 :       1096 :       if (is_gimple_assign (def_stmt)
    3806                 :       1096 :           && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
    3807                 :        220 :         base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
    3808                 :            :     }
    3809                 :            : 
    3810                 :            :   /* The gather and scatter builtins need an address of the form
    3811                 :            :      loop_invariant + vector * {1, 2, 4, 8}
    3812                 :            :      or
    3813                 :            :      loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
    3814                 :            :      Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
    3815                 :            :      of loop invariants/SSA_NAMEs defined in the loop, with casts,
    3816                 :            :      multiplications and additions in it.  To get a vector, we need
    3817                 :            :      a single SSA_NAME that will be defined in the loop and will
    3818                 :            :      contain everything that is not loop invariant and that can be
    3819                 :            :      vectorized.  The following code attempts to find such a preexisting
    3820                 :            :      SSA_NAME OFF and put the loop invariants into a tree BASE
    3821                 :            :      that can be gimplified before the loop.  */
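  /* [Editor's illustration -- not from the original file]  A source loop that
     yields an address of the required form is an indexed load such as

         for (int i = 0; i < n; i++)
           out[i] = base[idx[i]];

     where &base[0] is the loop invariant, idx[i] provides the in-loop
     SSA_NAME that becomes OFF, and sizeof (*base) supplies the scale.  */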
    3822                 :      14780 :   base = get_inner_reference (base, &pbitsize, &pbitpos, &off, &pmode,
    3823                 :            :                               &punsignedp, &reversep, &pvolatilep);
    3824                 :      14780 :   if (reversep)
    3825                 :            :     return false;
    3826                 :            : 
    3827                 :      14780 :   poly_int64 pbytepos = exact_div (pbitpos, BITS_PER_UNIT);
    3828                 :            : 
    3829                 :      14780 :   if (TREE_CODE (base) == MEM_REF)
    3830                 :            :     {
    3831                 :       9892 :       if (!integer_zerop (TREE_OPERAND (base, 1)))
    3832                 :            :         {
    3833                 :         36 :           if (off == NULL_TREE)
    3834                 :         36 :             off = wide_int_to_tree (sizetype, mem_ref_offset (base));
    3835                 :            :           else
    3836                 :          0 :             off = size_binop (PLUS_EXPR, off,
    3837                 :            :                               fold_convert (sizetype, TREE_OPERAND (base, 1)));
    3838                 :            :         }
    3839                 :       9892 :       base = TREE_OPERAND (base, 0);
    3840                 :            :     }
    3841                 :            :   else
    3842                 :       4888 :     base = build_fold_addr_expr (base);
    3843                 :            : 
    3844                 :      14780 :   if (off == NULL_TREE)
    3845                 :       7525 :     off = size_zero_node;
    3846                 :            : 
    3847                 :            :   /* If base is not loop invariant, then either off is 0, in which case we
    3848                 :            :      start with just the constant offset in the loop invariant BASE and
    3849                 :            :      continue with base as OFF, or else we give up.
    3850                 :            :      We could handle that case by gimplifying the addition of base + off
    3851                 :            :      into some SSA_NAME and using that as off, but for now punt.  */
    3852                 :      14780 :   if (!expr_invariant_in_loop_p (loop, base))
    3853                 :            :     {
    3854                 :       7585 :       if (!integer_zerop (off))
    3855                 :            :         return false;
    3856                 :       7406 :       off = base;
    3857                 :       7406 :       base = size_int (pbytepos);
    3858                 :            :     }
    3859                 :            :   /* Otherwise put base + constant offset into the loop invariant BASE
    3860                 :            :      and continue with OFF.  */
    3861                 :            :   else
    3862                 :            :     {
    3863                 :       7195 :       base = fold_convert (sizetype, base);
    3864                 :       7195 :       base = size_binop (PLUS_EXPR, base, size_int (pbytepos));
    3865                 :            :     }
    3866                 :            : 
    3867                 :            :   /* OFF at this point may be either a SSA_NAME or some tree expression
    3868                 :            :      from get_inner_reference.  Try to peel off loop invariants from it
    3869                 :            :      into BASE as long as possible.  */
    3870                 :      14601 :   STRIP_NOPS (off);
    3871                 :      53409 :   while (offtype == NULL_TREE)
    3872                 :            :     {
    3873                 :      49300 :       enum tree_code code;
    3874                 :      49300 :       tree op0, op1, add = NULL_TREE;
    3875                 :            : 
    3876                 :      49300 :       if (TREE_CODE (off) == SSA_NAME)
    3877                 :            :         {
    3878                 :      34150 :           gimple *def_stmt = SSA_NAME_DEF_STMT (off);
    3879                 :            : 
    3880                 :      34150 :           if (expr_invariant_in_loop_p (loop, off))
    3881                 :         30 :             return false;
    3882                 :            : 
    3883                 :      34120 :           if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
    3884                 :            :             break;
    3885                 :            : 
    3886                 :      32527 :           op0 = gimple_assign_rhs1 (def_stmt);
    3887                 :      32527 :           code = gimple_assign_rhs_code (def_stmt);
    3888                 :      32527 :           op1 = gimple_assign_rhs2 (def_stmt);
    3889                 :            :         }
    3890                 :            :       else
    3891                 :            :         {
    3892                 :      15150 :           if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
    3893                 :            :             return false;
    3894                 :      15150 :           code = TREE_CODE (off);
    3895                 :      15150 :           extract_ops_from_tree (off, &code, &op0, &op1);
    3896                 :            :         }
    3897                 :      47677 :       switch (code)
    3898                 :            :         {
    3899                 :      15866 :         case POINTER_PLUS_EXPR:
    3900                 :      15866 :         case PLUS_EXPR:
    3901                 :      15866 :           if (expr_invariant_in_loop_p (loop, op0))
    3902                 :            :             {
    3903                 :       8133 :               add = op0;
    3904                 :       8133 :               off = op1;
    3905                 :      14242 :             do_add:
    3906                 :      14242 :               add = fold_convert (sizetype, add);
    3907                 :      14242 :               if (scale != 1)
    3908                 :       5915 :                 add = size_binop (MULT_EXPR, add, size_int (scale));
    3909                 :      14242 :               base = size_binop (PLUS_EXPR, base, add);
    3910                 :      38808 :               continue;
    3911                 :            :             }
    3912                 :       7733 :           if (expr_invariant_in_loop_p (loop, op1))
    3913                 :            :             {
    3914                 :       6097 :               add = op1;
    3915                 :       6097 :               off = op0;
    3916                 :       6097 :               goto do_add;
    3917                 :            :             }
    3918                 :            :           break;
    3919                 :         12 :         case MINUS_EXPR:
    3920                 :         12 :           if (expr_invariant_in_loop_p (loop, op1))
    3921                 :            :             {
    3922                 :         12 :               add = fold_convert (sizetype, op1);
    3923                 :         12 :               add = size_binop (MINUS_EXPR, size_zero_node, add);
    3924                 :         12 :               off = op0;
    3925                 :         12 :               goto do_add;
    3926                 :            :             }
    3927                 :            :           break;
    3928                 :      16841 :         case MULT_EXPR:
    3929                 :      16841 :           if (scale == 1 && tree_fits_shwi_p (op1))
    3930                 :            :             {
    3931                 :      12173 :               int new_scale = tree_to_shwi (op1);
    3932                 :            :               /* Only treat this as a scaling operation if the target
    3933                 :            :                  supports it for at least some offset type.  */
    3934                 :      12173 :               if (use_ifn_p
    3935                 :          0 :                   && !vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
    3936                 :            :                                                 masked_p, vectype, memory_type,
    3937                 :            :                                                 signed_char_type_node,
    3938                 :            :                                                 new_scale, &ifn,
    3939                 :            :                                                 &offset_vectype)
    3940                 :      12173 :                   && !vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
    3941                 :            :                                                 masked_p, vectype, memory_type,
    3942                 :            :                                                 unsigned_char_type_node,
    3943                 :            :                                                 new_scale, &ifn,
    3944                 :            :                                                 &offset_vectype))
    3945                 :            :                 break;
    3946                 :      12173 :               scale = new_scale;
    3947                 :      12173 :               off = op0;
    3948                 :      12173 :               continue;
    3949                 :            :             }
    3950                 :            :           break;
    3951                 :          0 :         case SSA_NAME:
    3952                 :          0 :           off = op0;
    3953                 :          0 :           continue;
    3954                 :      12394 :         CASE_CONVERT:
    3955                 :      24788 :           if (!POINTER_TYPE_P (TREE_TYPE (op0))
    3956                 :      24788 :               && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
    3957                 :            :             break;
    3958                 :            : 
    3959                 :            :           /* Don't include the conversion if the target is happy with
    3960                 :            :              the current offset type.  */
    3961                 :      12394 :           if (use_ifn_p
    3962                 :      12394 :               && vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
    3963                 :            :                                            masked_p, vectype, memory_type,
    3964                 :          0 :                                            TREE_TYPE (off), scale, &ifn,
    3965                 :            :                                            &offset_vectype))
    3966                 :            :             break;
    3967                 :            : 
    3968                 :      12394 :           if (TYPE_PRECISION (TREE_TYPE (op0))
    3969                 :      12394 :               == TYPE_PRECISION (TREE_TYPE (off)))
    3970                 :            :             {
    3971                 :       8284 :               off = op0;
    3972                 :       8284 :               continue;
    3973                 :            :             }
    3974                 :            : 
    3975                 :       4110 :           if (TYPE_PRECISION (TREE_TYPE (op0))
    3976                 :       4110 :               < TYPE_PRECISION (TREE_TYPE (off)))
    3977                 :            :             {
    3978                 :       4109 :               off = op0;
    3979                 :       4109 :               offtype = TREE_TYPE (off);
    3980                 :       4109 :               STRIP_NOPS (off);
    3981                 :       4109 :               continue;
    3982                 :            :             }
    3983                 :            :           break;
    3984                 :            :         default:
    3985                 :          0 :           break;
    3986                 :            :         }
    3987                 :            :       break;
    3988                 :            :     }
    3989                 :            : 
    3990                 :            :   /* If at the end OFF still isn't a SSA_NAME or isn't
    3991                 :            :      defined in the loop, punt.  */
    3992                 :      14571 :   if (TREE_CODE (off) != SSA_NAME
    3993                 :      14571 :       || expr_invariant_in_loop_p (loop, off))
    3994                 :       1445 :     return false;
    3995                 :            : 
    3996                 :      13126 :   if (offtype == NULL_TREE)
    3997                 :       9019 :     offtype = TREE_TYPE (off);
    3998                 :            : 
    3999                 :      13126 :   if (use_ifn_p)
    4000                 :            :     {
    4001                 :          0 :       if (!vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr), masked_p,
    4002                 :            :                                      vectype, memory_type, offtype, scale,
    4003                 :            :                                      &ifn, &offset_vectype))
    4004                 :            :         return false;
    4005                 :            :     }
    4006                 :            :   else
    4007                 :            :     {
    4008                 :      13126 :       if (DR_IS_READ (dr))
    4009                 :            :         {
    4010                 :       8894 :           if (targetm.vectorize.builtin_gather)
    4011                 :       8894 :             decl = targetm.vectorize.builtin_gather (vectype, offtype, scale);
    4012                 :            :         }
    4013                 :            :       else
    4014                 :            :         {
    4015                 :       4232 :           if (targetm.vectorize.builtin_scatter)
    4016                 :       4232 :             decl = targetm.vectorize.builtin_scatter (vectype, offtype, scale);
    4017                 :            :         }
    4018                 :            : 
    4019                 :      13126 :       if (!decl)
    4020                 :      11289 :         return false;
    4021                 :            : 
    4022                 :       1837 :       ifn = IFN_LAST;
    4023                 :            :       /* The offset vector type will be read from DECL when needed.  */
    4024                 :       1837 :       offset_vectype = NULL_TREE;
    4025                 :            :     }
    4026                 :            : 
    4027                 :       1837 :   info->ifn = ifn;
    4028                 :       1837 :   info->decl = decl;
    4029                 :       1837 :   info->base = base;
    4030                 :       1837 :   info->offset = off;
    4031                 :       1837 :   info->offset_dt = vect_unknown_def_type;
    4032                 :       1837 :   info->offset_vectype = offset_vectype;
    4033                 :       1837 :   info->scale = scale;
    4034                 :       1837 :   info->element_type = TREE_TYPE (vectype);
    4035                 :       1837 :   info->memory_type = memory_type;
    4036                 :       1837 :   return true;
    4037                 :            : }
    4038                 :            : 
    4039                 :            : /* Find the data references in STMT, analyze them with respect to LOOP and
    4040                 :            :    append them to DATAREFS.  Return an opt_result failure if the datarefs
    4041                 :            :    in this stmt cannot be handled.  */
    4042                 :            : 
    4043                 :            : opt_result
    4044                 :    4150270 : vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
    4045                 :            :                                vec<data_reference_p> *datarefs)
    4046                 :            : {
    4047                 :            :   /* We can ignore clobbers for dataref analysis - they are removed during
    4048                 :            :      loop vectorization and BB vectorization checks dependences with a
    4049                 :            :      stmt walk.  */
    4050                 :    4150270 :   if (gimple_clobber_p (stmt))
    4051                 :      71589 :     return opt_result::success ();
    4052                 :            : 
    4053                 :    7753030 :   if (gimple_has_volatile_ops (stmt))
    4054                 :      62495 :     return opt_result::failure_at (stmt, "not vectorized: volatile type: %G",
    4055                 :      62495 :                                    stmt);
    4056                 :            : 
    4057                 :    4016190 :   if (stmt_can_throw_internal (cfun, stmt))
    4058                 :       2176 :     return opt_result::failure_at (stmt,
    4059                 :            :                                    "not vectorized:"
    4060                 :            :                                    " statement can throw an exception: %G",
    4061                 :       2176 :                                    stmt);
    4062                 :            : 
    4063                 :    4014010 :   auto_vec<data_reference_p, 2> refs;
    4064                 :    4014010 :   opt_result res = find_data_references_in_stmt (loop, stmt, &refs);
    4065                 :    4014010 :   if (!res)
    4066                 :     261069 :     return res;
    4067                 :            : 
    4068                 :    3752940 :   if (refs.is_empty ())
    4069                 :    2700640 :     return opt_result::success ();
    4070                 :            : 
    4071                 :    1052300 :   if (refs.length () > 1)
    4072                 :       6761 :     return opt_result::failure_at (stmt,
    4073                 :            :                                    "not vectorized:"
    4074                 :       6761 :                                    " more than one data ref in stmt: %G", stmt);
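  /* [Editor's note -- an assumption]  Since gimple is three-address, a source
     statement like "a[i] = b[i];" has already been split into a load and a
     store with one data reference each; what is rejected above is a statement
     that genuinely keeps two memory references, e.g. an aggregate copy such
     as "*p = *q;".  */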
    4075                 :            : 
    4076                 :    1045540 :   if (gcall *call = dyn_cast <gcall *> (stmt))
    4077                 :       1520 :     if (!gimple_call_internal_p (call)
    4078                 :       1520 :         || (gimple_call_internal_fn (call) != IFN_MASK_LOAD
    4079                 :        556 :             && gimple_call_internal_fn (call) != IFN_MASK_STORE))
    4080                 :        417 :       return opt_result::failure_at (stmt,
    4081                 :        417 :                                      "not vectorized: dr in a call %G", stmt);
    4082                 :            : 
    4083                 :    1045120 :   data_reference_p dr = refs.pop ();
    4084                 :    1045120 :   if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
    4085                 :    1045120 :       && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
    4086                 :       3088 :     return opt_result::failure_at (stmt,
    4087                 :            :                                    "not vectorized:"
    4088                 :       3088 :                                    " statement is bitfield access %G", stmt);
    4089                 :            : 
    4090                 :    1042030 :   if (DR_BASE_ADDRESS (dr)
    4091                 :    1027900 :       && TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
    4092                 :        259 :     return opt_result::failure_at (stmt,
    4093                 :            :                                    "not vectorized:"
    4094                 :        259 :                                    " base addr of dr is a constant\n");
    4095                 :            : 
    4096                 :            :   /* Check whether this may be a SIMD lane access and adjust the
    4097                 :            :      DR to make it easier for us to handle it.  */
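  /* [Editor's note -- the OpenMP origin is an assumption]  Such accesses
     typically come from "#pragma omp simd" lowering, where a privatized
     variable becomes a per-lane array indexed via the .GOMP_SIMD_LANE
     (simduid) internal call; the code below recognizes that pattern and
     rewrites the DR into a zero-offset access whose step is the element
     size, marking it as a SIMD lane access.  */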
    4098                 :    1041770 :   if (loop
    4099                 :     113612 :       && loop->simduid
    4100                 :       9578 :       && (!DR_BASE_ADDRESS (dr)
    4101                 :       1239 :           || !DR_OFFSET (dr)
    4102                 :       1239 :           || !DR_INIT (dr)
    4103                 :       1239 :           || !DR_STEP (dr)))
    4104                 :            :     {
    4105                 :       8339 :       struct data_reference *newdr
    4106                 :       8339 :         = create_data_ref (NULL, loop_containing_stmt (stmt), DR_REF (dr), stmt,
    4107                 :       8339 :                            DR_IS_READ (dr), DR_IS_CONDITIONAL_IN_STMT (dr));
    4108                 :       8339 :       if (DR_BASE_ADDRESS (newdr)
    4109                 :       8339 :           && DR_OFFSET (newdr)
    4110                 :       8339 :           && DR_INIT (newdr)
    4111                 :       8339 :           && DR_STEP (newdr)
    4112                 :       8339 :           && TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
    4113                 :      16678 :           && integer_zerop (DR_STEP (newdr)))
    4114                 :            :         {
    4115                 :       8339 :           tree base_address = DR_BASE_ADDRESS (newdr);
    4116                 :       8339 :           tree off = DR_OFFSET (newdr);
    4117                 :       8339 :           tree step = ssize_int (1);
    4118                 :       8339 :           if (integer_zerop (off)
    4119                 :       8339 :               && TREE_CODE (base_address) == POINTER_PLUS_EXPR)
    4120                 :            :             {
    4121                 :         85 :               off = TREE_OPERAND (base_address, 1);
    4122                 :         85 :               base_address = TREE_OPERAND (base_address, 0);
    4123                 :            :             }
    4124                 :       8339 :           STRIP_NOPS (off);
    4125                 :       8339 :           if (TREE_CODE (off) == MULT_EXPR
    4126                 :       8339 :               && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
    4127                 :            :             {
    4128                 :       8103 :               step = TREE_OPERAND (off, 1);
    4129                 :       8103 :               off = TREE_OPERAND (off, 0);
    4130                 :       8103 :               STRIP_NOPS (off);
    4131                 :            :             }
    4132                 :       2086 :           if (CONVERT_EXPR_P (off)
    4133                 :       8339 :               && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off, 0)))
    4134                 :       6253 :                   < TYPE_PRECISION (TREE_TYPE (off))))
    4135                 :       6253 :             off = TREE_OPERAND (off, 0);
    4136                 :       8339 :           if (TREE_CODE (off) == SSA_NAME)
    4137                 :            :             {
    4138                 :       6264 :               gimple *def = SSA_NAME_DEF_STMT (off);
    4139                 :            :               /* Look through widening conversion.  */
    4140                 :       6264 :               if (is_gimple_assign (def)
    4141                 :       6264 :                   && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
    4142                 :            :                 {
    4143                 :         48 :                   tree rhs1 = gimple_assign_rhs1 (def);
    4144                 :         48 :                   if (TREE_CODE (rhs1) == SSA_NAME
    4145                 :         48 :                       && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
    4146                 :         96 :                       && (TYPE_PRECISION (TREE_TYPE (off))
    4147                 :         48 :                           > TYPE_PRECISION (TREE_TYPE (rhs1))))
    4148                 :         48 :                     def = SSA_NAME_DEF_STMT (rhs1);
    4149                 :            :                 }
    4150                 :       6264 :               if (is_gimple_call (def)
    4151                 :       6131 :                   && gimple_call_internal_p (def)
    4152                 :      12395 :                   && (gimple_call_internal_fn (def) == IFN_GOMP_SIMD_LANE))
    4153                 :            :                 {
    4154                 :       6131 :                   tree arg = gimple_call_arg (def, 0);
    4155                 :       6131 :                   tree reft = TREE_TYPE (DR_REF (newdr));
    4156                 :       6131 :                   gcc_assert (TREE_CODE (arg) == SSA_NAME);
    4157                 :       6131 :                   arg = SSA_NAME_VAR (arg);
    4158                 :       6131 :                   if (arg == loop->simduid
    4159                 :            :                       /* For now.  */
    4160                 :      12262 :                       && tree_int_cst_equal (TYPE_SIZE_UNIT (reft), step))
    4161                 :            :                     {
    4162                 :       6087 :                       DR_BASE_ADDRESS (newdr) = base_address;
    4163                 :       6087 :                       DR_OFFSET (newdr) = ssize_int (0);
    4164                 :       6087 :                       DR_STEP (newdr) = step;
    4165                 :       6087 :                       DR_OFFSET_ALIGNMENT (newdr) = BIGGEST_ALIGNMENT;
    4166                 :       6087 :                       DR_STEP_ALIGNMENT (newdr) = highest_pow2_factor (step);
    4167                 :            :                       /* Mark as simd-lane access.  */
    4168                 :       6087 :                       tree arg2 = gimple_call_arg (def, 1);
    4169                 :       6087 :                       newdr->aux = (void *) (-1 - tree_to_uhwi (arg2));
    4170                 :       6087 :                       free_data_ref (dr);
    4171                 :       6087 :                       datarefs->safe_push (newdr);
    4172                 :       6087 :                       return opt_result::success ();
    4173                 :            :                     }
    4174                 :            :                 }
    4175                 :            :             }
    4176                 :            :         }
    4177                 :       2252 :       free_data_ref (newdr);
    4178                 :            :     }
    4179                 :            : 
    4180                 :    1035690 :   datarefs->safe_push (dr);
    4181                 :    1035690 :   return opt_result::success ();
    4182                 :            : }
    4183                 :            : 
    4184                 :            : /* Function vect_analyze_data_refs.
    4185                 :            : 
    4186                 :            :   Find all the data references in the loop or basic block.
    4187                 :            : 
    4188                 :            :    The general structure of the analysis of data refs in the vectorizer is as
    4189                 :            :    follows:
    4190                 :            :    1- vect_analyze_data_refs(loop/bb): call
    4191                 :            :       compute_data_dependences_for_loop/bb to find and analyze all data-refs
    4192                 :            :       in the loop/bb and their dependences.
    4193                 :            :    2- vect_analyze_dependences(): apply dependence testing using ddrs.
    4194                 :            :    3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
    4195                 :            :    4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
    4196                 :            : 
    4197                 :            : */
    4198                 :            : 
    4199                 :            : opt_result
    4200                 :     728298 : vect_analyze_data_refs (vec_info *vinfo, poly_uint64 *min_vf, bool *fatal)
    4201                 :            : {
    4202                 :     728298 :   class loop *loop = NULL;
    4203                 :     728298 :   unsigned int i;
    4204                 :     728298 :   struct data_reference *dr;
    4205                 :     728298 :   tree scalar_type;
    4206                 :            : 
    4207                 :     728298 :   DUMP_VECT_SCOPE ("vect_analyze_data_refs");
    4208                 :            : 
    4209                 :     728298 :   if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
    4210                 :      49682 :     loop = LOOP_VINFO_LOOP (loop_vinfo);
    4211                 :            : 
    4212                 :            :   /* Go through the data-refs, check that the analysis succeeded.  Update
    4213                 :            :      pointer from stmt_vec_info struct to DR and vectype.  */
    4214                 :            : 
    4215                 :     728298 :   vec<data_reference_p> datarefs = vinfo->shared->datarefs;
    4216                 :    1876880 :   FOR_EACH_VEC_ELT (datarefs, i, dr)
    4217                 :            :     {
    4218                 :    1159710 :       enum { SG_NONE, GATHER, SCATTER } gatherscatter = SG_NONE;
    4219                 :    1159710 :       poly_uint64 vf;
    4220                 :            : 
    4221                 :    1159710 :       gcc_assert (DR_REF (dr));
    4222                 :    1159710 :       stmt_vec_info stmt_info = vinfo->lookup_stmt (DR_STMT (dr));
    4223                 :    1159710 :       gcc_assert (!stmt_info->dr_aux.dr);
    4224                 :    1159710 :       stmt_info->dr_aux.dr = dr;
    4225                 :    1159710 :       stmt_info->dr_aux.stmt = stmt_info;
    4226                 :            : 
    4227                 :            :       /* Check that analysis of the data-ref succeeded.  */
    4228                 :    1159710 :       if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
    4229                 :    1152380 :           || !DR_STEP (dr))
    4230                 :            :         {
    4231                 :       7322 :           bool maybe_gather
    4232                 :       7322 :             = DR_IS_READ (dr)
    4233                 :       6628 :               && !TREE_THIS_VOLATILE (DR_REF (dr))
    4234                 :      13950 :               && (targetm.vectorize.builtin_gather != NULL
    4235                 :          0 :                   || supports_vec_gather_load_p ());
    4236                 :       7322 :           bool maybe_scatter
    4237                 :       7322 :             = DR_IS_WRITE (dr)
    4238                 :        694 :               && !TREE_THIS_VOLATILE (DR_REF (dr))
    4239                 :       8016 :               && (targetm.vectorize.builtin_scatter != NULL
    4240                 :          0 :                   || supports_vec_scatter_store_p ());
    4241                 :            : 
    4242                 :            :           /* If the target supports vector gather loads or scatter stores,
    4243                 :            :              see if they can be used.  */
    4244                 :       7322 :           if (is_a <loop_vec_info> (vinfo)
    4245                 :       7322 :               && !nested_in_vect_loop_p (loop, stmt_info))
    4246                 :            :             {
    4247                 :       7014 :               if (maybe_gather || maybe_scatter)
    4248                 :            :                 {
    4249                 :       7014 :                   if (maybe_gather)
    4250                 :            :                     gatherscatter = GATHER;
    4251                 :            :                   else
    4252                 :        548 :                     gatherscatter = SCATTER;
    4253                 :            :                 }
    4254                 :            :             }
    4255                 :            : 
    4256                 :        548 :           if (gatherscatter == SG_NONE)
    4257                 :            :             {
    4258                 :        308 :               if (dump_enabled_p ())
    4259                 :          0 :                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    4260                 :            :                                  "not vectorized: data ref analysis "
    4261                 :            :                                  "failed %G", stmt_info->stmt);
    4262                 :        308 :               if (is_a <bb_vec_info> (vinfo))
    4263                 :            :                 {
    4264                 :            :                   /* In BB vectorization the ref can still participate
    4265                 :            :                      in dependence analysis; we just can't vectorize it.  */
    4266                 :        279 :                   STMT_VINFO_VECTORIZABLE (stmt_info) = false;
    4267                 :     152882 :                   continue;
    4268                 :            :                 }
    4269                 :         29 :               return opt_result::failure_at (stmt_info->stmt,
    4270                 :            :                                              "not vectorized:"
    4271                 :            :                                              " data ref analysis failed: %G",
    4272                 :         29 :                                              stmt_info->stmt);
    4273                 :            :             }
    4274                 :            :         }
    4275                 :            : 
    4276                 :            :       /* See if this was detected as SIMD lane access.  */
    4277                 :    1159400 :       if (dr->aux == (void *)-1
    4278                 :    1159400 :           || dr->aux == (void *)-2
    4279                 :    1150580 :           || dr->aux == (void *)-3
    4280                 :    1149680 :           || dr->aux == (void *)-4)
    4281                 :            :         {
    4282                 :      10562 :           if (nested_in_vect_loop_p (loop, stmt_info))
    4283                 :          0 :             return opt_result::failure_at (stmt_info->stmt,
    4284                 :            :                                            "not vectorized:"
    4285                 :            :                                            " data ref analysis failed: %G",
    4286                 :          0 :                                            stmt_info->stmt);
    4287                 :      10562 :           STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info)
    4288                 :      10562 :             = -(uintptr_t) dr->aux;
    4289                 :            :         }
    4290                 :            : 
    4291                 :    1159400 :       tree base = get_base_address (DR_REF (dr));
    4292                 :    1159400 :       if (base && VAR_P (base) && DECL_NONALIASED (base))
    4293                 :            :         {
    4294                 :       1529 :           if (dump_enabled_p ())
    4295                 :         66 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    4296                 :            :                              "not vectorized: base object not addressable "
    4297                 :            :                              "for stmt: %G", stmt_info->stmt);
    4298                 :       1529 :           if (is_a <bb_vec_info> (vinfo))
    4299                 :            :             {
    4300                 :            :               /* In BB vectorization the ref can still participate
    4301                 :            :                  in dependence analysis; we just can't vectorize it.  */
    4302                 :       1529 :               STMT_VINFO_VECTORIZABLE (stmt_info) = false;
    4303                 :       1529 :               continue;
    4304                 :            :             }
    4305                 :          0 :           return opt_result::failure_at (stmt_info->stmt,
    4306                 :            :                                          "not vectorized: base object not"
    4307                 :            :                                          " addressable for stmt: %G",
    4308                 :          0 :                                          stmt_info->stmt);
    4309                 :            :         }
    4310                 :            : 
    4311                 :    1157870 :       if (is_a <loop_vec_info> (vinfo)
    4312                 :     145950 :           && DR_STEP (dr)
    4313                 :    1296810 :           && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
    4314                 :            :         {
    4315                 :      10773 :           if (nested_in_vect_loop_p (loop, stmt_info))
    4316                 :        183 :             return opt_result::failure_at (stmt_info->stmt,
    4317                 :            :                                            "not vectorized: "
    4318                 :            :                                            "not suitable for strided load %G",
    4319                 :        183 :                                            stmt_info->stmt);
    4320                 :      10590 :           STMT_VINFO_STRIDED_P (stmt_info) = true;
    4321                 :            :         }
    4322                 :            : 
    4323                 :            :       /* Update DR field in stmt_vec_info struct.  */
    4324                 :            : 
    4325                 :            :       /* If the dataref is in an inner-loop of the loop that is considered for
    4326                 :            :          vectorization, we also want to analyze the access relative to
    4327                 :            :          the outer-loop (DR contains information only relative to the
    4328                 :            :          inner-most enclosing loop).  We do that by building a reference to the
    4329                 :            :          first location accessed by the inner-loop, and analyzing it relative to
    4330                 :            :          the outer-loop.  */
    4331                 :    1157690 :       if (loop && nested_in_vect_loop_p (loop, stmt_info))
    4332                 :            :         {
    4333                 :            :           /* Build a reference to the first location accessed by the
    4334                 :            :              inner loop: *(BASE + INIT + OFFSET).  By construction,
    4335                 :            :              this address must be invariant in the inner loop, so we
    4336                 :            :              can consider it as being used in the outer loop.  */
    4337                 :       3865 :           tree base = unshare_expr (DR_BASE_ADDRESS (dr));
    4338                 :       3865 :           tree offset = unshare_expr (DR_OFFSET (dr));
    4339                 :       3865 :           tree init = unshare_expr (DR_INIT (dr));
    4340                 :       3865 :           tree init_offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset),
    4341                 :            :                                           init, offset);
    4342                 :       3865 :           tree init_addr = fold_build_pointer_plus (base, init_offset);
    4343                 :       3865 :           tree init_ref = build_fold_indirect_ref (init_addr);
    4344                 :            : 
    4345                 :       3865 :           if (dump_enabled_p ())
    4346                 :       1325 :             dump_printf_loc (MSG_NOTE, vect_location,
    4347                 :            :                              "analyze in outer loop: %T\n", init_ref);
    4348                 :            : 
    4349                 :       3865 :           opt_result res
    4350                 :            :             = dr_analyze_innermost (&STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info),
    4351                 :       3865 :                                     init_ref, loop, stmt_info->stmt);
    4352                 :       3865 :           if (!res)
    4353                 :            :             /* dr_analyze_innermost already explained the failure.  */
    4354                 :         42 :             return res;
    4355                 :            : 
    4356                 :       3823 :           if (dump_enabled_p ())
    4357                 :       1325 :             dump_printf_loc (MSG_NOTE, vect_location,
    4358                 :            :                              "\touter base_address: %T\n"
    4359                 :            :                              "\touter offset from base address: %T\n"
    4360                 :            :                              "\touter constant offset from base address: %T\n"
    4361                 :            :                              "\touter step: %T\n"
    4362                 :            :                              "\touter base alignment: %d\n\n"
    4363                 :            :                              "\touter base misalignment: %d\n"
    4364                 :            :                              "\touter offset alignment: %d\n"
    4365                 :            :                              "\touter step alignment: %d\n",
    4366                 :            :                              STMT_VINFO_DR_BASE_ADDRESS (stmt_info),
    4367                 :            :                              STMT_VINFO_DR_OFFSET (stmt_info),
    4368                 :            :                              STMT_VINFO_DR_INIT (stmt_info),
    4369                 :            :                              STMT_VINFO_DR_STEP (stmt_info),
    4370                 :            :                              STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info),
    4371                 :            :                              STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info),
    4372                 :            :                              STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info),
    4373                 :            :                              STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info));
    4374                 :            :         }
    4375                 :            : 
    4376                 :            :       /* Set vectype for STMT.  */
    4377                 :    1157640 :       scalar_type = TREE_TYPE (DR_REF (dr));
    4378                 :    1157640 :       tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
    4379                 :    1157640 :       if (!vectype)
    4380                 :            :         {
    4381                 :     156512 :           if (dump_enabled_p ())
    4382                 :            :             {
    4383                 :        797 :               dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    4384                 :            :                                "not vectorized: no vectype for stmt: %G",
    4385                 :            :                                stmt_info->stmt);
    4386                 :        797 :               dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
    4387                 :        797 :               dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
    4388                 :            :                                  scalar_type);
    4389                 :        797 :               dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
    4390                 :            :             }
    4391                 :            : 
    4392                 :     156512 :           if (is_a <bb_vec_info> (vinfo))
    4393                 :            :             {
    4394                 :            :               /* No vector type is fine, the ref can still participate
    4395                 :            :                  in dependence analysis, we just can't vectorize it.  */
    4396                 :     151074 :               STMT_VINFO_VECTORIZABLE (stmt_info) = false;
    4397                 :     151074 :               continue;
    4398                 :            :             }
    4399                 :       5438 :           if (fatal)
    4400                 :       5438 :             *fatal = false;
    4401                 :       5438 :           return opt_result::failure_at (stmt_info->stmt,
    4402                 :            :                                          "not vectorized:"
    4403                 :            :                                          " no vectype for stmt: %G"
    4404                 :            :                                          " scalar_type: %T\n",
    4405                 :       5438 :                                          stmt_info->stmt, scalar_type);
    4406                 :            :         }
    4407                 :            :       else
    4408                 :            :         {
    4409                 :    1001130 :           if (dump_enabled_p ())
    4410                 :      44620 :             dump_printf_loc (MSG_NOTE, vect_location,
    4411                 :            :                              "got vectype for stmt: %G%T\n",
    4412                 :            :                              stmt_info->stmt, vectype);
    4413                 :            :         }
    4414                 :            : 
    4415                 :            :       /* Adjust the minimal vectorization factor according to the
    4416                 :            :          vector type.  */
    4417                 :    1001130 :       vf = TYPE_VECTOR_SUBPARTS (vectype);
    4418                 :    1001130 :       *min_vf = upper_bound (*min_vf, vf);
    4419                 :            : 
    4420                 :            :       /* Leave the BB vectorizer to pick the vector type later, based on
    4421                 :            :          the final dataref group size and SLP node size.  */
    4422                 :    1001130 :       if (is_a <loop_vec_info> (vinfo))
    4423                 :     140287 :         STMT_VINFO_VECTYPE (stmt_info) = vectype;
    4424                 :            : 
    4425                 :    1001130 :       if (gatherscatter != SG_NONE)
    4426                 :            :         {
    4427                 :       5782 :           gather_scatter_info gs_info;
    4428                 :       5782 :           if (!vect_check_gather_scatter (stmt_info,
    4429                 :            :                                           as_a <loop_vec_info> (vinfo),
    4430                 :            :                                           &gs_info)
    4431                 :       6133 :               || !get_vectype_for_scalar_type (vinfo,
    4432                 :        351 :                                                TREE_TYPE (gs_info.offset)))
    4433                 :            :             {
    4434                 :       5431 :               if (fatal)
    4435                 :       5431 :                 *fatal = false;
    4436                 :       5431 :               return opt_result::failure_at
    4437                 :       5431 :                         (stmt_info->stmt,
    4438                 :            :                          (gatherscatter == GATHER)
    4439                 :            :                          ? "not vectorized: not suitable for gather load %G"
    4440                 :            :                          : "not vectorized: not suitable for scatter store %G",
    4441                 :       5857 :                          stmt_info->stmt);
    4442                 :            :             }
    4443                 :        351 :           STMT_VINFO_GATHER_SCATTER_P (stmt_info) = gatherscatter;
    4444                 :            :         }
    4445                 :            :     }
    4446                 :            : 
    4447                 :            :   /* We used to stop processing and prune the list here.  Verify we no
    4448                 :            :      longer need to.  */
    4449                 :    1044380 :   gcc_assert (i == datarefs.length ());
    4450                 :            : 
    4451                 :     717175 :   return opt_result::success ();
    4452                 :            : }
    4453                 :            : 
    4454                 :            : 
    4455                 :            : /* Function vect_get_new_vect_var.
    4456                 :            : 
    4457                 :            :    Returns a name for a new variable.  The current naming scheme prefixes
    4458                 :            :    vectorizer-generated variables with "vect", "stmp", "mask" or "vectp"
    4459                 :            :    (depending on the value of VAR_KIND), and appends "_" followed by NAME
    4460                 :            :    if NAME is provided.  */
    4461                 :            : 
    4462                 :            : tree
    4463                 :     490715 : vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
    4464                 :            : {
    4465                 :     490715 :   const char *prefix;
    4466                 :     490715 :   tree new_vect_var;
    4467                 :            : 
    4468                 :     490715 :   switch (var_kind)
    4469                 :            :   {
    4470                 :            :   case vect_simple_var:
    4471                 :            :     prefix = "vect";
    4472                 :            :     break;
    4473                 :      11602 :   case vect_scalar_var:
    4474                 :      11602 :     prefix = "stmp";
    4475                 :      11602 :     break;
    4476                 :       2049 :   case vect_mask_var:
    4477                 :       2049 :     prefix = "mask";
    4478                 :       2049 :     break;
    4479                 :     200105 :   case vect_pointer_var:
    4480                 :     200105 :     prefix = "vectp";
    4481                 :     200105 :     break;
    4482                 :          0 :   default:
    4483                 :          0 :     gcc_unreachable ();
    4484                 :            :   }
    4485                 :            : 
    4486                 :     490715 :   if (name)
    4487                 :            :     {
    4488                 :     399072 :       char* tmp = concat (prefix, "_", name, NULL);
    4489                 :     399072 :       new_vect_var = create_tmp_reg (type, tmp);
    4490                 :     399072 :       free (tmp);
    4491                 :            :     }
    4492                 :            :   else
    4493                 :      91643 :     new_vect_var = create_tmp_reg (type, prefix);
    4494                 :            : 
    4495                 :     490715 :   return new_vect_var;
    4496                 :            : }
    4497                 :            : 
    4498                 :            : /* Like vect_get_new_vect_var but return an SSA name.  */
    4499                 :            : 
    4500                 :            : tree
    4501                 :      97966 : vect_get_new_ssa_name (tree type, enum vect_var_kind var_kind, const char *name)
    4502                 :            : {
    4503                 :      97966 :   const char *prefix;
    4504                 :      97966 :   tree new_vect_var;
    4505                 :            : 
    4506                 :      97966 :   switch (var_kind)
    4507                 :            :   {
    4508                 :            :   case vect_simple_var:
    4509                 :            :     prefix = "vect";
    4510                 :            :     break;
    4511                 :        247 :   case vect_scalar_var:
    4512                 :        247 :     prefix = "stmp";
    4513                 :        247 :     break;
    4514                 :          0 :   case vect_pointer_var:
    4515                 :          0 :     prefix = "vectp";
    4516                 :          0 :     break;
    4517                 :          0 :   default:
    4518                 :          0 :     gcc_unreachable ();
    4519                 :            :   }
    4520                 :            : 
    4521                 :      97966 :   if (name)
    4522                 :            :     {
    4523                 :      97455 :       char* tmp = concat (prefix, "_", name, NULL);
    4524                 :      97455 :       new_vect_var = make_temp_ssa_name (type, NULL, tmp);
    4525                 :      97455 :       free (tmp);
    4526                 :            :     }
    4527                 :            :   else
    4528                 :        511 :     new_vect_var = make_temp_ssa_name (type, NULL, prefix);
    4529                 :            : 
    4530                 :      97966 :   return new_vect_var;
    4531                 :            : }
    4532                 :            : 
    4533                 :            : /* Duplicate ptr info and set alignment/misalignment on NAME from DR_INFO.  */
    4534                 :            : 
    4535                 :            : static void
    4536                 :      85880 : vect_duplicate_ssa_name_ptr_info (tree name, dr_vec_info *dr_info)
    4537                 :            : {
    4538                 :      85880 :   duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr_info->dr));
    4539                 :      85880 :   int misalign = DR_MISALIGNMENT (dr_info);
    4540                 :      85880 :   if (misalign == DR_MISALIGNMENT_UNKNOWN)
    4541                 :      78630 :     mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
    4542                 :            :   else
    4543                 :       7250 :     set_ptr_info_alignment (SSA_NAME_PTR_INFO (name),
    4544                 :       7250 :                             known_alignment (DR_TARGET_ALIGNMENT (dr_info)),
    4545                 :            :                             misalign);
    4546                 :      85880 : }
    4547                 :            : 
    4548                 :            : /* Function vect_create_addr_base_for_vector_ref.
    4549                 :            : 
    4550                 :            :    Create an expression that computes the address of the first memory location
    4551                 :            :    that will be accessed for a data reference.
    4552                 :            : 
    4553                 :            :    Input:
    4554                 :            :    STMT_INFO: The statement containing the data reference.
    4555                 :            :    NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
    4556                 :            :    OFFSET: Optional. If supplied, it is added to the initial address.
    4557                 :            :    LOOP:    Specify relative to which loop-nest the address should be computed.
    4558                 :            :             For example, when the dataref is in an inner-loop nested in an
    4559                 :            :             outer-loop that is now being vectorized, LOOP can be either the
    4560                 :            :             outer-loop, or the inner-loop.  The first memory location accessed
    4561                 :            :             by the following dataref ('in' points to short):
    4562                 :            : 
    4563                 :            :                 for (i=0; i<N; i++)
    4564                 :            :                    for (j=0; j<M; j++)
    4565                 :            :                      s += in[i+j]
    4566                 :            : 
    4567                 :            :             is as follows:
    4568                 :            :             if LOOP=i_loop:     &in         (relative to i_loop)
    4569                 :            :             if LOOP=j_loop:     &in+i*2B    (relative to j_loop)
    4570                 :            :    BYTE_OFFSET: Optional, defaulted to NULL.  If supplied, it is added to the
    4571                 :            :             initial address.  Unlike OFFSET, which is the number of elements to
    4572                 :            :             be added, BYTE_OFFSET is measured in bytes.
    4573                 :            : 
    4574                 :            :    Output:
    4575                 :            :    1. Return an SSA_NAME whose value is the address of the memory location of
    4576                 :            :       the first vector of the data reference.
    4577                 :            :    2. If new_stmt_list is not NULL_TREE after return then the caller must insert
    4578                 :            :       these statement(s) which define the returned SSA_NAME.
    4579                 :            : 
    4580                 :            :    FORNOW: We are only handling array accesses with step 1.  */
    4581                 :            : 
    4582                 :            : tree
    4583                 :     100061 : vect_create_addr_base_for_vector_ref (stmt_vec_info stmt_info,
    4584                 :            :                                       gimple_seq *new_stmt_list,
    4585                 :            :                                       tree offset,
    4586                 :            :                                       tree byte_offset)
    4587                 :            : {
    4588                 :     100061 :   dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
    4589                 :     100061 :   struct data_reference *dr = dr_info->dr;
    4590                 :     100061 :   const char *base_name;
    4591                 :     100061 :   tree addr_base;
    4592                 :     100061 :   tree dest;
    4593                 :     100061 :   gimple_seq seq = NULL;
    4594                 :     100061 :   tree vect_ptr_type;
    4595                 :     100061 :   tree step = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
    4596                 :     100061 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    4597                 :     100061 :   innermost_loop_behavior *drb = vect_dr_behavior (dr_info);
    4598                 :            : 
    4599                 :     100061 :   tree data_ref_base = unshare_expr (drb->base_address);
    4600                 :     100061 :   tree base_offset = unshare_expr (get_dr_vinfo_offset (dr_info, true));
    4601                 :     100061 :   tree init = unshare_expr (drb->init);
    4602                 :            : 
    4603                 :     100061 :   if (loop_vinfo)
    4604                 :      50493 :     base_name = get_name (data_ref_base);
    4605                 :            :   else
    4606                 :            :     {
    4607                 :      49568 :       base_offset = ssize_int (0);
    4608                 :      49568 :       init = ssize_int (0);
    4609                 :      49568 :       base_name = get_name (DR_REF (dr));
    4610                 :            :     }
    4611                 :            : 
    4612                 :            :   /* Create base_offset */
    4613                 :     100061 :   base_offset = size_binop (PLUS_EXPR,
    4614                 :            :                             fold_convert (sizetype, base_offset),
    4615                 :            :                             fold_convert (sizetype, init));
    4616                 :            : 
    4617                 :     100061 :   if (offset)
    4618                 :            :     {
    4619                 :        958 :       offset = fold_build2 (MULT_EXPR, sizetype,
    4620                 :            :                             fold_convert (sizetype, offset), step);
    4621                 :        958 :       base_offset = fold_build2 (PLUS_EXPR, sizetype,
    4622                 :            :                                  base_offset, offset);
    4623                 :            :     }
    4624                 :     100061 :   if (byte_offset)
    4625                 :            :     {
    4626                 :          0 :       byte_offset = fold_convert (sizetype, byte_offset);
    4627                 :          0 :       base_offset = fold_build2 (PLUS_EXPR, sizetype,
    4628                 :            :                                  base_offset, byte_offset);
    4629                 :            :     }
    4630                 :            : 
    4631                 :            :   /* base + base_offset */
    4632                 :     100061 :   if (loop_vinfo)
    4633                 :      50493 :     addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
    4634                 :            :   else
    4635                 :            :     {
    4636                 :      99136 :       addr_base = build1 (ADDR_EXPR,
    4637                 :      49568 :                           build_pointer_type (TREE_TYPE (DR_REF (dr))),
    4638                 :            :                           unshare_expr (DR_REF (dr)));
    4639                 :            :     }
    4640                 :            : 
    4641                 :     100061 :   vect_ptr_type = build_pointer_type (STMT_VINFO_VECTYPE (stmt_info));
    4642                 :     100061 :   dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
    4643                 :     100061 :   addr_base = force_gimple_operand (addr_base, &seq, true, dest);
    4644                 :     100061 :   gimple_seq_add_seq (new_stmt_list, seq);
    4645                 :            : 
    4646                 :     100061 :   if (DR_PTR_INFO (dr)
    4647                 :      36443 :       && TREE_CODE (addr_base) == SSA_NAME
    4648                 :     134933 :       && !SSA_NAME_PTR_INFO (addr_base))
    4649                 :            :     {
    4650                 :      25502 :       vect_duplicate_ssa_name_ptr_info (addr_base, dr_info);
    4651                 :      25502 :       if (offset || byte_offset)
    4652                 :        320 :         mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr_base));
    4653                 :            :     }
    4654                 :            : 
    4655                 :     100061 :   if (dump_enabled_p ())
    4656                 :      15306 :     dump_printf_loc (MSG_NOTE, vect_location, "created %T\n", addr_base);
    4657                 :            : 
    4658                 :     100061 :   return addr_base;
    4659                 :            : }
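
In the loop-vectorized path above, the returned ADDR_BASE is simply the dataref's
base address displaced by the accumulated byte offset: the offset from the base,
plus the constant init, plus the optional OFFSET (in elements) and BYTE_OFFSET
(in bytes).  A minimal standalone sketch of that arithmetic, using hypothetical
plain-C names rather than GCC trees:

  /* Sketch only: models the offset accumulation done above, not the GCC API.  */
  #include <stddef.h>

  static char *
  addr_base_sketch (char *base_address, size_t offset, size_t init,
                    size_t elt_offset, size_t elt_size, size_t byte_offset)
  {
    size_t base_off = offset + init;      /* size_binop (PLUS_EXPR, offset, init) */
    base_off += elt_offset * elt_size;    /* OFFSET is counted in elements        */
    base_off += byte_offset;              /* BYTE_OFFSET is counted in bytes      */
    return base_address + base_off;       /* fold_build_pointer_plus              */
  }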
    4660                 :            : 
    4661                 :            : 
    4662                 :            : /* Function vect_create_data_ref_ptr.
    4663                 :            : 
    4664                 :            :    Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
    4665                 :            :    location accessed in the loop by STMT_INFO, along with the def-use update
    4666                 :            :    chain to appropriately advance the pointer through the loop iterations.
    4667                 :            :    Also set aliasing information for the pointer.  This pointer is used by
    4668                 :            :    the callers to this function to create a memory reference expression for
    4669                 :            :    vector load/store access.
    4670                 :            : 
    4671                 :            :    Input:
    4672                 :            :    1. STMT_INFO: a stmt that references memory. Expected to be of the form
    4673                 :            :          GIMPLE_ASSIGN <name, data-ref> or
    4674                 :            :          GIMPLE_ASSIGN <data-ref, name>.
    4675                 :            :    2. AGGR_TYPE: the type of the reference, which should be either a vector
    4676                 :            :         or an array.
    4677                 :            :    3. AT_LOOP: the loop where the vector memref is to be created.
    4678                 :            :    4. OFFSET (optional): an offset to be added to the initial address accessed
    4679                 :            :         by the data-ref in STMT_INFO.
    4680                 :            :    5. GSI: location where the new stmts are to be placed if there is no loop.
    4681                 :            :    6. ONLY_INIT: indicates whether ap is to be updated in the loop, or is to
    4682                 :            :         remain pointing to the initial address.
    4683                 :            :    7. BYTE_OFFSET (optional, defaults to NULL): a byte offset to be added
    4684                 :            :         to the initial address accessed by the data-ref in STMT_INFO.  This is
    4685                 :            :         similar to OFFSET, but OFFSET is counted in elements, while BYTE_OFFSET
    4686                 :            :         is counted in bytes.
    4687                 :            :    8. IV_STEP (optional, defaults to NULL): the amount that should be added
    4688                 :            :         to the IV during each iteration of the loop.  NULL says to move
    4689                 :            :         by one copy of AGGR_TYPE up or down, depending on the step of the
    4690                 :            :         data reference.
    4691                 :            : 
    4692                 :            :    Output:
    4693                 :            :    1. Declare a new ptr to vector_type, and have it point to the base of the
    4694                 :            :       data reference (initial address accessed by the data reference).
    4695                 :            :       For example, for vector of type V8HI, the following code is generated:
    4696                 :            : 
    4697                 :            :       v8hi *ap;
    4698                 :            :       ap = (v8hi *)initial_address;
    4699                 :            : 
    4700                 :            :       if OFFSET is not supplied:
    4701                 :            :          initial_address = &a[init];
    4702                 :            :       if OFFSET is supplied:
    4703                 :            :          initial_address = &a[init + OFFSET];
    4704                 :            :       if BYTE_OFFSET is supplied:
    4705                 :            :          initial_address = &a[init] + BYTE_OFFSET;
    4706                 :            : 
    4707                 :            :       Return the initial_address in INITIAL_ADDRESS.
    4708                 :            : 
    4709                 :            :    2. If ONLY_INIT is true, just return the initial pointer.  Otherwise, also
    4710                 :            :       update the pointer in each iteration of the loop.
    4711                 :            : 
    4712                 :            :       Return the increment stmt that updates the pointer in PTR_INCR.
    4713                 :            : 
    4714                 :            :    3. Return the pointer.  */
    4715                 :            : 
    4716                 :            : tree
    4717                 :     100044 : vect_create_data_ref_ptr (stmt_vec_info stmt_info, tree aggr_type,
    4718                 :            :                           class loop *at_loop, tree offset,
    4719                 :            :                           tree *initial_address, gimple_stmt_iterator *gsi,
    4720                 :            :                           gimple **ptr_incr, bool only_init,
    4721                 :            :                           tree byte_offset, tree iv_step)
    4722                 :            : {
    4723                 :     100044 :   const char *base_name;
    4724                 :     100044 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    4725                 :     100044 :   class loop *loop = NULL;
    4726                 :     100044 :   bool nested_in_vect_loop = false;
    4727                 :     100044 :   class loop *containing_loop = NULL;
    4728                 :     100044 :   tree aggr_ptr_type;
    4729                 :     100044 :   tree aggr_ptr;
    4730                 :     100044 :   tree new_temp;
    4731                 :     100044 :   gimple_seq new_stmt_list = NULL;
    4732                 :     100044 :   edge pe = NULL;
    4733                 :     100044 :   basic_block new_bb;
    4734                 :     100044 :   tree aggr_ptr_init;
    4735                 :     100044 :   dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
    4736                 :     100044 :   struct data_reference *dr = dr_info->dr;
    4737                 :     100044 :   tree aptr;
    4738                 :     100044 :   gimple_stmt_iterator incr_gsi;
    4739                 :     100044 :   bool insert_after;
    4740                 :     100044 :   tree indx_before_incr, indx_after_incr;
    4741                 :     100044 :   gimple *incr;
    4742                 :     100044 :   bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
    4743                 :            : 
    4744                 :     100044 :   gcc_assert (iv_step != NULL_TREE
    4745                 :            :               || TREE_CODE (aggr_type) == ARRAY_TYPE
    4746                 :            :               || TREE_CODE (aggr_type) == VECTOR_TYPE);
    4747                 :            : 
    4748                 :     100044 :   if (loop_vinfo)
    4749                 :            :     {
    4750                 :      50476 :       loop = LOOP_VINFO_LOOP (loop_vinfo);
    4751                 :      50476 :       nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
    4752                 :      50476 :       containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
    4753                 :      50476 :       pe = loop_preheader_edge (loop);
    4754                 :            :     }
    4755                 :            :   else
    4756                 :            :     {
    4757                 :      49568 :       gcc_assert (bb_vinfo);
    4758                 :      49568 :       only_init = true;
    4759                 :      49568 :       *ptr_incr = NULL;
    4760                 :            :     }
    4761                 :            : 
    4762                 :            :   /* Create an expression for the first address accessed by this load
    4763                 :            :      in LOOP.  */
    4764                 :     100044 :   base_name = get_name (DR_BASE_ADDRESS (dr));
    4765                 :            : 
    4766                 :     100044 :   if (dump_enabled_p ())
    4767                 :            :     {
    4768                 :      15304 :       tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
    4769                 :      15304 :       dump_printf_loc (MSG_NOTE, vect_location,
    4770                 :            :                        "create %s-pointer variable to type: %T",
    4771                 :      15304 :                        get_tree_code_name (TREE_CODE (aggr_type)),
    4772                 :            :                        aggr_type);
    4773                 :      15304 :       if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
    4774                 :       9355 :         dump_printf (MSG_NOTE, "  vectorizing an array ref: ");
    4775                 :       5949 :       else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
    4776                 :          0 :         dump_printf (MSG_NOTE, "  vectorizing a vector ref: ");
    4777                 :       5949 :       else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
    4778                 :       1061 :         dump_printf (MSG_NOTE, "  vectorizing a record based array ref: ");
    4779                 :            :       else
    4780                 :       4888 :         dump_printf (MSG_NOTE, "  vectorizing a pointer ref: ");
    4781                 :      15304 :       dump_printf (MSG_NOTE, "%T\n", DR_BASE_OBJECT (dr));
    4782                 :            :     }
    4783                 :            : 
    4784                 :            :   /* (1) Create the new aggregate-pointer variable.
    4785                 :            :      Vector and array types inherit the alias set of their component
    4786                 :            :      type by default, so we need to use a ref-all pointer if the data
    4787                 :            :      reference does not conflict with the created aggregate data
    4788                 :            :      reference (which can happen when it is not addressable).  */
    4789                 :     100044 :   bool need_ref_all = false;
    4790                 :     100044 :   if (!alias_sets_conflict_p (get_alias_set (aggr_type),
    4791                 :            :                               get_alias_set (DR_REF (dr))))
    4792                 :            :     need_ref_all = true;
    4793                 :            :   /* Likewise for any of the data references in the stmt group.  */
    4794                 :      95228 :   else if (DR_GROUP_SIZE (stmt_info) > 1)
    4795                 :            :     {
    4796                 :      50811 :       stmt_vec_info sinfo = DR_GROUP_FIRST_ELEMENT (stmt_info);
    4797                 :     173803 :       do
    4798                 :            :         {
    4799                 :     173803 :           struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
    4800                 :     173803 :           if (!alias_sets_conflict_p (get_alias_set (aggr_type),
    4801                 :            :                                       get_alias_set (DR_REF (sdr))))
    4802                 :            :             {
    4803                 :            :               need_ref_all = true;
    4804                 :            :               break;
    4805                 :            :             }
    4806                 :     173803 :           sinfo = DR_GROUP_NEXT_ELEMENT (sinfo);
    4807                 :            :         }
    4808                 :     173803 :       while (sinfo);
    4809                 :            :     }
    4810                 :     100044 :   aggr_ptr_type = build_pointer_type_for_mode (aggr_type, ptr_mode,
    4811                 :            :                                                need_ref_all);
    4812                 :     100044 :   aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
    4813                 :            : 
    4814                 :            : 
    4815                 :            :   /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
    4816                 :            :      vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
    4817                 :            :      def-use update cycles for the pointer: one relative to the outer-loop
    4818                 :            :      (LOOP), which is what steps (3) and (4) below do.  The other is relative
    4819                 :            :      to the inner-loop (which is the inner-most loop containing the dataref),
    4820                 :            :      and this is done by step (5) below.
    4821                 :            : 
    4822                 :            :      When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
    4823                 :            :      inner-most loop, and so steps (3),(4) work the same, and step (5) is
    4824                 :            :      redundant.  Steps (3),(4) create the following:
    4825                 :            : 
    4826                 :            :         vp0 = &base_addr;
    4827                 :            :         LOOP:   vp1 = phi(vp0,vp2)
    4828                 :            :                 ...
    4829                 :            :                 ...
    4830                 :            :                 vp2 = vp1 + step
    4831                 :            :                 goto LOOP
    4832                 :            : 
    4833                 :            :      If there is an inner-loop nested in loop, then step (5) will also be
    4834                 :            :      applied, and an additional update in the inner-loop will be created:
    4835                 :            : 
    4836                 :            :         vp0 = &base_addr;
    4837                 :            :         LOOP:   vp1 = phi(vp0,vp2)
    4838                 :            :                 ...
    4839                 :            :         inner:     vp3 = phi(vp1,vp4)
    4840                 :            :                    vp4 = vp3 + inner_step
    4841                 :            :                    if () goto inner
    4842                 :            :                 ...
    4843                 :            :                 vp2 = vp1 + step
    4844                 :            :                 if () goto LOOP   */
    4845                 :            : 
    4846                 :            :   /* (2) Calculate the initial address of the aggregate-pointer, and set
    4847                 :            :      the aggregate-pointer to point to it before the loop.  */
    4848                 :            : 
    4849                 :            :   /* Create: &(base[init_val+offset]) + byte_offset in the loop preheader.  */
    4850                 :            : 
    4851                 :     100044 :   new_temp = vect_create_addr_base_for_vector_ref (stmt_info, &new_stmt_list,
    4852                 :            :                                                    offset, byte_offset);
    4853                 :     100044 :   if (new_stmt_list)
    4854                 :            :     {
    4855                 :      33120 :       if (pe)
    4856                 :            :         {
    4857                 :      25284 :           new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
    4858                 :      25284 :           gcc_assert (!new_bb);
    4859                 :            :         }
    4860                 :            :       else
    4861                 :       7836 :         gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
    4862                 :            :     }
    4863                 :            : 
    4864                 :     100044 :   *initial_address = new_temp;
    4865                 :     100044 :   aggr_ptr_init = new_temp;
    4866                 :            : 
    4867                 :            :   /* (3) Handle the updating of the aggregate-pointer inside the loop.
    4868                 :            :      This is needed when ONLY_INIT is false, and also when AT_LOOP is the
    4869                 :            :      inner-loop nested in LOOP (during outer-loop vectorization).  */
    4870                 :            : 
    4871                 :            :   /* No update in loop is required.  */
    4872                 :     100044 :   if (only_init && (!loop_vinfo || at_loop == loop))
    4873                 :            :     aptr = aggr_ptr_init;
    4874                 :            :   else
    4875                 :            :     {
    4876                 :            :       /* Accesses to invariant addresses should be handled specially
    4877                 :            :          by the caller.  */
    4878                 :      50476 :       tree step = vect_dr_behavior (dr_info)->step;
    4879                 :      50476 :       gcc_assert (!integer_zerop (step));
    4880                 :            : 
    4881                 :      50476 :       if (iv_step == NULL_TREE)
    4882                 :            :         {
    4883                 :            :           /* The step of the aggregate pointer is the type size,
    4884                 :            :              negated for downward accesses.  */
    4885                 :          0 :           iv_step = TYPE_SIZE_UNIT (aggr_type);
    4886                 :          0 :           if (tree_int_cst_sgn (step) == -1)
    4887                 :          0 :             iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
    4888                 :            :         }
    4889                 :            : 
    4890                 :      50476 :       standard_iv_increment_position (loop, &incr_gsi, &insert_after);
    4891                 :            : 
    4892                 :      50476 :       create_iv (aggr_ptr_init,
    4893                 :            :                  fold_convert (aggr_ptr_type, iv_step),
    4894                 :            :                  aggr_ptr, loop, &incr_gsi, insert_after,
    4895                 :            :                  &indx_before_incr, &indx_after_incr);
    4896                 :      50476 :       incr = gsi_stmt (incr_gsi);
    4897                 :      50476 :       loop_vinfo->add_stmt (incr);
    4898                 :            : 
    4899                 :            :       /* Copy the points-to information if it exists. */
    4900                 :      50476 :       if (DR_PTR_INFO (dr))
    4901                 :            :         {
    4902                 :      30144 :           vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr_info);
    4903                 :      30144 :           vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr_info);
    4904                 :            :         }
    4905                 :      50476 :       if (ptr_incr)
    4906                 :      50476 :         *ptr_incr = incr;
    4907                 :            : 
    4908                 :      50476 :       aptr = indx_before_incr;
    4909                 :            :     }
    4910                 :            : 
    4911                 :     100044 :   if (!nested_in_vect_loop || only_init)
    4912                 :            :     return aptr;
    4913                 :            : 
    4914                 :            : 
    4915                 :            :   /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
    4916                 :            :      nested in LOOP, if it exists.  */
    4917                 :            : 
    4918                 :        229 :   gcc_assert (nested_in_vect_loop);
    4919                 :        229 :   if (!only_init)
    4920                 :            :     {
    4921                 :        229 :       standard_iv_increment_position (containing_loop, &incr_gsi,
    4922                 :            :                                       &insert_after);
    4923                 :        229 :       create_iv (aptr, fold_convert (aggr_ptr_type, DR_STEP (dr)), aggr_ptr,
    4924                 :            :                  containing_loop, &incr_gsi, insert_after, &indx_before_incr,
    4925                 :            :                  &indx_after_incr);
    4926                 :        229 :       incr = gsi_stmt (incr_gsi);
    4927                 :        229 :       loop_vinfo->add_stmt (incr);
    4928                 :            : 
    4929                 :            :       /* Copy the points-to information if it exists. */
    4930                 :        229 :       if (DR_PTR_INFO (dr))
    4931                 :            :         {
    4932                 :         45 :           vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr_info);
    4933                 :         45 :           vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr_info);
    4934                 :            :         }
    4935                 :        229 :       if (ptr_incr)
    4936                 :        229 :         *ptr_incr = incr;
    4937                 :            : 
    4938                 :        229 :       return indx_before_incr;
    4939                 :            :     }
    4940                 :            :   else
    4941                 :            :     gcc_unreachable ();
    4942                 :            : }
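
For the common case of a contiguous access in the innermost loop, steps (2) and
(3) above reduce to the following shape, shown here as a plain-C sketch under the
assumption of a 16-byte vector type (V4SF); the real code emits GIMPLE, not C:

  /* Sketch only: a pointer IV initialized in the preheader and bumped by
     one vector size per iteration.  */
  void
  data_ref_ptr_sketch (const float *in, float *restrict out, int n)
  {
    const float *vectp_in = in;      /* (2) initial address, set before the loop */
    for (int i = 0; i + 4 <= n; i += 4)
      {
        /* ... vector load through vectp_in ...  */
        out[i] = *vectp_in;          /* placeholder scalar use                    */
        vectp_in += 4;               /* (3) advance by TYPE_SIZE_UNIT (aggr_type) */
      }
  }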
    4943                 :            : 
    4944                 :            : 
    4945                 :            : /* Function bump_vector_ptr
    4946                 :            : 
    4947                 :            :    Increment a pointer (to a vector type) by vector-size. If requested,
    4948                 :            :    i.e. if PTR_INCR is given, then also connect the new increment stmt
    4949                 :            :    to the existing def-use update-chain of the pointer, by modifying
    4950                 :            :    the PTR_INCR as illustrated below:
    4951                 :            : 
    4952                 :            :    The pointer def-use update-chain before this function:
    4953                 :            :                         DATAREF_PTR = phi (p_0, p_2)
    4954                 :            :                         ....
    4955                 :            :         PTR_INCR:       p_2 = DATAREF_PTR + step
    4956                 :            : 
    4957                 :            :    The pointer def-use update-chain after this function:
    4958                 :            :                         DATAREF_PTR = phi (p_0, p_2)
    4959                 :            :                         ....
    4960                 :            :                         NEW_DATAREF_PTR = DATAREF_PTR + BUMP
    4961                 :            :                         ....
    4962                 :            :         PTR_INCR:       p_2 = NEW_DATAREF_PTR + step
    4963                 :            : 
    4964                 :            :    Input:
    4965                 :            :    DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
    4966                 :            :                  in the loop.
    4967                 :            :    PTR_INCR - optional. The stmt that updates the pointer in each iteration of
    4968                 :            :               the loop.  The increment amount across iterations is expected
    4969                 :            :               to be vector_size.
    4970                 :            :    GSI - location where the new update stmt is to be placed.
    4971                 :            :    STMT_INFO - the original scalar memory-access stmt that is being vectorized.
    4972                 :            :    BUMP - optional. The offset by which to bump the pointer. If not given,
    4973                 :            :           the offset is assumed to be vector_size.
    4974                 :            : 
    4975                 :            :    Output: Return NEW_DATAREF_PTR as illustrated above.
    4976                 :            : 
    4977                 :            : */
    4978                 :            : 
    4979                 :            : tree
    4980                 :     183714 : bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
    4981                 :            :                  stmt_vec_info stmt_info, tree bump)
    4982                 :            : {
    4983                 :     183714 :   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
    4984                 :     183714 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    4985                 :     183714 :   tree update = TYPE_SIZE_UNIT (vectype);
    4986                 :     183714 :   gassign *incr_stmt;
    4987                 :     183714 :   ssa_op_iter iter;
    4988                 :     183714 :   use_operand_p use_p;
    4989                 :     183714 :   tree new_dataref_ptr;
    4990                 :            : 
    4991                 :     183714 :   if (bump)
    4992                 :     183714 :     update = bump;
    4993                 :            : 
    4994                 :     183714 :   if (TREE_CODE (dataref_ptr) == SSA_NAME)
    4995                 :     172129 :     new_dataref_ptr = copy_ssa_name (dataref_ptr);
    4996                 :            :   else
    4997                 :      11585 :     new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
    4998                 :     183714 :   incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
    4999                 :            :                                    dataref_ptr, update);
    5000                 :     183714 :   vect_finish_stmt_generation (stmt_info, incr_stmt, gsi);
    5001                 :            : 
    5002                 :            :   /* Copy the points-to information if it exists. */
    5003                 :     183714 :   if (DR_PTR_INFO (dr))
    5004                 :            :     {
    5005                 :      12562 :       duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
    5006                 :      12562 :       mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
    5007                 :            :     }
    5008                 :            : 
    5009                 :     183714 :   if (!ptr_incr)
    5010                 :            :     return new_dataref_ptr;
    5011                 :            : 
    5012                 :            :   /* Update the vector-pointer's cross-iteration increment.  */
    5013                 :     317726 :   FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
    5014                 :            :     {
    5015                 :     158863 :       tree use = USE_FROM_PTR (use_p);
    5016                 :            : 
    5017                 :     158863 :       if (use == dataref_ptr)
    5018                 :     158863 :         SET_USE (use_p, new_dataref_ptr);
    5019                 :            :       else
    5020                 :          0 :         gcc_assert (operand_equal_p (use, update, 0));
    5021                 :            :     }
    5022                 :            : 
    5023                 :            :   return new_dataref_ptr;
    5024                 :            : }
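
The effect of the rewiring, for a statement that performs two vector accesses per
iteration, can be sketched in plain C as follows (the 16-byte access size is an
assumption for illustration; the real transformation rewrites GIMPLE SSA operands):

  /* Sketch only: the second access goes through the bumped pointer, and the
     cross-iteration increment is rebased onto it, so each iteration advances
     past both accesses.  */
  void
  bump_sketch (const char *dataref_ptr, const char *end)
  {
    while (dataref_ptr < end)
      {
        /* ... first vector access at dataref_ptr ...  */
        const char *new_dataref_ptr = dataref_ptr + 16;   /* BUMP             */
        /* ... second vector access at new_dataref_ptr ...  */
        dataref_ptr = new_dataref_ptr + 16;               /* rebased PTR_INCR */
      }
  }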
    5025                 :            : 
    5026                 :            : 
    5027                 :            : /* Copy memory reference info such as base/clique from the SRC reference
    5028                 :            :    to the DEST MEM_REF.  */
    5029                 :            : 
    5030                 :            : void
    5031                 :     304720 : vect_copy_ref_info (tree dest, tree src)
    5032                 :            : {
    5033                 :     304720 :   if (TREE_CODE (dest) != MEM_REF)
    5034                 :            :     return;
    5035                 :            : 
    5036                 :            :   tree src_base = src;
    5037                 :     751145 :   while (handled_component_p (src_base))
    5038                 :     446425 :     src_base = TREE_OPERAND (src_base, 0);
    5039                 :     304720 :   if (TREE_CODE (src_base) != MEM_REF
    5040                 :     304720 :       && TREE_CODE (src_base) != TARGET_MEM_REF)
    5041                 :            :     return;
    5042                 :            : 
    5043                 :      71881 :   MR_DEPENDENCE_CLIQUE (dest) = MR_DEPENDENCE_CLIQUE (src_base);
    5044                 :      71881 :   MR_DEPENDENCE_BASE (dest) = MR_DEPENDENCE_BASE (src_base);
    5045                 :            : }
    5046                 :            : 
    5047                 :            : 
    5048                 :            : /* Function vect_create_destination_var.
    5049                 :            : 
    5050                 :            :    Create a new temporary of type VECTYPE.  */
    5051                 :            : 
    5052                 :            : tree
    5053                 :     286589 : vect_create_destination_var (tree scalar_dest, tree vectype)
    5054                 :            : {
    5055                 :     286589 :   tree vec_dest;
    5056                 :     286589 :   const char *name;
    5057                 :     286589 :   char *new_name;
    5058                 :     286589 :   tree type;
    5059                 :     286589 :   enum vect_var_kind kind;
    5060                 :            : 
    5061                 :     573178 :   kind = vectype
    5062                 :     561576 :     ? VECTOR_BOOLEAN_TYPE_P (vectype)
    5063                 :     274987 :     ? vect_mask_var
    5064                 :            :     : vect_simple_var
    5065                 :            :     : vect_scalar_var;
    5066                 :     298191 :   type = vectype ? vectype : TREE_TYPE (scalar_dest);
    5067                 :            : 
    5068                 :     286589 :   gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
    5069                 :            : 
    5070                 :     286589 :   name = get_name (scalar_dest);
    5071                 :     286589 :   if (name)
    5072                 :      42047 :     new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
    5073                 :            :   else
    5074                 :     244542 :     new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
    5075                 :     286589 :   vec_dest = vect_get_new_vect_var (type, kind, new_name);
    5076                 :     286589 :   free (new_name);
    5077                 :            : 
    5078                 :     286589 :   return vec_dest;
    5079                 :            : }
    5080                 :            : 
    5081                 :            : /* Function vect_grouped_store_supported.
    5082                 :            : 
    5083                 :            :    Returns TRUE if interleave high and interleave low permutations
    5084                 :            :    are supported, and FALSE otherwise.  */
    5085                 :            : 
    5086                 :            : bool
    5087                 :       2599 : vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
    5088                 :            : {
    5089                 :       2599 :   machine_mode mode = TYPE_MODE (vectype);
    5090                 :            : 
    5091                 :            :   /* vect_permute_store_chain requires the group size to be equal to 3 or
    5092                 :            :      be a power of two.  */
    5093                 :       2599 :   if (count != 3 && exact_log2 (count) == -1)
    5094                 :            :     {
    5095                 :        203 :       if (dump_enabled_p ())
    5096                 :          8 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    5097                 :            :                          "the size of the group of accesses"
    5098                 :            :                          " is not a power of 2 or not equal to 3\n");
    5099                 :        203 :       return false;
    5100                 :            :     }
    5101                 :            : 
    5102                 :            :   /* Check that the permutation is supported.  */
    5103                 :       2396 :   if (VECTOR_MODE_P (mode))
    5104                 :            :     {
    5105                 :       2396 :       unsigned int i;
    5106                 :       2396 :       if (count == 3)
    5107                 :            :         {
    5108                 :        158 :           unsigned int j0 = 0, j1 = 0, j2 = 0;
    5109                 :        158 :           unsigned int i, j;
    5110                 :            : 
    5111                 :        158 :           unsigned int nelt;
    5112                 :        316 :           if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
    5113                 :            :             {
    5114                 :            :               if (dump_enabled_p ())
    5115                 :            :                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    5116                 :            :                                  "cannot handle groups of 3 stores for"
    5117                 :            :                                  " variable-length vectors\n");
    5118                 :            :               return false;
    5119                 :            :             }
    5120                 :            : 
    5121                 :        316 :           vec_perm_builder sel (nelt, nelt, 1);
    5122                 :        158 :           sel.quick_grow (nelt);
    5123                 :        316 :           vec_perm_indices indices;
    5124                 :        446 :           for (j = 0; j < 3; j++)
    5125                 :            :             {
    5126                 :        350 :               int nelt0 = ((3 - j) * nelt) % 3;
    5127                 :        350 :               int nelt1 = ((3 - j) * nelt + 1) % 3;
    5128                 :        350 :               int nelt2 = ((3 - j) * nelt + 2) % 3;
    5129                 :       2532 :               for (i = 0; i < nelt; i++)
    5130                 :            :                 {
    5131                 :       2182 :                   if (3 * i + nelt0 < nelt)
    5132                 :        758 :                     sel[3 * i + nelt0] = j0++;
    5133                 :       2182 :                   if (3 * i + nelt1 < nelt)
    5134                 :        728 :                     sel[3 * i + nelt1] = nelt + j1++;
    5135                 :       2182 :                   if (3 * i + nelt2 < nelt)
    5136                 :        696 :                     sel[3 * i + nelt2] = 0;
    5137                 :            :                 }
    5138                 :        350 :               indices.new_vector (sel, 2, nelt);
    5139                 :        350 :               if (!can_vec_perm_const_p (mode, indices))
    5140                 :            :                 {
    5141                 :         36 :                   if (dump_enabled_p ())
    5142                 :         15 :                     dump_printf (MSG_MISSED_OPTIMIZATION,
    5143                 :            :                                  "permutation op not supported by target.\n");
    5144                 :         36 :                   return false;
    5145                 :            :                 }
    5146                 :            : 
    5147                 :       2326 :               for (i = 0; i < nelt; i++)
    5148                 :            :                 {
    5149                 :       2012 :                   if (3 * i + nelt0 < nelt)
    5150                 :        688 :                     sel[3 * i + nelt0] = 3 * i + nelt0;
    5151                 :       2012 :                   if (3 * i + nelt1 < nelt)
    5152                 :        662 :                     sel[3 * i + nelt1] = 3 * i + nelt1;
    5153                 :       2012 :                   if (3 * i + nelt2 < nelt)
    5154                 :        662 :                     sel[3 * i + nelt2] = nelt + j2++;
    5155                 :            :                 }
    5156                 :        314 :               indices.new_vector (sel, 2, nelt);
    5157                 :        314 :               if (!can_vec_perm_const_p (mode, indices))
    5158                 :            :                 {
    5159                 :         26 :                   if (dump_enabled_p ())
    5160                 :          5 :                     dump_printf (MSG_MISSED_OPTIMIZATION,
    5161                 :            :                                  "permutation op not supported by target.\n");
    5162                 :         26 :                   return false;
    5163                 :            :                 }
    5164                 :            :             }
    5165                 :            :           return true;
    5166                 :            :         }
    5167                 :            :       else
    5168                 :            :         {
    5169                 :            :           /* If the length is not equal to 3, then only powers of 2 are supported.  */
    5170                 :       2238 :           gcc_assert (pow2p_hwi (count));
    5171                 :       4476 :           poly_uint64 nelt = GET_MODE_NUNITS (mode);
    5172                 :            : 
    5173                 :            :           /* The encoding has 2 interleaved stepped patterns.  */
    5174                 :       2297 :           vec_perm_builder sel (nelt, 2, 3);
    5175                 :       2238 :           sel.quick_grow (6);
    5176                 :       8952 :           for (i = 0; i < 3; i++)
    5177                 :            :             {
    5178                 :       6714 :               sel[i * 2] = i;
    5179                 :       6714 :               sel[i * 2 + 1] = i + nelt;
    5180                 :            :             }
    5181                 :       2297 :           vec_perm_indices indices (sel, 2, nelt);
    5182                 :       2238 :           if (can_vec_perm_const_p (mode, indices))
    5183                 :            :             {
    5184                 :      15253 :               for (i = 0; i < 6; i++)
    5185                 :      13074 :                 sel[i] += exact_div (nelt, 2);
    5186                 :       2179 :               indices.new_vector (sel, 2, nelt);
    5187                 :       2179 :               if (can_vec_perm_const_p (mode, indices))
    5188                 :       4358 :                 return true;
    5189                 :            :             }
    5190                 :            :         }
    5191                 :            :     }
    5192                 :            : 
    5193                 :         59 :   if (dump_enabled_p ())
    5194                 :          6 :     dump_printf (MSG_MISSED_OPTIMIZATION,
    5195                 :            :                  "permutation op not supported by target.\n");
    5196                 :            :   return false;
    5197                 :            : }
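A note on the power-of-two branch above: vec_perm_builder sel (nelt, 2, 3) describes the selector as two interleaved patterns with three leading elements each; the rest of the selector is implied by continuing each pattern as a linear series. The following standalone sketch (illustration only, not GCC code; it assumes an eight-element vector mode and hard-codes that encoding rule) expands the two selectors whose support is checked above:

#include <cstdio>
#include <vector>

/* Expand a selector encoded as NPATTERNS interleaved patterns with three
   leading elements each into a full selector of FULL_NELT elements.
   Elements beyond the encoded ones continue their pattern as a linear
   series whose step is the difference between its 2nd and 3rd elements.  */
static std::vector<long> expand (const std::vector<long> &enc,
                                 unsigned npatterns, unsigned full_nelt)
{
  std::vector<long> sel (full_nelt);
  for (unsigned i = 0; i < full_nelt; i++)
    {
      unsigned p = i % npatterns;   /* pattern this element belongs to */
      unsigned k = i / npatterns;   /* position within that pattern */
      if (k < 3)
        sel[i] = enc[i];
      else
        {
          long last = enc[2 * npatterns + p];       /* 3rd encoded element */
          long step = last - enc[npatterns + p];    /* 3rd minus 2nd */
          sel[i] = last + (long) (k - 2) * step;
        }
    }
  return sel;
}

int main ()
{
  const long nelt = 8;   /* pretend the vector mode has 8 elements */

  /* First selector checked above: {0, nelt, 1, nelt + 1, 2, nelt + 2}.  */
  std::vector<long> first = { 0, nelt, 1, nelt + 1, 2, nelt + 2 };
  /* Second selector: the same plus nelt / 2 on every element.  */
  std::vector<long> second = first;
  for (long &e : second)
    e += nelt / 2;

  for (long e : expand (first, 2, nelt))
    printf ("%ld ", e);
  printf ("  (interleaves the first halves of two vectors)\n");
  for (long e : expand (second, 2, nelt))
    printf ("%ld ", e);
  printf ("  (interleaves the second halves)\n");
  return 0;
}

For nelt = 8 this prints 0 8 1 9 2 10 3 11 and 4 12 5 13 6 14 7 15, i.e. the fully expanded interleave selectors.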
    5198                 :            : 
    5199                 :            : 
    5200                 :            : /* Return TRUE if vec_{mask_}store_lanes is available for COUNT vectors of
    5201                 :            :    type VECTYPE.  MASKED_P says whether the masked form is needed.  */
    5202                 :            : 
    5203                 :            : bool
    5204                 :       2965 : vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count,
    5205                 :            :                             bool masked_p)
    5206                 :            : {
    5207                 :       2965 :   if (masked_p)
    5208                 :          0 :     return vect_lanes_optab_supported_p ("vec_mask_store_lanes",
    5209                 :            :                                          vec_mask_store_lanes_optab,
    5210                 :          0 :                                          vectype, count);
    5211                 :            :   else
    5212                 :       2965 :     return vect_lanes_optab_supported_p ("vec_store_lanes",
    5213                 :            :                                          vec_store_lanes_optab,
    5214                 :       2965 :                                          vectype, count);
    5215                 :            : }
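If the optab above is available, the interleaving that is otherwise done with explicit VEC_PERM_EXPRs is folded into the store itself: element J of vector I of the group lands at memory slot J * COUNT + I. A minimal standalone sketch of that layout (illustration only, not GCC code; the group size of 3 and the 4-element vectors are arbitrary choices for the example):

#include <cstdio>

int main ()
{
  constexpr unsigned count = 3, nelt = 4;   /* group size, vector width */

  /* Three vectors, e.g. the x/y/z components of four points.  */
  int vecs[count][nelt];
  for (unsigned i = 0; i < count; i++)
    for (unsigned j = 0; j < nelt; j++)
      vecs[i][j] = 10 * i + j;

  /* The layout a lane store produces: element J of vector I goes to slot
     J * COUNT + I, so the components come out interleaved in memory
     without any separate permutation statements.  */
  int mem[count * nelt];
  for (unsigned i = 0; i < count; i++)
    for (unsigned j = 0; j < nelt; j++)
      mem[j * count + i] = vecs[i][j];

  for (unsigned k = 0; k < count * nelt; k++)
    printf ("%d ", mem[k]);   /* 0 10 20 1 11 21 2 12 22 3 13 23 */
  printf ("\n");
  return 0;
}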
    5216                 :            : 
    5217                 :            : 
    5218                 :            : /* Function vect_permute_store_chain.
    5219                 :            : 
    5220                 :            :    Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
    5221                 :            :    a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
    5222                 :            :    the data correctly for the stores.  Return the final references for stores
    5223                 :            :    in RESULT_CHAIN.
    5224                 :            : 
    5225                 :            :    E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
    5226                 :            :    The input is 4 vectors each containing 8 elements.  We assign a number to
    5227                 :            :    each element; the input sequence is:
    5228                 :            : 
    5229                 :            :    1st vec:   0  1  2  3  4  5  6  7
    5230                 :            :    2nd vec:   8  9 10 11 12 13 14 15
    5231                 :            :    3rd vec:  16 17 18 19 20 21 22 23
    5232                 :            :    4th vec:  24 25 26 27 28 29 30 31
    5233                 :            : 
    5234                 :            :    The output sequence should be:
    5235                 :            : 
    5236                 :            :    1st vec:  0  8 16 24  1  9 17 25
    5237                 :            :    2nd vec:  2 10 18 26  3 11 19 27
    5238                 :            :    3rd vec:  4 12 20 28  5 13 21 29
    5239                 :            :    4th vec:  6 14 22 30  7 15 23 31
    5240                 :            : 
    5241                 :            :    i.e., we interleave the contents of the four vectors in their order.
    5242                 :            : 
    5243                 :            :    We use interleave_high/low instructions to create such output.  The input of
    5244                 :            :    each interleave_high/low operation is two vectors:
    5245                 :            :    1st vec    2nd vec
    5246                 :            :    0 1 2 3    4 5 6 7
    5247                 :            :    the even elements of the result vector are obtained left-to-right from the
    5248                 :            :    high/low elements of the first vector.  The odd elements of the result are
    5249                 :            :    obtained left-to-right from the high/low elements of the second vector.
    5250                 :            :    The output of interleave_high will be:   0 4 1 5
    5251                 :            :    and of interleave_low:                   2 6 3 7
    5252                 :            : 
    5253                 :            : 
    5254                 :            :    The permutation is done in log LENGTH stages.  In each stage interleave_high
    5255                 :            :    and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
    5256                 :            :    where the first argument is taken from the first half of DR_CHAIN and the
    5257                 :            :    second argument from its second half.
    5258                 :            :    In our example,
    5259                 :            : 
    5260                 :            :    I1: interleave_high (1st vec, 3rd vec)
    5261                 :            :    I2: interleave_low (1st vec, 3rd vec)
    5262                 :            :    I3: interleave_high (2nd vec, 4th vec)
    5263                 :            :    I4: interleave_low (2nd vec, 4th vec)
    5264                 :            : 
    5265                 :            :    The output for the first stage is:
    5266                 :            : 
    5267                 :            :    I1:  0 16  1 17  2 18  3 19
    5268                 :            :    I2:  4 20  5 21  6 22  7 23
    5269                 :            :    I3:  8 24  9 25 10 26 11 27
    5270                 :            :    I4: 12 28 13 29 14 30 15 31
    5271                 :            : 
    5272                 :            :    The output of the second stage, i.e. the final result is:
    5273                 :            : 
    5274                 :            :    I1:  0  8 16 24  1  9 17 25
    5275                 :            :    I2:  2 10 18 26  3 11 19 27
    5276                 :            :    I3:  4 12 20 28  5 13 21 29
    5277                 :            :    I4:  6 14 22 30  7 15 23 31.  */
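The worked example above can be checked with a short standalone sketch (illustration only, not the GCC implementation that follows; it hard-codes LENGTH = 4 and 8-element vectors as in the example):

#include <cstdio>
#include <vector>

typedef std::vector<int> vec;

/* interleave_high <a, b> interleaves the first halves of A and B;
   interleave_low does the same with the second halves.  These correspond
   to the VEC_PERM_EXPR masks used below: {0, nelt, 1, nelt + 1, ...} and
   the same mask with nelt/2 added to every element.  */
static vec interleave (const vec &a, const vec &b, bool high)
{
  unsigned nelt = a.size ();
  unsigned base = high ? 0 : nelt / 2;
  vec r (nelt);
  for (unsigned i = 0; i < nelt / 2; i++)
    {
      r[2 * i] = a[base + i];
      r[2 * i + 1] = b[base + i];
    }
  return r;
}

int main ()
{
  const unsigned length = 4, nelt = 8;   /* the example above */
  std::vector<vec> chain (length);
  for (unsigned i = 0; i < length; i++)
    for (unsigned j = 0; j < nelt; j++)
      chain[i].push_back (i * nelt + j);

  /* log2 (LENGTH) stages; each stage pairs vector J with J + LENGTH/2.  */
  for (unsigned stage = 0; stage < 2; stage++)
    {
      std::vector<vec> next (length);
      for (unsigned j = 0; j < length / 2; j++)
        {
          next[2 * j] = interleave (chain[j], chain[j + length / 2], true);
          next[2 * j + 1] = interleave (chain[j], chain[j + length / 2], false);
        }
      chain = next;
    }

  /* Prints 0 8 16 24 1 9 17 25 / 2 10 18 26 ... / 6 14 22 30 7 15 23 31.  */
  for (const vec &v : chain)
    {
      for (int e : v)
        printf ("%3d", e);
      printf ("\n");
    }
  return 0;
}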
    5278                 :            : 
    5279                 :            : void
    5280                 :        332 : vect_permute_store_chain (vec<tree> dr_chain,
    5281                 :            :                           unsigned int length,
    5282                 :            :                           stmt_vec_info stmt_info,
    5283                 :            :                           gimple_stmt_iterator *gsi,
    5284                 :            :                           vec<tree> *result_chain)
    5285                 :            : {
    5286                 :        332 :   tree vect1, vect2, high, low;
    5287                 :        332 :   gimple *perm_stmt;
    5288                 :        332 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    5289                 :        332 :   tree perm_mask_low, perm_mask_high;
    5290                 :        332 :   tree data_ref;
    5291                 :        332 :   tree perm3_mask_low, perm3_mask_high;
    5292                 :        332 :   unsigned int i, j, n, log_length = exact_log2 (length);
    5293                 :            : 
    5294                 :        332 :   result_chain->quick_grow (length);
    5295                 :        664 :   memcpy (result_chain->address (), dr_chain.address (),
    5296                 :            :           length * sizeof (tree));
    5297                 :            : 
    5298                 :        332 :   if (length == 3)
    5299                 :            :     {
    5300                 :            :       /* vect_grouped_store_supported ensures that this is constant.  */
    5301                 :         18 :       unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
    5302                 :         18 :       unsigned int j0 = 0, j1 = 0, j2 = 0;
    5303                 :            : 
    5304                 :         36 :       vec_perm_builder sel (nelt, nelt, 1);
    5305                 :         18 :       sel.quick_grow (nelt);
    5306                 :         36 :       vec_perm_indices indices;
    5307                 :         72 :       for (j = 0; j < 3; j++)
    5308                 :            :         {
    5309                 :         54 :           int nelt0 = ((3 - j) * nelt) % 3;
    5310                 :         54 :           int nelt1 = ((3 - j) * nelt + 1) % 3;
    5311                 :         54 :           int nelt2 = ((3 - j) * nelt + 2) % 3;
    5312                 :            : 
    5313                 :        390 :           for (i = 0; i < nelt; i++)
    5314                 :            :             {
    5315                 :        336 :               if (3 * i + nelt0 < nelt)
    5316                 :        112 :                 sel[3 * i + nelt0] = j0++;
    5317                 :        336 :               if (3 * i + nelt1 < nelt)
    5318                 :        112 :                 sel[3 * i + nelt1] = nelt + j1++;
    5319                 :        336 :               if (3 * i + nelt2 < nelt)
    5320                 :        112 :                 sel[3 * i + nelt2] = 0;
    5321                 :            :             }
    5322                 :         54 :           indices.new_vector (sel, 2, nelt);
    5323                 :         54 :           perm3_mask_low = vect_gen_perm_mask_checked (vectype, indices);
    5324                 :            : 
    5325                 :        390 :           for (i = 0; i < nelt; i++)
    5326                 :            :             {
    5327                 :        336 :               if (3 * i + nelt0 < nelt)
    5328                 :        112 :                 sel[3 * i + nelt0] = 3 * i + nelt0;
    5329                 :        336 :               if (3 * i + nelt1 < nelt)
    5330                 :        112 :                 sel[3 * i + nelt1] = 3 * i + nelt1;
    5331                 :        336 :               if (3 * i + nelt2 < nelt)
    5332                 :        112 :                 sel[3 * i + nelt2] = nelt + j2++;
    5333                 :            :             }
    5334                 :         54 :           indices.new_vector (sel, 2, nelt);
    5335                 :         54 :           perm3_mask_high = vect_gen_perm_mask_checked (vectype, indices);
    5336                 :            : 
    5337                 :         54 :           vect1 = dr_chain[0];
    5338                 :         54 :           vect2 = dr_chain[1];
    5339                 :            : 
    5340                 :            :           /* Create interleaving stmt:
    5341                 :            :              low = VEC_PERM_EXPR <vect1, vect2,
    5342                 :            :                                   {j, nelt, *, j + 1, nelt + j + 1, *,
    5343                 :            :                                    j + 2, nelt + j + 2, *, ...}>  */
    5344                 :         54 :           data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
    5345                 :         54 :           perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
    5346                 :            :                                            vect2, perm3_mask_low);
    5347                 :         54 :           vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    5348                 :            : 
    5349                 :         54 :           vect1 = data_ref;
    5350                 :         54 :           vect2 = dr_chain[2];
    5351                 :            :           /* Create interleaving stmt:
    5352                 :            :              low = VEC_PERM_EXPR <vect1, vect2,
    5353                 :            :                                   {0, 1, nelt + j, 3, 4, nelt + j + 1,
    5354                 :            :                                    6, 7, nelt + j + 2, ...}>  */
    5355                 :         54 :           data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
    5356                 :         54 :           perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
    5357                 :            :                                            vect2, perm3_mask_high);
    5358                 :         54 :           vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    5359                 :         54 :           (*result_chain)[j] = data_ref;
    5360                 :            :         }
    5361                 :            :     }
    5362                 :            :   else
    5363                 :            :     {
    5364                 :            :       /* If length is not equal to 3 then only power of 2 is supported.  */
    5365                 :        314 :       gcc_assert (pow2p_hwi (length));
    5366                 :            : 
    5367                 :            :       /* The encoding has 2 interleaved stepped patterns.  */
    5368                 :        314 :       poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
    5369                 :        628 :       vec_perm_builder sel (nelt, 2, 3);
    5370                 :        314 :       sel.quick_grow (6);
    5371                 :       1256 :       for (i = 0; i < 3; i++)
    5372                 :            :         {
    5373                 :        942 :           sel[i * 2] = i;
    5374                 :        942 :           sel[i * 2 + 1] = i + nelt;
    5375                 :            :         }
    5376                 :        628 :         vec_perm_indices indices (sel, 2, nelt);
    5377                 :        314 :         perm_mask_high = vect_gen_perm_mask_checked (vectype, indices);
    5378                 :            : 
    5379                 :       2198 :         for (i = 0; i < 6; i++)
    5380                 :       1884 :           sel[i] += exact_div (nelt, 2);
    5381                 :        314 :         indices.new_vector (sel, 2, nelt);
    5382                 :        314 :         perm_mask_low = vect_gen_perm_mask_checked (vectype, indices);
    5383                 :            : 
    5384                 :        799 :         for (i = 0, n = log_length; i < n; i++)
    5385                 :            :           {
    5386                 :       1655 :             for (j = 0; j < length/2; j++)
    5387                 :            :               {
    5388                 :       1170 :                 vect1 = dr_chain[j];
    5389                 :       1170 :                 vect2 = dr_chain[j+length/2];
    5390                 :            : 
    5391                 :            :                 /* Create interleaving stmt:
    5392                 :            :                    high = VEC_PERM_EXPR <vect1, vect2, {0, nelt, 1, nelt+1,
    5393                 :            :                                                         ...}>  */
    5394                 :       1170 :                 high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
    5395                 :       1170 :                 perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
    5396                 :            :                                                  vect2, perm_mask_high);
    5397                 :       1170 :                 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    5398                 :       1170 :                 (*result_chain)[2*j] = high;
    5399                 :            : 
    5400                 :            :                 /* Create interleaving stmt:
    5401                 :            :                    low = VEC_PERM_EXPR <vect1, vect2,
    5402                 :            :                                         {nelt/2, nelt*3/2, nelt/2+1, nelt*3/2+1,
    5403                 :            :                                          ...}>  */
    5404                 :       1170 :                 low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
    5405                 :       1170 :                 perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
    5406                 :            :                                                  vect2, perm_mask_low);
    5407                 :       1170 :                 vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    5408                 :       1170 :                 (*result_chain)[2*j+1] = low;
    5409                 :            :               }
    5410                 :        970 :             memcpy (dr_chain.address (), result_chain->address (),
    5411                 :            :                     length * sizeof (tree));
    5412                 :            :           }
    5413                 :            :     }
    5414                 :        332 : }
    5415                 :            : 
    5416                 :            : /* Function vect_setup_realignment
    5417                 :            : 
    5418                 :            :    This function is called when vectorizing an unaligned load using
    5419                 :            :    the dr_explicit_realign[_optimized] scheme.
    5420                 :            :    This function generates the following code at the loop prolog:
    5421                 :            : 
    5422                 :            :       p = initial_addr;
    5423                 :            :    x  msq_init = *(floor(p));   # prolog load
    5424                 :            :       realignment_token = call target_builtin;
    5425                 :            :     loop:
    5426                 :            :    x  msq = phi (msq_init, ---)
    5427                 :            : 
    5428                 :            :    The stmts marked with x are generated only for the case of
    5429                 :            :    dr_explicit_realign_optimized.
    5430                 :            : 
    5431                 :            :    The code above sets up a new (vector) pointer, pointing to the first
    5432                 :            :    location accessed by STMT_INFO, and a "floor-aligned" load using that
    5433                 :            :    pointer.  It also generates code to compute the "realignment-token"
    5434                 :            :    (if the relevant target hook was defined), and creates a phi-node at the
    5435                 :            :    loop-header bb whose arguments are the result of the prolog-load (created
    5436                 :            :    by this function) and the result of a load that takes place in the loop
    5437                 :            :    (to be created by the caller to this function).
    5438                 :            : 
    5439                 :            :    For the case of dr_explicit_realign_optimized:
    5440                 :            :    The caller to this function uses the phi-result (msq) to create the
    5441                 :            :    realignment code inside the loop, and sets up the missing phi argument,
    5442                 :            :    as follows:
    5443                 :            :     loop:
    5444                 :            :       msq = phi (msq_init, lsq)
    5445                 :            :       lsq = *(floor(p'));        # load in loop
    5446                 :            :       result = realign_load (msq, lsq, realignment_token);
    5447                 :            : 
    5448                 :            :    For the case of dr_explicit_realign:
    5449                 :            :     loop:
    5450                 :            :       msq = *(floor(p));        # load in loop
    5451                 :            :       p' = p + (VS-1);
    5452                 :            :       lsq = *(floor(p'));       # load in loop
    5453                 :            :       result = realign_load (msq, lsq, realignment_token);
    5454                 :            : 
    5455                 :            :    Input:
    5456                 :            :    STMT_INFO - (scalar) load stmt to be vectorized. This load accesses
    5457                 :            :                a memory location that may be unaligned.
    5458                 :            :    GSI - place where new code is to be inserted.
    5459                 :            :    ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
    5460                 :            :                               is used.
    5461                 :            : 
    5462                 :            :    Output:
    5463                 :            :    REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
    5464                 :            :                        target hook, if defined.
    5465                 :            :    Return value - the result of the loop-header phi node.  */
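A scalar picture of the dr_explicit_realign scheme above (illustration only, not GCC code; the 16-byte VS, the helper names, and the use of a plain byte offset in place of the target's realignment token are assumptions made for the sketch):

#include <cstdio>
#include <cstdint>
#include <cstring>

constexpr size_t VS = 16;   /* pretend vector size in bytes */

/* floor (p): round P down to a VS-aligned address.  */
static const uint8_t *
floor_align (const uint8_t *p)
{
  return (const uint8_t *) ((uintptr_t) p & -(uintptr_t) VS);
}

/* Stand-in for realign_load (msq, lsq, realignment_token): concatenate the
   two aligned chunks and take VS bytes starting at the misalignment.  */
static void
realign_load (const uint8_t *msq, const uint8_t *lsq, size_t token,
              uint8_t *out)
{
  uint8_t buf[2 * VS];
  memcpy (buf, msq, VS);
  memcpy (buf + VS, lsq, VS);
  memcpy (out, buf + token, VS);
}

int main ()
{
  alignas (VS) uint8_t data[4 * VS];
  for (unsigned i = 0; i < sizeof data; i++)
    data[i] = (uint8_t) i;

  const uint8_t *p = data + 5;                    /* unaligned access */
  size_t token = (uintptr_t) p % VS;              /* misalignment; fixed across
                                                     vectorized iterations */

  const uint8_t *msq = floor_align (p);           /* msq = *(floor(p))        */
  const uint8_t *lsq = floor_align (p + VS - 1);  /* lsq = *(floor(p + VS-1)) */

  uint8_t result[VS];
  realign_load (msq, lsq, token, result);         /* == unaligned load at p */

  for (unsigned i = 0; i < VS; i++)
    printf ("%d ", result[i]);                    /* prints 5 6 ... 20 */
  printf ("\n");
  return 0;
}

On real targets the realignment token comes from the builtin_mask_for_load hook (typically a permute mask rather than a byte offset), but the effect is the same: two aligned loads plus one select recover the unaligned vector.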
    5466                 :            : 
    5467                 :            : tree
    5468                 :          0 : vect_setup_realignment (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
    5469                 :            :                         tree *realignment_token,
    5470                 :            :                         enum dr_alignment_support alignment_support_scheme,
    5471                 :            :                         tree init_addr,
    5472                 :            :                         class loop **at_loop)
    5473                 :            : {
    5474                 :          0 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    5475                 :          0 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    5476                 :          0 :   dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
    5477                 :          0 :   struct data_reference *dr = dr_info->dr;
    5478                 :          0 :   class loop *loop = NULL;
    5479                 :          0 :   edge pe = NULL;
    5480                 :          0 :   tree scalar_dest = gimple_assign_lhs (stmt_info->stmt);
    5481                 :          0 :   tree vec_dest;
    5482                 :          0 :   gimple *inc;
    5483                 :          0 :   tree ptr;
    5484                 :          0 :   tree data_ref;
    5485                 :          0 :   basic_block new_bb;
    5486                 :          0 :   tree msq_init = NULL_TREE;
    5487                 :          0 :   tree new_temp;
    5488                 :          0 :   gphi *phi_stmt;
    5489                 :          0 :   tree msq = NULL_TREE;
    5490                 :          0 :   gimple_seq stmts = NULL;
    5491                 :          0 :   bool compute_in_loop = false;
    5492                 :          0 :   bool nested_in_vect_loop = false;
    5493                 :          0 :   class loop *containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
    5494                 :          0 :   class loop *loop_for_initial_load = NULL;
    5495                 :            : 
    5496                 :          0 :   if (loop_vinfo)
    5497                 :            :     {
    5498                 :          0 :       loop = LOOP_VINFO_LOOP (loop_vinfo);
    5499                 :          0 :       nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
    5500                 :            :     }
    5501                 :            : 
    5502                 :          0 :   gcc_assert (alignment_support_scheme == dr_explicit_realign
    5503                 :            :               || alignment_support_scheme == dr_explicit_realign_optimized);
    5504                 :            : 
    5505                 :            :   /* We need to generate three things:
    5506                 :            :      1. the misalignment computation
    5507                 :            :      2. the extra vector load (for the optimized realignment scheme).
    5508                 :            :      3. the phi node for the two vectors from which the realignment is
    5509                 :            :       done (for the optimized realignment scheme).  */
    5510                 :            : 
    5511                 :            :   /* 1. Determine where to generate the misalignment computation.
    5512                 :            : 
    5513                 :            :      If INIT_ADDR is NULL_TREE, this indicates that the misalignment
    5514                 :            :      calculation will be generated by this function, outside the loop (in the
    5515                 :            :      preheader).  Otherwise, INIT_ADDR had already been computed for us by the
    5516                 :            :      caller, inside the loop.
    5517                 :            : 
    5518                 :            :      Background: If the misalignment remains fixed throughout the iterations of
    5519                 :            :      the loop, then both realignment schemes are applicable, and also the
    5520                 :            :      misalignment computation can be done outside LOOP.  This is because we are
    5521                 :            :      vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
    5522                 :            :      are a multiple of VS (the Vector Size), and therefore the misalignment in
    5523                 :            :      different vectorized LOOP iterations is always the same.
    5524                 :            :      The problem arises only if the memory access is in an inner-loop nested
    5525                 :            :      inside LOOP, which is now being vectorized using outer-loop vectorization.
    5526                 :            :      This is the only case when the misalignment of the memory access may not
    5527                 :            :      remain fixed throughout the iterations of the inner-loop (as explained in
    5528                 :            :      detail in vect_supportable_dr_alignment).  In this case, not only is the
    5529                 :            :      optimized realignment scheme not applicable, but also the misalignment
    5530                 :            :      computation (and generation of the realignment token that is passed to
    5531                 :            :      REALIGN_LOAD) have to be done inside the loop.
    5532                 :            : 
    5533                 :            :      In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
    5534                 :            :      or not, which in turn determines if the misalignment is computed inside
    5535                 :            :      the inner-loop, or outside LOOP.  */
    5536                 :            : 
    5537                 :          0 :   if (init_addr != NULL_TREE || !loop_vinfo)
    5538                 :            :     {
    5539                 :          0 :       compute_in_loop = true;
    5540                 :          0 :       gcc_assert (alignment_support_scheme == dr_explicit_realign);
    5541                 :            :     }
    5542                 :            : 
    5543                 :            : 
    5544                 :            :   /* 2. Determine where to generate the extra vector load.
    5545                 :            : 
    5546                 :            :      For the optimized realignment scheme, instead of generating two vector
    5547                 :            :      loads in each iteration, we generate a single extra vector load in the
    5548                 :            :      preheader of the loop, and in each iteration reuse the result of the
    5549                 :            :      vector load from the previous iteration.  In case the memory access is in
    5550                 :            :      an inner-loop nested inside LOOP, which is now being vectorized using
    5551                 :            :      outer-loop vectorization, we need to determine whether this initial vector
    5552                 :            :      load should be generated at the preheader of the inner-loop, or can be
    5553                 :            :      generated at the preheader of LOOP.  If the memory access has no evolution
    5554                 :            :      in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
    5555                 :            :      to be generated inside LOOP (in the preheader of the inner-loop).  */
    5556                 :            : 
    5557                 :          0 :   if (nested_in_vect_loop)
    5558                 :            :     {
    5559                 :          0 :       tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
    5560                 :          0 :       bool invariant_in_outerloop =
    5561                 :          0 :             (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
    5562                 :          0 :       loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
    5563                 :            :     }
    5564                 :            :   else
    5565                 :            :     loop_for_initial_load = loop;
    5566                 :          0 :   if (at_loop)
    5567                 :          0 :     *at_loop = loop_for_initial_load;
    5568                 :            : 
    5569                 :          0 :   if (loop_for_initial_load)
    5570                 :          0 :     pe = loop_preheader_edge (loop_for_initial_load);
    5571                 :            : 
    5572                 :            :   /* 3. For the case of the optimized realignment, create the first vector
    5573                 :            :       load at the loop preheader.  */
    5574                 :            : 
    5575                 :          0 :   if (alignment_support_scheme == dr_explicit_realign_optimized)
    5576                 :            :     {
    5577                 :            :       /* Create msq_init = *(floor(p1)) in the loop preheader  */
    5578                 :          0 :       gassign *new_stmt;
    5579                 :            : 
    5580                 :          0 :       gcc_assert (!compute_in_loop);
    5581                 :          0 :       vec_dest = vect_create_destination_var (scalar_dest, vectype);
    5582                 :          0 :       ptr = vect_create_data_ref_ptr (stmt_info, vectype,
    5583                 :            :                                       loop_for_initial_load, NULL_TREE,
    5584                 :            :                                       &init_addr, NULL, &inc, true);
    5585                 :          0 :       if (TREE_CODE (ptr) == SSA_NAME)
    5586                 :          0 :         new_temp = copy_ssa_name (ptr);
    5587                 :            :       else
    5588                 :          0 :         new_temp = make_ssa_name (TREE_TYPE (ptr));
    5589                 :          0 :       poly_uint64 align = DR_TARGET_ALIGNMENT (dr_info);
    5590                 :          0 :       tree type = TREE_TYPE (ptr);
    5591                 :          0 :       new_stmt = gimple_build_assign
    5592                 :          0 :                    (new_temp, BIT_AND_EXPR, ptr,
    5593                 :          0 :                     fold_build2 (MINUS_EXPR, type,
    5594                 :            :                                  build_int_cst (type, 0),
    5595                 :            :                                  build_int_cst (type, align)));
    5596                 :          0 :       new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
    5597                 :          0 :       gcc_assert (!new_bb);
    5598                 :          0 :       data_ref
    5599                 :          0 :         = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
    5600                 :          0 :                   build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
    5601                 :          0 :       vect_copy_ref_info (data_ref, DR_REF (dr));
    5602                 :          0 :       new_stmt = gimple_build_assign (vec_dest, data_ref);
    5603                 :          0 :       new_temp = make_ssa_name (vec_dest, new_stmt);
    5604                 :          0 :       gimple_assign_set_lhs (new_stmt, new_temp);
    5605                 :          0 :       if (pe)
    5606                 :            :         {
    5607                 :          0 :           new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
    5608                 :          0 :           gcc_assert (!new_bb);
    5609                 :            :         }
    5610                 :            :       else
    5611                 :          0 :          gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
    5612                 :            : 
    5613                 :          0 :       msq_init = gimple_assign_lhs (new_stmt);
    5614                 :            :     }
    5615                 :            : 
    5616                 :            :   /* 4. Create realignment token using a target builtin, if available.
    5617                 :            :       It is done either inside the containing loop, or before LOOP (as
    5618                 :            :       determined above).  */
    5619                 :            : 
    5620                 :          0 :   if (targetm.vectorize.builtin_mask_for_load)
    5621                 :            :     {
    5622                 :          0 :       gcall *new_stmt;
    5623                 :          0 :       tree builtin_decl;
    5624                 :            : 
    5625                 :            :       /* Compute INIT_ADDR - the initial address accessed by this memref.  */
    5626                 :          0 :       if (!init_addr)
    5627                 :            :         {
    5628                 :            :           /* Generate the INIT_ADDR computation outside LOOP.  */
    5629                 :          0 :           init_addr = vect_create_addr_base_for_vector_ref (stmt_info, &stmts,
    5630                 :            :                                                             NULL_TREE);
    5631                 :          0 :           if (loop)
    5632                 :            :             {
    5633                 :          0 :               pe = loop_preheader_edge (loop);
    5634                 :          0 :               new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
    5635                 :          0 :               gcc_assert (!new_bb);
    5636                 :            :             }
    5637                 :            :           else
    5638                 :          0 :              gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
    5639                 :            :         }
    5640                 :            : 
    5641                 :          0 :       builtin_decl = targetm.vectorize.builtin_mask_for_load ();
    5642                 :          0 :       new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
    5643                 :          0 :       vec_dest =
    5644                 :          0 :         vect_create_destination_var (scalar_dest,
    5645                 :            :                                      gimple_call_return_type (new_stmt));
    5646                 :          0 :       new_temp = make_ssa_name (vec_dest, new_stmt);
    5647                 :          0 :       gimple_call_set_lhs (new_stmt, new_temp);
    5648                 :            : 
    5649                 :          0 :       if (compute_in_loop)
    5650                 :          0 :         gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
    5651                 :            :       else
    5652                 :            :         {
    5653                 :            :           /* Generate the misalignment computation outside LOOP.  */
    5654                 :          0 :           pe = loop_preheader_edge (loop);
    5655                 :          0 :           new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
    5656                 :          0 :           gcc_assert (!new_bb);
    5657                 :            :         }
    5658                 :            : 
    5659                 :          0 :       *realignment_token = gimple_call_lhs (new_stmt);
    5660                 :            : 
    5661                 :            :       /* The result of the CALL_EXPR to this builtin is determined from
    5662                 :            :          the value of the parameter and no global variables are touched
    5663                 :            :          which makes the builtin a "const" function.  Requiring the
    5664                 :            :          builtin to have the "const" attribute makes it unnecessary
    5665                 :            :          to call mark_call_clobbered.  */
    5666                 :          0 :       gcc_assert (TREE_READONLY (builtin_decl));
    5667                 :            :     }
    5668                 :            : 
    5669                 :          0 :   if (alignment_support_scheme == dr_explicit_realign)
    5670                 :            :     return msq;
    5671                 :            : 
    5672                 :          0 :   gcc_assert (!compute_in_loop);
    5673                 :          0 :   gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
    5674                 :            : 
    5675                 :            : 
    5676                 :            :   /* 5. Create msq = phi <msq_init, lsq> in loop  */
    5677                 :            : 
    5678                 :          0 :   pe = loop_preheader_edge (containing_loop);
    5679                 :          0 :   vec_dest = vect_create_destination_var (scalar_dest, vectype);
    5680                 :          0 :   msq = make_ssa_name (vec_dest);
    5681                 :          0 :   phi_stmt = create_phi_node (msq, containing_loop->header);
    5682                 :          0 :   add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
    5683                 :            : 
    5684                 :          0 :   return msq;
    5685                 :            : }
    5686                 :            : 
    5687                 :            : 
    5688                 :            : /* Function vect_grouped_load_supported.
    5689                 :            : 
    5690                 :            :    COUNT is the size of the load group (the number of statements plus the
    5691                 :            :    number of gaps).  SINGLE_ELEMENT_P is true if there is actually
    5692                 :            :    only one statement, with a gap of COUNT - 1.
    5693                 :            : 
    5694                 :            :    Returns true if a suitable permute exists.  */
    5695                 :            : 
    5696                 :            : bool
    5697                 :      11651 : vect_grouped_load_supported (tree vectype, bool single_element_p,
    5698                 :            :                              unsigned HOST_WIDE_INT count)
    5699                 :            : {
    5700                 :      11651 :   machine_mode mode = TYPE_MODE (vectype);
    5701                 :            : 
    5702                 :            :   /* If this is single-element interleaving with an element distance
    5703                 :            :      that leaves unused vector loads around, punt - we at least create
    5704                 :            :      very sub-optimal code in that case (and blow up memory,
    5705                 :            :      see PR65518).  */
    5706                 :      11651 :   if (single_element_p && maybe_gt (count, TYPE_VECTOR_SUBPARTS (vectype)))
    5707                 :            :     {
    5708                 :        843 :       if (dump_enabled_p ())
    5709                 :        144 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    5710                 :            :                          "single-element interleaving not supported "
    5711                 :            :                          "for not adjacent vector loads\n");
    5712                 :        843 :       return false;
    5713                 :            :     }
    5714                 :            : 
    5715                 :            :   /* vect_permute_load_chain requires the group size to be equal to 3 or
    5716                 :            :      be a power of two.  */
    5717                 :      10808 :   if (count != 3 && exact_log2 (count) == -1)
    5718                 :            :     {
    5719                 :        166 :       if (dump_enabled_p ())
    5720                 :         19 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    5721                 :            :                          "the size of the group of accesses"
    5722                 :            :                          " is not a power of 2 or not equal to 3\n");
    5723                 :        166 :       return false;
    5724                 :            :     }
    5725                 :            : 
    5726                 :            :   /* Check that the permutation is supported.  */
    5727                 :      10642 :   if (VECTOR_MODE_P (mode))
    5728                 :            :     {
    5729                 :      10642 :       unsigned int i, j;
    5730                 :      10642 :       if (count == 3)
    5731                 :            :         {
    5732                 :        311 :           unsigned int nelt;
    5733                 :        622 :           if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
    5734                 :            :             {
    5735                 :            :               if (dump_enabled_p ())
    5736                 :            :                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    5737                 :            :                                  "cannot handle groups of 3 loads for"
    5738                 :            :                                  " variable-length vectors\n");
    5739                 :            :               return false;
    5740                 :            :             }
    5741                 :            : 
    5742                 :        622 :           vec_perm_builder sel (nelt, nelt, 1);
    5743                 :        311 :           sel.quick_grow (nelt);
    5744                 :        622 :           vec_perm_indices indices;
    5745                 :        311 :           unsigned int k;
    5746                 :        812 :           for (k = 0; k < 3; k++)
    5747                 :            :             {
    5748                 :       4127 :               for (i = 0; i < nelt; i++)
    5749                 :       3482 :                 if (3 * i + k < 2 * nelt)
    5750                 :       2397 :                   sel[i] = 3 * i + k;
    5751                 :            :                 else
    5752                 :       1085 :                   sel[i] = 0;
    5753                 :        645 :               indices.new_vector (sel, 2, nelt);
    5754                 :        645 :               if (!can_vec_perm_const_p (mode, indices))
    5755                 :            :                 {
    5756                 :        144 :                   if (dump_enabled_p ())
    5757                 :         54 :                     dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    5758                 :            :                                      "shuffle of 3 loads is not supported by"
    5759                 :            :                                      " target\n");
    5760                 :        144 :                   return false;
    5761                 :            :                 }
    5762                 :       3063 :               for (i = 0, j = 0; i < nelt; i++)
    5763                 :       2562 :                 if (3 * i + k < 2 * nelt)
    5764                 :       2562 :                   sel[i] = i;
    5765                 :            :                 else
    5766                 :        854 :                   sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
    5767                 :        501 :               indices.new_vector (sel, 2, nelt);
    5768                 :        501 :               if (!can_vec_perm_const_p (mode, indices))
    5769                 :            :                 {
    5770                 :          0 :                   if (dump_enabled_p ())
    5771                 :          0 :                     dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    5772                 :            :                                      "shuffle of 3 loads is not supported by"
    5773                 :            :                                      " target\n");
    5774                 :          0 :                   return false;
    5775                 :            :                 }
    5776                 :            :             }
    5777                 :            :           return true;
    5778                 :            :         }
    5779                 :            :       else
    5780                 :            :         {
    5781                 :            :           /* If length is not equal to 3 then only power of 2 is supported.  */
    5782                 :      10331 :           gcc_assert (pow2p_hwi (count));
    5783                 :      20662 :           poly_uint64 nelt = GET_MODE_NUNITS (mode);
    5784                 :            : 
    5785                 :            :           /* The encoding has a single stepped pattern.  */
    5786                 :      10601 :           vec_perm_builder sel (nelt, 1, 3);
    5787                 :      10331 :           sel.quick_grow (3);
    5788                 :      41324 :           for (i = 0; i < 3; i++)
    5789                 :      30993 :             sel[i] = i * 2;
    5790                 :      10601 :           vec_perm_indices indices (sel, 2, nelt);
    5791                 :      10331 :           if (can_vec_perm_const_p (mode, indices))
    5792                 :            :             {
    5793                 :      40244 :               for (i = 0; i < 3; i++)
    5794                 :      30183 :                 sel[i] = i * 2 + 1;
    5795                 :      10061 :               indices.new_vector (sel, 2, nelt);
    5796                 :      10061 :               if (can_vec_perm_const_p (mode, indices))
    5797                 :      20122 :                 return true;
    5798                 :            :             }
    5799                 :            :         }
    5800                 :            :     }
    5801                 :            : 
    5802                 :        270 :   if (dump_enabled_p ())
    5803                 :        134 :     dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    5804                 :            :                      "extract even/odd not supported by target\n");
    5805                 :            :   return false;
    5806                 :            : }
    5807                 :            : 
    5808                 :            : /* Return TRUE if vec_{mask_}load_lanes is available for COUNT vectors of
    5809                 :            :    type VECTYPE.  MASKED_P says whether the masked form is needed.  */
    5810                 :            : 
    5811                 :            : bool
    5812                 :      11651 : vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count,
    5813                 :            :                            bool masked_p)
    5814                 :            : {
    5815                 :      11651 :   if (masked_p)
    5816                 :          2 :     return vect_lanes_optab_supported_p ("vec_mask_load_lanes",
    5817                 :            :                                          vec_mask_load_lanes_optab,
    5818                 :          2 :                                          vectype, count);
    5819                 :            :   else
    5820                 :      11649 :     return vect_lanes_optab_supported_p ("vec_load_lanes",
    5821                 :            :                                          vec_load_lanes_optab,
    5822                 :      11649 :                                          vectype, count);
    5823                 :            : }
    5824                 :            : 
    5825                 :            : /* Function vect_permute_load_chain.
    5826                 :            : 
    5827                 :            :    Given a chain of interleaved loads in DR_CHAIN of LENGTH that must be
    5828                 :            :    a power of 2 or equal to 3, generate extract_even/odd stmts to reorder
    5829                 :            :    the input data correctly.  Return the final references for loads in
    5830                 :            :    RESULT_CHAIN.
    5831                 :            : 
    5832                 :            :    E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
    5833                 :            :    The input is 4 vectors each containing 8 elements. We assign a number to each
    5834                 :            :    element; the input sequence is:
    5835                 :            : 
    5836                 :            :    1st vec:   0  1  2  3  4  5  6  7
    5837                 :            :    2nd vec:   8  9 10 11 12 13 14 15
    5838                 :            :    3rd vec:  16 17 18 19 20 21 22 23
    5839                 :            :    4th vec:  24 25 26 27 28 29 30 31
    5840                 :            : 
    5841                 :            :    The output sequence should be:
    5842                 :            : 
    5843                 :            :    1st vec:  0 4  8 12 16 20 24 28
    5844                 :            :    2nd vec:  1 5  9 13 17 21 25 29
    5845                 :            :    3rd vec:  2 6 10 14 18 22 26 30
    5846                 :            :    4th vec:  3 7 11 15 19 23 27 31
    5847                 :            : 
    5848                 :            :    i.e., the first output vector should contain the first elements of each
    5849                 :            :    interleaving group, etc.
    5850                 :            : 
    5851                 :            :    We use extract_even/odd instructions to create such output.  The input of
    5852                 :            :    each extract_even/odd operation is two vectors
    5853                 :            :    1st vec    2nd vec
    5854                 :            :    0 1 2 3    4 5 6 7
    5855                 :            : 
    5856                 :            :    and the output is the vector of extracted even/odd elements.  The output of
    5857                 :            :    extract_even will be:   0 2 4 6
    5858                 :            :    and of extract_odd:     1 3 5 7
    5859                 :            : 
    5860                 :            : 
    5861                 :            :    The permutation is done in log LENGTH stages.  In each stage extract_even
    5862                 :            :    and extract_odd stmts are created for each pair of vectors in DR_CHAIN in
    5863                 :            :    their order.  In our example,
    5864                 :            : 
    5865                 :            :    E1: extract_even (1st vec, 2nd vec)
    5866                 :            :    E2: extract_odd (1st vec, 2nd vec)
    5867                 :            :    E3: extract_even (3rd vec, 4th vec)
    5868                 :            :    E4: extract_odd (3rd vec, 4th vec)
    5869                 :            : 
    5870                 :            :    The output for the first stage will be:
    5871                 :            : 
    5872                 :            :    E1:  0  2  4  6  8 10 12 14
    5873                 :            :    E2:  1  3  5  7  9 11 13 15
    5874                 :            :    E3: 16 18 20 22 24 26 28 30
    5875                 :            :    E4: 17 19 21 23 25 27 29 31
    5876                 :            : 
    5877                 :            :    In order to proceed and create the correct sequence for the next stage (or
    5878                 :            :    for the correct output, if the second stage is the last one, as in our
    5879                 :            :    example), we first put the output of extract_even operation and then the
    5880                 :            :    output of extract_odd in RESULT_CHAIN (which is then copied to DR_CHAIN).
    5881                 :            :    The input for the second stage is:
    5882                 :            : 
    5883                 :            :    1st vec (E1):  0  2  4  6  8 10 12 14
    5884                 :            :    2nd vec (E3): 16 18 20 22 24 26 28 30
    5885                 :            :    3rd vec (E2):  1  3  5  7  9 11 13 15
    5886                 :            :    4th vec (E4): 17 19 21 23 25 27 29 31
    5887                 :            : 
    5888                 :            :    The output of the second stage:
    5889                 :            : 
    5890                 :            :    E1: 0 4  8 12 16 20 24 28
    5891                 :            :    E2: 2 6 10 14 18 22 26 30
    5892                 :            :    E3: 1 5  9 13 17 21 25 29
    5893                 :            :    E4: 3 7 11 15 19 23 27 31
    5894                 :            : 
    5895                 :            :    And RESULT_CHAIN after reordering:
    5896                 :            : 
    5897                 :            :    1st vec (E1):  0 4  8 12 16 20 24 28
    5898                 :            :    2nd vec (E3):  1 5  9 13 17 21 25 29
    5899                 :            :    3rd vec (E2):  2 6 10 14 18 22 26 30
    5900                 :            :    4th vec (E4):  3 7 11 15 19 23 27 31.  */
    5901                 :            : 
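The reordering described above can be reproduced outside the compiler.  The
following minimal standalone C sketch is an illustration only (it is not taken
from tree-vect-data-refs.c; simulate_permute_load_chain, LENGTH and NELT are
names invented for the example): it applies the same extract_even/odd steps to
plain int arrays and, for the 4-vector, 8-element case, prints exactly the
final RESULT_CHAIN shown above.

/* Standalone simulation of the extract_even/odd load-chain permutation.  */
#include <stdio.h>
#include <string.h>

#define LENGTH 4        /* Number of vectors in the load chain (power of 2).  */
#define NELT   8        /* Elements per vector.  */

static void
simulate_permute_load_chain (int chain[LENGTH][NELT])
{
  int result[LENGTH][NELT];

  /* One iteration per permutation stage: log2 (LENGTH) in total.  */
  for (int remaining = LENGTH; remaining > 1; remaining /= 2)
    {
      for (int j = 0; j < LENGTH; j += 2)
        for (int i = 0; i < NELT; i++)
          {
            /* extract_even of concat (chain[j], chain[j + 1]) goes to the
               first half of the result chain ...  */
            int even = 2 * i;
            result[j / 2][i] = (even < NELT
                                ? chain[j][even]
                                : chain[j + 1][even - NELT]);
            /* ... and extract_odd goes to the second half.  */
            int odd = 2 * i + 1;
            result[j / 2 + LENGTH / 2][i] = (odd < NELT
                                             ? chain[j][odd]
                                             : chain[j + 1][odd - NELT]);
          }
      memcpy (chain, result, sizeof (result));
    }
}

int
main (void)
{
  int chain[LENGTH][NELT];
  for (int j = 0; j < LENGTH; j++)
    for (int i = 0; i < NELT; i++)
      chain[j][i] = j * NELT + i;       /* Input elements 0 .. 31.  */

  simulate_permute_load_chain (chain);

  /* Prints 0 4 8 .. 28 / 1 5 9 .. 29 / 2 6 .. 30 / 3 7 .. 31, matching the
     final RESULT_CHAIN in the comment above.  */
  for (int j = 0; j < LENGTH; j++)
    {
      for (int i = 0; i < NELT; i++)
        printf (" %2d", chain[j][i]);
      printf ("\n");
    }
  return 0;
}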
    5902                 :            : static void
    5903                 :       2660 : vect_permute_load_chain (vec<tree> dr_chain,
    5904                 :            :                          unsigned int length,
    5905                 :            :                          stmt_vec_info stmt_info,
    5906                 :            :                          gimple_stmt_iterator *gsi,
    5907                 :            :                          vec<tree> *result_chain)
    5908                 :            : {
    5909                 :       2660 :   tree data_ref, first_vect, second_vect;
    5910                 :       2660 :   tree perm_mask_even, perm_mask_odd;
    5911                 :       2660 :   tree perm3_mask_low, perm3_mask_high;
    5912                 :       2660 :   gimple *perm_stmt;
    5913                 :       2660 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    5914                 :       2660 :   unsigned int i, j, log_length = exact_log2 (length);
    5915                 :            : 
    5916                 :       2660 :   result_chain->quick_grow (length);
    5917                 :       5320 :   memcpy (result_chain->address (), dr_chain.address (),
    5918                 :            :           length * sizeof (tree));
    5919                 :            : 
    5920                 :       2660 :   if (length == 3)
    5921                 :            :     {
    5922                 :            :       /* vect_grouped_load_supported ensures that this is constant.  */
    5923                 :         12 :       unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
    5924                 :         12 :       unsigned int k;
    5925                 :            : 
    5926                 :         24 :       vec_perm_builder sel (nelt, nelt, 1);
    5927                 :         12 :       sel.quick_grow (nelt);
    5928                 :         24 :       vec_perm_indices indices;
    5929                 :         48 :       for (k = 0; k < 3; k++)
    5930                 :            :         {
    5931                 :        324 :           for (i = 0; i < nelt; i++)
    5932                 :        288 :             if (3 * i + k < 2 * nelt)
    5933                 :        192 :               sel[i] = 3 * i + k;
    5934                 :            :             else
    5935                 :         96 :               sel[i] = 0;
    5936                 :         36 :           indices.new_vector (sel, 2, nelt);
    5937                 :         36 :           perm3_mask_low = vect_gen_perm_mask_checked (vectype, indices);
    5938                 :            : 
    5939                 :        324 :           for (i = 0, j = 0; i < nelt; i++)
    5940                 :        288 :             if (3 * i + k < 2 * nelt)
    5941                 :        288 :               sel[i] = i;
    5942                 :            :             else
    5943                 :         96 :               sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
    5944                 :         36 :           indices.new_vector (sel, 2, nelt);
    5945                 :         36 :           perm3_mask_high = vect_gen_perm_mask_checked (vectype, indices);
    5946                 :            : 
    5947                 :         36 :           first_vect = dr_chain[0];
    5948                 :         36 :           second_vect = dr_chain[1];
    5949                 :            : 
    5950                 :            :           /* Create interleaving stmt (low part of):
    5951                 :            :              low = VEC_PERM_EXPR <first_vect, second_vect, {k, 3 + k, 6 + k,
    5952                 :            :                                                             ...}>  */
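          /* As a worked illustration (assuming nelt == 8 and k == 0): the
             two masks built above are
               perm3_mask_low  = {0, 3, 6, 9, 12, 15, 0, 0}
               perm3_mask_high = {0, 1, 2, 3, 4, 5, 10, 13}
             so the low permute gathers every third element of the first two
             vectors, and the high permute keeps those elements while filling
             the tail from the third vector, giving
             {0, 3, 6, 9, 12, 15, 18, 21} as result_chain[0].  */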
    5953                 :         36 :           data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
    5954                 :         36 :           perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
    5955                 :            :                                            second_vect, perm3_mask_low);
    5956                 :         36 :           vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    5957                 :            : 
    5958                 :            :           /* Create interleaving stmt (high part of):
    5959                 :            :              high = VEC_PERM_EXPR <low, dr_chain[2], perm3_mask_high>, which
    5960                 :            :              fills the remaining elements from the third vector.  */
    5961                 :         36 :           first_vect = data_ref;
    5962                 :         36 :           second_vect = dr_chain[2];
    5963                 :         36 :           data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
    5964                 :         36 :           perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
    5965                 :            :                                            second_vect, perm3_mask_high);
    5966                 :         36 :           vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    5967                 :         36 :           (*result_chain)[k] = data_ref;
    5968                 :            :         }
    5969                 :            :     }
    5970                 :            :   else
    5971                 :            :     {
    5972                 :            :       /* If length is not equal to 3 then only power of 2 is supported.  */
    5973                 :       2648 :       gcc_assert (pow2p_hwi (length));
    5974                 :            : 
    5975                 :            :       /* The encoding has a single stepped pattern.  */
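      /* That is, {0, 2, 4} encodes the whole series 0, 2, 4, 6, ..., since
         the builder extends a stepped pattern by the difference between its
         last two elements; this is what lets the even/odd masks be
         described even for variable-length vectors.  */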
    5976                 :       2648 :       poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
    5977                 :       5296 :       vec_perm_builder sel (nelt, 1, 3);
    5978                 :       2648 :       sel.quick_grow (3);
    5979                 :      10592 :       for (i = 0; i < 3; ++i)
    5980                 :       7944 :         sel[i] = i * 2;
    5981                 :       5296 :       vec_perm_indices indices (sel, 2, nelt);
    5982                 :       2648 :       perm_mask_even = vect_gen_perm_mask_checked (vectype, indices);
    5983                 :            : 
    5984                 :      10592 :       for (i = 0; i < 3; ++i)
    5985                 :       7944 :         sel[i] = i * 2 + 1;
    5986                 :       2648 :       indices.new_vector (sel, 2, nelt);
    5987                 :       2648 :       perm_mask_odd = vect_gen_perm_mask_checked (vectype, indices);
    5988                 :            : 
    5989                 :       5734 :       for (i = 0; i < log_length; i++)
    5990                 :            :         {
    5991                 :       7659 :           for (j = 0; j < length; j += 2)
    5992                 :            :             {
    5993                 :       4573 :               first_vect = dr_chain[j];
    5994                 :       4573 :               second_vect = dr_chain[j+1];
    5995                 :            : 
    5996                 :            :               /* data_ref = permute_even (first_data_ref, second_data_ref);  */
    5997                 :       4573 :               data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_even");
    5998                 :       4573 :               perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
    5999                 :            :                                                first_vect, second_vect,
    6000                 :            :                                                perm_mask_even);
    6001                 :       4573 :               vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    6002                 :       4573 :               (*result_chain)[j/2] = data_ref;
    6003                 :            : 
    6004                 :            :               /* data_ref = permute_odd (first_data_ref, second_data_ref);  */
    6005                 :       4573 :               data_ref = make_temp_ssa_name (vectype, NULL, "vect_perm_odd");
    6006                 :       4573 :               perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
    6007                 :            :                                                first_vect, second_vect,
    6008                 :            :                                                perm_mask_odd);
    6009                 :       4573 :               vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    6010                 :       4573 :               (*result_chain)[j/2+length/2] = data_ref;
    6011                 :            :             }
    6012                 :       6172 :           memcpy (dr_chain.address (), result_chain->address (),
    6013                 :            :                   length * sizeof (tree));
    6014                 :            :         }
    6015                 :            :     }
    6016                 :       2660 : }
    6017                 :            : 
    6018                 :            : /* Function vect_shift_permute_load_chain.
    6019                 :            : 
    6020                 :            :    Given a chain of loads in DR_CHAIN of LENGTH 2 or 3, generate a
    6021                 :            :    sequence of stmts to reorder the input data accordingly.
    6022                 :            :    Return the final references for loads in RESULT_CHAIN.
    6023                 :            :    Return true if successful, false otherwise.
    6024                 :            : 
    6025                 :            :    E.g., LENGTH is 3 and the scalar type is short, i.e., VF is 8.
    6026                 :            :    The input is 3 vectors each containing 8 elements.  We assign a
    6027                 :            :    number to each element, the input sequence is:
    6028                 :            : 
    6029                 :            :    1st vec:   0  1  2  3  4  5  6  7
    6030                 :            :    2nd vec:   8  9 10 11 12 13 14 15
    6031                 :            :    3rd vec:  16 17 18 19 20 21 22 23
    6032                 :            : 
    6033                 :            :    The output sequence should be:
    6034                 :            : 
    6035                 :            :    1st vec:  0 3 6  9 12 15 18 21
    6036                 :            :    2nd vec:  1 4 7 10 13 16 19 22
    6037                 :            :    3rd vec:  2 5 8 11 14 17 20 23
    6038                 :            : 
    6039                 :            :    We use 3 shuffle instructions and 3 * 3 - 1 shifts to create such output.
    6040                 :            : 
    6041                 :            :    First we shuffle all 3 vectors to get correct elements order:
    6042                 :            : 
    6043                 :            :    1st vec:  ( 0  3  6) ( 1  4  7) ( 2  5)
    6044                 :            :    2nd vec:  ( 8 11 14) ( 9 12 15) (10 13)
    6045                 :            :    3rd vec:  (16 19 22) (17 20 23) (18 21)
    6046                 :            : 
    6047                 :            :    Next we unite and shift vector 3 times:
    6048                 :            : 
    6049                 :            :    1st step:
    6050                 :            :      shift right by 6 the concatenation of:
    6051                 :            :      "1st vec" and  "2nd vec"
    6052                 :            :        ( 0  3  6) ( 1  4  7) |( 2  5) _ ( 8 11 14) ( 9 12 15)| (10 13)
    6053                 :            :      "2nd vec" and  "3rd vec"
    6054                 :            :        ( 8 11 14) ( 9 12 15) |(10 13) _ (16 19 22) (17 20 23)| (18 21)
    6055                 :            :      "3rd vec" and  "1st vec"
    6056                 :            :        (16 19 22) (17 20 23) |(18 21) _ ( 0  3  6) ( 1  4  7)| ( 2  5)
    6057                 :            :                              | New vectors                   |
    6058                 :            : 
    6059                 :            :      So that now new vectors are:
    6060                 :            : 
    6061                 :            :      1st vec:  ( 2  5) ( 8 11 14) ( 9 12 15)
    6062                 :            :      2nd vec:  (10 13) (16 19 22) (17 20 23)
    6063                 :            :      3rd vec:  (18 21) ( 0  3  6) ( 1  4  7)
    6064                 :            : 
    6065                 :            :    2nd step:
    6066                 :            :      shift right by 5 the concatenation of:
    6067                 :            :      "1st vec" and  "3rd vec"
    6068                 :            :        ( 2  5) ( 8 11 14) |( 9 12 15) _ (18 21) ( 0  3  6)| ( 1  4  7)
    6069                 :            :      "2nd vec" and  "1st vec"
    6070                 :            :        (10 13) (16 19 22) |(17 20 23) _ ( 2  5) ( 8 11 14)| ( 9 12 15)
    6071                 :            :      "3rd vec" and  "2nd vec"
    6072                 :            :        (18 21) ( 0  3  6) |( 1  4  7) _ (10 13) (16 19 22)| (17 20 23)
    6073                 :            :                           | New vectors                   |
    6074                 :            : 
    6075                 :            :      So that now new vectors are:
    6076                 :            : 
    6077                 :            :      1st vec:  ( 9 12 15) (18 21) ( 0  3  6)
    6078                 :            :      2nd vec:  (17 20 23) ( 2  5) ( 8 11 14)
    6079                 :            :      3rd vec:  ( 1  4  7) (10 13) (16 19 22) READY
    6080                 :            : 
    6081                 :            :    3rd step:
    6082                 :            :      shift right by 5 the concatenation of:
    6083                 :            :      "1st vec" and  "1st vec"
    6084                 :            :        ( 9 12 15) (18 21) |( 0  3  6) _ ( 9 12 15) (18 21)| ( 0  3  6)
    6085                 :            :      shift right by 3 the concatenation of:
    6086                 :            :      "2nd vec" and  "2nd vec"
    6087                 :            :                (17 20 23) |( 2  5) ( 8 11 14) _ (17 20 23)| ( 2  5) ( 8 11 14)
    6088                 :            :                           | New vectors                   |
    6089                 :            : 
    6090                 :            :      So that now all vectors are READY:
    6091                 :            :      1st vec:  ( 0  3  6) ( 9 12 15) (18 21)
    6092                 :            :      2nd vec:  ( 2  5) ( 8 11 14) (17 20 23)
    6093                 :            :      3rd vec:  ( 1  4  7) (10 13) (16 19 22)
    6094                 :            : 
    6095                 :            :    This algorithm is faster than one in vect_permute_load_chain if:
    6096                 :            :      1.  "shift of a concatenation" is faster than general permutation.
    6097                 :            :          This is usually so.
    6098                 :            :      2.  The TARGET machine can't execute vector instructions in parallel.
    6099                 :            :          This is because each step of the algorithm depends on the previous one.
    6100                 :            :          The algorithm in vect_permute_load_chain is much more parallel.
    6101                 :            : 
    6102                 :            :    The algorithm is applicable only for LOAD CHAIN LENGTH less than VF.
    6103                 :            : */
    6104                 :            : 
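To trace the first steps concretely, the following minimal standalone C sketch
is an illustration only (it is not GCC code; vec_perm, print_vec, NELT and the
hard-coded masks are invented for the example, with the mask values taken from
the "vector length 8" comments inside the function below).  It shuffles the
three input vectors with {0 3 6 1 4 7 2 5} and then performs the
shift-right-by-6 step on each concatenation, reproducing the vectors shown
after the 1st step above.

/* Standalone simulation of the shuffle + "shift of a concatenation" step.  */
#include <stdio.h>

#define NELT 8

/* Select NELT elements from the concatenation of A and B, like a
   VEC_PERM_EXPR with constant selector SEL.  */
static void
vec_perm (const int *a, const int *b, const int *sel, int *out)
{
  for (int i = 0; i < NELT; i++)
    out[i] = sel[i] < NELT ? a[sel[i]] : b[sel[i] - NELT];
}

static void
print_vec (const char *name, const int *v)
{
  printf ("%s:", name);
  for (int i = 0; i < NELT; i++)
    printf (" %2d", v[i]);
  printf ("\n");
}

int
main (void)
{
  int in[3][NELT], shuffled[3][NELT], shifted[3][NELT];
  for (int k = 0; k < 3; k++)
    for (int i = 0; i < NELT; i++)
      in[k][i] = k * NELT + i;                  /* 0..7, 8..15, 16..23 */

  /* perm3_mask for NELT == 8: gather the elements whose index is 0, 1 and 2
     modulo 3, in that order: (0 3 6) (1 4 7) (2 5).  */
  int perm3[NELT]  = { 0, 3, 6, 1, 4, 7, 2, 5 };
  /* shift1_mask for NELT == 8: take elements 6..13 of the concatenation,
     i.e. "shift right by 6".  */
  int shift1[NELT] = { 6, 7, 8, 9, 10, 11, 12, 13 };

  for (int k = 0; k < 3; k++)
    vec_perm (in[k], in[k], perm3, shuffled[k]);

  /* 1st step of the comment: pair each shuffled vector with the next one
     (cyclically) and shift the concatenation right by 6.  */
  for (int k = 0; k < 3; k++)
    vec_perm (shuffled[k], shuffled[(k + 1) % 3], shift1, shifted[k]);

  for (int k = 0; k < 3; k++)
    print_vec (k == 0 ? "1st" : k == 1 ? "2nd" : "3rd", shifted[k]);
  /* Expected, as in the comment:
     1st:  2  5  8 11 14  9 12 15
     2nd: 10 13 16 19 22 17 20 23
     3rd: 18 21  0  3  6  1  4  7  */
  return 0;
}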
    6105                 :            : static bool
    6106                 :          1 : vect_shift_permute_load_chain (vec<tree> dr_chain,
    6107                 :            :                                unsigned int length,
    6108                 :            :                                stmt_vec_info stmt_info,
    6109                 :            :                                gimple_stmt_iterator *gsi,
    6110                 :            :                                vec<tree> *result_chain)
    6111                 :            : {
    6112                 :          1 :   tree vect[3], vect_shift[3], data_ref, first_vect, second_vect;
    6113                 :          1 :   tree perm2_mask1, perm2_mask2, perm3_mask;
    6114                 :          1 :   tree select_mask, shift1_mask, shift2_mask, shift3_mask, shift4_mask;
    6115                 :          1 :   gimple *perm_stmt;
    6116                 :            : 
    6117                 :          1 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    6118                 :          1 :   unsigned int i;
    6119                 :          1 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    6120                 :            : 
    6121                 :          1 :   unsigned HOST_WIDE_INT nelt, vf;
    6122                 :          1 :   if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nelt)
    6123                 :          1 :       || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
    6124                 :            :     /* Not supported for variable-length vectors.  */
    6125                 :            :     return false;
    6126                 :            : 
    6127                 :          1 :   vec_perm_builder sel (nelt, nelt, 1);
    6128                 :          1 :   sel.quick_grow (nelt);
    6129                 :            : 
    6130                 :          1 :   result_chain->quick_grow (length);
    6131                 :          1 :   memcpy (result_chain->address (), dr_chain.address (),
    6132                 :          1 :           length * sizeof (tree));
    6133                 :            : 
    6134                 :          1 :   if (pow2p_hwi (length) && vf > 4)
    6135                 :            :     {
    6136                 :          0 :       unsigned int j, log_length = exact_log2 (length);
    6137                 :          0 :       for (i = 0; i < nelt / 2; ++i)
    6138                 :          0 :         sel[i] = i * 2;
    6139                 :          0 :       for (i = 0; i < nelt / 2; ++i)
    6140                 :          0 :         sel[nelt / 2 + i] = i * 2 + 1;
    6141                 :          0 :       vec_perm_indices indices (sel, 2, nelt);
    6142                 :          0 :       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    6143                 :            :         {
    6144                 :          0 :           if (dump_enabled_p ())
    6145                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6146                 :            :                              "shuffle of 2 fields structure is not "
    6147                 :            :                              "supported by target\n");
    6148                 :          0 :           return false;
    6149                 :            :         }
    6150                 :          0 :       perm2_mask1 = vect_gen_perm_mask_checked (vectype, indices);
    6151                 :            : 
    6152                 :          0 :       for (i = 0; i < nelt / 2; ++i)
    6153                 :          0 :         sel[i] = i * 2 + 1;
    6154                 :          0 :       for (i = 0; i < nelt / 2; ++i)
    6155                 :          0 :         sel[nelt / 2 + i] = i * 2;
    6156                 :          0 :       indices.new_vector (sel, 2, nelt);
    6157                 :          0 :       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    6158                 :            :         {
    6159                 :          0 :           if (dump_enabled_p ())
    6160                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6161                 :            :                              "shuffle of 2 fields structure is not "
    6162                 :            :                              "supported by target\n");
    6163                 :          0 :           return false;
    6164                 :            :         }
    6165                 :          0 :       perm2_mask2 = vect_gen_perm_mask_checked (vectype, indices);
    6166                 :            : 
    6167                 :            :       /* Generating permutation constant to shift all elements.
    6168                 :            :          For vector length 8 it is {4 5 6 7 8 9 10 11}.  */
    6169                 :          0 :       for (i = 0; i < nelt; i++)
    6170                 :          0 :         sel[i] = nelt / 2 + i;
    6171                 :          0 :       indices.new_vector (sel, 2, nelt);
    6172                 :          0 :       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    6173                 :            :         {
    6174                 :          0 :           if (dump_enabled_p ())
    6175                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6176                 :            :                              "shift permutation is not supported by target\n");
    6177                 :          0 :           return false;
    6178                 :            :         }
    6179                 :          0 :       shift1_mask = vect_gen_perm_mask_checked (vectype, indices);
    6180                 :            : 
    6181                 :            :       /* Generating permutation constant to select vector from 2.
    6182                 :            :          For vector length 8 it is {0 1 2 3 12 13 14 15}.  */
    6183                 :          0 :       for (i = 0; i < nelt / 2; i++)
    6184                 :          0 :         sel[i] = i;
    6185                 :          0 :       for (i = nelt / 2; i < nelt; i++)
    6186                 :          0 :         sel[i] = nelt + i;
    6187                 :          0 :       indices.new_vector (sel, 2, nelt);
    6188                 :          0 :       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    6189                 :            :         {
    6190                 :          0 :           if (dump_enabled_p ())
    6191                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6192                 :            :                              "select is not supported by target\n");
    6193                 :          0 :           return false;
    6194                 :            :         }
    6195                 :          0 :       select_mask = vect_gen_perm_mask_checked (vectype, indices);
    6196                 :            : 
    6197                 :          0 :       for (i = 0; i < log_length; i++)
    6198                 :            :         {
    6199                 :          0 :           for (j = 0; j < length; j += 2)
    6200                 :            :             {
    6201                 :          0 :               first_vect = dr_chain[j];
    6202                 :          0 :               second_vect = dr_chain[j + 1];
    6203                 :            : 
    6204                 :          0 :               data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
    6205                 :          0 :               perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
    6206                 :            :                                                first_vect, first_vect,
    6207                 :            :                                                perm2_mask1);
    6208                 :          0 :               vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    6209                 :          0 :               vect[0] = data_ref;
    6210                 :            : 
    6211                 :          0 :               data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
    6212                 :          0 :               perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
    6213                 :            :                                                second_vect, second_vect,
    6214                 :            :                                                perm2_mask2);
    6215                 :          0 :               vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    6216                 :          0 :               vect[1] = data_ref;
    6217                 :            : 
    6218                 :          0 :               data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
    6219                 :          0 :               perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
    6220                 :            :                                                vect[0], vect[1], shift1_mask);
    6221                 :          0 :               vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    6222                 :          0 :               (*result_chain)[j/2 + length/2] = data_ref;
    6223                 :            : 
    6224                 :          0 :               data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
    6225                 :          0 :               perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
    6226                 :            :                                                vect[0], vect[1], select_mask);
    6227                 :          0 :               vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    6228                 :          0 :               (*result_chain)[j/2] = data_ref;
    6229                 :            :             }
    6230                 :          0 :           memcpy (dr_chain.address (), result_chain->address (),
    6231                 :            :                   length * sizeof (tree));
    6232                 :            :         }
    6233                 :            :       return true;
    6234                 :            :     }
    6235                 :          1 :   if (length == 3 && vf > 2)
    6236                 :            :     {
    6237                 :            :       unsigned int k = 0, l = 0;
    6238                 :            : 
    6239                 :            :       /* Generating permutation constant to get all elements in right order.
    6240                 :            :          For vector length 8 it is {0 3 6 1 4 7 2 5}.  */
    6241                 :         17 :       for (i = 0; i < nelt; i++)
    6242                 :            :         {
    6243                 :         16 :           if (3 * k + (l % 3) >= nelt)
    6244                 :            :             {
    6245                 :          2 :               k = 0;
    6246                 :          2 :               l += (3 - (nelt % 3));
    6247                 :            :             }
    6248                 :         16 :           sel[i] = 3 * k + (l % 3);
    6249                 :         16 :           k++;
    6250                 :            :         }
    6251                 :          2 :       vec_perm_indices indices (sel, 2, nelt);
    6252                 :          1 :       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    6253                 :            :         {
    6254                 :          0 :           if (dump_enabled_p ())
    6255                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6256                 :            :                              "shuffle of 3 fields structure is not "
    6257                 :            :                              "supported by target\n");
    6258                 :          0 :           return false;
    6259                 :            :         }
    6260                 :          1 :       perm3_mask = vect_gen_perm_mask_checked (vectype, indices);
    6261                 :            : 
    6262                 :            :       /* Generating permutation constant to shift all elements.
    6263                 :            :          For vector length 8 it is {6 7 8 9 10 11 12 13}.  */
    6264                 :         17 :       for (i = 0; i < nelt; i++)
    6265                 :         16 :         sel[i] = 2 * (nelt / 3) + (nelt % 3) + i;
    6266                 :          1 :       indices.new_vector (sel, 2, nelt);
    6267                 :          1 :       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    6268                 :            :         {
    6269                 :          0 :           if (dump_enabled_p ())
    6270                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6271                 :            :                              "shift permutation is not supported by target\n");
    6272                 :          0 :           return false;
    6273                 :            :         }
    6274                 :          1 :       shift1_mask = vect_gen_perm_mask_checked (vectype, indices);
    6275                 :            : 
    6276                 :            :       /* Generating permutation constant to shift all elements.
    6277                 :            :          For vector length 8 it is {5 6 7 8 9 10 11 12}.  */
    6278                 :         17 :       for (i = 0; i < nelt; i++)
    6279                 :         16 :         sel[i] = 2 * (nelt / 3) + 1 + i;
    6280                 :          1 :       indices.new_vector (sel, 2, nelt);
    6281                 :          1 :       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    6282                 :            :         {
    6283                 :          0 :           if (dump_enabled_p ())
    6284                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6285                 :            :                              "shift permutation is not supported by target\n");
    6286                 :          0 :           return false;
    6287                 :            :         }
    6288                 :          1 :       shift2_mask = vect_gen_perm_mask_checked (vectype, indices);
    6289                 :            : 
    6290                 :            :       /* Generating permutation constant to shift all elements.
    6291                 :            :          For vector length 8 it is {3 4 5 6 7 8 9 10}.  */
    6292                 :         17 :       for (i = 0; i < nelt; i++)
    6293                 :         16 :         sel[i] = (nelt / 3) + (nelt % 3) / 2 + i;
    6294                 :          1 :       indices.new_vector (sel, 2, nelt);
    6295                 :          1 :       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    6296                 :            :         {
    6297                 :          0 :           if (dump_enabled_p ())
    6298                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6299                 :            :                              "shift permutation is not supported by target\n");
    6300                 :          0 :           return false;
    6301                 :            :         }
    6302                 :          1 :       shift3_mask = vect_gen_perm_mask_checked (vectype, indices);
    6303                 :            : 
    6304                 :            :       /* Generating permutation constant to shift all elements.
    6305                 :            :          For vector length 8 it is {5 6 7 8 9 10 11 12}.  */
    6306                 :         17 :       for (i = 0; i < nelt; i++)
    6307                 :         16 :         sel[i] = 2 * (nelt / 3) + (nelt % 3) / 2 + i;
    6308                 :          1 :       indices.new_vector (sel, 2, nelt);
    6309                 :          1 :       if (!can_vec_perm_const_p (TYPE_MODE (vectype), indices))
    6310                 :            :         {
    6311                 :          0 :           if (dump_enabled_p ())
    6312                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6313                 :            :                              "shift permutation is not supported by target\n");
    6314                 :          0 :           return false;
    6315                 :            :         }
    6316                 :          1 :       shift4_mask = vect_gen_perm_mask_checked (vectype, indices);
    6317                 :            : 
    6318                 :          4 :       for (k = 0; k < 3; k++)
    6319                 :            :         {
    6320                 :          3 :           data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3");
    6321                 :          9 :           perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
    6322                 :          3 :                                            dr_chain[k], dr_chain[k],
    6323                 :            :                                            perm3_mask);
    6324                 :          3 :           vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    6325                 :          3 :           vect[k] = data_ref;
    6326                 :            :         }
    6327                 :            : 
    6328                 :          4 :       for (k = 0; k < 3; k++)
    6329                 :            :         {
    6330                 :          3 :           data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift1");
    6331                 :          6 :           perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
    6332                 :          3 :                                            vect[k % 3], vect[(k + 1) % 3],
    6333                 :            :                                            shift1_mask);
    6334                 :          3 :           vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    6335                 :          3 :           vect_shift[k] = data_ref;
    6336                 :            :         }
    6337                 :            : 
    6338                 :          4 :       for (k = 0; k < 3; k++)
    6339                 :            :         {
    6340                 :          3 :           data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift2");
    6341                 :          6 :           perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
    6342                 :          3 :                                            vect_shift[(4 - k) % 3],
    6343                 :          3 :                                            vect_shift[(3 - k) % 3],
    6344                 :            :                                            shift2_mask);
    6345                 :          3 :           vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    6346                 :          3 :           vect[k] = data_ref;
    6347                 :            :         }
    6348                 :            : 
    6349                 :          1 :       (*result_chain)[3 - (nelt % 3)] = vect[2];
    6350                 :            : 
    6351                 :          1 :       data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
    6352                 :          1 :       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
    6353                 :            :                                        vect[0], shift3_mask);
    6354                 :          1 :       vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    6355                 :          1 :       (*result_chain)[nelt % 3] = data_ref;
    6356                 :            : 
    6357                 :          1 :       data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
    6358                 :          1 :       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
    6359                 :            :                                        vect[1], shift4_mask);
    6360                 :          1 :       vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
    6361                 :          1 :       (*result_chain)[0] = data_ref;
    6362                 :          1 :       return true;
    6363                 :            :     }
    6364                 :            :   return false;
    6365                 :            : }
    6366                 :            : 
    6367                 :            : /* Function vect_transform_grouped_load.
    6368                 :            : 
    6369                 :            :    Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
    6370                 :            :    to perform their permutation and assign the resulting vectorized statements
    6371                 :            :    to the scalar statements.
    6372                 :            : */
    6373                 :            : 
    6374                 :            : void
    6375                 :       2661 : vect_transform_grouped_load (stmt_vec_info stmt_info, vec<tree> dr_chain,
    6376                 :            :                              int size, gimple_stmt_iterator *gsi)
    6377                 :            : {
    6378                 :       2661 :   machine_mode mode;
    6379                 :       2661 :   vec<tree> result_chain = vNULL;
    6380                 :            : 
    6381                 :            :   /* DR_CHAIN contains input data-refs that are a part of the interleaving.
    6382                 :            :      RESULT_CHAIN is the output of vect_permute_load_chain; it contains the
    6383                 :            :      permuted vectors that are ready for vector computation.  */
    6384                 :       2661 :   result_chain.create (size);
    6385                 :            : 
    6386                 :            :   /* If the reassociation width for the vector type is 2 or greater, the target
    6387                 :            :      machine can execute 2 or more vector instructions in parallel.  Otherwise
    6388                 :            :      try to get the load-group chain using vect_shift_permute_load_chain.  */
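  /* Concretely (assuming the default reassociation_width hook, which
     returns 1): on such a target a group of size 3 tries
     vect_shift_permute_load_chain first and falls back to
     vect_permute_load_chain only if the required permute masks are not
     supported, while power-of-two group sizes always use the more parallel
     extract_even/odd scheme of vect_permute_load_chain.  */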
    6389                 :       2661 :   mode = TYPE_MODE (STMT_VINFO_VECTYPE (stmt_info));
    6390                 :       2661 :   if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
    6391                 :          2 :       || pow2p_hwi (size)
    6392                 :       2662 :       || !vect_shift_permute_load_chain (dr_chain, size, stmt_info,
    6393                 :            :                                          gsi, &result_chain))
    6394                 :       2660 :     vect_permute_load_chain (dr_chain, size, stmt_info, gsi, &result_chain);
    6395                 :       2661 :   vect_record_grouped_load_vectors (stmt_info, result_chain);
    6396                 :       2661 :   result_chain.release ();
    6397                 :       2661 : }
    6398                 :            : 
    6399                 :            : /* RESULT_CHAIN contains the output of a group of grouped loads that were
    6400                 :            :    generated as part of the vectorization of STMT_INFO.  Assign the statement
    6401                 :            :    for each vector to the associated scalar statement.  */
    6402                 :            : 
    6403                 :            : void
    6404                 :       2661 : vect_record_grouped_load_vectors (stmt_vec_info stmt_info,
    6405                 :            :                                   vec<tree> result_chain)
    6406                 :            : {
    6407                 :       2661 :   vec_info *vinfo = stmt_info->vinfo;
    6408                 :       2661 :   stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
    6409                 :       2661 :   unsigned int i, gap_count;
    6410                 :       2661 :   tree tmp_data_ref;
    6411                 :            : 
    6412                 :            :   /* Put a permuted data-ref in the VECTORIZED_STMT field.
    6413                 :            :      Since we scan the chain starting from its first node, their order
    6414                 :            :      corresponds to the order of data-refs in RESULT_CHAIN.  */
    6415                 :       2661 :   stmt_vec_info next_stmt_info = first_stmt_info;
    6416                 :       2661 :   gap_count = 1;
    6417                 :       7634 :   FOR_EACH_VEC_ELT (result_chain, i, tmp_data_ref)
    6418                 :            :     {
    6419                 :       5900 :       if (!next_stmt_info)
    6420                 :            :         break;
    6421                 :            : 
    6422                 :            :       /* Skip the gaps.  Loads created for the gaps will be removed by dead
    6423                 :            :        code elimination pass later.  No need to check for the first stmt in
    6424                 :            :        the group, since it always exists.
    6425                 :            :        DR_GROUP_GAP is the number of steps in elements from the previous
    6426                 :            :        access (if there is no gap DR_GROUP_GAP is 1).  We skip loads that
    6427                 :            :        correspond to the gaps.  */
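      /* As a concrete illustration (an assumed example, not drawn from a
         particular test case): if a group loads fields 0 and 2 of a
         three-field structure, the stmt for field 2 has DR_GROUP_GAP == 2,
         so the RESULT_CHAIN entry that was permuted for the unused field 1
         is skipped here and later removed as dead code.  */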
    6428                 :       5048 :       if (next_stmt_info != first_stmt_info
    6429                 :       4973 :           && gap_count < DR_GROUP_GAP (next_stmt_info))
    6430                 :            :         {
    6431                 :         75 :           gap_count++;
    6432                 :         75 :           continue;
    6433                 :            :         }
    6434                 :            : 
    6435                 :            :       /* ???  The following needs cleanup after the removal of
    6436                 :            :          DR_GROUP_SAME_DR_STMT.  */
    6437                 :       4898 :       if (next_stmt_info)
    6438                 :            :         {
    6439                 :       4898 :           stmt_vec_info new_stmt_info = vinfo->lookup_def (tmp_data_ref);
    6440                 :            :           /* We assume that if VEC_STMT is not NULL, this is a case of multiple
    6441                 :            :              copies, and we put the new vector statement in the first available
    6442                 :            :              RELATED_STMT.  */
    6443                 :       4898 :           if (!STMT_VINFO_VEC_STMT (next_stmt_info))
    6444                 :       4761 :             STMT_VINFO_VEC_STMT (next_stmt_info) = new_stmt_info;
    6445                 :            :           else
    6446                 :            :             {
    6447                 :        137 :               stmt_vec_info prev_stmt_info
    6448                 :            :                 = STMT_VINFO_VEC_STMT (next_stmt_info);
    6449                 :        137 :               stmt_vec_info rel_stmt_info
    6450                 :            :                 = STMT_VINFO_RELATED_STMT (prev_stmt_info);
    6451                 :        245 :               while (rel_stmt_info)
    6452                 :            :                 {
    6453                 :        108 :                   prev_stmt_info = rel_stmt_info;
    6454                 :        108 :                   rel_stmt_info = STMT_VINFO_RELATED_STMT (rel_stmt_info);
    6455                 :            :                 }
    6456                 :            : 
    6457                 :        137 :               STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
    6458                 :            :             }
    6459                 :            : 
    6460                 :       4898 :           next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
    6461                 :       4898 :           gap_count = 1;
    6462                 :            :         }
    6463                 :            :     }
    6464                 :       2661 : }
    6465                 :            : 
    6466                 :            : /* Function vect_can_force_dr_alignment_p.
    6467                 :            : 
    6468                 :            :    Returns whether the alignment of a DECL can be forced to be aligned
    6469                 :            :    on an ALIGNMENT-bit boundary.  */
    6470                 :            : 
    6471                 :            : bool
    6472                 :     102633 : vect_can_force_dr_alignment_p (const_tree decl, poly_uint64 alignment)
    6473                 :            : {
    6474                 :     102633 :   if (!VAR_P (decl))
    6475                 :            :     return false;
    6476                 :            : 
    6477                 :      43018 :   if (decl_in_symtab_p (decl)
    6478                 :      43018 :       && !symtab_node::get (decl)->can_increase_alignment_p ())
    6479                 :            :     return false;
    6480                 :            : 
    6481                 :      41707 :   if (TREE_STATIC (decl))
    6482                 :        475 :     return (known_le (alignment,
    6483                 :        475 :                       (unsigned HOST_WIDE_INT) MAX_OFILE_ALIGNMENT));
    6484                 :            :   else
    6485                 :      41232 :     return (known_le (alignment, (unsigned HOST_WIDE_INT) MAX_STACK_ALIGNMENT));
    6486                 :            : }
    6487                 :            : 
    6488                 :            : 
    6489                 :            : /* Return whether the data reference DR_INFO is supported with respect to its
    6490                 :            :    alignment.
    6491                 :            :    If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
    6492                 :            :    if it is aligned, i.e., check if it is possible to vectorize it with different
    6493                 :            :    alignment.  */
    6494                 :            : 
    6495                 :            : enum dr_alignment_support
    6496                 :     604591 : vect_supportable_dr_alignment (dr_vec_info *dr_info,
    6497                 :            :                                bool check_aligned_accesses)
    6498                 :            : {
    6499                 :     604591 :   data_reference *dr = dr_info->dr;
    6500                 :     604591 :   stmt_vec_info stmt_info = dr_info->stmt;
    6501                 :     604591 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    6502                 :     604591 :   machine_mode mode = TYPE_MODE (vectype);
    6503                 :     604591 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    6504                 :     604591 :   class loop *vect_loop = NULL;
    6505                 :     604591 :   bool nested_in_vect_loop = false;
    6506                 :            : 
    6507                 :     604591 :   if (aligned_access_p (dr_info) && !check_aligned_accesses)
    6508                 :            :     return dr_aligned;
    6509                 :            : 
    6510                 :            :   /* For now assume all conditional loads/stores support unaligned
    6511                 :            :      access without any special code.  */
    6512                 :     337160 :   if (gcall *stmt = dyn_cast <gcall *> (stmt_info->stmt))
    6513                 :       2957 :     if (gimple_call_internal_p (stmt)
    6514                 :       2957 :         && (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD
    6515                 :       1275 :             || gimple_call_internal_fn (stmt) == IFN_MASK_STORE))
    6516                 :            :       return dr_unaligned_supported;
    6517                 :            : 
    6518                 :     334203 :   if (loop_vinfo)
    6519                 :            :     {
    6520                 :     299930 :       vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
    6521                 :     299930 :       nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt_info);
    6522                 :            :     }
    6523                 :            : 
    6524                 :            :   /* Possibly unaligned access.  */
    6525                 :            : 
    6526                 :            :   /* We can choose between using the implicit realignment scheme (generating
    6527                 :            :      a misaligned_move stmt) and the explicit realignment scheme (generating
    6528                 :            :      aligned loads with a REALIGN_LOAD).  There are two variants of the
    6529                 :            :      explicit realignment scheme: optimized and unoptimized.
    6530                 :            :      We can optimize the realignment only if the step between consecutive
    6531                 :            :      vector loads is equal to the vector size.  Since the vector memory
    6532                 :            :      accesses advance in steps of VS (Vector Size) in the vectorized loop, it
    6533                 :            :      is guaranteed that the misalignment amount remains the same throughout the
    6534                 :            :      execution of the vectorized loop.  Therefore, we can create the
    6535                 :            :      "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
    6536                 :            :      at the loop preheader.
    6537                 :            : 
    6538                 :            :      However, in the case of outer-loop vectorization, when vectorizing a
    6539                 :            :      memory access in the inner-loop nested within the LOOP that is now being
    6540                 :            :      vectorized, while it is guaranteed that the misalignment of the
    6541                 :            :      vectorized memory access will remain the same in different outer-loop
    6542                 :            :      iterations, it is *not* guaranteed that is will remain the same throughout
    6543                 :            :      iterations, it is *not* guaranteed that it will remain the same throughout
    6544                 :            :      with the original scalar step (and not in steps of VS).  If the inner-loop
    6545                 :            :      step happens to be a multiple of VS, then the misalignment remains fixed
    6546                 :            :      and we can use the optimized realignment scheme.  For example:
    6547                 :            : 
    6548                 :            :       for (i=0; i<N; i++)
    6549                 :            :         for (j=0; j<M; j++)
    6550                 :            :           s += a[i+j];
    6551                 :            : 
    6552                 :            :      When vectorizing the i-loop in the above example, the step between
    6553                 :            :      consecutive vector loads is 1, and so the misalignment does not remain
    6554                 :            :      fixed across the execution of the inner-loop, and the realignment cannot
    6555                 :            :      be optimized (as illustrated in the following pseudo vectorized loop):
    6556                 :            : 
    6557                 :            :       for (i=0; i<N; i+=4)
    6558                 :            :         for (j=0; j<M; j++){
    6559                 :            :           vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
    6560                 :            :                          // when j is {0,1,2,3,4,5,6,7,...} respectively.
    6561                 :            :                          // (assuming that we start from an aligned address).
    6562                 :            :           }
    6563                 :            : 
    6564                 :            :      We therefore have to use the unoptimized realignment scheme:
    6565                 :            : 
    6566                 :            :       for (i=0; i<N; i+=4)
    6567                 :            :           for (j=k; j<M; j+=4)
    6568                 :            :           vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
    6569                 :            :                            // that the misalignment of the initial address is
    6570                 :            :                            // 0).
    6571                 :            : 
    6572                 :            :      The loop can then be vectorized as follows:
    6573                 :            : 
    6574                 :            :       for (k=0; k<4; k++){
    6575                 :            :         rt = get_realignment_token (&vp[k]);
    6576                 :            :         for (i=0; i<N; i+=4){
    6577                 :            :           v1 = vp[i+k];
    6578                 :            :           for (j=k; j<M; j+=4){
    6579                 :            :             v2 = vp[i+j+VS-1];
    6580                 :            :             va = REALIGN_LOAD <v1,v2,rt>;
    6581                 :            :             vs += va;
    6582                 :            :             v1 = v2;
    6583                 :            :           }
    6584                 :            :         }
    6585                 :            :     } */
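  /* To make the explicit scheme concrete (an illustrative sketch, assuming
     VS == 4 and a misalignment of one element): the two aligned loads
     bracketing the access give v1 = {a0, a1, a2, a3} and
     v2 = {a4, a5, a6, a7}, and REALIGN_LOAD <v1, v2, rt> conceptually
     selects the window starting at the misalignment, yielding
     {a1, a2, a3, a4}.  The realignment token rt encodes that starting
     position, which is why it can be computed once in the preheader when
     the misalignment is invariant across the vectorized loop.  */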
    6586                 :            : 
    6587                 :     334203 :   if (DR_IS_READ (dr))
    6588                 :            :     {
    6589                 :     190877 :       bool is_packed = false;
    6590                 :     190877 :       tree type = (TREE_TYPE (DR_REF (dr)));
    6591                 :            : 
    6592                 :     190877 :       if (optab_handler (vec_realign_load_optab, mode) != CODE_FOR_nothing
    6593                 :     190877 :           && (!targetm.vectorize.builtin_mask_for_load
    6594                 :          0 :               || targetm.vectorize.builtin_mask_for_load ()))
    6595                 :            :         {
    6596                 :          0 :           tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    6597                 :            : 
    6598                 :            :           /* If we are doing SLP then the accesses need not have the
    6599                 :            :              same alignment, instead it depends on the SLP group size.  */
    6600                 :          0 :           if (loop_vinfo
    6601                 :          0 :               && STMT_SLP_TYPE (stmt_info)
    6602                 :          0 :               && !multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
    6603                 :          0 :                               * (DR_GROUP_SIZE
    6604                 :          0 :                                  (DR_GROUP_FIRST_ELEMENT (stmt_info))),
    6605                 :          0 :                               TYPE_VECTOR_SUBPARTS (vectype)))
    6606                 :            :             ;
    6607                 :          0 :           else if (!loop_vinfo
    6608                 :          0 :                    || (nested_in_vect_loop
    6609                 :          0 :                        && maybe_ne (TREE_INT_CST_LOW (DR_STEP (dr)),
    6610                 :          0 :                                     GET_MODE_SIZE (TYPE_MODE (vectype)))))
    6611                 :          0 :             return dr_explicit_realign;
    6612                 :            :           else
    6613                 :          0 :             return dr_explicit_realign_optimized;
    6614                 :            :         }
    6615                 :     190877 :       if (!known_alignment_for_access_p (dr_info))
    6616                 :     138222 :         is_packed = not_size_aligned (DR_REF (dr));
    6617                 :            : 
    6618                 :     190877 :       if (targetm.vectorize.support_vector_misalignment
    6619                 :     190877 :             (mode, type, DR_MISALIGNMENT (dr_info), is_packed))
    6620                 :            :         /* Can't software pipeline the loads, but can at least do them.  */
    6621                 :     190612 :         return dr_unaligned_supported;
    6622                 :            :     }
    6623                 :            :   else
    6624                 :            :     {
    6625                 :     143326 :       bool is_packed = false;
    6626                 :     143326 :       tree type = (TREE_TYPE (DR_REF (dr)));
    6627                 :            : 
    6628                 :     143326 :       if (!known_alignment_for_access_p (dr_info))
    6629                 :      94942 :         is_packed = not_size_aligned (DR_REF (dr));
    6630                 :            : 
    6631                 :     143326 :      if (targetm.vectorize.support_vector_misalignment
    6632                 :     143326 :            (mode, type, DR_MISALIGNMENT (dr_info), is_packed))
    6633                 :     138601 :        return dr_unaligned_supported;
    6634                 :            :     }
    6635                 :            : 
    6636                 :            :   /* Unsupported.  */
    6637                 :            :   return dr_unaligned_unsupported;
    6638                 :            : }

Generated by: LCOV version 1.0

The LCOV profile is generated on an x86_64 machine using the following configure options: configure --disable-bootstrap --enable-coverage=opt --enable-languages=c,c++,fortran,go,jit,lto --enable-host-shared. The GCC test suite is run with the built compiler.