LCOV - code coverage report
Current view: top level - gcc - tree-vect-loop.c (source / functions)
Test:     gcc.info
Date:     2020-04-04 11:58:09
Coverage: Lines:     3622 / 4011   90.3 %
          Functions:   67 /   78   85.9 %
          Branches:     0 /    0      -
Legend:   Lines: hit | not hit    Branches: + taken  - not taken  # not executed

           Branch data     Line data    Source code
       1                 :            : /* Loop Vectorization
       2                 :            :    Copyright (C) 2003-2020 Free Software Foundation, Inc.
       3                 :            :    Contributed by Dorit Naishlos <dorit@il.ibm.com> and
       4                 :            :    Ira Rosen <irar@il.ibm.com>
       5                 :            : 
       6                 :            : This file is part of GCC.
       7                 :            : 
       8                 :            : GCC is free software; you can redistribute it and/or modify it under
       9                 :            : the terms of the GNU General Public License as published by the Free
      10                 :            : Software Foundation; either version 3, or (at your option) any later
      11                 :            : version.
      12                 :            : 
      13                 :            : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
      14                 :            : WARRANTY; without even the implied warranty of MERCHANTABILITY or
      15                 :            : FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      16                 :            : for more details.
      17                 :            : 
      18                 :            : You should have received a copy of the GNU General Public License
      19                 :            : along with GCC; see the file COPYING3.  If not see
      20                 :            : <http://www.gnu.org/licenses/>.  */
      21                 :            : 
      22                 :            : #include "config.h"
      23                 :            : #include "system.h"
      24                 :            : #include "coretypes.h"
      25                 :            : #include "backend.h"
      26                 :            : #include "target.h"
      27                 :            : #include "rtl.h"
      28                 :            : #include "tree.h"
      29                 :            : #include "gimple.h"
      30                 :            : #include "cfghooks.h"
      31                 :            : #include "tree-pass.h"
      32                 :            : #include "ssa.h"
      33                 :            : #include "optabs-tree.h"
      34                 :            : #include "diagnostic-core.h"
      35                 :            : #include "fold-const.h"
      36                 :            : #include "stor-layout.h"
      37                 :            : #include "cfganal.h"
      38                 :            : #include "gimplify.h"
      39                 :            : #include "gimple-iterator.h"
      40                 :            : #include "gimplify-me.h"
      41                 :            : #include "tree-ssa-loop-ivopts.h"
      42                 :            : #include "tree-ssa-loop-manip.h"
      43                 :            : #include "tree-ssa-loop-niter.h"
      44                 :            : #include "tree-ssa-loop.h"
      45                 :            : #include "cfgloop.h"
      46                 :            : #include "tree-scalar-evolution.h"
      47                 :            : #include "tree-vectorizer.h"
      48                 :            : #include "gimple-fold.h"
      49                 :            : #include "cgraph.h"
      50                 :            : #include "tree-cfg.h"
      51                 :            : #include "tree-if-conv.h"
      52                 :            : #include "internal-fn.h"
      53                 :            : #include "tree-vector-builder.h"
      54                 :            : #include "vec-perm-indices.h"
      55                 :            : #include "tree-eh.h"
      56                 :            : 
      57                 :            : /* Loop Vectorization Pass.
      58                 :            : 
      59                 :            :    This pass tries to vectorize loops.
      60                 :            : 
      61                 :            :    For example, the vectorizer transforms the following simple loop:
      62                 :            : 
      63                 :            :         short a[N]; short b[N]; short c[N]; int i;
      64                 :            : 
      65                 :            :         for (i=0; i<N; i++){
      66                 :            :           a[i] = b[i] + c[i];
      67                 :            :         }
      68                 :            : 
       69                 :            :    as if it were manually vectorized by rewriting the source code into:
      70                 :            : 
      71                 :            :         typedef int __attribute__((mode(V8HI))) v8hi;
      72                 :            :         short a[N];  short b[N]; short c[N];   int i;
      73                 :            :         v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
      74                 :            :         v8hi va, vb, vc;
      75                 :            : 
      76                 :            :         for (i=0; i<N/8; i++){
      77                 :            :           vb = pb[i];
      78                 :            :           vc = pc[i];
      79                 :            :           va = vb + vc;
      80                 :            :           pa[i] = va;
      81                 :            :         }
      82                 :            : 
      83                 :            :         The main entry to this pass is vectorize_loops(), in which
      84                 :            :    the vectorizer applies a set of analyses on a given set of loops,
      85                 :            :    followed by the actual vectorization transformation for the loops that
      86                 :            :    had successfully passed the analysis phase.
      87                 :            :         Throughout this pass we make a distinction between two types of
      88                 :            :    data: scalars (which are represented by SSA_NAMES), and memory references
      89                 :            :    ("data-refs").  These two types of data require different handling both
      90                 :            :    during analysis and transformation. The types of data-refs that the
      91                 :            :    vectorizer currently supports are ARRAY_REFS which base is an array DECL
      92                 :            :    (not a pointer), and INDIRECT_REFS through pointers; both array and pointer
      93                 :            :    accesses are required to have a simple (consecutive) access pattern.
      94                 :            : 
      95                 :            :    Analysis phase:
      96                 :            :    ===============
      97                 :            :         The driver for the analysis phase is vect_analyze_loop().
      98                 :            :    It applies a set of analyses, some of which rely on the scalar evolution
      99                 :            :    analyzer (scev) developed by Sebastian Pop.
     100                 :            : 
     101                 :            :         During the analysis phase the vectorizer records some information
     102                 :            :    per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
     103                 :            :    loop, as well as general information about the loop as a whole, which is
     104                 :            :    recorded in a "loop_vec_info" struct attached to each loop.
     105                 :            : 
     106                 :            :    Transformation phase:
     107                 :            :    =====================
     108                 :            :         The loop transformation phase scans all the stmts in the loop, and
     109                 :            :    creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
     110                 :            :    the loop that needs to be vectorized.  It inserts the vector code sequence
     111                 :            :    just before the scalar stmt S, and records a pointer to the vector code
     112                 :            :    in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
     113                 :            :    attached to S).  This pointer will be used for the vectorization of following
     114                 :            :    stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
     115                 :            :    otherwise, we rely on dead code elimination for removing it.
     116                 :            : 
     117                 :            :         For example, say stmt S1 was vectorized into stmt VS1:
     118                 :            : 
     119                 :            :    VS1: vb = px[i];
     120                 :            :    S1:  b = x[i];    STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
     121                 :            :    S2:  a = b;
     122                 :            : 
     123                 :            :    To vectorize stmt S2, the vectorizer first finds the stmt that defines
     124                 :            :    the operand 'b' (S1), and gets the relevant vector def 'vb' from the
     125                 :            :    vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)).  The
     126                 :            :    resulting sequence would be:
     127                 :            : 
     128                 :            :    VS1: vb = px[i];
     129                 :            :    S1:  b = x[i];       STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
     130                 :            :    VS2: va = vb;
     131                 :            :    S2:  a = b;          STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
     132                 :            : 
      133                 :            :         Operands that are not SSA_NAMEs are data-refs that appear in
     134                 :            :    load/store operations (like 'x[i]' in S1), and are handled differently.
     135                 :            : 
     136                 :            :    Target modeling:
     137                 :            :    =================
     138                 :            :         Currently the only target specific information that is used is the
     139                 :            :    size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
     140                 :            :    Targets that can support different sizes of vectors, for now will need
      141                 :            :    Targets that can support different sizes of vectors will, for now, need
     142                 :            :    flexibility will be added in the future.
     143                 :            : 
      144                 :            :         Since we only vectorize operations whose vector form can be
     145                 :            :    expressed using existing tree codes, to verify that an operation is
     146                 :            :    supported, the vectorizer checks the relevant optab at the relevant
      147                 :            :    machine_mode (e.g., optab_handler (add_optab, V8HImode)).  If
     148                 :            :    the value found is CODE_FOR_nothing, then there's no target support, and
     149                 :            :    we can't vectorize the stmt.
     150                 :            : 
     151                 :            :    For additional information on this project see:
     152                 :            :    http://gcc.gnu.org/projects/tree-ssa/vectorization.html
     153                 :            : */
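
The "Target modeling" notes above describe the optab check only in prose.  The
following is a minimal editor's sketch of that check, not code from this pass;
it assumes the GCC-internal optab API (optab_handler, add_optab,
CODE_FOR_nothing) made available by the includes at the top of this file.

   static bool
   example_target_supports_vector_add_p (machine_mode vector_mode)
   {
     /* optab_handler returns CODE_FOR_nothing when the target has no
        instruction pattern for the operation in this mode, in which case
        the stmt cannot be vectorized.  */
     return optab_handler (add_optab, vector_mode) != CODE_FOR_nothing;
   }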
     154                 :            : 
     155                 :            : static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
     156                 :            : static stmt_vec_info vect_is_simple_reduction (loop_vec_info, stmt_vec_info,
     157                 :            :                                                bool *, bool *);
     158                 :            : 
     159                 :            : /* Subroutine of vect_determine_vf_for_stmt that handles only one
     160                 :            :    statement.  VECTYPE_MAYBE_SET_P is true if STMT_VINFO_VECTYPE
     161                 :            :    may already be set for general statements (not just data refs).  */
     162                 :            : 
     163                 :            : static opt_result
     164                 :     600361 : vect_determine_vf_for_stmt_1 (stmt_vec_info stmt_info,
     165                 :            :                               bool vectype_maybe_set_p,
     166                 :            :                               poly_uint64 *vf)
     167                 :            : {
     168                 :     600361 :   gimple *stmt = stmt_info->stmt;
     169                 :            : 
     170                 :     600361 :   if ((!STMT_VINFO_RELEVANT_P (stmt_info)
     171                 :     357350 :        && !STMT_VINFO_LIVE_P (stmt_info))
     172                 :     600478 :       || gimple_clobber_p (stmt))
     173                 :            :     {
     174                 :     357233 :       if (dump_enabled_p ())
     175                 :      65280 :         dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
     176                 :     357233 :       return opt_result::success ();
     177                 :            :     }
     178                 :            : 
     179                 :     243128 :   tree stmt_vectype, nunits_vectype;
     180                 :     243128 :   opt_result res = vect_get_vector_types_for_stmt (stmt_info, &stmt_vectype,
     181                 :     243128 :                                                    &nunits_vectype);
     182                 :     243128 :   if (!res)
     183                 :         21 :     return res;
     184                 :            : 
     185                 :     243107 :   if (stmt_vectype)
     186                 :            :     {
     187                 :     243094 :       if (STMT_VINFO_VECTYPE (stmt_info))
     188                 :            :         /* The only case when a vectype had been already set is for stmts
     189                 :            :            that contain a data ref, or for "pattern-stmts" (stmts generated
     190                 :            :            by the vectorizer to represent/replace a certain idiom).  */
     191                 :     134568 :         gcc_assert ((STMT_VINFO_DATA_REF (stmt_info)
     192                 :            :                      || vectype_maybe_set_p)
     193                 :            :                     && STMT_VINFO_VECTYPE (stmt_info) == stmt_vectype);
     194                 :            :       else
     195                 :     108526 :         STMT_VINFO_VECTYPE (stmt_info) = stmt_vectype;
     196                 :            :     }
     197                 :            : 
     198                 :     243107 :   if (nunits_vectype)
     199                 :     243094 :     vect_update_max_nunits (vf, nunits_vectype);
     200                 :            : 
     201                 :     243107 :   return opt_result::success ();
     202                 :            : }
     203                 :            : 
     204                 :            : /* Subroutine of vect_determine_vectorization_factor.  Set the vector
     205                 :            :    types of STMT_INFO and all attached pattern statements and update
     206                 :            :    the vectorization factor VF accordingly.  Return true on success
     207                 :            :    or false if something prevented vectorization.  */
     208                 :            : 
     209                 :            : static opt_result
     210                 :     560253 : vect_determine_vf_for_stmt (stmt_vec_info stmt_info, poly_uint64 *vf)
     211                 :            : {
     212                 :     560253 :   vec_info *vinfo = stmt_info->vinfo;
     213                 :     560253 :   if (dump_enabled_p ())
     214                 :     115227 :     dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: %G",
     215                 :            :                      stmt_info->stmt);
     216                 :     560253 :   opt_result res = vect_determine_vf_for_stmt_1 (stmt_info, false, vf);
     217                 :     560253 :   if (!res)
     218                 :         21 :     return res;
     219                 :            : 
     220                 :     560232 :   if (STMT_VINFO_IN_PATTERN_P (stmt_info)
     221                 :      18715 :       && STMT_VINFO_RELATED_STMT (stmt_info))
     222                 :            :     {
     223                 :      18715 :       gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
     224                 :      18715 :       stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
     225                 :            : 
     226                 :            :       /* If a pattern statement has def stmts, analyze them too.  */
     227                 :      18715 :       for (gimple_stmt_iterator si = gsi_start (pattern_def_seq);
     228                 :      40108 :            !gsi_end_p (si); gsi_next (&si))
     229                 :            :         {
     230                 :      21393 :           stmt_vec_info def_stmt_info = vinfo->lookup_stmt (gsi_stmt (si));
     231                 :      21393 :           if (dump_enabled_p ())
     232                 :      10358 :             dump_printf_loc (MSG_NOTE, vect_location,
     233                 :            :                              "==> examining pattern def stmt: %G",
     234                 :            :                              def_stmt_info->stmt);
     235                 :      21393 :           res = vect_determine_vf_for_stmt_1 (def_stmt_info, true, vf);
     236                 :      21393 :           if (!res)
     237                 :          0 :             return res;
     238                 :            :         }
     239                 :            : 
     240                 :      18715 :       if (dump_enabled_p ())
     241                 :       8338 :         dump_printf_loc (MSG_NOTE, vect_location,
     242                 :            :                          "==> examining pattern statement: %G",
     243                 :            :                          stmt_info->stmt);
     244                 :      18715 :       res = vect_determine_vf_for_stmt_1 (stmt_info, true, vf);
     245                 :      18715 :       if (!res)
     246                 :          0 :         return res;
     247                 :            :     }
     248                 :            : 
     249                 :     560232 :   return opt_result::success ();
     250                 :            : }
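
A short hedged illustration of why vect_determine_vf_for_stmt above also walks
pattern statements: the loop below is a typical case (an assumption about the
usual pattern recognition, not taken from this file) in which the multiply is
replaced by a widening-multiply pattern statement whose vector type must be
examined as well.

   void
   example_widening_mult (int *restrict out, const short *restrict a,
                          const short *restrict b, int n)
   {
     for (int i = 0; i < n; i++)
       /* The shorts are promoted to int before the multiply; the pattern
          recognizer can represent this as a single widening multiply.  */
       out[i] = a[i] * b[i];
   }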
     251                 :            : 
     252                 :            : /* Function vect_determine_vectorization_factor
     253                 :            : 
     254                 :            :    Determine the vectorization factor (VF).  VF is the number of data elements
     255                 :            :    that are operated upon in parallel in a single iteration of the vectorized
     256                 :            :    loop.  For example, when vectorizing a loop that operates on 4byte elements,
      257                 :            :    loop.  For example, when vectorizing a loop that operates on 4-byte elements,
      258                 :            :    on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
     259                 :            : 
     260                 :            :    We currently support vectorization of loops in which all types operated upon
     261                 :            :    are of the same size.  Therefore this function currently sets VF according to
     262                 :            :    the size of the types operated upon, and fails if there are multiple sizes
     263                 :            :    in the loop.
     264                 :            : 
     265                 :            :    VF is also the factor by which the loop iterations are strip-mined, e.g.:
     266                 :            :    original loop:
     267                 :            :         for (i=0; i<N; i++){
     268                 :            :           a[i] = b[i] + c[i];
     269                 :            :         }
     270                 :            : 
     271                 :            :    vectorized loop:
     272                 :            :         for (i=0; i<N; i+=VF){
     273                 :            :           a[i:VF] = b[i:VF] + c[i:VF];
     274                 :            :         }
     275                 :            : */
     276                 :            : 
     277                 :            : static opt_result
     278                 :      35142 : vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
     279                 :            : {
     280                 :      35142 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
     281                 :      35142 :   basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
     282                 :      35142 :   unsigned nbbs = loop->num_nodes;
     283                 :      35142 :   poly_uint64 vectorization_factor = 1;
     284                 :      35142 :   tree scalar_type = NULL_TREE;
     285                 :      35142 :   gphi *phi;
     286                 :      35142 :   tree vectype;
     287                 :      35142 :   stmt_vec_info stmt_info;
     288                 :      35142 :   unsigned i;
     289                 :            : 
     290                 :      35142 :   DUMP_VECT_SCOPE ("vect_determine_vectorization_factor");
     291                 :            : 
     292                 :     106407 :   for (i = 0; i < nbbs; i++)
     293                 :            :     {
     294                 :      71415 :       basic_block bb = bbs[i];
     295                 :            : 
     296                 :     179161 :       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
     297                 :     107746 :            gsi_next (&si))
     298                 :            :         {
     299                 :     107875 :           phi = si.phi ();
     300                 :     107875 :           stmt_info = loop_vinfo->lookup_stmt (phi);
     301                 :     107875 :           if (dump_enabled_p ())
     302                 :      30164 :             dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: %G",
     303                 :            :                              phi);
     304                 :            : 
     305                 :     107875 :           gcc_assert (stmt_info);
     306                 :            : 
     307                 :     107875 :           if (STMT_VINFO_RELEVANT_P (stmt_info)
     308                 :      81418 :               || STMT_VINFO_LIVE_P (stmt_info))
     309                 :            :             {
     310                 :      26457 :               gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
     311                 :      26457 :               scalar_type = TREE_TYPE (PHI_RESULT (phi));
     312                 :            : 
     313                 :      26457 :               if (dump_enabled_p ())
     314                 :       4706 :                 dump_printf_loc (MSG_NOTE, vect_location,
     315                 :            :                                  "get vectype for scalar type:  %T\n",
     316                 :            :                                  scalar_type);
     317                 :            : 
     318                 :      26457 :               vectype = get_vectype_for_scalar_type (loop_vinfo, scalar_type);
     319                 :      26457 :               if (!vectype)
     320                 :        129 :                 return opt_result::failure_at (phi,
     321                 :            :                                                "not vectorized: unsupported "
     322                 :            :                                                "data-type %T\n",
     323                 :        129 :                                                scalar_type);
     324                 :      26328 :               STMT_VINFO_VECTYPE (stmt_info) = vectype;
     325                 :            : 
     326                 :      26328 :               if (dump_enabled_p ())
     327                 :       4700 :                 dump_printf_loc (MSG_NOTE, vect_location, "vectype: %T\n",
     328                 :            :                                  vectype);
     329                 :            : 
     330                 :      26328 :               if (dump_enabled_p ())
     331                 :            :                 {
     332                 :       4700 :                   dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
     333                 :       4700 :                   dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
     334                 :       4700 :                   dump_printf (MSG_NOTE, "\n");
     335                 :            :                 }
     336                 :            : 
     337                 :      26328 :               vect_update_max_nunits (&vectorization_factor, vectype);
     338                 :            :             }
     339                 :            :         }
     340                 :            : 
     341                 :     702804 :       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
     342                 :     560232 :            gsi_next (&si))
     343                 :            :         {
     344                 :     560253 :           stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
     345                 :     560253 :           opt_result res
     346                 :     560253 :             = vect_determine_vf_for_stmt (stmt_info, &vectorization_factor);
     347                 :     560253 :           if (!res)
     348                 :         21 :             return res;
     349                 :            :         }
     350                 :            :     }
     351                 :            : 
     352                 :            :   /* TODO: Analyze cost. Decide if worth while to vectorize.  */
     353                 :      34992 :   if (dump_enabled_p ())
     354                 :            :     {
     355                 :       9520 :       dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
     356                 :       9520 :       dump_dec (MSG_NOTE, vectorization_factor);
     357                 :       9520 :       dump_printf (MSG_NOTE, "\n");
     358                 :            :     }
     359                 :            : 
     360                 :      34992 :   if (known_le (vectorization_factor, 1U))
     361                 :       1376 :     return opt_result::failure_at (vect_location,
     362                 :       1376 :                                    "not vectorized: unsupported data-type\n");
     363                 :      33616 :   LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
     364                 :      33616 :   return opt_result::success ();
     365                 :            : }
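
As a stand-alone illustration of the arithmetic described in the comment before
vect_determine_vectorization_factor (an editor's sketch under the fixed
vector-size assumption, not GCC code), the VF for a single element size is
simply:

   static unsigned
   example_vectorization_factor (unsigned vector_size_bytes,
                                 unsigned element_size_bytes)
   {
     /* Number of data elements processed per vector iteration; 16-byte
        vectors over 4-byte elements give VF == 4, as in the example above.  */
     return vector_size_bytes / element_size_bytes;
   }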
     366                 :            : 
     367                 :            : 
     368                 :            : /* Function vect_is_simple_iv_evolution.
     369                 :            : 
      370                 :            :    FORNOW: A simple evolution of an induction variable in the loop is
     371                 :            :    considered a polynomial evolution.  */
     372                 :            : 
     373                 :            : static bool
     374                 :      89901 : vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
     375                 :            :                              tree * step)
     376                 :            : {
     377                 :      89901 :   tree init_expr;
     378                 :      89901 :   tree step_expr;
     379                 :      89901 :   tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
     380                 :      89901 :   basic_block bb;
     381                 :            : 
     382                 :            :   /* When there is no evolution in this loop, the evolution function
     383                 :            :      is not "simple".  */
     384                 :      89901 :   if (evolution_part == NULL_TREE)
     385                 :            :     return false;
     386                 :            : 
     387                 :            :   /* When the evolution is a polynomial of degree >= 2
     388                 :            :      the evolution function is not "simple".  */
     389                 :      85401 :   if (tree_is_chrec (evolution_part))
     390                 :            :     return false;
     391                 :            : 
     392                 :      85401 :   step_expr = evolution_part;
     393                 :      85401 :   init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
     394                 :            : 
     395                 :      85401 :   if (dump_enabled_p ())
     396                 :      22421 :     dump_printf_loc (MSG_NOTE, vect_location, "step: %T,  init: %T\n",
     397                 :            :                      step_expr, init_expr);
     398                 :            : 
     399                 :      85401 :   *init = init_expr;
     400                 :      85401 :   *step = step_expr;
     401                 :            : 
     402                 :      85401 :   if (TREE_CODE (step_expr) != INTEGER_CST
     403                 :      17619 :       && (TREE_CODE (step_expr) != SSA_NAME
     404                 :      16756 :           || ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
     405                 :      16737 :               && flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
     406                 :        389 :           || (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
     407                 :         34 :               && (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
     408                 :         34 :                   || !flag_associative_math)))
     409                 :     102652 :       && (TREE_CODE (step_expr) != REAL_CST
     410                 :        119 :           || !flag_associative_math))
     411                 :            :     {
     412                 :      17220 :       if (dump_enabled_p ())
     413                 :       1518 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
     414                 :            :                          "step unknown.\n");
     415                 :      17220 :       return false;
     416                 :            :     }
     417                 :            : 
     418                 :            :   return true;
     419                 :            : }
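
For concreteness, an editor's illustration (an assumption, not from this file)
of source-level inductions that vect_is_simple_iv_evolution accepts: scalar
evolution represents them as the chrec {init, +, step} with a loop-invariant
step.

   void
   example_simple_ivs (int *restrict a, int n)
   {
     for (int i = 0; i < n; i++)   /* i evolves as {0, +, 1}.  */
       a[i] = 5 + 3 * i;           /* 5 + 3*i evolves as {5, +, 3}.  */
   }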
     420                 :            : 
     421                 :            : /* Return true if PHI, described by STMT_INFO, is the inner PHI in
     422                 :            :    what we are assuming is a double reduction.  For example, given
     423                 :            :    a structure like this:
     424                 :            : 
     425                 :            :       outer1:
     426                 :            :         x_1 = PHI <x_4(outer2), ...>;
     427                 :            :         ...
     428                 :            : 
     429                 :            :       inner:
     430                 :            :         x_2 = PHI <x_1(outer1), ...>;
     431                 :            :         ...
     432                 :            :         x_3 = ...;
     433                 :            :         ...
     434                 :            : 
     435                 :            :       outer2:
     436                 :            :         x_4 = PHI <x_3(inner)>;
     437                 :            :         ...
     438                 :            : 
     439                 :            :    outer loop analysis would treat x_1 as a double reduction phi and
     440                 :            :    this function would then return true for x_2.  */
     441                 :            : 
     442                 :            : static bool
     443                 :      90089 : vect_inner_phi_in_double_reduction_p (stmt_vec_info stmt_info, gphi *phi)
     444                 :            : {
     445                 :      90089 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
     446                 :      90089 :   use_operand_p use_p;
     447                 :      90089 :   ssa_op_iter op_iter;
     448                 :     269891 :   FOR_EACH_PHI_ARG (use_p, phi, op_iter, SSA_OP_USE)
     449                 :     179990 :     if (stmt_vec_info def_info = loop_vinfo->lookup_def (USE_FROM_PTR (use_p)))
     450                 :      90342 :       if (STMT_VINFO_DEF_TYPE (def_info) == vect_double_reduction_def)
     451                 :            :         return true;
     452                 :            :   return false;
     453                 :            : }
     454                 :            : 
     455                 :            : /* Function vect_analyze_scalar_cycles_1.
     456                 :            : 
     457                 :            :    Examine the cross iteration def-use cycles of scalar variables
     458                 :            :    in LOOP.  LOOP_VINFO represents the loop that is now being
     459                 :            :    considered for vectorization (can be LOOP, or an outer-loop
     460                 :            :    enclosing LOOP).  */
     461                 :            : 
     462                 :            : static void
     463                 :      39839 : vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, class loop *loop)
     464                 :            : {
     465                 :      39839 :   basic_block bb = loop->header;
     466                 :      39839 :   tree init, step;
     467                 :      39839 :   auto_vec<stmt_vec_info, 64> worklist;
     468                 :      39839 :   gphi_iterator gsi;
     469                 :      39839 :   bool double_reduc, reduc_chain;
     470                 :            : 
     471                 :      79678 :   DUMP_VECT_SCOPE ("vect_analyze_scalar_cycles");
     472                 :            : 
     473                 :            :   /* First - identify all inductions.  Reduction detection assumes that all the
     474                 :            :      inductions have been identified, therefore, this order must not be
     475                 :            :      changed.  */
     476                 :     160351 :   for (gsi = gsi_start_phis  (bb); !gsi_end_p (gsi); gsi_next (&gsi))
     477                 :            :     {
     478                 :     120512 :       gphi *phi = gsi.phi ();
     479                 :     120512 :       tree access_fn = NULL;
     480                 :     120512 :       tree def = PHI_RESULT (phi);
     481                 :     120512 :       stmt_vec_info stmt_vinfo = loop_vinfo->lookup_stmt (phi);
     482                 :            : 
     483                 :     120512 :       if (dump_enabled_p ())
     484                 :      32192 :         dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);
     485                 :            : 
     486                 :            :       /* Skip virtual phi's.  The data dependences that are associated with
     487                 :            :          virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.  */
     488                 :     241024 :       if (virtual_operand_p (def))
     489                 :      52340 :         continue;
     490                 :            : 
     491                 :      90089 :       STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
     492                 :            : 
     493                 :            :       /* Analyze the evolution function.  */
     494                 :      90089 :       access_fn = analyze_scalar_evolution (loop, def);
     495                 :      90089 :       if (access_fn)
     496                 :            :         {
     497                 :      90089 :           STRIP_NOPS (access_fn);
     498                 :      90089 :           if (dump_enabled_p ())
     499                 :      23372 :             dump_printf_loc (MSG_NOTE, vect_location,
     500                 :            :                              "Access function of PHI: %T\n", access_fn);
     501                 :      90089 :           STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
     502                 :      90089 :             = initial_condition_in_loop_num (access_fn, loop->num);
     503                 :      90089 :           STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
     504                 :      90089 :             = evolution_part_in_loop_num (access_fn, loop->num);
     505                 :            :         }
     506                 :            : 
     507                 :     112006 :       if (!access_fn
     508                 :      90089 :           || vect_inner_phi_in_double_reduction_p (stmt_vinfo, phi)
     509                 :      89901 :           || !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
     510                 :     158270 :           || (LOOP_VINFO_LOOP (loop_vinfo) != loop
     511                 :       2168 :               && TREE_CODE (step) != INTEGER_CST))
     512                 :            :         {
     513                 :      21917 :           worklist.safe_push (stmt_vinfo);
     514                 :      21917 :           continue;
     515                 :            :         }
     516                 :            : 
     517                 :      68172 :       gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
     518                 :            :                   != NULL_TREE);
     519                 :      68172 :       gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
     520                 :            : 
     521                 :      68172 :       if (dump_enabled_p ())
     522                 :      20894 :         dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
     523                 :      68172 :       STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
     524                 :            :     }
     525                 :            : 
     526                 :            : 
     527                 :            :   /* Second - identify all reductions and nested cycles.  */
     528                 :      61756 :   while (worklist.length () > 0)
     529                 :            :     {
     530                 :      21917 :       stmt_vec_info stmt_vinfo = worklist.pop ();
     531                 :      21917 :       gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
     532                 :      21917 :       tree def = PHI_RESULT (phi);
     533                 :            : 
     534                 :      21917 :       if (dump_enabled_p ())
     535                 :       2478 :         dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: %G", phi);
     536                 :            : 
     537                 :      43834 :       gcc_assert (!virtual_operand_p (def)
     538                 :            :                   && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
     539                 :            : 
     540                 :      21917 :       stmt_vec_info reduc_stmt_info
     541                 :      21917 :         = vect_is_simple_reduction (loop_vinfo, stmt_vinfo, &double_reduc,
     542                 :      21917 :                                     &reduc_chain);
     543                 :      21917 :       if (reduc_stmt_info)
     544                 :            :         {
     545                 :      20341 :           STMT_VINFO_REDUC_DEF (stmt_vinfo) = reduc_stmt_info;
     546                 :      20341 :           STMT_VINFO_REDUC_DEF (reduc_stmt_info) = stmt_vinfo;
     547                 :      20341 :           if (double_reduc)
     548                 :            :             {
     549                 :        188 :               if (dump_enabled_p ())
     550                 :         96 :                 dump_printf_loc (MSG_NOTE, vect_location,
     551                 :            :                                  "Detected double reduction.\n");
     552                 :            : 
     553                 :        188 :               STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
     554                 :        188 :               STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_double_reduction_def;
     555                 :            :             }
     556                 :            :           else
     557                 :            :             {
     558                 :      20153 :               if (loop != LOOP_VINFO_LOOP (loop_vinfo))
     559                 :            :                 {
     560                 :        677 :                   if (dump_enabled_p ())
     561                 :        329 :                     dump_printf_loc (MSG_NOTE, vect_location,
     562                 :            :                                      "Detected vectorizable nested cycle.\n");
     563                 :            : 
     564                 :        677 :                   STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
     565                 :            :                 }
     566                 :            :               else
     567                 :            :                 {
     568                 :      19476 :                   if (dump_enabled_p ())
     569                 :       1874 :                     dump_printf_loc (MSG_NOTE, vect_location,
     570                 :            :                                      "Detected reduction.\n");
     571                 :            : 
     572                 :      19476 :                   STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
     573                 :      19476 :                   STMT_VINFO_DEF_TYPE (reduc_stmt_info) = vect_reduction_def;
     574                 :            :                   /* Store the reduction cycles for possible vectorization in
     575                 :            :                      loop-aware SLP if it was not detected as reduction
     576                 :            :                      chain.  */
     577                 :      19476 :                   if (! reduc_chain)
     578                 :      19271 :                     LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push
     579                 :      19271 :                       (reduc_stmt_info);
     580                 :            :                 }
     581                 :            :             }
     582                 :            :         }
     583                 :            :       else
     584                 :       1576 :         if (dump_enabled_p ())
     585                 :        179 :           dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
     586                 :            :                            "Unknown def-use cycle pattern.\n");
     587                 :            :     }
     588                 :      39839 : }
     589                 :            : 
     590                 :            : 
     591                 :            : /* Function vect_analyze_scalar_cycles.
     592                 :            : 
     593                 :            :    Examine the cross iteration def-use cycles of scalar variables, by
     594                 :            :    analyzing the loop-header PHIs of scalar variables.  Classify each
     595                 :            :    cycle as one of the following: invariant, induction, reduction, unknown.
     596                 :            :    We do that for the loop represented by LOOP_VINFO, and also to its
     597                 :            :    inner-loop, if exists.
     598                 :            :    Examples for scalar cycles:
     599                 :            : 
     600                 :            :    Example1: reduction:
     601                 :            : 
     602                 :            :               loop1:
     603                 :            :               for (i=0; i<N; i++)
     604                 :            :                  sum += a[i];
     605                 :            : 
     606                 :            :    Example2: induction:
     607                 :            : 
     608                 :            :               loop2:
     609                 :            :               for (i=0; i<N; i++)
     610                 :            :                  a[i] = i;  */
     611                 :            : 
     612                 :            : static void
     613                 :      38589 : vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
     614                 :            : {
     615                 :      38589 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
     616                 :            : 
     617                 :      38589 :   vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
     618                 :            : 
     619                 :            :   /* When vectorizing an outer-loop, the inner-loop is executed sequentially.
     620                 :            :      Reductions in such inner-loop therefore have different properties than
     621                 :            :      the reductions in the nest that gets vectorized:
     622                 :            :      1. When vectorized, they are executed in the same order as in the original
     623                 :            :         scalar loop, so we can't change the order of computation when
     624                 :            :         vectorizing them.
     625                 :            :      2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
     626                 :            :         current checks are too strict.  */
     627                 :            : 
     628                 :      38589 :   if (loop->inner)
     629                 :       1250 :     vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
     630                 :      38589 : }
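
An editor's illustration (an assumption about a typical case, not from this
file) of the nested-cycle situation described in the comment above: when the
outer loop below is the one being vectorized, the inner accumulation is a
nested cycle and its iterations still execute in their original order.

   void
   example_nested_cycle (int *restrict out, int a[][64], int n)
   {
     for (int i = 0; i < n; i++)       /* candidate outer loop.  */
       {
         int s = 0;
         for (int j = 0; j < 64; j++)  /* inner loop, kept sequential.  */
           s += a[i][j];
         out[i] = s;
       }
   }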
     631                 :            : 
     632                 :            : /* Transfer group and reduction information from STMT_INFO to its
     633                 :            :    pattern stmt.  */
     634                 :            : 
     635                 :            : static void
     636                 :         16 : vect_fixup_reduc_chain (stmt_vec_info stmt_info)
     637                 :            : {
     638                 :         16 :   stmt_vec_info firstp = STMT_VINFO_RELATED_STMT (stmt_info);
     639                 :         16 :   stmt_vec_info stmtp;
     640                 :         16 :   gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (firstp)
     641                 :            :               && REDUC_GROUP_FIRST_ELEMENT (stmt_info));
     642                 :         16 :   REDUC_GROUP_SIZE (firstp) = REDUC_GROUP_SIZE (stmt_info);
     643                 :        146 :   do
     644                 :            :     {
     645                 :        146 :       stmtp = STMT_VINFO_RELATED_STMT (stmt_info);
     646                 :        146 :       gcc_checking_assert (STMT_VINFO_DEF_TYPE (stmtp)
     647                 :            :                            == STMT_VINFO_DEF_TYPE (stmt_info));
     648                 :        146 :       REDUC_GROUP_FIRST_ELEMENT (stmtp) = firstp;
     649                 :        146 :       stmt_info = REDUC_GROUP_NEXT_ELEMENT (stmt_info);
     650                 :        146 :       if (stmt_info)
     651                 :        130 :         REDUC_GROUP_NEXT_ELEMENT (stmtp)
     652                 :        130 :           = STMT_VINFO_RELATED_STMT (stmt_info);
     653                 :            :     }
     654                 :        146 :   while (stmt_info);
     655                 :         16 : }
     656                 :            : 
     657                 :            : /* Fixup scalar cycles that now have their stmts detected as patterns.  */
     658                 :            : 
     659                 :            : static void
     660                 :      38589 : vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
     661                 :            : {
     662                 :      38589 :   stmt_vec_info first;
     663                 :      38589 :   unsigned i;
     664                 :            : 
     665                 :      38794 :   FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
     666                 :        205 :     if (STMT_VINFO_IN_PATTERN_P (first))
     667                 :            :       {
     668                 :         28 :         stmt_vec_info next = REDUC_GROUP_NEXT_ELEMENT (first);
     669                 :        174 :         while (next)
     670                 :            :           {
     671                 :        158 :             if (! STMT_VINFO_IN_PATTERN_P (next)
     672                 :        154 :                 || STMT_VINFO_REDUC_IDX (STMT_VINFO_RELATED_STMT (next)) == -1)
     673                 :            :               break;
     674                 :        146 :             next = REDUC_GROUP_NEXT_ELEMENT (next);
     675                 :            :           }
     676                 :            :         /* If not all stmt in the chain are patterns or if we failed
     677                 :            :            to update STMT_VINFO_REDUC_IDX try to handle the chain
     678                 :            :            without patterns.  */
     679                 :         28 :         if (! next
     680                 :         16 :             && STMT_VINFO_REDUC_IDX (STMT_VINFO_RELATED_STMT (first)) != -1)
     681                 :            :           {
     682                 :         16 :             vect_fixup_reduc_chain (first);
     683                 :         16 :             LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
     684                 :         16 :               = STMT_VINFO_RELATED_STMT (first);
     685                 :            :           }
     686                 :            :       }
     687                 :      38589 : }
     688                 :            : 
     689                 :            : /* Function vect_get_loop_niters.
     690                 :            : 
     691                 :            :    Determine how many iterations the loop is executed and place it
     692                 :            :    in NUMBER_OF_ITERATIONS.  Place the number of latch iterations
     693                 :            :    in NUMBER_OF_ITERATIONSM1.  Place the condition under which the
     694                 :            :    niter information holds in ASSUMPTIONS.
     695                 :            : 
     696                 :            :    Return the loop exit condition.  */
     697                 :            : 
     698                 :            : 
     699                 :            : static gcond *
     700                 :      61241 : vect_get_loop_niters (class loop *loop, tree *assumptions,
     701                 :            :                       tree *number_of_iterations, tree *number_of_iterationsm1)
     702                 :            : {
     703                 :      61241 :   edge exit = single_exit (loop);
     704                 :      61241 :   class tree_niter_desc niter_desc;
     705                 :      61241 :   tree niter_assumptions, niter, may_be_zero;
     706                 :      61241 :   gcond *cond = get_loop_exit_condition (loop);
     707                 :            : 
     708                 :      61241 :   *assumptions = boolean_true_node;
     709                 :      61241 :   *number_of_iterationsm1 = chrec_dont_know;
     710                 :      61241 :   *number_of_iterations = chrec_dont_know;
     711                 :      61241 :   DUMP_VECT_SCOPE ("get_loop_niters");
     712                 :            : 
     713                 :      61241 :   if (!exit)
     714                 :            :     return cond;
     715                 :            : 
     716                 :      61241 :   may_be_zero = NULL_TREE;
     717                 :      61241 :   if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
     718                 :      61241 :       || chrec_contains_undetermined (niter_desc.niter))
     719                 :       4392 :     return cond;
     720                 :            : 
     721                 :      56849 :   niter_assumptions = niter_desc.assumptions;
     722                 :      56849 :   may_be_zero = niter_desc.may_be_zero;
     723                 :      56849 :   niter = niter_desc.niter;
     724                 :            : 
     725                 :      56849 :   if (may_be_zero && integer_zerop (may_be_zero))
     726                 :            :     may_be_zero = NULL_TREE;
     727                 :            : 
     728                 :       2132 :   if (may_be_zero)
     729                 :            :     {
     730                 :       2132 :       if (COMPARISON_CLASS_P (may_be_zero))
     731                 :            :         {
     732                 :            :           /* Try to combine may_be_zero with assumptions, this can simplify
     733                 :            :              computation of niter expression.  */
     734                 :       2132 :           if (niter_assumptions && !integer_nonzerop (niter_assumptions))
     735                 :         42 :             niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
     736                 :            :                                              niter_assumptions,
     737                 :            :                                              fold_build1 (TRUTH_NOT_EXPR,
     738                 :            :                                                           boolean_type_node,
     739                 :            :                                                           may_be_zero));
     740                 :            :           else
     741                 :       2090 :             niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
     742                 :            :                                  build_int_cst (TREE_TYPE (niter), 0),
     743                 :            :                                  rewrite_to_non_trapping_overflow (niter));
     744                 :            : 
     745                 :      56849 :           may_be_zero = NULL_TREE;
     746                 :            :         }
     747                 :          0 :       else if (integer_nonzerop (may_be_zero))
     748                 :            :         {
     749                 :          0 :           *number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
     750                 :          0 :           *number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
     751                 :          0 :           return cond;
     752                 :            :         }
     753                 :            :       else
     754                 :            :         return cond;
     755                 :            :     }
     756                 :            : 
     757                 :      56849 :   *assumptions = niter_assumptions;
     758                 :      56849 :   *number_of_iterationsm1 = niter;
     759                 :            : 
     760                 :            :   /* We want the number of loop header executions, which is the number
     761                 :            :      of latch executions plus one.
     762                 :            :      ???  For UINT_MAX latch executions this number overflows to zero
     763                 :            :      for loops like do { n++; } while (n != 0);  */
     764                 :      56849 :   if (niter && !chrec_contains_undetermined (niter))
     765                 :      56849 :     niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
     766                 :            :                           build_int_cst (TREE_TYPE (niter), 1));
     767                 :      56849 :   *number_of_iterations = niter;
     768                 :            : 
     769                 :      56849 :   return cond;
     770                 :            : }
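/* Editorial note (not part of the measured source): a hedged worked
   example of the NITERS / NITERSM1 distinction above.  For a counted
   loop such as

     for (i = 0; i < n; i++)
       a[i] = b[i];

   with n == 100, niter analysis yields *NUMBER_OF_ITERATIONSM1 == 99
   (the number of latch executions) and *NUMBER_OF_ITERATIONS == 100
   (the number of header executions), matching the "+ 1" applied above.  */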
     771                 :            : 
     772                 :            : /* Function bb_in_loop_p
     773                 :            : 
     774                 :            :    Used as predicate for dfs order traversal of the loop bbs.  */
     775                 :            : 
     776                 :            : static bool
     777                 :     115282 : bb_in_loop_p (const_basic_block bb, const void *data)
     778                 :            : {
     779                 :     115282 :   const class loop *const loop = (const class loop *)data;
     780                 :     115282 :   if (flow_bb_inside_loop_p (loop, bb))
     781                 :      60599 :     return true;
     782                 :            :   return false;
     783                 :            : }
     784                 :            : 
     785                 :            : 
     786                 :            : /* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
     787                 :            :    stmt_vec_info structs for all the stmts in LOOP_IN.  */
     788                 :            : 
     789                 :      54683 : _loop_vec_info::_loop_vec_info (class loop *loop_in, vec_info_shared *shared)
     790                 :            :   : vec_info (vec_info::loop, init_cost (loop_in), shared),
     791                 :            :     loop (loop_in),
     792                 :      54683 :     bbs (XCNEWVEC (basic_block, loop->num_nodes)),
     793                 :            :     num_itersm1 (NULL_TREE),
     794                 :            :     num_iters (NULL_TREE),
     795                 :            :     num_iters_unchanged (NULL_TREE),
     796                 :            :     num_iters_assumptions (NULL_TREE),
     797                 :            :     th (0),
     798                 :      54683 :     versioning_threshold (0),
     799                 :      54683 :     vectorization_factor (0),
     800                 :            :     max_vectorization_factor (0),
     801                 :            :     mask_skip_niters (NULL_TREE),
     802                 :            :     mask_compare_type (NULL_TREE),
     803                 :            :     simd_if_cond (NULL_TREE),
     804                 :            :     unaligned_dr (NULL),
     805                 :            :     peeling_for_alignment (0),
     806                 :            :     ptr_mask (0),
     807                 :            :     ivexpr_map (NULL),
     808                 :            :     scan_map (NULL),
     809                 :      54683 :     slp_unrolling_factor (1),
     810                 :            :     single_scalar_iteration_cost (0),
     811                 :            :     vec_outside_cost (0),
     812                 :            :     vec_inside_cost (0),
     813                 :            :     vectorizable (false),
     814                 :            :     can_fully_mask_p (true),
     815                 :            :     fully_masked_p (false),
     816                 :            :     peeling_for_gaps (false),
     817                 :            :     peeling_for_niter (false),
     818                 :            :     no_data_dependencies (false),
     819                 :            :     has_mask_store (false),
     820                 :      54683 :     scalar_loop_scaling (profile_probability::uninitialized ()),
     821                 :            :     scalar_loop (NULL),
     822                 :      54683 :     orig_loop_info (NULL)
     823                 :            : {
     824                 :            :   /* CHECKME: We want to visit all BBs before their successors (except for
     825                 :            :      latch blocks, for which this assertion wouldn't hold).  In the simple
     826                 :            :      case of the loop forms we allow, a dfs order of the BBs would be the same
     827                 :            :      as reversed postorder traversal, so we are safe.  */
     828                 :            : 
     829                 :      54683 :   unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
     830                 :      54683 :                                           bbs, loop->num_nodes, loop);
     831                 :      54683 :   gcc_assert (nbbs == loop->num_nodes);
     832                 :            : 
     833                 :     169965 :   for (unsigned int i = 0; i < nbbs; i++)
     834                 :            :     {
     835                 :     115282 :       basic_block bb = bbs[i];
     836                 :     115282 :       gimple_stmt_iterator si;
     837                 :            : 
     838                 :     290723 :       for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
     839                 :            :         {
     840                 :     175441 :           gimple *phi = gsi_stmt (si);
     841                 :     175441 :           gimple_set_uid (phi, 0);
     842                 :     175441 :           add_stmt (phi);
     843                 :            :         }
     844                 :            : 
     845                 :    1148380 :       for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
     846                 :            :         {
     847                 :     917816 :           gimple *stmt = gsi_stmt (si);
     848                 :     917816 :           gimple_set_uid (stmt, 0);
     849                 :     917816 :           add_stmt (stmt);
     850                 :            :           /* If the .GOMP_SIMD_LANE call for the current loop has 3 arguments, the
     851                 :            :              third argument is the #pragma omp simd if (x) condition: when 0, the loop
     852                 :            :              shouldn't be vectorized; when a non-zero constant, it should be vectorized
     853                 :            :              normally; otherwise the loop is versioned, with the vectorized copy run
     854                 :            :              only if the condition is non-zero at runtime (see the example below).  */
     855                 :     917816 :           if (loop_in->simduid
     856                 :      59301 :               && is_gimple_call (stmt)
     857                 :       3986 :               && gimple_call_internal_p (stmt)
     858                 :       3823 :               && gimple_call_internal_fn (stmt) == IFN_GOMP_SIMD_LANE
     859                 :       3823 :               && gimple_call_num_args (stmt) >= 3
     860                 :         19 :               && TREE_CODE (gimple_call_arg (stmt, 0)) == SSA_NAME
     861                 :     917835 :               && (loop_in->simduid
     862                 :         19 :                   == SSA_NAME_VAR (gimple_call_arg (stmt, 0))))
     863                 :            :             {
     864                 :         19 :               tree arg = gimple_call_arg (stmt, 2);
     865                 :         19 :               if (integer_zerop (arg) || TREE_CODE (arg) == SSA_NAME)
     866                 :         19 :                 simd_if_cond = arg;
     867                 :            :               else
     868                 :          0 :                 gcc_assert (integer_nonzerop (arg));
     869                 :            :             }
     870                 :            :         }
     871                 :            :     }
     872                 :            : 
     873                 :      54683 :   epilogue_vinfos.create (6);
     874                 :      54683 : }
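/* Editorial note (not part of the measured source): a hedged,
   user-level illustration of the simd_if_cond handling above:

     void
     f (int *a, int n, int use_simd)
     {
     #pragma omp simd if (use_simd)
       for (int i = 0; i < n; i++)
         a[i] += 1;
     }

   The third argument of the .GOMP_SIMD_LANE call carries USE_SIMD:
   a literal 0 suppresses vectorization, a non-zero constant allows
   normal vectorization, and an SSA_NAME makes the loop a candidate
   for versioning guarded by a runtime non-zero test.  */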
     875                 :            : 
     876                 :            : /* Free all levels of MASKS.  */
     877                 :            : 
     878                 :            : void
     879                 :      54761 : release_vec_loop_masks (vec_loop_masks *masks)
     880                 :            : {
     881                 :      54761 :   rgroup_masks *rgm;
     882                 :      54761 :   unsigned int i;
     883                 :      62635 :   FOR_EACH_VEC_ELT (*masks, i, rgm)
     884                 :       7874 :     rgm->masks.release ();
     885                 :      54761 :   masks->release ();
     886                 :      54761 : }
     887                 :            : 
     888                 :            : /* Free all memory used by the _loop_vec_info, as well as all the
     889                 :            :    stmt_vec_info structs of all the stmts in the loop.  */
     890                 :            : 
     891                 :      71966 : _loop_vec_info::~_loop_vec_info ()
     892                 :            : {
     893                 :      54453 :   free (bbs);
     894                 :            : 
     895                 :      54453 :   release_vec_loop_masks (&masks);
     896                 :      55796 :   delete ivexpr_map;
     897                 :      54801 :   delete scan_map;
     898                 :      54453 :   epilogue_vinfos.release ();
     899                 :            : 
     900                 :      54453 :   loop->aux = NULL;
     901                 :      54453 : }
     902                 :            : 
     903                 :            : /* Return an invariant or register for EXPR and emit necessary
     904                 :            :    computations in the LOOP_VINFO loop preheader.  */
     905                 :            : 
     906                 :            : tree
     907                 :       8313 : cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
     908                 :            : {
     909                 :       8313 :   if (is_gimple_reg (expr)
     910                 :       8313 :       || is_gimple_min_invariant (expr))
     911                 :       3390 :     return expr;
     912                 :            : 
     913                 :       4923 :   if (! loop_vinfo->ivexpr_map)
     914                 :       1365 :     loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
     915                 :       4923 :   tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
     916                 :       4923 :   if (! cached)
     917                 :            :     {
     918                 :       3177 :       gimple_seq stmts = NULL;
     919                 :       3177 :       cached = force_gimple_operand (unshare_expr (expr),
     920                 :            :                                      &stmts, true, NULL_TREE);
     921                 :       3177 :       if (stmts)
     922                 :            :         {
     923                 :       3144 :           edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
     924                 :       3144 :           gsi_insert_seq_on_edge_immediate (e, stmts);
     925                 :            :         }
     926                 :            :     }
     927                 :       4923 :   return cached;
     928                 :            : }
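/* Editorial note (not part of the measured source): a hedged sketch of
   the intended use.  If the vectorizer needs, say, the value of
   "n_5 * 4" (a hypothetical expression) in several places, the first
   call gimplifies that computation onto the preheader edge and caches
   the resulting SSA name in ivexpr_map; later calls with an
   operand-equal expression reuse the cached name instead of emitting
   the computation again.  */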
     929                 :            : 
     930                 :            : /* Return true if we can use CMP_TYPE as the comparison type to produce
     931                 :            :    all masks required to mask LOOP_VINFO.  */
     932                 :            : 
     933                 :            : static bool
     934                 :      13795 : can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
     935                 :            : {
     936                 :      13795 :   rgroup_masks *rgm;
     937                 :      13795 :   unsigned int i;
     938                 :      15296 :   FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
     939                 :      15296 :     if (rgm->mask_type != NULL_TREE
     940                 :      15296 :         && !direct_internal_fn_supported_p (IFN_WHILE_ULT,
     941                 :            :                                             cmp_type, rgm->mask_type,
     942                 :            :                                             OPTIMIZE_FOR_SPEED))
     943                 :            :       return false;
     944                 :            :   return true;
     945                 :            : }
     946                 :            : 
     947                 :            : /* Calculate the maximum number of scalars per iteration for every
     948                 :            :    rgroup in LOOP_VINFO.  */
     949                 :            : 
     950                 :            : static unsigned int
     951                 :       3154 : vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
     952                 :            : {
     953                 :       3154 :   unsigned int res = 1;
     954                 :       3154 :   unsigned int i;
     955                 :       3154 :   rgroup_masks *rgm;
     956                 :       6995 :   FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
     957                 :       3841 :     res = MAX (res, rgm->max_nscalars_per_iter);
     958                 :       3154 :   return res;
     959                 :            : }
     960                 :            : 
     961                 :            : /* Each statement in LOOP_VINFO can be masked where necessary.  Check
     962                 :            :    whether we can actually generate the masks required.  Return true if so,
     963                 :            :    storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE.  */
     964                 :            : 
     965                 :            : static bool
     966                 :       3154 : vect_verify_full_masking (loop_vec_info loop_vinfo)
     967                 :            : {
     968                 :       3154 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
     969                 :       3154 :   unsigned int min_ni_width;
     970                 :       3154 :   unsigned int max_nscalars_per_iter
     971                 :       3154 :     = vect_get_max_nscalars_per_iter (loop_vinfo);
     972                 :            : 
     973                 :            :   /* Use a normal loop if there are no statements that need masking.
     974                 :            :      This only happens in rare degenerate cases: it means that the loop
     975                 :            :      has no loads, no stores, and no live-out values.  */
     976                 :       3154 :   if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
     977                 :            :     return false;
     978                 :            : 
     979                 :            :   /* Get the maximum number of iterations that is representable
     980                 :            :      in the counter type.  */
     981                 :       3129 :   tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
     982                 :       3129 :   widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
     983                 :            : 
     984                 :            :   /* Get a more refined estimate for the number of iterations.  */
     985                 :       3129 :   widest_int max_back_edges;
     986                 :       3129 :   if (max_loop_iterations (loop, &max_back_edges))
     987                 :       3129 :     max_ni = wi::smin (max_ni, max_back_edges + 1);
     988                 :            : 
     989                 :            :   /* Account for rgroup masks, in which each bit is replicated N times.  */
     990                 :       3129 :   max_ni *= max_nscalars_per_iter;
     991                 :            : 
     992                 :            :   /* Work out how many bits we need to represent the limit.  */
     993                 :       3129 :   min_ni_width = wi::min_precision (max_ni, UNSIGNED);
     994                 :            : 
     995                 :            :   /* Find a scalar mode for which WHILE_ULT is supported.  */
     996                 :       3129 :   opt_scalar_int_mode cmp_mode_iter;
     997                 :       3129 :   tree cmp_type = NULL_TREE;
     998                 :       3129 :   tree iv_type = NULL_TREE;
     999                 :       3129 :   widest_int iv_limit = vect_iv_limit_for_full_masking (loop_vinfo);
    1000                 :       3129 :   unsigned int iv_precision = UINT_MAX;
    1001                 :            : 
    1002                 :       3129 :   if (iv_limit != -1)
    1003                 :       3129 :     iv_precision = wi::min_precision (iv_limit * max_nscalars_per_iter,
    1004                 :            :                                       UNSIGNED);
    1005                 :            : 
    1006                 :      46935 :   FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
    1007                 :            :     {
    1008                 :      21903 :       unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
    1009                 :      21903 :       if (cmp_bits >= min_ni_width
    1010                 :      21903 :           && targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
    1011                 :            :         {
    1012                 :      13795 :           tree this_type = build_nonstandard_integer_type (cmp_bits, true);
    1013                 :      13795 :           if (this_type
    1014                 :      13795 :               && can_produce_all_loop_masks_p (loop_vinfo, this_type))
    1015                 :            :             {
    1016                 :            :               /* Although we could stop as soon as we find a valid mode,
    1017                 :            :                  there are at least two reasons why that's not always the
    1018                 :            :                  best choice:
    1019                 :            : 
    1020                 :            :                  - An IV that's Pmode or wider is more likely to be reusable
    1021                 :            :                    in address calculations than an IV that's narrower than
    1022                 :            :                    Pmode.
    1023                 :            : 
    1024                 :            :                  - Doing the comparison in IV_PRECISION or wider allows
    1025                 :            :                    a natural 0-based IV, whereas using a narrower comparison
    1026                 :            :                    type requires mitigations against wrap-around.
    1027                 :            : 
    1028                 :            :                  Conversely, if the IV limit is variable, doing the comparison
    1029                 :            :                  in a wider type than the original type can introduce
    1030                 :            :                  unnecessary extensions, so picking the widest valid mode
    1031                 :            :                  is not always a good choice either.
    1032                 :            : 
    1033                 :            :                  Here we prefer the first IV type that's Pmode or wider,
    1034                 :            :                  and the first comparison type that's IV_PRECISION or wider.
    1035                 :            :                  (The comparison type must be no wider than the IV type,
    1036                 :            :                  to avoid extensions in the vector loop.)
    1037                 :            : 
    1038                 :            :                  ??? We might want to try continuing beyond Pmode for ILP32
    1039                 :            :                  targets if CMP_BITS < IV_PRECISION.  */
    1040                 :          0 :               iv_type = this_type;
    1041                 :          0 :               if (!cmp_type || iv_precision > TYPE_PRECISION (cmp_type))
    1042                 :            :                 cmp_type = this_type;
    1043                 :          0 :               if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
    1044                 :            :                 break;
    1045                 :            :             }
    1046                 :            :         }
    1047                 :            :     }
    1048                 :            : 
    1049                 :       3129 :   if (!cmp_type)
    1050                 :            :     return false;
    1051                 :            : 
    1052                 :          0 :   LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
    1053                 :          0 :   LOOP_VINFO_MASK_IV_TYPE (loop_vinfo) = iv_type;
    1054                 :          0 :   return true;
    1055                 :            : }
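/* Editorial note (not part of the measured source): a hedged worked
   example of the width computation above.  With at most 999 latch
   executions the bound on header executions is 1000; if
   max_nscalars_per_iter == 4 then max_ni == 4000 and min_ni_width ==
   12 bits (2048 <= 4000 < 4096).  Any scalar integer mode of at least
   12 bits whose type satisfies can_produce_all_loop_masks_p could be
   used as the comparison type, though per the comment above a
   Pmode-wide IV type is still preferred for reuse in addressing.  */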
    1056                 :            : 
    1057                 :            : /* Calculate the cost of one scalar iteration of the loop.  */
    1058                 :            : static void
    1059                 :      33616 : vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
    1060                 :            : {
    1061                 :      33616 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    1062                 :      33616 :   basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
    1063                 :      33616 :   int nbbs = loop->num_nodes, factor;
    1064                 :      33616 :   int innerloop_iters, i;
    1065                 :            : 
    1066                 :      33616 :   DUMP_VECT_SCOPE ("vect_compute_single_scalar_iteration_cost");
    1067                 :            : 
    1068                 :            :   /* Gather costs for statements in the scalar loop.  */
    1069                 :            : 
    1070                 :            :   /* FORNOW.  */
    1071                 :      33616 :   innerloop_iters = 1;
    1072                 :      33616 :   if (loop->inner)
    1073                 :        408 :     innerloop_iters = 50; /* FIXME */
    1074                 :            : 
    1075                 :     102072 :   for (i = 0; i < nbbs; i++)
    1076                 :            :     {
    1077                 :      68456 :       gimple_stmt_iterator si;
    1078                 :      68456 :       basic_block bb = bbs[i];
    1079                 :            : 
    1080                 :      68456 :       if (bb->loop_father == loop->inner)
    1081                 :        816 :         factor = innerloop_iters;
    1082                 :            :       else
    1083                 :            :         factor = 1;
    1084                 :            : 
    1085                 :     680721 :       for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    1086                 :            :         {
    1087                 :     543809 :           gimple *stmt = gsi_stmt (si);
    1088                 :     543809 :           stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
    1089                 :            : 
    1090                 :     543809 :           if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
    1091                 :     120960 :             continue;
    1092                 :            : 
    1093                 :            :           /* Skip stmts that are not vectorized inside the loop.  */
    1094                 :     422849 :           stmt_vec_info vstmt_info = vect_stmt_to_vectorize (stmt_info);
    1095                 :     422849 :           if (!STMT_VINFO_RELEVANT_P (vstmt_info)
    1096                 :     199318 :               && (!STMT_VINFO_LIVE_P (vstmt_info)
    1097                 :        115 :                   || !VECTORIZABLE_CYCLE_DEF
    1098                 :            :                         (STMT_VINFO_DEF_TYPE (vstmt_info))))
    1099                 :     199318 :             continue;
    1100                 :            : 
    1101                 :     223531 :           vect_cost_for_stmt kind;
    1102                 :     223531 :           if (STMT_VINFO_DATA_REF (stmt_info))
    1103                 :            :             {
    1104                 :     112388 :               if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
    1105                 :            :                kind = scalar_load;
    1106                 :            :              else
    1107                 :      50616 :                kind = scalar_store;
    1108                 :            :             }
    1109                 :     111143 :           else if (vect_nop_conversion_p (stmt_info))
    1110                 :      11476 :             continue;
    1111                 :            :           else
    1112                 :            :             kind = scalar_stmt;
    1113                 :            : 
    1114                 :     212055 :           record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
    1115                 :            :                             factor, kind, stmt_info, 0, vect_prologue);
    1116                 :            :         }
    1117                 :            :     }
    1118                 :            : 
    1119                 :            :   /* Now accumulate cost.  */
    1120                 :      33616 :   void *target_cost_data = init_cost (loop);
    1121                 :      33616 :   stmt_info_for_cost *si;
    1122                 :      33616 :   int j;
    1123                 :     245671 :   FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
    1124                 :            :                     j, si)
    1125                 :     212055 :     (void) add_stmt_cost (target_cost_data, si->count,
    1126                 :            :                           si->kind, si->stmt_info, si->misalign,
    1127                 :            :                           vect_body);
    1128                 :      33616 :   unsigned dummy, body_cost = 0;
    1129                 :      33616 :   finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
    1130                 :      33616 :   destroy_cost_data (target_cost_data);
    1131                 :      33616 :   LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
    1132                 :      33616 : }
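/* Editorial note (not part of the measured source): a hedged
   illustration of the costing above.  For

     for (i = 0; i < n; i++)
       a[i] = b[i] + 1;

   one scalar iteration would typically be recorded as one scalar_load
   (b[i]), one scalar_stmt (the add) and one scalar_store (a[i]), plus
   a scalar_stmt for any relevant induction update; the exit test is
   neither an assign nor a call and is skipped, and statements in the
   inner loop of a nest are weighted by the FIXME factor of 50.  */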
    1133                 :            : 
    1134                 :            : 
    1135                 :            : /* Function vect_analyze_loop_form_1.
    1136                 :            : 
    1137                 :            :    Verify that certain CFG restrictions hold, including:
    1138                 :            :    - the loop has a pre-header
    1139                 :            :    - the loop has a single entry and exit
    1140                 :            :    - the loop exit condition is simple enough
    1141                 :            :    - the number of iterations can be analyzed, i.e., a countable loop.  The
    1142                 :            :      niter could be analyzed under some assumptions.  */
    1143                 :            : 
    1144                 :            : opt_result
    1145                 :      90683 : vect_analyze_loop_form_1 (class loop *loop, gcond **loop_cond,
    1146                 :            :                           tree *assumptions, tree *number_of_iterationsm1,
    1147                 :            :                           tree *number_of_iterations, gcond **inner_loop_cond)
    1148                 :            : {
    1149                 :      90683 :   DUMP_VECT_SCOPE ("vect_analyze_loop_form");
    1150                 :            : 
    1151                 :            :   /* Different restrictions apply when we are considering an inner-most loop,
    1152                 :            :      vs. an outer (nested) loop.
    1153                 :            :      (FORNOW. May want to relax some of these restrictions in the future).  */
    1154                 :            : 
    1155                 :      90683 :   if (!loop->inner)
    1156                 :            :     {
    1157                 :            :       /* Inner-most loop.  We currently require that the number of BBs is
    1158                 :            :          exactly 2 (the header and latch).  Vectorizable inner-most loops
    1159                 :            :          look like this:
    1160                 :            : 
    1161                 :            :                         (pre-header)
    1162                 :            :                            |
    1163                 :            :                           header <--------+
    1164                 :            :                            | |            |
    1165                 :            :                            | +--> latch --+
    1166                 :            :                            |
    1167                 :            :                         (exit-bb)  */
    1168                 :            : 
    1169                 :      79465 :       if (loop->num_nodes != 2)
    1170                 :      17564 :         return opt_result::failure_at (vect_location,
    1171                 :            :                                        "not vectorized:"
    1172                 :      17564 :                                        " control flow in loop.\n");
    1173                 :            : 
    1174                 :      61901 :       if (empty_block_p (loop->header))
    1175                 :        476 :         return opt_result::failure_at (vect_location,
    1176                 :        476 :                                        "not vectorized: empty loop.\n");
    1177                 :            :     }
    1178                 :            :   else
    1179                 :            :     {
    1180                 :      11218 :       class loop *innerloop = loop->inner;
    1181                 :      11218 :       edge entryedge;
    1182                 :            : 
    1183                 :            :       /* Nested loop. We currently require that the loop is doubly-nested,
    1184                 :            :          contains a single inner loop, and the number of BBs is exactly 5.
    1185                 :            :          Vectorizable outer-loops look like this:
    1186                 :            : 
    1187                 :            :                         (pre-header)
    1188                 :            :                            |
    1189                 :            :                           header <---+
    1190                 :            :                            |         |
    1191                 :            :                           inner-loop |
    1192                 :            :                            |         |
    1193                 :            :                           tail ------+
    1194                 :            :                            |
    1195                 :            :                         (exit-bb)
    1196                 :            : 
    1197                 :            :          The inner-loop has the properties expected of inner-most loops
    1198                 :            :          as described above.  */
    1199                 :            : 
    1200                 :      11218 :       if ((loop->inner)->inner || (loop->inner)->next)
    1201                 :       1883 :         return opt_result::failure_at (vect_location,
    1202                 :            :                                        "not vectorized:"
    1203                 :       1883 :                                        " multiple nested loops.\n");
    1204                 :            : 
    1205                 :       9335 :       if (loop->num_nodes != 5)
    1206                 :       6857 :         return opt_result::failure_at (vect_location,
    1207                 :            :                                        "not vectorized:"
    1208                 :       6857 :                                        " control flow in loop.\n");
    1209                 :            : 
    1210                 :       2478 :       entryedge = loop_preheader_edge (innerloop);
    1211                 :       2478 :       if (entryedge->src != loop->header
    1212                 :       2471 :           || !single_exit (innerloop)
    1213                 :       4905 :           || single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
    1214                 :         51 :         return opt_result::failure_at (vect_location,
    1215                 :            :                                        "not vectorized:"
    1216                 :         51 :                                        " unsupported outerloop form.\n");
    1217                 :            : 
    1218                 :            :       /* Analyze the inner-loop.  */
    1219                 :       2427 :       tree inner_niterm1, inner_niter, inner_assumptions;
    1220                 :       2427 :       opt_result res
    1221                 :            :         = vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
    1222                 :            :                                     &inner_assumptions, &inner_niterm1,
    1223                 :       2427 :                                     &inner_niter, NULL);
    1224                 :       2427 :       if (!res)
    1225                 :            :         {
    1226                 :        265 :           if (dump_enabled_p ())
    1227                 :          2 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1228                 :            :                              "not vectorized: Bad inner loop.\n");
    1229                 :        265 :           return res;
    1230                 :            :         }
    1231                 :            : 
    1232                 :            :       /* Don't support analyzing niter under assumptions for inner
    1233                 :            :          loop.  */
    1234                 :       2162 :       if (!integer_onep (inner_assumptions))
    1235                 :         71 :         return opt_result::failure_at (vect_location,
    1236                 :         71 :                                        "not vectorized: Bad inner loop.\n");
    1237                 :            : 
    1238                 :       2091 :       if (!expr_invariant_in_loop_p (loop, inner_niter))
    1239                 :         65 :         return opt_result::failure_at (vect_location,
    1240                 :            :                                        "not vectorized: inner-loop count not"
    1241                 :         65 :                                        " invariant.\n");
    1242                 :            : 
    1243                 :       2026 :       if (dump_enabled_p ())
    1244                 :        623 :         dump_printf_loc (MSG_NOTE, vect_location,
    1245                 :            :                          "Considering outer-loop vectorization.\n");
    1246                 :            :     }
    1247                 :            : 
    1248                 :      63451 :   if (!single_exit (loop))
    1249                 :        495 :     return opt_result::failure_at (vect_location,
    1250                 :        495 :                                    "not vectorized: multiple exits.\n");
    1251                 :      62956 :   if (EDGE_COUNT (loop->header->preds) != 2)
    1252                 :          0 :     return opt_result::failure_at (vect_location,
    1253                 :            :                                    "not vectorized:"
    1254                 :          0 :                                    " too many incoming edges.\n");
    1255                 :            : 
    1256                 :            :   /* We assume that the loop exit condition is at the end of the loop, i.e.,
    1257                 :            :      that the loop is represented as a do-while (with a proper if-guard
    1258                 :            :      before the loop if needed), where the loop header contains all the
    1259                 :            :      executable statements, and the latch is empty.  */
    1260                 :      62956 :   if (!empty_block_p (loop->latch)
    1261                 :      62956 :       || !gimple_seq_empty_p (phi_nodes (loop->latch)))
    1262                 :       1703 :     return opt_result::failure_at (vect_location,
    1263                 :       1703 :                                    "not vectorized: latch block not empty.\n");
    1264                 :            : 
    1265                 :            :   /* Make sure the exit is not abnormal.  */
    1266                 :      61253 :   edge e = single_exit (loop);
    1267                 :      61253 :   if (e->flags & EDGE_ABNORMAL)
    1268                 :         12 :     return opt_result::failure_at (vect_location,
    1269                 :            :                                    "not vectorized:"
    1270                 :         12 :                                    " abnormal loop exit edge.\n");
    1271                 :            : 
    1272                 :      61241 :   *loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
    1273                 :            :                                      number_of_iterationsm1);
    1274                 :      61241 :   if (!*loop_cond)
    1275                 :         14 :     return opt_result::failure_at
    1276                 :            :       (vect_location,
    1277                 :         14 :        "not vectorized: complicated exit condition.\n");
    1278                 :            : 
    1279                 :      61227 :   if (integer_zerop (*assumptions)
    1280                 :      61227 :       || !*number_of_iterations
    1281                 :     122454 :       || chrec_contains_undetermined (*number_of_iterations))
    1282                 :       4378 :     return opt_result::failure_at
    1283                 :            :       (*loop_cond,
    1284                 :       4378 :        "not vectorized: number of iterations cannot be computed.\n");
    1285                 :            : 
    1286                 :      56849 :   if (integer_zerop (*number_of_iterations))
    1287                 :          4 :     return opt_result::failure_at
    1288                 :            :       (*loop_cond,
    1289                 :          4 :        "not vectorized: number of iterations = 0.\n");
    1290                 :            : 
    1291                 :      56845 :   return opt_result::success ();
    1292                 :            : }
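/* Editorial note (not part of the measured source): hedged examples of
   the loop-form checks above.  A single-exit counted loop such as

     for (i = 0; i < n; i++)
       a[i] = b[i] * c;

   passes (two BBs, empty latch, computable niter), whereas an early
   exit such as

     for (i = 0; i < n; i++)
       if (a[i] == key)
         break;

   is rejected by the control-flow / multiple-exit checks above.  */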
    1293                 :            : 
    1294                 :            : /* Analyze LOOP form and return a loop_vec_info if it is of suitable form.  */
    1295                 :            : 
    1296                 :            : opt_loop_vec_info
    1297                 :      88256 : vect_analyze_loop_form (class loop *loop, vec_info_shared *shared)
    1298                 :            : {
    1299                 :      88256 :   tree assumptions, number_of_iterations, number_of_iterationsm1;
    1300                 :      88256 :   gcond *loop_cond, *inner_loop_cond = NULL;
    1301                 :            : 
    1302                 :      88256 :   opt_result res
    1303                 :            :     = vect_analyze_loop_form_1 (loop, &loop_cond,
    1304                 :            :                                 &assumptions, &number_of_iterationsm1,
    1305                 :      88256 :                                 &number_of_iterations, &inner_loop_cond);
    1306                 :      88256 :   if (!res)
    1307                 :      33573 :     return opt_loop_vec_info::propagate_failure (res);
    1308                 :            : 
    1309                 :      54683 :   loop_vec_info loop_vinfo = new _loop_vec_info (loop, shared);
    1310                 :      54683 :   LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
    1311                 :      54683 :   LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
    1312                 :      54683 :   LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
    1313                 :      54683 :   if (!integer_onep (assumptions))
    1314                 :            :     {
    1315                 :            :       /* We consider to vectorize this loop by versioning it under
    1316                 :            :          some assumptions.  In order to do this, we need to clear
    1317                 :            :          existing information computed by scev and niter analyzer.  */
    1318                 :        492 :       scev_reset_htab ();
    1319                 :        492 :       free_numbers_of_iterations_estimates (loop);
    1320                 :            :       /* Also set flag for this loop so that following scev and niter
    1321                 :            :          analysis are done under the assumptions.  */
    1322                 :        492 :       loop_constraint_set (loop, LOOP_C_FINITE);
    1323                 :            :       /* Also record the assumptions for versioning.  */
    1324                 :        492 :       LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
    1325                 :            :     }
    1326                 :            : 
    1327                 :      54683 :   if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    1328                 :            :     {
    1329                 :      28455 :       if (dump_enabled_p ())
    1330                 :            :         {
    1331                 :       2510 :           dump_printf_loc (MSG_NOTE, vect_location,
    1332                 :            :                            "Symbolic number of iterations is ");
    1333                 :       2510 :           dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
    1334                 :       2510 :           dump_printf (MSG_NOTE, "\n");
    1335                 :            :         }
    1336                 :            :     }
    1337                 :            : 
    1338                 :      54683 :   stmt_vec_info loop_cond_info = loop_vinfo->lookup_stmt (loop_cond);
    1339                 :      54683 :   STMT_VINFO_TYPE (loop_cond_info) = loop_exit_ctrl_vec_info_type;
    1340                 :      54683 :   if (inner_loop_cond)
    1341                 :            :     {
    1342                 :       1972 :       stmt_vec_info inner_loop_cond_info
    1343                 :       1972 :         = loop_vinfo->lookup_stmt (inner_loop_cond);
    1344                 :       1972 :       STMT_VINFO_TYPE (inner_loop_cond_info) = loop_exit_ctrl_vec_info_type;
    1345                 :            :     }
    1346                 :            : 
    1347                 :      54683 :   gcc_assert (!loop->aux);
    1348                 :      54683 :   loop->aux = loop_vinfo;
    1349                 :      54683 :   return opt_loop_vec_info::success (loop_vinfo);
    1350                 :            : }
    1351                 :            : 
    1352                 :            : 
    1353                 :            : 
    1354                 :            : /* Scan the loop stmts and, depending on whether there are any (non-)SLP
    1355                 :            :    statements, update the vectorization factor.  */
    1356                 :            : 
    1357                 :            : static void
    1358                 :       2698 : vect_update_vf_for_slp (loop_vec_info loop_vinfo)
    1359                 :            : {
    1360                 :       2698 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    1361                 :       2698 :   basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
    1362                 :       2698 :   int nbbs = loop->num_nodes;
    1363                 :       2698 :   poly_uint64 vectorization_factor;
    1364                 :       2698 :   int i;
    1365                 :            : 
    1366                 :       2698 :   DUMP_VECT_SCOPE ("vect_update_vf_for_slp");
    1367                 :            : 
    1368                 :       2698 :   vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    1369                 :       2698 :   gcc_assert (known_ne (vectorization_factor, 0U));
    1370                 :            : 
    1371                 :            :   /* If all the stmts in the loop can be SLPed, we perform only SLP, and the
    1372                 :            :      vectorization factor of the loop is the unrolling factor required by the
    1373                 :            :      SLP instances.  If that unrolling factor is 1, we say that we perform
    1374                 :            :      pure SLP on the loop - cross-iteration parallelism is not
    1375                 :            :      exploited.  */
    1376                 :            :   bool only_slp_in_loop = true;
    1377                 :       8106 :   for (i = 0; i < nbbs; i++)
    1378                 :            :     {
    1379                 :       5408 :       basic_block bb = bbs[i];
    1380                 :      14108 :       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
    1381                 :       8700 :            gsi_next (&si))
    1382                 :            :         {
    1383                 :       8700 :           stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (si.phi ());
    1384                 :       8700 :           if (!stmt_info)
    1385                 :          0 :             continue;
    1386                 :       8700 :           if ((STMT_VINFO_RELEVANT_P (stmt_info)
    1387                 :       7105 :                || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
    1388                 :       1595 :               && !PURE_SLP_STMT (stmt_info))
    1389                 :            :             /* STMT needs both SLP and loop-based vectorization.  */
    1390                 :         73 :             only_slp_in_loop = false;
    1391                 :            :         }
    1392                 :      90470 :       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
    1393                 :      79654 :            gsi_next (&si))
    1394                 :            :         {
    1395                 :      79654 :           stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
    1396                 :      79654 :           stmt_info = vect_stmt_to_vectorize (stmt_info);
    1397                 :      79654 :           if ((STMT_VINFO_RELEVANT_P (stmt_info)
    1398                 :      40085 :                || VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
    1399                 :      39569 :               && !PURE_SLP_STMT (stmt_info))
    1400                 :            :             /* STMT needs both SLP and loop-based vectorization.  */
    1401                 :       1548 :             only_slp_in_loop = false;
    1402                 :            :         }
    1403                 :            :     }
    1404                 :            : 
    1405                 :       2698 :   if (only_slp_in_loop)
    1406                 :            :     {
    1407                 :       2316 :       if (dump_enabled_p ())
    1408                 :        795 :         dump_printf_loc (MSG_NOTE, vect_location,
    1409                 :            :                          "Loop contains only SLP stmts\n");
    1410                 :       2316 :       vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
    1411                 :            :     }
    1412                 :            :   else
    1413                 :            :     {
    1414                 :        382 :       if (dump_enabled_p ())
    1415                 :        227 :         dump_printf_loc (MSG_NOTE, vect_location,
    1416                 :            :                          "Loop contains SLP and non-SLP stmts\n");
    1417                 :            :       /* Both the vectorization factor and unroll factor have the form
    1418                 :            :          GET_MODE_SIZE (loop_vinfo->vector_mode) * X for some rational X,
    1419                 :            :          so they must have a common multiple.  */
    1420                 :        382 :       vectorization_factor
    1421                 :            :         = force_common_multiple (vectorization_factor,
    1422                 :        382 :                                  LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
    1423                 :            :     }
    1424                 :            : 
    1425                 :       2698 :   LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
    1426                 :       2698 :   if (dump_enabled_p ())
    1427                 :            :     {
    1428                 :       1022 :       dump_printf_loc (MSG_NOTE, vect_location,
    1429                 :            :                        "Updating vectorization factor to ");
    1430                 :       1022 :       dump_dec (MSG_NOTE, vectorization_factor);
    1431                 :       1022 :       dump_printf (MSG_NOTE, ".\n");
    1432                 :            :     }
    1433                 :       2698 : }
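/* Editorial note (not part of the measured source): a hedged worked
   example for the mixed SLP/non-SLP case above.  If loop-based
   analysis chose a vectorization factor of 4 and the SLP instances
   require an unrolling factor of 2, their common multiple keeps the
   factor at 4; with an SLP unrolling factor of 3 it would instead be
   raised to 12.  */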
    1434                 :            : 
    1435                 :            : /* Return true if STMT_INFO describes a double reduction phi and if
    1436                 :            :    the other phi in the reduction is also relevant for vectorization.
    1437                 :            :    This rejects cases such as:
    1438                 :            : 
    1439                 :            :       outer1:
    1440                 :            :         x_1 = PHI <x_3(outer2), ...>;
    1441                 :            :         ...
    1442                 :            : 
    1443                 :            :       inner:
    1444                 :            :         x_2 = ...;
    1445                 :            :         ...
    1446                 :            : 
    1447                 :            :       outer2:
    1448                 :            :         x_3 = PHI <x_2(inner)>;
    1449                 :            : 
    1450                 :            :    if nothing in x_2 or elsewhere makes x_1 relevant.  */
    1451                 :            : 
    1452                 :            : static bool
    1453                 :         47 : vect_active_double_reduction_p (stmt_vec_info stmt_info)
    1454                 :            : {
    1455                 :         47 :   if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
    1456                 :            :     return false;
    1457                 :            : 
    1458                 :          0 :   return STMT_VINFO_RELEVANT_P (STMT_VINFO_REDUC_DEF (stmt_info));
    1459                 :            : }
    1460                 :            : 
    1461                 :            : /* Function vect_analyze_loop_operations.
    1462                 :            : 
    1463                 :            :    Scan the loop stmts and make sure they are all vectorizable.  */
    1464                 :            : 
    1465                 :            : static opt_result
    1466                 :      33506 : vect_analyze_loop_operations (loop_vec_info loop_vinfo)
    1467                 :            : {
    1468                 :      33506 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    1469                 :      33506 :   basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
    1470                 :      33506 :   int nbbs = loop->num_nodes;
    1471                 :      33506 :   int i;
    1472                 :      33506 :   stmt_vec_info stmt_info;
    1473                 :      33506 :   bool need_to_vectorize = false;
    1474                 :      33506 :   bool ok;
    1475                 :            : 
    1476                 :      33506 :   DUMP_VECT_SCOPE ("vect_analyze_loop_operations");
    1477                 :            : 
    1478                 :      67012 :   auto_vec<stmt_info_for_cost> cost_vec;
    1479                 :            : 
    1480                 :      88635 :   for (i = 0; i < nbbs; i++)
    1481                 :            :     {
    1482                 :      61543 :       basic_block bb = bbs[i];
    1483                 :            : 
    1484                 :     163607 :       for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
    1485                 :     102064 :            gsi_next (&si))
    1486                 :            :         {
    1487                 :     102592 :           gphi *phi = si.phi ();
    1488                 :     102592 :           ok = true;
    1489                 :            : 
    1490                 :     102592 :           stmt_info = loop_vinfo->lookup_stmt (phi);
    1491                 :     102592 :           if (dump_enabled_p ())
    1492                 :      29414 :             dump_printf_loc (MSG_NOTE, vect_location, "examining phi: %G", phi);
    1493                 :     205184 :           if (virtual_operand_p (gimple_phi_result (phi)))
    1494                 :      25813 :             continue;
    1495                 :            : 
    1496                 :            :           /* Inner-loop loop-closed exit phi in outer-loop vectorization
    1497                 :            :              (i.e., a phi in the tail of the outer-loop).  */
    1498                 :      76779 :           if (! is_loop_header_bb_p (bb))
    1499                 :            :             {
    1500                 :            :               /* FORNOW: we currently don't support the case where these phis
    1501                 :            :                  are not used in the outer loop (unless it is a double reduction,
    1502                 :            :                  i.e., this phi is vect_reduction_def), because that case
    1503                 :            :                  requires actually doing something here.  */
    1504                 :        254 :               if (STMT_VINFO_LIVE_P (stmt_info)
    1505                 :        295 :                   && !vect_active_double_reduction_p (stmt_info))
    1506                 :          6 :                 return opt_result::failure_at (phi,
    1507                 :            :                                                "Unsupported loop-closed phi"
    1508                 :          6 :                                                " in outer-loop.\n");
    1509                 :            : 
    1510                 :            :               /* If PHI is used in the outer loop, we check that its operand
    1511                 :            :                  is defined in the inner loop.  */
    1512                 :        248 :               if (STMT_VINFO_RELEVANT_P (stmt_info))
    1513                 :            :                 {
    1514                 :        244 :                   tree phi_op;
    1515                 :            : 
    1516                 :        244 :                   if (gimple_phi_num_args (phi) != 1)
    1517                 :          0 :                     return opt_result::failure_at (phi, "unsupported phi");
    1518                 :            : 
    1519                 :        244 :                   phi_op = PHI_ARG_DEF (phi, 0);
    1520                 :        244 :                   stmt_vec_info op_def_info = loop_vinfo->lookup_def (phi_op);
    1521                 :        244 :                   if (!op_def_info)
    1522                 :          0 :                     return opt_result::failure_at (phi, "unsupported phi\n");
    1523                 :            : 
    1524                 :        244 :                   if (STMT_VINFO_RELEVANT (op_def_info) != vect_used_in_outer
    1525                 :        244 :                       && (STMT_VINFO_RELEVANT (op_def_info)
    1526                 :            :                           != vect_used_in_outer_by_reduction))
    1527                 :          0 :                     return opt_result::failure_at (phi, "unsupported phi\n");
    1528                 :            : 
    1529                 :        244 :                   if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_internal_def
    1530                 :         41 :                        || (STMT_VINFO_DEF_TYPE (stmt_info)
    1531                 :            :                            == vect_double_reduction_def))
    1532                 :        285 :                       && !vectorizable_lc_phi (stmt_info, NULL, NULL))
    1533                 :          0 :                     return opt_result::failure_at (phi, "unsupported phi\n");
    1534                 :            :                 }
    1535                 :            : 
    1536                 :        248 :               continue;
    1537                 :            :             }
    1538                 :            : 
    1539                 :      76525 :           gcc_assert (stmt_info);
    1540                 :            : 
    1541                 :      76525 :           if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
    1542                 :      71437 :                || STMT_VINFO_LIVE_P (stmt_info))
    1543                 :       5112 :               && STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
    1544                 :            :             /* A scalar-dependence cycle that we don't support.  */
    1545                 :         10 :             return opt_result::failure_at (phi,
    1546                 :            :                                            "not vectorized:"
    1547                 :         10 :                                            " scalar dependence cycle.\n");
    1548                 :            : 
    1549                 :      76515 :           if (STMT_VINFO_RELEVANT_P (stmt_info))
    1550                 :            :             {
    1551                 :      24882 :               need_to_vectorize = true;
    1552                 :      24882 :               if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
    1553                 :       6378 :                   && ! PURE_SLP_STMT (stmt_info))
    1554                 :       6169 :                 ok = vectorizable_induction (stmt_info, NULL, NULL, NULL,
    1555                 :            :                                              &cost_vec);
    1556                 :      18713 :               else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
    1557                 :            :                         || (STMT_VINFO_DEF_TYPE (stmt_info)
    1558                 :            :                             == vect_double_reduction_def)
    1559                 :      18713 :                         || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
    1560                 :      18504 :                        && ! PURE_SLP_STMT (stmt_info))
    1561                 :      17430 :                 ok = vectorizable_reduction (stmt_info, NULL, NULL, &cost_vec);
    1562                 :            :             }
    1563                 :            : 
    1564                 :            :           /* SLP PHIs are tested by vect_slp_analyze_node_operations.  */
    1565                 :      23599 :           if (ok
    1566                 :      76003 :               && STMT_VINFO_LIVE_P (stmt_info)
    1567                 :         17 :               && !PURE_SLP_STMT (stmt_info))
    1568                 :         14 :             ok = vectorizable_live_operation (stmt_info, NULL, NULL, NULL,
    1569                 :            :                                               -1, false, &cost_vec);
    1570                 :            : 
    1571                 :      76515 :           if (!ok)
    1572                 :        512 :             return opt_result::failure_at (phi,
    1573                 :            :                                            "not vectorized: relevant phi not "
    1574                 :            :                                            "supported: %G",
    1575                 :        512 :                                            static_cast <gimple *> (phi));
    1576                 :            :         }
    1577                 :            : 
    1578                 :     584577 :       for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
    1579                 :     462547 :            gsi_next (&si))
    1580                 :            :         {
    1581                 :     468433 :           gimple *stmt = gsi_stmt (si);
    1582                 :     468433 :           if (!gimple_clobber_p (stmt))
    1583                 :            :             {
    1584                 :     468393 :               opt_result res
    1585                 :            :                 = vect_analyze_stmt (loop_vinfo->lookup_stmt (stmt),
    1586                 :            :                                      &need_to_vectorize,
    1587                 :     468393 :                                      NULL, NULL, &cost_vec);
    1588                 :     468393 :               if (!res)
    1589                 :       5886 :                 return res;
    1590                 :            :             }
    1591                 :            :         }
    1592                 :            :     } /* bbs */
    1593                 :            : 
    1594                 :      27092 :   add_stmt_costs (loop_vinfo->target_cost_data, &cost_vec);
    1595                 :            : 
    1596                 :            :   /* All operations in the loop are either irrelevant (they deal with
    1597                 :            :      loop control, or are dead), or only used outside the loop and can
    1598                 :            :      be moved out of the loop (e.g. invariants, inductions).  The loop
    1599                 :            :      can be optimized away by scalar optimizations.  We're better off
    1600                 :            :      not touching this loop.  */
    1601                 :      27092 :   if (!need_to_vectorize)
    1602                 :            :     {
    1603                 :         12 :       if (dump_enabled_p ())
    1604                 :          0 :         dump_printf_loc (MSG_NOTE, vect_location,
    1605                 :            :                          "All the computation can be taken out of the loop.\n");
    1606                 :         12 :       return opt_result::failure_at
    1607                 :            :         (vect_location,
    1608                 :         12 :          "not vectorized: redundant loop. no profit to vectorize.\n");
    1609                 :            :     }
    1610                 :            : 
    1611                 :      27080 :   return opt_result::success ();
    1612                 :            : }
    1613                 :            : 
    1614                 :            : /* Analyze the cost of the loop described by LOOP_VINFO.  Decide if it
    1615                 :            :    is worthwhile to vectorize.  Return 1 if definitely yes, 0 if
    1616                 :            :    definitely no, or -1 if it's worth retrying.  */
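                         :            : /* For instance (a sketch, assuming a cost-model VF of 4): a loop such as
                         :            :
                         :            :      for (i = 0; i < 3; i++) a[i] += b[i];
                         :            :
                         :            :    has a known iteration count of 3, below the vectorization factor, so the
                         :            :    first check below returns 0 ("iteration count smaller than vectorization
                         :            :    factor").  */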
    1617                 :            : 
    1618                 :            : static int
    1619                 :      26992 : vect_analyze_loop_costing (loop_vec_info loop_vinfo)
    1620                 :            : {
    1621                 :      26992 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    1622                 :      26992 :   unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
    1623                 :            : 
    1624                 :            :   /* Only fully-masked loops can have iteration counts less than the
    1625                 :            :      vectorization factor.  */
    1626                 :      26992 :   if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    1627                 :            :     {
    1628                 :      26992 :       HOST_WIDE_INT max_niter;
    1629                 :            : 
    1630                 :      26992 :       if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    1631                 :      13233 :         max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
    1632                 :            :       else
    1633                 :      13759 :         max_niter = max_stmt_executions_int (loop);
    1634                 :            : 
    1635                 :      26992 :       if (max_niter != -1
    1636                 :      25802 :           && (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
    1637                 :            :         {
    1638                 :        767 :           if (dump_enabled_p ())
    1639                 :        164 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1640                 :            :                              "not vectorized: iteration count smaller than "
    1641                 :            :                              "vectorization factor.\n");
    1642                 :        767 :           return 0;
    1643                 :            :         }
    1644                 :            :     }
    1645                 :            : 
    1646                 :      26225 :   int min_profitable_iters, min_profitable_estimate;
    1647                 :      26225 :   vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
    1648                 :            :                                       &min_profitable_estimate);
    1649                 :            : 
    1650                 :      26225 :   if (min_profitable_iters < 0)
    1651                 :            :     {
    1652                 :       3014 :       if (dump_enabled_p ())
    1653                 :          1 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1654                 :            :                          "not vectorized: vectorization not profitable.\n");
    1655                 :       3014 :       if (dump_enabled_p ())
    1656                 :          1 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1657                 :            :                          "not vectorized: vector version will never be "
    1658                 :            :                          "profitable.\n");
    1659                 :       3014 :       return -1;
    1660                 :            :     }
    1661                 :            : 
    1662                 :      23211 :   int min_scalar_loop_bound = (param_min_vect_loop_bound
    1663                 :      23211 :                                * assumed_vf);
    1664                 :            : 
    1665                 :            :   /* Use the cost model only if it is more conservative than the
    1666                 :            :      user-specified threshold.  */
    1667                 :      23211 :   unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
    1668                 :            :                                     min_profitable_iters);
    1669                 :            : 
    1670                 :      23211 :   LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
    1671                 :            : 
    1672                 :      12098 :   if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
    1673                 :      35309 :       && LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
    1674                 :            :     {
    1675                 :         34 :       if (dump_enabled_p ())
    1676                 :          0 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1677                 :            :                          "not vectorized: vectorization not profitable.\n");
    1678                 :         34 :       if (dump_enabled_p ())
    1679                 :          0 :         dump_printf_loc (MSG_NOTE, vect_location,
    1680                 :            :                          "not vectorized: iteration count smaller than user "
    1681                 :            :                          "specified loop bound parameter or minimum profitable "
    1682                 :            :                          "iterations (whichever is more conservative).\n");
    1683                 :         34 :       return 0;
    1684                 :            :     }
    1685                 :            : 
    1686                 :            :   /* The static profitability threshold min_profitable_estimate includes
    1687                 :            :      the cost of having to check at runtime whether the scalar loop
    1688                 :            :      should be used instead.  If it turns out that we don't need or want
    1689                 :            :      such a check, the threshold we should use for the static estimate
    1690                 :            :      is simply the point at which the vector loop becomes more profitable
    1691                 :            :      than the scalar loop.  */
    1692                 :      23177 :   if (min_profitable_estimate > min_profitable_iters
    1693                 :       4127 :       && !LOOP_REQUIRES_VERSIONING (loop_vinfo)
    1694                 :       4047 :       && !LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo)
    1695                 :       4047 :       && !LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
    1696                 :      27224 :       && !vect_apply_runtime_profitability_check_p (loop_vinfo))
    1697                 :            :     {
    1698                 :          0 :       if (dump_enabled_p ())
    1699                 :          0 :         dump_printf_loc (MSG_NOTE, vect_location, "no need for a runtime"
    1700                 :            :                          " choice between the scalar and vector loops\n");
    1701                 :          0 :       min_profitable_estimate = min_profitable_iters;
    1702                 :            :     }
    1703                 :            : 
    1704                 :      23177 :   HOST_WIDE_INT estimated_niter;
    1705                 :            : 
    1706                 :            :   /* If we are vectorizing an epilogue then we know the maximum number of
    1707                 :            :      scalar iterations it will cover is at least one lower than the
    1708                 :            :      vectorization factor of the main loop.  */
    1709                 :      23177 :   if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    1710                 :       4518 :     estimated_niter
    1711                 :       4518 :       = vect_vf_for_cost (LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo)) - 1;
    1712                 :            :   else
    1713                 :            :     {
    1714                 :      18659 :       estimated_niter = estimated_stmt_executions_int (loop);
    1715                 :      18659 :       if (estimated_niter == -1)
    1716                 :       7505 :         estimated_niter = likely_max_stmt_executions_int (loop);
    1717                 :            :     }
    1718                 :      23177 :   if (estimated_niter != -1
    1719                 :      22550 :       && ((unsigned HOST_WIDE_INT) estimated_niter
    1720                 :      22550 :           < MAX (th, (unsigned) min_profitable_estimate)))
    1721                 :            :     {
    1722                 :       1557 :       if (dump_enabled_p ())
    1723                 :          9 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1724                 :            :                          "not vectorized: estimated iteration count too "
    1725                 :            :                          "small.\n");
    1726                 :       1557 :       if (dump_enabled_p ())
    1727                 :          9 :         dump_printf_loc (MSG_NOTE, vect_location,
    1728                 :            :                          "not vectorized: estimated iteration count smaller "
    1729                 :            :                          "than specified loop bound parameter or minimum "
    1730                 :            :                          "profitable iterations (whichever is more "
    1731                 :            :                          "conservative).\n");
    1732                 :       1557 :       return -1;
    1733                 :            :     }
    1734                 :            : 
    1735                 :            :   return 1;
    1736                 :            : }
    1737                 :            : 
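                         :            : /* Walk all statements of LOOP (whose basic blocks are given in BBS),
                         :            :    count the non-debug statements in *N_STMTS and record every data
                         :            :    reference found in *DATAREFS.  Calls to "#pragma omp declare simd"
                         :            :    clones that have no data references in the call itself are skipped
                         :            :    rather than treated as failures when the loop has a safelen.  Fail
                         :            :    when a data reference cannot be analyzed or when the number of data
                         :            :    references exceeds the loop-max-datarefs-for-datadeps limit.  */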
    1738                 :            : static opt_result
    1739                 :      35599 : vect_get_datarefs_in_loop (loop_p loop, basic_block *bbs,
    1740                 :            :                            vec<data_reference_p> *datarefs,
    1741                 :            :                            unsigned int *n_stmts)
    1742                 :            : {
    1743                 :      35599 :   *n_stmts = 0;
    1744                 :     102988 :   for (unsigned i = 0; i < loop->num_nodes; i++)
    1745                 :      71639 :     for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
    1746                 :     638890 :          !gsi_end_p (gsi); gsi_next (&gsi))
    1747                 :            :       {
    1748                 :     571501 :         gimple *stmt = gsi_stmt (gsi);
    1749                 :     571501 :         if (is_gimple_debug (stmt))
    1750                 :      93797 :           continue;
    1751                 :     477859 :         ++(*n_stmts);
    1752                 :     477859 :         opt_result res = vect_find_stmt_data_reference (loop, stmt, datarefs);
    1753                 :     477859 :         if (!res)
    1754                 :            :           {
    1755                 :       4405 :             if (is_gimple_call (stmt) && loop->safelen)
    1756                 :            :               {
    1757                 :        300 :                 tree fndecl = gimple_call_fndecl (stmt), op;
    1758                 :        300 :                 if (fndecl != NULL_TREE)
    1759                 :            :                   {
    1760                 :        267 :                     cgraph_node *node = cgraph_node::get (fndecl);
    1761                 :        267 :                     if (node != NULL && node->simd_clones != NULL)
    1762                 :            :                       {
    1763                 :        156 :                         unsigned int j, n = gimple_call_num_args (stmt);
    1764                 :        487 :                         for (j = 0; j < n; j++)
    1765                 :            :                           {
    1766                 :        332 :                             op = gimple_call_arg (stmt, j);
    1767                 :        332 :                             if (DECL_P (op)
    1768                 :        332 :                                 || (REFERENCE_CLASS_P (op)
    1769                 :          0 :                                     && get_base_address (op)))
    1770                 :            :                               break;
    1771                 :            :                           }
    1772                 :        156 :                         op = gimple_call_lhs (stmt);
    1773                 :            :                         /* Ignore #pragma omp declare simd functions
    1774                 :            :                            if they don't have data references in the
    1775                 :            :                            call stmt itself.  */
    1776                 :        311 :                         if (j == n
    1777                 :        156 :                             && !(op
    1778                 :        145 :                                  && (DECL_P (op)
    1779                 :        145 :                                      || (REFERENCE_CLASS_P (op)
    1780                 :          0 :                                          && get_base_address (op)))))
    1781                 :        155 :                           continue;
    1782                 :            :                       }
    1783                 :            :                   }
    1784                 :            :               }
    1785                 :       4250 :             return res;
    1786                 :            :           }
    1787                 :            :         /* If dependence analysis will give up due to the limit on the
    1788                 :            :            number of datarefs stop here and fail fatally.  */
    1789                 :     473454 :         if (datarefs->length ()
    1790                 :     473454 :             > (unsigned)param_loop_max_datarefs_for_datadeps)
    1791                 :          0 :           return opt_result::failure_at (stmt, "exceeded param "
    1792                 :          0 :                                          "loop-max-datarefs-for-datadeps\n");
    1793                 :            :       }
    1794                 :      31349 :   return opt_result::success ();
    1795                 :            : }
    1796                 :            : 
    1797                 :            : /* Look for SLP-only access groups and turn each individual access into its own
    1798                 :            :    group.  */
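                         :            : /* Sketch of the effect (assumed example): an interleaving group such as
                         :            :    the two loads in
                         :            :
                         :            :      x = a[2*i];  y = a[2*i+1];
                         :            :
                         :            :    that was built only for the benefit of SLP is, when its statements end
                         :            :    up not being pure SLP, split below into single-element groups, each with
                         :            :    DR_GROUP_SIZE 1 and (for non-strided accesses) DR_GROUP_GAP set to the
                         :            :    old group size minus one, so every access is handled on its own.  */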
    1799                 :            : static void
    1800                 :      33506 : vect_dissolve_slp_only_groups (loop_vec_info loop_vinfo)
    1801                 :            : {
    1802                 :      33506 :   unsigned int i;
    1803                 :      33506 :   struct data_reference *dr;
    1804                 :            : 
    1805                 :      33506 :   DUMP_VECT_SCOPE ("vect_dissolve_slp_only_groups");
    1806                 :            : 
    1807                 :      33506 :   vec<data_reference_p> datarefs = loop_vinfo->shared->datarefs;
    1808                 :     176280 :   FOR_EACH_VEC_ELT (datarefs, i, dr)
    1809                 :            :     {
    1810                 :     110757 :       gcc_assert (DR_REF (dr));
    1811                 :     110757 :       stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (DR_STMT (dr));
    1812                 :            : 
    1813                 :            :       /* Check if the load is a part of an interleaving chain.  */
    1814                 :     110757 :       if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    1815                 :            :         {
    1816                 :      38456 :           stmt_vec_info first_element = DR_GROUP_FIRST_ELEMENT (stmt_info);
    1817                 :      38456 :           unsigned int group_size = DR_GROUP_SIZE (first_element);
    1818                 :            : 
    1819                 :            :           /* Check if SLP-only groups.  */
    1820                 :      38456 :           if (!STMT_SLP_TYPE (stmt_info)
    1821                 :      15335 :               && STMT_VINFO_SLP_VECT_ONLY (first_element))
    1822                 :            :             {
    1823                 :            :               /* Dissolve the group.  */
    1824                 :          0 :               STMT_VINFO_SLP_VECT_ONLY (first_element) = false;
    1825                 :            : 
    1826                 :          0 :               stmt_vec_info vinfo = first_element;
    1827                 :          0 :               while (vinfo)
    1828                 :            :                 {
    1829                 :          0 :                   stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (vinfo);
    1830                 :          0 :                   DR_GROUP_FIRST_ELEMENT (vinfo) = vinfo;
    1831                 :          0 :                   DR_GROUP_NEXT_ELEMENT (vinfo) = NULL;
    1832                 :          0 :                   DR_GROUP_SIZE (vinfo) = 1;
    1833                 :          0 :                   if (STMT_VINFO_STRIDED_P (first_element))
    1834                 :          0 :                     DR_GROUP_GAP (vinfo) = 0;
    1835                 :            :                   else
    1836                 :          0 :                     DR_GROUP_GAP (vinfo) = group_size - 1;
    1837                 :            :                   vinfo = next;
    1838                 :            :                 }
    1839                 :            :             }
    1840                 :            :         }
    1841                 :            :     }
    1842                 :      33506 : }
    1843                 :            : 
    1844                 :            : 
    1845                 :            : /* Decides whether we need to create an epilogue loop to handle
    1846                 :            :    remaining scalar iterations and sets PEELING_FOR_NITERS accordingly.  */
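                         :            : /* For example (a sketch, assuming a vectorization factor of 4): with a
                         :            :    known iteration count of 10 and no peeling for alignment or gaps, 10 is
                         :            :    not a multiple of 4, so PEELING_FOR_NITER is set and an epilogue loop
                         :            :    will handle the remaining 2 scalar iterations.  */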
    1847                 :            : 
    1848                 :            : void
    1849                 :      24888 : determine_peel_for_niter (loop_vec_info loop_vinfo)
    1850                 :            : {
    1851                 :      24888 :   LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
    1852                 :            : 
    1853                 :      24888 :   unsigned HOST_WIDE_INT const_vf;
    1854                 :      24888 :   HOST_WIDE_INT max_niter
    1855                 :      24888 :     = likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
    1856                 :            : 
    1857                 :      24888 :   unsigned th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
    1858                 :      24888 :   if (!th && LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo))
    1859                 :       2371 :     th = LOOP_VINFO_COST_MODEL_THRESHOLD (LOOP_VINFO_ORIG_LOOP_INFO
    1860                 :            :                                           (loop_vinfo));
    1861                 :            : 
    1862                 :      24888 :   if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    1863                 :            :     /* The main loop handles all iterations.  */
    1864                 :          0 :     LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
    1865                 :      24888 :   else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
    1866                 :      12515 :            && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0)
    1867                 :            :     {
    1868                 :            :       /* Work out the (constant) number of iterations that need to be
    1869                 :            :          peeled for reasons other than niters.  */
    1870                 :      12510 :       unsigned int peel_niter = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
    1871                 :      12510 :       if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    1872                 :        174 :         peel_niter += 1;
    1873                 :      12510 :       if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo) - peel_niter,
    1874                 :            :                        LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
    1875                 :       2818 :         LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
    1876                 :            :     }
    1877                 :      12378 :   else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
    1878                 :            :            /* ??? When peeling for gaps but not alignment, we could
    1879                 :            :               try to check whether the (variable) niters is known to be
    1880                 :            :               VF * N + 1.  That's something of a niche case though.  */
    1881                 :      12326 :            || LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
    1882                 :      12039 :            || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
    1883                 :      24417 :            || ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
    1884                 :      24078 :                 < (unsigned) exact_log2 (const_vf))
    1885                 :            :                /* In case of versioning, check if the maximum number of
    1886                 :            :                   iterations is greater than th.  If they are identical,
    1887                 :            :                   the epilogue is unnecessary.  */
    1888                 :      11870 :                && (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
    1889                 :       1632 :                    || ((unsigned HOST_WIDE_INT) max_niter
    1890                 :       1632 :                        > (th / const_vf) * const_vf))))
    1891                 :      12209 :     LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
    1892                 :      24888 : }
    1893                 :            : 
    1894                 :            : 
    1895                 :            : /* Function vect_analyze_loop_2.
    1896                 :            : 
    1897                 :            :    Apply a set of analyses on LOOP, and create a loop_vec_info struct
    1898                 :            :    for it.  The different analyses will record information in the
    1899                 :            :    loop_vec_info struct.  */
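                         :            :    Roughly, the steps below are: gather and analyze the data references,
                         :            :    classify scalar cycles and recognize patterns, analyze access patterns
                         :            :    and dependences, determine the vectorization factor, decide on SLP,
                         :            :    analyze alignment and prune runtime alias checks, verify all remaining
                         :            :    operations, decide on full masking, apply the cost model, and work out
                         :            :    peeling and versioning thresholds.  On a non-fatal failure with SLP
                         :            :    enabled the analysis is retried once with SLP forced off (the "again"
                         :            :    path).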
    1900                 :            : static opt_result
    1901                 :      53969 : vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal, unsigned *n_stmts)
    1902                 :            : {
    1903                 :      53969 :   opt_result ok = opt_result::success ();
    1904                 :      53969 :   int res;
    1905                 :      53969 :   unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
    1906                 :      53969 :   poly_uint64 min_vf = 2;
    1907                 :      53969 :   loop_vec_info orig_loop_vinfo = NULL;
    1908                 :            : 
    1909                 :            :   /* If we are dealing with an epilogue then orig_loop_vinfo points to the
    1910                 :            :      loop_vec_info of the first vectorized loop.  */
    1911                 :      53969 :   if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    1912                 :       8221 :     orig_loop_vinfo = LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo);
    1913                 :            :   else
    1914                 :            :     orig_loop_vinfo = loop_vinfo;
    1915                 :      53969 :   gcc_assert (orig_loop_vinfo);
    1916                 :            : 
    1917                 :            :   /* The first group of checks is independent of the vector size.  */
    1918                 :      53969 :   fatal = true;
    1919                 :            : 
    1920                 :      53969 :   if (LOOP_VINFO_SIMD_IF_COND (loop_vinfo)
    1921                 :      53969 :       && integer_zerop (LOOP_VINFO_SIMD_IF_COND (loop_vinfo)))
    1922                 :          5 :     return opt_result::failure_at (vect_location,
    1923                 :          5 :                                    "not vectorized: simd if(0)\n");
    1924                 :            : 
    1925                 :            :   /* Find all data references in the loop (which correspond to vdefs/vuses)
    1926                 :            :      and analyze their evolution in the loop.  */
    1927                 :            : 
    1928                 :      53964 :   loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
    1929                 :            : 
    1930                 :            :   /* Gather the data references and count stmts in the loop.  */
    1931                 :      53964 :   if (!LOOP_VINFO_DATAREFS (loop_vinfo).exists ())
    1932                 :            :     {
    1933                 :      35599 :       opt_result res
    1934                 :            :         = vect_get_datarefs_in_loop (loop, LOOP_VINFO_BBS (loop_vinfo),
    1935                 :            :                                      &LOOP_VINFO_DATAREFS (loop_vinfo),
    1936                 :      35599 :                                      n_stmts);
    1937                 :      35599 :       if (!res)
    1938                 :            :         {
    1939                 :       4250 :           if (dump_enabled_p ())
    1940                 :       1002 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1941                 :            :                              "not vectorized: loop contains function "
    1942                 :            :                              "calls or data references that cannot "
    1943                 :            :                              "be analyzed\n");
    1944                 :       4250 :           return res;
    1945                 :            :         }
    1946                 :      31349 :       loop_vinfo->shared->save_datarefs ();
    1947                 :            :     }
    1948                 :            :   else
    1949                 :      18365 :     loop_vinfo->shared->check_datarefs ();
    1950                 :            : 
    1951                 :            :   /* Analyze the data references and also adjust the minimal
    1952                 :            :      vectorization factor according to the loads and stores.  */
    1953                 :            : 
    1954                 :      49714 :   ok = vect_analyze_data_refs (loop_vinfo, &min_vf, &fatal);
    1955                 :      49714 :   if (!ok)
    1956                 :            :     {
    1957                 :      11125 :       if (dump_enabled_p ())
    1958                 :        804 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1959                 :            :                          "bad data references.\n");
    1960                 :      11125 :       return ok;
    1961                 :            :     }
    1962                 :            : 
    1963                 :            :   /* Classify all cross-iteration scalar data-flow cycles.
    1964                 :            :      Cross-iteration cycles caused by virtual phis are analyzed separately.  */
    1965                 :      38589 :   vect_analyze_scalar_cycles (loop_vinfo);
    1966                 :            : 
    1967                 :      38589 :   vect_pattern_recog (loop_vinfo);
    1968                 :            : 
    1969                 :      38589 :   vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
    1970                 :            : 
    1971                 :            :   /* Analyze the access patterns of the data-refs in the loop (consecutive,
    1972                 :            :      complex, etc.). FORNOW: Only handle consecutive access pattern.  */
    1973                 :            : 
    1974                 :      38589 :   ok = vect_analyze_data_ref_accesses (loop_vinfo);
    1975                 :      38589 :   if (!ok)
    1976                 :            :     {
    1977                 :        980 :       if (dump_enabled_p ())
    1978                 :        228 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1979                 :            :                          "bad data access.\n");
    1980                 :        980 :       return ok;
    1981                 :            :     }
    1982                 :            : 
    1983                 :            :   /* Data-flow analysis to detect stmts that do not need to be vectorized.  */
    1984                 :            : 
    1985                 :      37609 :   ok = vect_mark_stmts_to_be_vectorized (loop_vinfo, &fatal);
    1986                 :      37609 :   if (!ok)
    1987                 :            :     {
    1988                 :       1197 :       if (dump_enabled_p ())
    1989                 :        144 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    1990                 :            :                          "unexpected pattern.\n");
    1991                 :       1197 :       return ok;
    1992                 :            :     }
    1993                 :            : 
    1994                 :            :   /* The rest of the analysis below depends on the vector size in some way, so failures are no longer fatal.  */
    1995                 :      36412 :   fatal = false;
    1996                 :            : 
    1997                 :            :   /* Analyze data dependences between the data-refs in the loop
    1998                 :            :      and adjust the maximum vectorization factor according to
    1999                 :            :      the dependences.
    2000                 :            :      FORNOW: fail at the first data dependence that we encounter.  */
    2001                 :            : 
    2002                 :      36412 :   ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
    2003                 :      36412 :   if (!ok)
    2004                 :            :     {
    2005                 :       1250 :       if (dump_enabled_p ())
    2006                 :        114 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    2007                 :            :                          "bad data dependence.\n");
    2008                 :       1250 :       return ok;
    2009                 :            :     }
    2010                 :      35162 :   if (max_vf != MAX_VECTORIZATION_FACTOR
    2011                 :      35162 :       && maybe_lt (max_vf, min_vf))
    2012                 :         20 :     return opt_result::failure_at (vect_location, "bad data dependence.\n");
    2013                 :      35142 :   LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
    2014                 :            : 
    2015                 :      35142 :   ok = vect_determine_vectorization_factor (loop_vinfo);
    2016                 :      35142 :   if (!ok)
    2017                 :            :     {
    2018                 :       1526 :       if (dump_enabled_p ())
    2019                 :         89 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    2020                 :            :                          "can't determine vectorization factor.\n");
    2021                 :       1526 :       return ok;
    2022                 :            :     }
    2023                 :      33616 :   if (max_vf != MAX_VECTORIZATION_FACTOR
    2024                 :      33616 :       && maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
    2025                 :          0 :     return opt_result::failure_at (vect_location, "bad data dependence.\n");
    2026                 :            : 
    2027                 :            :   /* Compute the scalar iteration cost.  */
    2028                 :      33616 :   vect_compute_single_scalar_iteration_cost (loop_vinfo);
    2029                 :            : 
    2030                 :      33616 :   poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    2031                 :            : 
    2032                 :            :   /* Check the SLP opportunities in the loop, analyze and build SLP trees.  */
    2033                 :      33616 :   ok = vect_analyze_slp (loop_vinfo, *n_stmts);
    2034                 :      33616 :   if (!ok)
    2035                 :          0 :     return ok;
    2036                 :            : 
    2037                 :            :   /* If there are any SLP instances mark them as pure_slp.  */
    2038                 :      33616 :   bool slp = vect_make_slp_decision (loop_vinfo);
    2039                 :      33616 :   if (slp)
    2040                 :            :     {
    2041                 :            :       /* Find stmts that need to be both vectorized and SLPed.  */
    2042                 :       2698 :       vect_detect_hybrid_slp (loop_vinfo);
    2043                 :            : 
    2044                 :            :       /* Update the vectorization factor based on the SLP decision.  */
    2045                 :       2698 :       vect_update_vf_for_slp (loop_vinfo);
    2046                 :            :     }
    2047                 :            : 
    2048                 :      33616 :   bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);
    2049                 :            : 
    2050                 :            :   /* We don't expect to have to roll back to anything other than an empty
    2051                 :            :      set of rgroups.  */
    2052                 :      33616 :   gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
    2053                 :            : 
    2054                 :            :   /* This is the point where we can re-start analysis with SLP forced off.  */
    2055                 :      33924 : start_over:
    2056                 :            : 
    2057                 :            :   /* Now the vectorization factor is final.  */
    2058                 :      33924 :   poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    2059                 :      33924 :   gcc_assert (known_ne (vectorization_factor, 0U));
    2060                 :            : 
    2061                 :      33924 :   if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
    2062                 :            :     {
    2063                 :       7485 :       dump_printf_loc (MSG_NOTE, vect_location,
    2064                 :            :                        "vectorization_factor = ");
    2065                 :       7485 :       dump_dec (MSG_NOTE, vectorization_factor);
    2066                 :       7485 :       dump_printf (MSG_NOTE, ", niters = %wd\n",
    2067                 :       7485 :                    LOOP_VINFO_INT_NITERS (loop_vinfo));
    2068                 :            :     }
    2069                 :            : 
    2070                 :            :   /* Analyze the alignment of the data-refs in the loop.
    2071                 :            :      Fail if a data reference is found that cannot be vectorized.  */
    2072                 :            : 
    2073                 :      33924 :   ok = vect_analyze_data_refs_alignment (loop_vinfo);
    2074                 :      33924 :   if (!ok)
    2075                 :            :     {
    2076                 :          0 :       if (dump_enabled_p ())
    2077                 :          0 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    2078                 :            :                          "bad data alignment.\n");
    2079                 :          0 :       return ok;
    2080                 :            :     }
    2081                 :            : 
    2082                 :            :   /* Prune the list of ddrs to be tested at run-time by versioning for alias.
    2083                 :            :      It is important to call pruning after vect_analyze_data_ref_accesses,
    2084                 :            :      since we use grouping information gathered by interleaving analysis.  */
    2085                 :      33924 :   ok = vect_prune_runtime_alias_test_list (loop_vinfo);
    2086                 :      33924 :   if (!ok)
    2087                 :        230 :     return ok;
    2088                 :            : 
    2089                 :            :   /* Do not invoke vect_enhance_data_refs_alignment for epilogue
    2090                 :            :      vectorization, since we do not want to add extra peeling or
    2091                 :            :      add versioning for alignment.  */
    2092                 :      33694 :   if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
    2093                 :            :     /* This pass will decide on using loop versioning and/or loop peeling in
    2094                 :            :        order to enhance the alignment of data references in the loop.  */
    2095                 :      27330 :     ok = vect_enhance_data_refs_alignment (loop_vinfo);
    2096                 :            :   else
    2097                 :       6364 :     ok = vect_verify_datarefs_alignment (loop_vinfo);
    2098                 :      33694 :   if (!ok)
    2099                 :          0 :     return ok;
    2100                 :            : 
    2101                 :      33694 :   if (slp)
    2102                 :            :     {
    2103                 :            :       /* Analyze operations in the SLP instances.  Note this may
    2104                 :            :          remove unsupported SLP instances which makes the above
    2105                 :            :          SLP kind detection invalid.  */
    2106                 :       2662 :       unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
    2107                 :       2662 :       vect_slp_analyze_operations (loop_vinfo);
    2108                 :       5324 :       if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
    2109                 :            :         {
    2110                 :        188 :           ok = opt_result::failure_at (vect_location,
    2111                 :        188 :                                        "unsupported SLP instances\n");
    2112                 :        188 :           goto again;
    2113                 :            :         }
    2114                 :            :     }
    2115                 :            : 
    2116                 :            :   /* Dissolve SLP-only groups.  */
    2117                 :      33506 :   vect_dissolve_slp_only_groups (loop_vinfo);
    2118                 :            : 
    2119                 :            :   /* Scan all the remaining operations in the loop that are not subject
    2120                 :            :      to SLP and make sure they are vectorizable.  */
    2121                 :      33506 :   ok = vect_analyze_loop_operations (loop_vinfo);
    2122                 :      33506 :   if (!ok)
    2123                 :            :     {
    2124                 :       6426 :       if (dump_enabled_p ())
    2125                 :       2088 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    2126                 :            :                          "bad operation or unsupported loop bound.\n");
    2127                 :       6426 :       return ok;
    2128                 :            :     }
    2129                 :            : 
    2130                 :            :   /* Decide whether to use a fully-masked loop for this vectorization
    2131                 :            :      factor.  */
    2132                 :      27080 :   LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
    2133                 :      54160 :     = (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
    2134                 :      27080 :        && vect_verify_full_masking (loop_vinfo));
    2135                 :      27080 :   if (dump_enabled_p ())
    2136                 :            :     {
    2137                 :       7295 :       if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    2138                 :          0 :         dump_printf_loc (MSG_NOTE, vect_location,
    2139                 :            :                          "using a fully-masked loop.\n");
    2140                 :            :       else
    2141                 :       7295 :         dump_printf_loc (MSG_NOTE, vect_location,
    2142                 :            :                          "not using a fully-masked loop.\n");
    2143                 :            :     }
    2144                 :            : 
    2145                 :            :   /* If an epilogue loop is required because of data accesses with gaps,
    2146                 :            :      one additional iteration needs to be peeled.  Check if there are
    2147                 :            :      enough iterations for vectorization.  */
    2148                 :      27080 :   if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
    2149                 :        513 :       && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
    2150                 :        188 :       && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    2151                 :            :     {
    2152                 :        188 :       poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    2153                 :        188 :       tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
    2154                 :            : 
    2155                 :        188 :       if (known_lt (wi::to_widest (scalar_niters), vf))
    2156                 :         15 :         return opt_result::failure_at (vect_location,
    2157                 :            :                                        "loop does not have enough iterations to"
    2158                 :         15 :                                        " support peeling for gaps.\n");
    2159                 :            :     }
    2160                 :            : 
    2161                 :            :   /* If we're vectorizing an epilogue loop, we either need a fully-masked
    2162                 :            :      loop or a loop that has a lower VF than the main loop.  */
    2163                 :      27065 :   if (LOOP_VINFO_EPILOGUE_P (loop_vinfo)
    2164                 :       4735 :       && !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
    2165                 :      31800 :       && maybe_ge (LOOP_VINFO_VECT_FACTOR (loop_vinfo),
    2166                 :            :                    LOOP_VINFO_VECT_FACTOR (orig_loop_vinfo)))
    2167                 :         73 :     return opt_result::failure_at (vect_location,
    2168                 :            :                                    "Vectorization factor too high for"
    2169                 :         73 :                                    " epilogue loop.\n");
    2170                 :            : 
    2171                 :            :   /* Check the costings of the loop make vectorizing worthwhile.  */
    2172                 :      26992 :   res = vect_analyze_loop_costing (loop_vinfo);
    2173                 :      26992 :   if (res < 0)
    2174                 :            :     {
    2175                 :       4571 :       ok = opt_result::failure_at (vect_location,
    2176                 :       4571 :                                    "Loop costings may not be worthwhile.\n");
    2177                 :       4571 :       goto again;
    2178                 :            :     }
    2179                 :      22421 :   if (!res)
    2180                 :        801 :     return opt_result::failure_at (vect_location,
    2181                 :        801 :                                    "Loop costings not worthwhile.\n");
    2182                 :            : 
    2183                 :      21620 :   determine_peel_for_niter (loop_vinfo);
    2184                 :            :   /* If an epilogue loop is required make sure we can create one.  */
    2185                 :      21620 :   if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
    2186                 :      21209 :       || LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
    2187                 :            :     {
    2188                 :      12190 :       if (dump_enabled_p ())
    2189                 :       2450 :         dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
    2190                 :      12190 :       if (!vect_can_advance_ivs_p (loop_vinfo)
    2191                 :      24380 :           || !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
    2192                 :      12190 :                                            single_exit (LOOP_VINFO_LOOP
    2193                 :            :                                                          (loop_vinfo))))
    2194                 :            :         {
    2195                 :          0 :           ok = opt_result::failure_at (vect_location,
    2196                 :            :                                        "not vectorized: can't create required "
    2197                 :          0 :                                        "epilog loop\n");
    2198                 :          0 :           goto again;
    2199                 :            :         }
    2200                 :            :     }
    2201                 :            : 
    2202                 :            :   /* During peeling, we need to check if number of loop iterations is
    2203                 :            :      enough for both peeled prolog loop and vector loop.  This check
    2204                 :            :      can be merged along with threshold check of loop versioning, so
    2205                 :            :      increase threshold for this case if necessary.
    2206                 :            : 
    2207                 :            :      If we are analyzing an epilogue we still want to check what its
    2208                 :            :      versioning threshold would be.  If we decide to vectorize the epilogues we
    2209                 :            :      will want to use the lowest versioning threshold of all epilogues and main
    2210                 :            :      loop.  This will enable us to enter a vectorized epilogue even when
    2211                 :            :      versioning the loop.  We can't simply check whether the epilogue requires
    2212                 :            :      versioning though since we may have skipped some versioning checks when
    2213                 :            :      analyzing the epilogue.  For instance, checks for alias versioning will be
    2214                 :            :      skipped when dealing with epilogues as we assume we already checked them
    2215                 :            :      for the main loop.  So instead we always check the 'orig_loop_vinfo'.  */
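                         :            : /* A worked sketch (assumed numbers): with 3 prologue iterations peeled for
                         :            :    alignment, a vectorization factor of 4 and peeling for gaps, niters_th
                         :            :    below becomes 3 + 4 + 1 = 8; if the runtime profitability check applies
                         :            :    and the cost-model threshold th is 10, the versioning threshold is
                         :            :    raised to 10.  */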
    2216                 :      21620 :   if (LOOP_REQUIRES_VERSIONING (orig_loop_vinfo))
    2217                 :            :     {
    2218                 :       3511 :       poly_uint64 niters_th = 0;
    2219                 :       3511 :       unsigned int th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
    2220                 :            : 
    2221                 :       3511 :       if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
    2222                 :            :         {
    2223                 :            :           /* Niters for peeled prolog loop.  */
    2224                 :       3511 :           if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
    2225                 :            :             {
    2226                 :          5 :               dr_vec_info *dr_info = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
    2227                 :          5 :               tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
    2228                 :          5 :               niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
    2229                 :            :             }
    2230                 :            :           else
    2231                 :       3506 :             niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
    2232                 :            :         }
    2233                 :            : 
    2234                 :            :       /* Niters for at least one iteration of vectorized loop.  */
    2235                 :       3511 :       if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    2236                 :       3511 :         niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    2237                 :            :       /* One additional iteration because of peeling for gap.  */
    2238                 :       3511 :       if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    2239                 :         89 :         niters_th += 1;
    2240                 :            : 
    2241                 :            :       /*  Use the same condition as vect_transform_loop to decide when to use
    2242                 :            :           the cost to determine a versioning threshold.  */
    2243                 :       3511 :       if (vect_apply_runtime_profitability_check_p (loop_vinfo)
    2244                 :       3511 :           && ordered_p (th, niters_th))
    2245                 :       2594 :         niters_th = ordered_max (poly_uint64 (th), niters_th);
    2246                 :            : 
    2247                 :       3511 :       LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
    2248                 :            :     }
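                          :            : 
                          :            : /* A hedged worked example of the threshold computation above, using
                          :            :    hypothetical numbers rather than data from any particular target:
                          :            :    suppose the cost model threshold TH is 10, the prolog peels 3 scalar
                          :            :    iterations for alignment, the vectorization factor is 4 and peeling
                          :            :    for gaps is needed.  Then
                          :            : 
                          :            :      niters_th = 3 (prolog) + 4 (one vector iteration) + 1 (gap) = 8
                          :            : 
                          :            :    and, assuming the runtime profitability check applies, the versioning
                          :            :    threshold becomes MAX (10, 8) = 10.  */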
    2249                 :            : 
    2250                 :      21620 :   gcc_assert (known_eq (vectorization_factor,
    2251                 :            :                         LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
    2252                 :            : 
    2253                 :            :   /* Ok to vectorize!  */
    2254                 :      21620 :   return opt_result::success ();
    2255                 :            : 
    2256                 :       4759 : again:
    2257                 :            :   /* Ensure that "ok" is false (with an opt_problem if dumping is enabled).  */
    2258                 :       4759 :   gcc_assert (!ok);
    2259                 :            : 
     2260                 :            :   /* Try again with SLP forced off, but if we didn't do any SLP there is
    2261                 :            :      no point in re-trying.  */
    2262                 :       4759 :   if (!slp)
    2263                 :       4184 :     return ok;
    2264                 :            : 
     2265                 :            :   /* If there are reduction chains, re-trying will fail anyway.  */
    2266                 :        575 :   if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
    2267                 :         23 :     return ok;
    2268                 :            : 
    2269                 :            :   /* Likewise if the grouped loads or stores in the SLP cannot be handled
    2270                 :            :      via interleaving or lane instructions.  */
    2271                 :            :   slp_instance instance;
    2272                 :            :   slp_tree node;
    2273                 :            :   unsigned i, j;
    2274                 :        704 :   FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
    2275                 :            :     {
    2276                 :        396 :       stmt_vec_info vinfo;
    2277                 :        396 :       vinfo = SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0];
    2278                 :        396 :       if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
    2279                 :         83 :         continue;
    2280                 :        313 :       vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
    2281                 :        313 :       unsigned int size = DR_GROUP_SIZE (vinfo);
    2282                 :        313 :       tree vectype = STMT_VINFO_VECTYPE (vinfo);
    2283                 :        313 :       if (! vect_store_lanes_supported (vectype, size, false)
    2284                 :        382 :          && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
    2285                 :        626 :          && ! vect_grouped_store_supported (vectype, size))
    2286                 :        244 :         return opt_result::failure_at (vinfo->stmt,
    2287                 :        244 :                                        "unsupported grouped store\n");
    2288                 :        238 :       FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
    2289                 :            :         {
    2290                 :         55 :           vinfo = SLP_TREE_SCALAR_STMTS (node)[0];
    2291                 :         55 :           vinfo = DR_GROUP_FIRST_ELEMENT (vinfo);
    2292                 :         55 :           bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
    2293                 :         55 :           size = DR_GROUP_SIZE (vinfo);
    2294                 :         55 :           vectype = STMT_VINFO_VECTYPE (vinfo);
    2295                 :         55 :           if (! vect_load_lanes_supported (vectype, size, false)
    2296                 :         55 :               && ! vect_grouped_load_supported (vectype, single_element_p,
    2297                 :            :                                                 size))
    2298                 :          0 :             return opt_result::failure_at (vinfo->stmt,
    2299                 :          0 :                                            "unsupported grouped load\n");
    2300                 :            :         }
    2301                 :            :     }
    2302                 :            : 
    2303                 :        308 :   if (dump_enabled_p ())
    2304                 :         55 :     dump_printf_loc (MSG_NOTE, vect_location,
    2305                 :            :                      "re-trying with SLP disabled\n");
    2306                 :            : 
    2307                 :            :   /* Roll back state appropriately.  No SLP this time.  */
    2308                 :        308 :   slp = false;
     2309                 :            :   /* Restore the vectorization factor to what it was without SLP.  */
    2310                 :        308 :   LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
    2311                 :            :   /* Free the SLP instances.  */
    2312                 :        458 :   FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
    2313                 :        150 :     vect_free_slp_instance (instance, false);
    2314                 :        308 :   LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
    2315                 :            :   /* Reset SLP type to loop_vect on all stmts.  */
    2316                 :        924 :   for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
    2317                 :            :     {
    2318                 :        616 :       basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
    2319                 :        616 :       for (gimple_stmt_iterator si = gsi_start_phis (bb);
    2320                 :       1708 :            !gsi_end_p (si); gsi_next (&si))
    2321                 :            :         {
    2322                 :       1092 :           stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
    2323                 :       1092 :           STMT_SLP_TYPE (stmt_info) = loop_vect;
    2324                 :       1092 :           if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
    2325                 :       1092 :               || STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
    2326                 :            :             {
     2327                 :            :               /* vectorizable_reduction adjusts reduction stmt def-types;
     2328                 :            :                  restore them to that of the PHI.  */
    2329                 :        410 :               STMT_VINFO_DEF_TYPE (STMT_VINFO_REDUC_DEF (stmt_info))
    2330                 :        410 :                 = STMT_VINFO_DEF_TYPE (stmt_info);
    2331                 :        410 :               STMT_VINFO_DEF_TYPE (vect_stmt_to_vectorize
    2332                 :            :                                         (STMT_VINFO_REDUC_DEF (stmt_info)))
    2333                 :        410 :                 = STMT_VINFO_DEF_TYPE (stmt_info);
    2334                 :            :             }
    2335                 :            :         }
    2336                 :        616 :       for (gimple_stmt_iterator si = gsi_start_bb (bb);
    2337                 :       8182 :            !gsi_end_p (si); gsi_next (&si))
    2338                 :            :         {
    2339                 :       7566 :           stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (gsi_stmt (si));
    2340                 :       7566 :           STMT_SLP_TYPE (stmt_info) = loop_vect;
    2341                 :       7566 :           if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    2342                 :            :             {
    2343                 :        530 :               gimple *pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
    2344                 :        530 :               stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
    2345                 :        530 :               STMT_SLP_TYPE (stmt_info) = loop_vect;
    2346                 :        530 :               for (gimple_stmt_iterator pi = gsi_start (pattern_def_seq);
    2347                 :       1218 :                    !gsi_end_p (pi); gsi_next (&pi))
    2348                 :        688 :                 STMT_SLP_TYPE (loop_vinfo->lookup_stmt (gsi_stmt (pi)))
    2349                 :        688 :                   = loop_vect;
    2350                 :            :             }
    2351                 :            :         }
    2352                 :            :     }
    2353                 :            :   /* Free optimized alias test DDRS.  */
    2354                 :        308 :   LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
    2355                 :        308 :   LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
    2356                 :        308 :   LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
    2357                 :            :   /* Reset target cost data.  */
    2358                 :        308 :   destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
    2359                 :        308 :   LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
    2360                 :        308 :     = init_cost (LOOP_VINFO_LOOP (loop_vinfo));
    2361                 :            :   /* Reset accumulated rgroup information.  */
    2362                 :        308 :   release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
    2363                 :            :   /* Reset assorted flags.  */
    2364                 :        308 :   LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
    2365                 :        308 :   LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
    2366                 :        308 :   LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
    2367                 :        308 :   LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
    2368                 :        308 :   LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;
    2369                 :            : 
    2370                 :        308 :   goto start_over;
    2371                 :            : }
    2372                 :            : 
    2373                 :            : /* Return true if vectorizing a loop using NEW_LOOP_VINFO appears
    2374                 :            :    to be better than vectorizing it using OLD_LOOP_VINFO.  Assume that
    2375                 :            :    OLD_LOOP_VINFO is better unless something specifically indicates
    2376                 :            :    otherwise.
    2377                 :            : 
    2378                 :            :    Note that this deliberately isn't a partial order.  */
    2379                 :            : 
    2380                 :            : static bool
    2381                 :          0 : vect_better_loop_vinfo_p (loop_vec_info new_loop_vinfo,
    2382                 :            :                           loop_vec_info old_loop_vinfo)
    2383                 :            : {
    2384                 :          0 :   struct loop *loop = LOOP_VINFO_LOOP (new_loop_vinfo);
    2385                 :          0 :   gcc_assert (LOOP_VINFO_LOOP (old_loop_vinfo) == loop);
    2386                 :            : 
    2387                 :          0 :   poly_int64 new_vf = LOOP_VINFO_VECT_FACTOR (new_loop_vinfo);
    2388                 :          0 :   poly_int64 old_vf = LOOP_VINFO_VECT_FACTOR (old_loop_vinfo);
    2389                 :            : 
    2390                 :            :   /* Always prefer a VF of loop->simdlen over any other VF.  */
    2391                 :          0 :   if (loop->simdlen)
    2392                 :            :     {
    2393                 :          0 :       bool new_simdlen_p = known_eq (new_vf, loop->simdlen);
    2394                 :          0 :       bool old_simdlen_p = known_eq (old_vf, loop->simdlen);
    2395                 :          0 :       if (new_simdlen_p != old_simdlen_p)
    2396                 :            :         return new_simdlen_p;
    2397                 :            :     }
    2398                 :            : 
    2399                 :            :   /* Limit the VFs to what is likely to be the maximum number of iterations,
    2400                 :            :      to handle cases in which at least one loop_vinfo is fully-masked.  */
    2401                 :          0 :   HOST_WIDE_INT estimated_max_niter = likely_max_stmt_executions_int (loop);
    2402                 :          0 :   if (estimated_max_niter != -1)
    2403                 :            :     {
    2404                 :          0 :       if (known_le (estimated_max_niter, new_vf))
    2405                 :          0 :         new_vf = estimated_max_niter;
    2406                 :          0 :       if (known_le (estimated_max_niter, old_vf))
    2407                 :          0 :         old_vf = estimated_max_niter;
    2408                 :            :     }
    2409                 :            : 
    2410                 :            :   /* Check whether the (fractional) cost per scalar iteration is lower
    2411                 :            :      or higher: new_inside_cost / new_vf vs. old_inside_cost / old_vf.  */
    2412                 :          0 :   poly_widest_int rel_new = (new_loop_vinfo->vec_inside_cost
    2413                 :          0 :                              * poly_widest_int (old_vf));
    2414                 :          0 :   poly_widest_int rel_old = (old_loop_vinfo->vec_inside_cost
    2415                 :          0 :                              * poly_widest_int (new_vf));
    2416                 :          0 :   if (maybe_lt (rel_old, rel_new))
    2417                 :            :     return false;
    2418                 :          0 :   if (known_lt (rel_new, rel_old))
    2419                 :            :     return true;
    2420                 :            : 
    2421                 :            :   /* If there's nothing to choose between the loop bodies, see whether
    2422                 :            :      there's a difference in the prologue and epilogue costs.  */
    2423                 :          0 :   if (new_loop_vinfo->vec_outside_cost != old_loop_vinfo->vec_outside_cost)
    2424                 :          0 :     return new_loop_vinfo->vec_outside_cost < old_loop_vinfo->vec_outside_cost;
    2425                 :            : 
    2426                 :            :   return false;
    2427                 :            : }
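                          :            : 
                          :            : /* A small worked example of the cost comparison above, with purely
                          :            :    hypothetical costs: take old_vf = 8 with an inside cost of 20 and
                          :            :    new_vf = 4 with an inside cost of 12.  The per-scalar-iteration
                          :            :    costs are 20/8 = 2.5 and 12/4 = 3.  The code avoids the division by
                          :            :    cross-multiplying: rel_new = 12 * 8 = 96 and rel_old = 20 * 4 = 80.
                          :            :    Since rel_old < rel_new, the old loop_vinfo is cheaper per scalar
                          :            :    iteration and the function returns false.  */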
    2428                 :            : 
    2429                 :            : /* Decide whether to replace OLD_LOOP_VINFO with NEW_LOOP_VINFO.  Return
    2430                 :            :    true if we should.  */
    2431                 :            : 
    2432                 :            : static bool
    2433                 :          0 : vect_joust_loop_vinfos (loop_vec_info new_loop_vinfo,
    2434                 :            :                         loop_vec_info old_loop_vinfo)
    2435                 :            : {
    2436                 :          0 :   if (!vect_better_loop_vinfo_p (new_loop_vinfo, old_loop_vinfo))
    2437                 :            :     return false;
    2438                 :            : 
    2439                 :          0 :   if (dump_enabled_p ())
    2440                 :          0 :     dump_printf_loc (MSG_NOTE, vect_location,
    2441                 :            :                      "***** Preferring vector mode %s to vector mode %s\n",
    2442                 :          0 :                      GET_MODE_NAME (new_loop_vinfo->vector_mode),
    2443                 :          0 :                      GET_MODE_NAME (old_loop_vinfo->vector_mode));
    2444                 :            :   return true;
    2445                 :            : }
    2446                 :            : 
    2447                 :            : /* Function vect_analyze_loop.
    2448                 :            : 
    2449                 :            :    Apply a set of analyses on LOOP, and create a loop_vec_info struct
    2450                 :            :    for it.  The different analyses will record information in the
    2451                 :            :    loop_vec_info struct.  */
    2452                 :            : opt_loop_vec_info
    2453                 :      72023 : vect_analyze_loop (class loop *loop, vec_info_shared *shared)
    2454                 :            : {
    2455                 :      72023 :   auto_vector_modes vector_modes;
    2456                 :            : 
     2457                 :            :   /* Autodetect the first vector mode we will try.  */
    2458                 :      72023 :   unsigned int autovec_flags
    2459                 :     144046 :     = targetm.vectorize.autovectorize_vector_modes (&vector_modes,
    2460                 :      72023 :                                                     loop->simdlen != 0);
    2461                 :      72023 :   unsigned int mode_i = 0;
    2462                 :            : 
    2463                 :     144046 :   DUMP_VECT_SCOPE ("analyze_loop_nest");
    2464                 :            : 
    2465                 :      72023 :   if (loop_outer (loop)
    2466                 :      72023 :       && loop_vec_info_for_loop (loop_outer (loop))
    2467                 :      72281 :       && LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
    2468                 :        258 :     return opt_loop_vec_info::failure_at (vect_location,
    2469                 :        258 :                                           "outer-loop already vectorized.\n");
    2470                 :            : 
    2471                 :      71765 :   if (!find_loop_nest (loop, &shared->loop_nest))
    2472                 :       3780 :     return opt_loop_vec_info::failure_at
    2473                 :            :       (vect_location,
    2474                 :            :        "not vectorized: loop nest containing two or more consecutive inner"
    2475                 :       3780 :        " loops cannot be vectorized\n");
    2476                 :            : 
    2477                 :      67985 :   unsigned n_stmts = 0;
    2478                 :      67985 :   machine_mode autodetected_vector_mode = VOIDmode;
    2479                 :      67985 :   opt_loop_vec_info first_loop_vinfo = opt_loop_vec_info::success (NULL);
    2480                 :      67985 :   machine_mode next_vector_mode = VOIDmode;
    2481                 :      67985 :   poly_uint64 lowest_th = 0;
    2482                 :      67985 :   unsigned vectorized_loops = 0;
    2483                 :      67985 :   bool pick_lowest_cost_p = ((autovec_flags & VECT_COMPARE_COSTS)
    2484                 :      67985 :                              && !unlimited_cost_model (loop));
    2485                 :            : 
    2486                 :      67985 :   bool vect_epilogues = false;
    2487                 :      67985 :   opt_result res = opt_result::success ();
    2488                 :      67985 :   unsigned HOST_WIDE_INT simdlen = loop->simdlen;
    2489                 :     106307 :   while (1)
    2490                 :            :     {
    2491                 :            :       /* Check the CFG characteristics of the loop (nesting, entry/exit).  */
    2492                 :      87146 :       opt_loop_vec_info loop_vinfo = vect_analyze_loop_form (loop, shared);
    2493                 :      87146 :       if (!loop_vinfo)
    2494                 :            :         {
    2495                 :      33177 :           if (dump_enabled_p ())
    2496                 :       3987 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    2497                 :            :                              "bad loop form.\n");
    2498                 :      33177 :           gcc_checking_assert (first_loop_vinfo == NULL);
    2499                 :      33177 :           return loop_vinfo;
    2500                 :            :         }
    2501                 :      53969 :       loop_vinfo->vector_mode = next_vector_mode;
    2502                 :            : 
    2503                 :      53969 :       bool fatal = false;
    2504                 :            : 
    2505                 :            :       /* When pick_lowest_cost_p is true, we should in principle iterate
    2506                 :            :          over all the loop_vec_infos that LOOP_VINFO could replace and
    2507                 :            :          try to vectorize LOOP_VINFO under the same conditions.
    2508                 :            :          E.g. when trying to replace an epilogue loop, we should vectorize
    2509                 :            :          LOOP_VINFO as an epilogue loop with the same VF limit.  When trying
    2510                 :            :          to replace the main loop, we should vectorize LOOP_VINFO as a main
    2511                 :            :          loop too.
    2512                 :            : 
    2513                 :            :          However, autovectorize_vector_modes is usually sorted as follows:
    2514                 :            : 
    2515                 :            :          - Modes that naturally produce lower VFs usually follow modes that
    2516                 :            :            naturally produce higher VFs.
    2517                 :            : 
    2518                 :            :          - When modes naturally produce the same VF, maskable modes
    2519                 :            :            usually follow unmaskable ones, so that the maskable mode
    2520                 :            :            can be used to vectorize the epilogue of the unmaskable mode.
    2521                 :            : 
    2522                 :            :          This order is preferred because it leads to the maximum
    2523                 :            :          epilogue vectorization opportunities.  Targets should only use
    2524                 :            :          a different order if they want to make wide modes available while
    2525                 :            :          disparaging them relative to earlier, smaller modes.  The assumption
    2526                 :            :          in that case is that the wider modes are more expensive in some
    2527                 :            :          way that isn't reflected directly in the costs.
    2528                 :            : 
    2529                 :            :          There should therefore be few interesting cases in which
    2530                 :            :          LOOP_VINFO fails when treated as an epilogue loop, succeeds when
    2531                 :            :          treated as a standalone loop, and ends up being genuinely cheaper
    2532                 :            :          than FIRST_LOOP_VINFO.  */
    2533                 :      53969 :       if (vect_epilogues)
    2534                 :       8221 :         LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = first_loop_vinfo;
    2535                 :            : 
    2536                 :      53969 :       res = vect_analyze_loop_2 (loop_vinfo, fatal, &n_stmts);
    2537                 :      53969 :       if (mode_i == 0)
    2538                 :      34808 :         autodetected_vector_mode = loop_vinfo->vector_mode;
    2539                 :      53969 :       if (dump_enabled_p ())
    2540                 :            :         {
    2541                 :      11841 :           if (res)
    2542                 :       7114 :             dump_printf_loc (MSG_NOTE, vect_location,
    2543                 :            :                              "***** Analysis succeeded with vector mode %s\n",
    2544                 :       7114 :                              GET_MODE_NAME (loop_vinfo->vector_mode));
    2545                 :            :           else
    2546                 :       4727 :             dump_printf_loc (MSG_NOTE, vect_location,
    2547                 :            :                              "***** Analysis failed with vector mode %s\n",
    2548                 :       4727 :                              GET_MODE_NAME (loop_vinfo->vector_mode));
    2549                 :            :         }
    2550                 :            : 
    2551                 :      53969 :       loop->aux = NULL;
    2552                 :            : 
    2553                 :      53969 :       if (!fatal)
    2554                 :      76533 :         while (mode_i < vector_modes.length ()
    2555                 :     153066 :                && vect_chooses_same_modes_p (loop_vinfo, vector_modes[mode_i]))
    2556                 :            :           {
    2557                 :      29250 :             if (dump_enabled_p ())
    2558                 :       7171 :               dump_printf_loc (MSG_NOTE, vect_location,
    2559                 :            :                                "***** The result for vector mode %s would"
    2560                 :            :                                " be the same\n",
    2561                 :       7171 :                                GET_MODE_NAME (vector_modes[mode_i]));
    2562                 :      29250 :             mode_i += 1;
    2563                 :            :           }
    2564                 :            : 
    2565                 :      53969 :       if (res)
    2566                 :            :         {
    2567                 :      21620 :           LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
    2568                 :      21620 :           vectorized_loops++;
    2569                 :            : 
    2570                 :            :           /* Once we hit the desired simdlen for the first time,
    2571                 :            :              discard any previous attempts.  */
    2572                 :      21620 :           if (simdlen
    2573                 :      21620 :               && known_eq (LOOP_VINFO_VECT_FACTOR (loop_vinfo), simdlen))
    2574                 :            :             {
    2575                 :          5 :               delete first_loop_vinfo;
    2576                 :          5 :               first_loop_vinfo = opt_loop_vec_info::success (NULL);
    2577                 :          5 :               LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = NULL;
    2578                 :          5 :               simdlen = 0;
    2579                 :            :             }
    2580                 :      21615 :           else if (pick_lowest_cost_p && first_loop_vinfo)
    2581                 :            :             {
    2582                 :            :               /* Keep trying to roll back vectorization attempts while the
    2583                 :            :                  loop_vec_infos they produced were worse than this one.  */
    2584                 :          0 :               vec<loop_vec_info> &vinfos = first_loop_vinfo->epilogue_vinfos;
    2585                 :          0 :               while (!vinfos.is_empty ()
    2586                 :          0 :                      && vect_joust_loop_vinfos (loop_vinfo, vinfos.last ()))
    2587                 :            :                 {
    2588                 :          0 :                   gcc_assert (vect_epilogues);
    2589                 :          0 :                   delete vinfos.pop ();
    2590                 :            :                 }
    2591                 :          0 :               if (vinfos.is_empty ()
    2592                 :          0 :                   && vect_joust_loop_vinfos (loop_vinfo, first_loop_vinfo))
    2593                 :            :                 {
    2594                 :          0 :                   delete first_loop_vinfo;
    2595                 :          0 :                   first_loop_vinfo = opt_loop_vec_info::success (NULL);
    2596                 :          0 :                   LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = NULL;
    2597                 :            :                 }
    2598                 :            :             }
    2599                 :            : 
    2600                 :      21620 :           if (first_loop_vinfo == NULL)
    2601                 :            :             {
    2602                 :      18120 :               first_loop_vinfo = loop_vinfo;
    2603                 :      18120 :               lowest_th = LOOP_VINFO_VERSIONING_THRESHOLD (first_loop_vinfo);
    2604                 :            :             }
    2605                 :       3500 :           else if (vect_epilogues
    2606                 :            :                    /* For now only allow one epilogue loop.  */
    2607                 :       3500 :                    && first_loop_vinfo->epilogue_vinfos.is_empty ())
    2608                 :            :             {
    2609                 :       3498 :               first_loop_vinfo->epilogue_vinfos.safe_push (loop_vinfo);
    2610                 :       3498 :               poly_uint64 th = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
    2611                 :       3498 :               gcc_assert (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
    2612                 :            :                           || maybe_ne (lowest_th, 0U));
    2613                 :            :               /* Keep track of the known smallest versioning
    2614                 :            :                  threshold.  */
    2615                 :       3498 :               if (ordered_p (lowest_th, th))
    2616                 :       3498 :                 lowest_th = ordered_min (lowest_th, th);
    2617                 :            :             }
    2618                 :            :           else
    2619                 :          2 :             delete loop_vinfo;
    2620                 :            : 
    2621                 :            :           /* Only vectorize epilogues if PARAM_VECT_EPILOGUES_NOMASK is
    2622                 :            :              enabled, SIMDUID is not set, it is the innermost loop and we have
    2623                 :            :              either already found the loop's SIMDLEN or there was no SIMDLEN to
    2624                 :            :              begin with.
    2625                 :            :              TODO: Enable epilogue vectorization for loops with SIMDUID set.  */
    2626                 :      43240 :           vect_epilogues = (!simdlen
    2627                 :      21611 :                             && loop->inner == NULL
    2628                 :      21337 :                             && param_vect_epilogues_nomask
    2629                 :      20706 :                             && LOOP_VINFO_PEELING_FOR_NITER (first_loop_vinfo)
    2630                 :      12143 :                             && !loop->simduid
    2631                 :            :                             /* For now only allow one epilogue loop, but allow
    2632                 :            :                                pick_lowest_cost_p to replace it.  */
    2633                 :      33526 :                             && (first_loop_vinfo->epilogue_vinfos.is_empty ()
    2634                 :       3498 :                                 || pick_lowest_cost_p));
    2635                 :            : 
    2636                 :            :           /* Commit to first_loop_vinfo if we have no reason to try
    2637                 :            :              alternatives.  */
    2638                 :      21620 :           if (!simdlen && !vect_epilogues && !pick_lowest_cost_p)
    2639                 :            :             break;
    2640                 :            :         }
    2641                 :            :       else
    2642                 :            :         {
    2643                 :      32349 :           delete loop_vinfo;
    2644                 :      32349 :           if (fatal)
    2645                 :            :             {
    2646                 :       6686 :               gcc_checking_assert (first_loop_vinfo == NULL);
    2647                 :            :               break;
    2648                 :            :             }
    2649                 :            :         }
    2650                 :            : 
    2651                 :      34080 :       if (mode_i < vector_modes.length ()
    2652                 :      19161 :           && VECTOR_MODE_P (autodetected_vector_mode)
    2653                 :      19161 :           && (related_vector_mode (vector_modes[mode_i],
    2654                 :      38322 :                                    GET_MODE_INNER (autodetected_vector_mode))
    2655                 :      19161 :               == autodetected_vector_mode)
    2656                 :      34180 :           && (related_vector_mode (autodetected_vector_mode,
    2657                 :      34080 :                                    GET_MODE_INNER (vector_modes[mode_i]))
    2658                 :        200 :               == vector_modes[mode_i]))
    2659                 :            :         {
    2660                 :        100 :           if (dump_enabled_p ())
    2661                 :          0 :             dump_printf_loc (MSG_NOTE, vect_location,
    2662                 :            :                              "***** Skipping vector mode %s, which would"
    2663                 :            :                              " repeat the analysis for %s\n",
    2664                 :          0 :                              GET_MODE_NAME (vector_modes[mode_i]),
    2665                 :          0 :                              GET_MODE_NAME (autodetected_vector_mode));
    2666                 :        100 :           mode_i += 1;
    2667                 :            :         }
    2668                 :            : 
    2669                 :      34080 :       if (mode_i == vector_modes.length ()
    2670                 :      34080 :           || autodetected_vector_mode == VOIDmode)
    2671                 :            :         break;
    2672                 :            : 
     2673                 :            :       /* Try the next vector mode from the target's list.  */
    2674                 :      19161 :       next_vector_mode = vector_modes[mode_i++];
    2675                 :      19161 :       if (dump_enabled_p ())
    2676                 :       3293 :         dump_printf_loc (MSG_NOTE, vect_location,
    2677                 :            :                          "***** Re-trying analysis with vector mode %s\n",
    2678                 :       3293 :                          GET_MODE_NAME (next_vector_mode));
    2679                 :      19161 :     }
    2680                 :            : 
    2681                 :      34808 :   if (first_loop_vinfo)
    2682                 :            :     {
    2683                 :      18116 :       loop->aux = (loop_vec_info) first_loop_vinfo;
    2684                 :      18116 :       if (dump_enabled_p ())
    2685                 :       6532 :         dump_printf_loc (MSG_NOTE, vect_location,
    2686                 :            :                          "***** Choosing vector mode %s\n",
    2687                 :       6532 :                          GET_MODE_NAME (first_loop_vinfo->vector_mode));
    2688                 :      18116 :       LOOP_VINFO_VERSIONING_THRESHOLD (first_loop_vinfo) = lowest_th;
    2689                 :      18116 :       return first_loop_vinfo;
    2690                 :            :     }
    2691                 :            : 
    2692                 :      16692 :   return opt_loop_vec_info::propagate_failure (res);
    2693                 :            : }
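                          :            : 
                          :            : /* A hedged illustration of the retry loop above (the mode list here is
                          :            :    hypothetical; the real one comes from the target hook): if
                          :            :    autovectorize_vector_modes returned { V16QI, V8QI }, the first
                          :            :    iteration analyzes the loop with the autodetected mode, and list
                          :            :    entries that would choose the same modes are then skipped.  A
                          :            :    further mode such as V8QI is only analyzed if a simdlen is still
                          :            :    being sought, epilogue vectorization is enabled, or
                          :            :    VECT_COMPARE_COSTS asks for the cheapest candidate; otherwise the
                          :            :    loop commits to the first successful analysis.  */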
    2694                 :            : 
    2695                 :            : /* Return true if there is an in-order reduction function for CODE, storing
    2696                 :            :    it in *REDUC_FN if so.  */
    2697                 :            : 
    2698                 :            : static bool
    2699                 :        933 : fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
    2700                 :            : {
    2701                 :        933 :   switch (code)
    2702                 :            :     {
    2703                 :        864 :     case PLUS_EXPR:
    2704                 :        864 :       *reduc_fn = IFN_FOLD_LEFT_PLUS;
    2705                 :          0 :       return true;
    2706                 :            : 
    2707                 :            :     default:
    2708                 :            :       return false;
    2709                 :            :     }
    2710                 :            : }
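                          :            : 
                          :            : /* A minimal standalone sketch (an illustration, not part of this
                          :            :    file) of the evaluation order an in-order reduction such as
                          :            :    IFN_FOLD_LEFT_PLUS must preserve: the accumulator folds each
                          :            :    element strictly left to right, so nothing is reassociated.  */
                          :            : static double
                          :            : fold_left_plus_sketch (const double *a, int n, double init)
                          :            : {
                          :            :   double s = init;
                          :            :   for (int i = 0; i < n; ++i)
                          :            :     s = s + a[i];   /* ((init + a[0]) + a[1]) + ... + a[n-1].  */
                          :            :   return s;
                          :            : }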
    2711                 :            : 
    2712                 :            : /* Function reduction_fn_for_scalar_code
    2713                 :            : 
    2714                 :            :    Input:
    2715                 :            :    CODE - tree_code of a reduction operations.
    2716                 :            : 
    2717                 :            :    Output:
    2718                 :            :    REDUC_FN - the corresponding internal function to be used to reduce the
    2719                 :            :       vector of partial results into a single scalar result, or IFN_LAST
    2720                 :            :       if the operation is a supported reduction operation, but does not have
    2721                 :            :       such an internal function.
     2722                 :            :    CODE - tree_code of a reduction operation.
    2723                 :            :    Return FALSE if CODE currently cannot be vectorized as reduction.  */
    2724                 :            : 
    2725                 :            : static bool
    2726                 :      16225 : reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
    2727                 :            : {
    2728                 :      16225 :   switch (code)
    2729                 :            :     {
     2730                 :            :    Return FALSE if CODE currently cannot be vectorized as a reduction.  */
    2731                 :        250 :         *reduc_fn = IFN_REDUC_MAX;
    2732                 :        250 :         return true;
    2733                 :            : 
    2734                 :        814 :       case MIN_EXPR:
    2735                 :        814 :         *reduc_fn = IFN_REDUC_MIN;
    2736                 :        814 :         return true;
    2737                 :            : 
    2738                 :      14545 :       case PLUS_EXPR:
    2739                 :      14545 :         *reduc_fn = IFN_REDUC_PLUS;
    2740                 :      14545 :         return true;
    2741                 :            : 
    2742                 :        134 :       case BIT_AND_EXPR:
    2743                 :        134 :         *reduc_fn = IFN_REDUC_AND;
    2744                 :        134 :         return true;
    2745                 :            : 
    2746                 :        166 :       case BIT_IOR_EXPR:
    2747                 :        166 :         *reduc_fn = IFN_REDUC_IOR;
    2748                 :        166 :         return true;
    2749                 :            : 
    2750                 :         82 :       case BIT_XOR_EXPR:
    2751                 :         82 :         *reduc_fn = IFN_REDUC_XOR;
    2752                 :         82 :         return true;
    2753                 :            : 
    2754                 :        234 :       case MULT_EXPR:
    2755                 :        234 :       case MINUS_EXPR:
    2756                 :        234 :         *reduc_fn = IFN_LAST;
    2757                 :        234 :         return true;
    2758                 :            : 
    2759                 :            :       default:
    2760                 :            :        return false;
    2761                 :            :     }
    2762                 :            : }
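                          :            : 
                          :            : /* As an illustration (hypothetical lanes, not from a test case): for a
                          :            :    PLUS_EXPR reduction with four lanes, the vector loop accumulates a
                          :            :    vector of partial sums { s0, s1, s2, s3 }, and IFN_REDUC_PLUS
                          :            :    collapses it to the scalar s0 + s1 + s2 + s3 in the epilogue.  For
                          :            :    MULT_EXPR and MINUS_EXPR, *REDUC_FN is IFN_LAST: the operation is
                          :            :    still a supported reduction, but no single internal function
                          :            :    performs the final collapse.  */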
    2763                 :            : 
    2764                 :            : /* If there is a neutral value X such that SLP reduction NODE would not
    2765                 :            :    be affected by the introduction of additional X elements, return that X,
    2766                 :            :    otherwise return null.  CODE is the code of the reduction and VECTOR_TYPE
    2767                 :            :    is the vector type that would hold element X.  REDUC_CHAIN is true if
    2768                 :            :    the SLP statements perform a single reduction, false if each statement
    2769                 :            :    performs an independent reduction.  */
    2770                 :            : 
    2771                 :            : static tree
    2772                 :       1060 : neutral_op_for_slp_reduction (slp_tree slp_node, tree vector_type,
    2773                 :            :                               tree_code code, bool reduc_chain)
    2774                 :            : {
    2775                 :       1060 :   vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
    2776                 :       1060 :   stmt_vec_info stmt_vinfo = stmts[0];
    2777                 :       1060 :   tree scalar_type = TREE_TYPE (vector_type);
    2778                 :       1060 :   class loop *loop = gimple_bb (stmt_vinfo->stmt)->loop_father;
    2779                 :       1060 :   gcc_assert (loop);
    2780                 :            : 
    2781                 :       1060 :   switch (code)
    2782                 :            :     {
    2783                 :        981 :     case WIDEN_SUM_EXPR:
    2784                 :        981 :     case DOT_PROD_EXPR:
    2785                 :        981 :     case SAD_EXPR:
    2786                 :        981 :     case PLUS_EXPR:
    2787                 :        981 :     case MINUS_EXPR:
    2788                 :        981 :     case BIT_IOR_EXPR:
    2789                 :        981 :     case BIT_XOR_EXPR:
    2790                 :        981 :       return build_zero_cst (scalar_type);
    2791                 :            : 
    2792                 :         23 :     case MULT_EXPR:
    2793                 :         23 :       return build_one_cst (scalar_type);
    2794                 :            : 
    2795                 :         16 :     case BIT_AND_EXPR:
    2796                 :         16 :       return build_all_ones_cst (scalar_type);
    2797                 :            : 
    2798                 :         40 :     case MAX_EXPR:
    2799                 :         40 :     case MIN_EXPR:
    2800                 :            :       /* For MIN/MAX the initial values are neutral.  A reduction chain
    2801                 :            :          has only a single initial value, so that value is neutral for
    2802                 :            :          all statements.  */
    2803                 :         40 :       if (reduc_chain)
    2804                 :          4 :         return PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
    2805                 :            :                                       loop_preheader_edge (loop));
    2806                 :            :       return NULL_TREE;
    2807                 :            : 
    2808                 :            :     default:
    2809                 :            :       return NULL_TREE;
    2810                 :            :     }
    2811                 :            : }
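                          :            : 
                          :            : /* A short worked example of the neutral values above (hypothetical
                          :            :    data): padding a PLUS_EXPR reduction over { a, b, c } with the
                          :            :    neutral element 0 to fill a 4-lane vector gives a + b + c + 0,
                          :            :    which is unchanged; likewise BIT_AND_EXPR pads with all-ones since
                          :            :    x & ~0 == x, and MULT_EXPR pads with 1.  */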
    2812                 :            : 
    2813                 :            : /* Error reporting helper for vect_is_simple_reduction below.  GIMPLE statement
    2814                 :            :    STMT is printed with a message MSG. */
    2815                 :            : 
    2816                 :            : static void
    2817                 :        336 : report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
    2818                 :            : {
    2819                 :          0 :   dump_printf_loc (msg_type, vect_location, "%s%G", msg, stmt);
    2820                 :        336 : }
    2821                 :            : 
    2822                 :            : /* Return true if we need an in-order reduction for operation CODE
     2823                 :            :    on type TYPE, i.e. whether the reduction has to be computed in the
     2824                 :            :    original scalar order.  */
    2825                 :            : 
    2826                 :            : bool
    2827                 :      56579 : needs_fold_left_reduction_p (tree type, tree_code code)
    2828                 :            : {
    2829                 :            :   /* CHECKME: check for !flag_finite_math_only too?  */
    2830                 :      56579 :   if (SCALAR_FLOAT_TYPE_P (type))
    2831                 :      29032 :     switch (code)
    2832                 :            :       {
    2833                 :            :       case MIN_EXPR:
    2834                 :            :       case MAX_EXPR:
    2835                 :            :         return false;
    2836                 :            : 
    2837                 :      28965 :       default:
    2838                 :      28965 :         return !flag_associative_math;
    2839                 :            :       }
    2840                 :            : 
    2841                 :      27547 :   if (INTEGRAL_TYPE_P (type))
    2842                 :            :     {
    2843                 :      27533 :       if (!operation_no_trapping_overflow (type, code))
    2844                 :            :         return true;
    2845                 :      27461 :       return false;
    2846                 :            :     }
    2847                 :            : 
    2848                 :         14 :   if (SAT_FIXED_POINT_TYPE_P (type))
    2849                 :          0 :     return true;
    2850                 :            : 
    2851                 :            :   return false;
    2852                 :            : }
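                          :            : 
                          :            : /* A standalone snippet (an illustration, not part of this file) of why
                          :            :    floating-point additions need an in-order reduction unless
                          :            :    -fassociative-math is given: reassociating the sum below changes
                          :            :    the result.  */
                          :            : int
                          :            : main (void)
                          :            : {
                          :            :   double a[3] = { 1e16, 1.0, -1e16 };
                          :            :   double in_order = (a[0] + a[1]) + a[2];  /* 1e16 absorbs the 1.0, then cancels: 0.0.  */
                          :            :   double reassoc = (a[0] + a[2]) + a[1];   /* The 1e16 terms cancel first: 1.0.  */
                          :            :   return in_order == reassoc;              /* Returns 0: the two sums differ.  */
                          :            : }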
    2853                 :            : 
     2854                 :            : /* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG
     2855                 :            :    has a handled computation expression.  Store the main reduction
    2856                 :            :    operation in *CODE.  */
    2857                 :            : 
    2858                 :            : static bool
    2859                 :      19888 : check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
    2860                 :            :                       tree loop_arg, enum tree_code *code,
    2861                 :            :                       vec<std::pair<ssa_op_iter, use_operand_p> > &path)
    2862                 :            : {
    2863                 :      19888 :   auto_bitmap visited;
    2864                 :      19888 :   tree lookfor = PHI_RESULT (phi);
    2865                 :      19888 :   ssa_op_iter curri;
    2866                 :      19888 :   use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
    2867                 :      37329 :   while (USE_FROM_PTR (curr) != loop_arg)
    2868                 :      17441 :     curr = op_iter_next_use (&curri);
    2869                 :      19888 :   curri.i = curri.numops;
    2870                 :     263991 :   do
    2871                 :            :     {
    2872                 :     263991 :       path.safe_push (std::make_pair (curri, curr));
    2873                 :     263991 :       tree use = USE_FROM_PTR (curr);
    2874                 :     263991 :       if (use == lookfor)
    2875                 :            :         break;
    2876                 :     244118 :       gimple *def = SSA_NAME_DEF_STMT (use);
    2877                 :     244118 :       if (gimple_nop_p (def)
    2878                 :     244118 :           || ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
    2879                 :            :         {
    2880                 :     216260 : pop:
    2881                 :     216260 :           do
    2882                 :            :             {
    2883                 :     216260 :               std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
    2884                 :     216260 :               curri = x.first;
    2885                 :     216260 :               curr = x.second;
    2886                 :     227084 :               do
    2887                 :     227084 :                 curr = op_iter_next_use (&curri);
    2888                 :            :               /* Skip already visited or non-SSA operands (from iterating
    2889                 :            :                  over PHI args).  */
    2890                 :            :               while (curr != NULL_USE_OPERAND_P
    2891                 :     227084 :                      && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
    2892                 :      77942 :                          || ! bitmap_set_bit (visited,
    2893                 :      77942 :                                               SSA_NAME_VERSION
    2894                 :            :                                                 (USE_FROM_PTR (curr)))));
    2895                 :            :             }
    2896                 :     216260 :           while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
    2897                 :      68790 :           if (curr == NULL_USE_OPERAND_P)
    2898                 :            :             break;
    2899                 :            :         }
    2900                 :            :       else
    2901                 :            :         {
    2902                 :     196621 :           if (gimple_code (def) == GIMPLE_PHI)
    2903                 :      14676 :             curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
    2904                 :            :           else
    2905                 :     181945 :             curr = op_iter_init_use (&curri, def, SSA_OP_USE);
    2906                 :      32678 :           while (curr != NULL_USE_OPERAND_P
    2907                 :     229299 :                  && (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
    2908                 :     202891 :                      || ! bitmap_set_bit (visited,
    2909                 :     202891 :                                           SSA_NAME_VERSION
    2910                 :            :                                             (USE_FROM_PTR (curr)))))
    2911                 :      32678 :             curr = op_iter_next_use (&curri);
    2912                 :     196621 :           if (curr == NULL_USE_OPERAND_P)
    2913                 :      21293 :             goto pop;
    2914                 :            :         }
    2915                 :            :     }
    2916                 :            :   while (1);
    2917                 :      19888 :   if (dump_file && (dump_flags & TDF_DETAILS))
    2918                 :            :     {
    2919                 :       1995 :       dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
    2920                 :       1995 :       unsigned i;
    2921                 :       1995 :       std::pair<ssa_op_iter, use_operand_p> *x;
    2922                 :       6482 :       FOR_EACH_VEC_ELT (path, i, x)
    2923                 :       4487 :         dump_printf (MSG_NOTE, "%T ", USE_FROM_PTR (x->second));
    2924                 :       1995 :       dump_printf (MSG_NOTE, "\n");
    2925                 :            :     }
    2926                 :            : 
    2927                 :            :   /* Check whether the reduction path detected is valid.  */
    2928                 :      19888 :   bool fail = path.length () == 0;
    2929                 :      19888 :   bool neg = false;
    2930                 :      19888 :   int sign = -1;
    2931                 :      19888 :   *code = ERROR_MARK;
    2932                 :      93132 :   for (unsigned i = 1; i < path.length (); ++i)
    2933                 :            :     {
    2934                 :      26899 :       gimple *use_stmt = USE_STMT (path[i].second);
    2935                 :      26899 :       tree op = USE_FROM_PTR (path[i].second);
    2936                 :      26899 :       if (! is_gimple_assign (use_stmt)
     2937                 :            :           /* The following makes sure we can compute the operand index
     2938                 :            :              easily, and it mostly disallows chaining via COND_EXPR condition
     2939                 :            :              operands.  */
    2940                 :      26899 :           || (gimple_assign_rhs1_ptr (use_stmt) != path[i].second->use
    2941                 :      14409 :               && (gimple_num_ops (use_stmt) <= 2
    2942                 :      14407 :                   || gimple_assign_rhs2_ptr (use_stmt) != path[i].second->use)
    2943                 :        520 :               && (gimple_num_ops (use_stmt) <= 3
    2944                 :        518 :                   || gimple_assign_rhs3_ptr (use_stmt) != path[i].second->use)))
    2945                 :            :         {
    2946                 :            :           fail = true;
    2947                 :        221 :           break;
    2948                 :            :         }
     2949                 :            :       /* Check that the op is used in only a single stmt inside
     2950                 :            :          the loop.  */
    2951                 :      26884 :       imm_use_iterator imm_iter;
    2952                 :      26884 :       gimple *op_use_stmt;
    2953                 :      26884 :       unsigned cnt = 0;
    2954                 :      62858 :       FOR_EACH_IMM_USE_STMT (op_use_stmt, imm_iter, op)
    2955                 :      35974 :         if (!is_gimple_debug (op_use_stmt)
    2956                 :      35974 :             && flow_bb_inside_loop_p (loop, gimple_bb (op_use_stmt)))
    2957                 :            :           {
    2958                 :            :             /* We want to allow x + x but not x < 1 ? x : 2.  */
    2959                 :      26976 :             if (is_gimple_assign (op_use_stmt)
    2960                 :      26976 :                 && gimple_assign_rhs_code (op_use_stmt) == COND_EXPR)
    2961                 :            :               {
    2962                 :       1679 :                 use_operand_p use_p;
    2963                 :       3358 :                 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
    2964                 :       1679 :                   cnt++;
    2965                 :            :               }
    2966                 :            :             else
    2967                 :      25297 :               cnt++;
    2968                 :            :           }
    2969                 :      26884 :       if (cnt != 1)
    2970                 :            :         {
    2971                 :            :           fail = true;
    2972                 :            :           break;
    2973                 :            :         }
    2974                 :      26806 :       tree_code use_code = gimple_assign_rhs_code (use_stmt);
    2975                 :      26806 :       if (use_code == MINUS_EXPR)
    2976                 :            :         {
    2977                 :        571 :           use_code = PLUS_EXPR;
    2978                 :            :           /* Track whether we negate the reduction value each iteration.  */
    2979                 :        571 :           if (gimple_assign_rhs2 (use_stmt) == op)
    2980                 :         22 :             neg = ! neg;
    2981                 :            :         }
    2982                 :      26806 :       if (CONVERT_EXPR_CODE_P (use_code)
    2983                 :      32775 :           && tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (use_stmt)),
    2984                 :       5969 :                                     TREE_TYPE (gimple_assign_rhs1 (use_stmt))))
    2985                 :            :         ;
    2986                 :      20849 :       else if (*code == ERROR_MARK)
    2987                 :            :         {
    2988                 :      19795 :           *code = use_code;
    2989                 :      19795 :           sign = TYPE_SIGN (TREE_TYPE (gimple_assign_lhs (use_stmt)));
    2990                 :            :         }
    2991                 :       1054 :       else if (use_code != *code)
    2992                 :            :         {
    2993                 :            :           fail = true;
    2994                 :            :           break;
    2995                 :            :         }
    2996                 :        929 :       else if ((use_code == MIN_EXPR
    2997                 :        929 :                 || use_code == MAX_EXPR)
    2998                 :        934 :                && sign != TYPE_SIGN (TREE_TYPE (gimple_assign_lhs (use_stmt))))
    2999                 :            :         {
    3000                 :            :           fail = true;
    3001                 :            :           break;
    3002                 :            :         }
    3003                 :            :     }
    3004                 :      20130 :   return ! fail && ! neg && *code != ERROR_MARK;
    3005                 :            : }
    3006                 :            : 
    3007                 :            : bool
    3008                 :          2 : check_reduction_path (dump_user_location_t loc, loop_p loop, gphi *phi,
    3009                 :            :                       tree loop_arg, enum tree_code code)
    3010                 :            : {
    3011                 :          2 :   auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
    3012                 :          2 :   enum tree_code code_;
    3013                 :          2 :   return (check_reduction_path (loc, loop, phi, loop_arg, &code_, path)
    3014                 :          2 :           && code_ == code);
    3015                 :            : }
    3016                 :            : 
    3017                 :            : 
    3018                 :            : 
    3019                 :            : /* Function vect_is_simple_reduction
    3020                 :            : 
    3021                 :            :    (1) Detect a cross-iteration def-use cycle that represents a simple
    3022                 :            :    reduction computation.  We look for the following pattern:
    3023                 :            : 
    3024                 :            :    loop_header:
    3025                 :            :      a1 = phi < a0, a2 >
    3026                 :            :      a3 = ...
    3027                 :            :      a2 = operation (a3, a1)
    3028                 :            : 
    3029                 :            :    or
    3030                 :            : 
    3031                 :            :    a3 = ...
    3032                 :            :    loop_header:
    3033                 :            :      a1 = phi < a0, a2 >
    3034                 :            :      a2 = operation (a3, a1)
    3035                 :            : 
    3036                 :            :    such that:
    3037                 :            :    1. operation is commutative and associative and it is safe to
    3038                 :            :       change the order of the computation
    3039                 :            :    2. no uses for a2 in the loop (a2 is used out of the loop)
    3040                 :            :    3. no uses of a1 in the loop besides the reduction operation
    3041                 :            :    4. no uses of a1 outside the loop.
    3042                 :            : 
    3043                 :            :    Conditions 1,4 are tested here.
    3044                 :            :    Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
    3045                 :            : 
    3046                 :            :    (2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
    3047                 :            :    nested cycles.
    3048                 :            : 
    3049                 :            :    (3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
    3050                 :            :    reductions:
    3051                 :            : 
    3052                 :            :      a1 = phi < a0, a2 >
    3053                 :            :      inner loop (def of a3)
    3054                 :            :      a2 = phi < a3 >
    3055                 :            : 
    3056                 :            :    (4) Detect condition expressions, i.e.:
    3057                 :            :      for (int i = 0; i < N; i++)
    3058                 :            :        if (a[i] < val)
    3059                 :            :         ret_val = a[i];
    3060                 :            : 
    3061                 :            : */
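                         :            : 
                         :            : /* Illustrative sketch (not part of the original sources): a C loop that
                         :            :    matches pattern (1) above, accumulating into "sum"; the names "sum",
                         :            :    "a" and "N" are made up for the example:
                         :            : 
                         :            :      int sum = 0;
                         :            :      for (int i = 0; i < N; i++)
                         :            :        sum = sum + a[i];
                         :            : 
                         :            :    which the loop exposes as the cross-iteration cycle
                         :            : 
                         :            :    loop_header:
                         :            :      sum_1 = phi < 0, sum_2 >
                         :            :      sum_2 = sum_1 + a[i]        <-- the "operation" of pattern (1)  */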
    3062                 :            : 
    3063                 :            : static stmt_vec_info
    3064                 :      21917 : vect_is_simple_reduction (loop_vec_info loop_info, stmt_vec_info phi_info,
    3065                 :            :                           bool *double_reduc, bool *reduc_chain_p)
    3066                 :            : {
    3067                 :      21917 :   gphi *phi = as_a <gphi *> (phi_info->stmt);
    3068                 :      21917 :   gimple *phi_use_stmt = NULL;
    3069                 :      21917 :   imm_use_iterator imm_iter;
    3070                 :      21917 :   use_operand_p use_p;
    3071                 :            : 
    3072                 :      21917 :   *double_reduc = false;
    3073                 :      21917 :   *reduc_chain_p = false;
    3074                 :      21917 :   STMT_VINFO_REDUC_TYPE (phi_info) = TREE_CODE_REDUCTION;
    3075                 :            : 
    3076                 :      21917 :   tree phi_name = PHI_RESULT (phi);
    3077                 :            :   /* ???  If there are no uses of the PHI result, the inner loop reduction
    3078                 :            :      won't be detected as a possible double reduction by vectorizable_reduction
    3079                 :            :      because that tries to walk the PHI arg from the preheader edge, which
    3080                 :            :      can be constant.  See PR60382.  */
    3081                 :      21917 :   if (has_zero_uses (phi_name))
    3082                 :            :     return NULL;
    3083                 :      21884 :   class loop *loop = (gimple_bb (phi))->loop_father;
    3084                 :      21884 :   unsigned nphi_def_loop_uses = 0;
    3085                 :      54549 :   FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
    3086                 :            :     {
    3087                 :      32750 :       gimple *use_stmt = USE_STMT (use_p);
    3088                 :      32750 :       if (is_gimple_debug (use_stmt))
    3089                 :       8840 :         continue;
    3090                 :            : 
    3091                 :      23910 :       if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
    3092                 :            :         {
    3093                 :         85 :           if (dump_enabled_p ())
    3094                 :          8 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    3095                 :            :                              "intermediate value used outside loop.\n");
    3096                 :            : 
    3097                 :         85 :           return NULL;
    3098                 :            :         }
    3099                 :            : 
    3100                 :      23825 :       nphi_def_loop_uses++;
    3101                 :      23825 :       phi_use_stmt = use_stmt;
    3102                 :            :     }
    3103                 :            : 
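                         :            :   /* The PHI argument coming in over the latch edge is the value computed
                         :            :      by the previous iteration, i.e. the candidate reduction result.  */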
    3104                 :      21799 :   tree latch_def = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
    3105                 :      21799 :   if (TREE_CODE (latch_def) != SSA_NAME)
    3106                 :            :     {
    3107                 :         12 :       if (dump_enabled_p ())
    3108                 :          0 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    3109                 :            :                          "reduction: not ssa_name: %T\n", latch_def);
    3110                 :         12 :       return NULL;
    3111                 :            :     }
    3112                 :            : 
    3113                 :      21787 :   stmt_vec_info def_stmt_info = loop_info->lookup_def (latch_def);
    3114                 :      21787 :   if (!def_stmt_info
    3115                 :      21787 :       || !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt_info->stmt)))
    3116                 :          6 :     return NULL;
    3117                 :            : 
    3118                 :      21781 :   bool nested_in_vect_loop
    3119                 :      21781 :     = flow_loop_nested_p (LOOP_VINFO_LOOP (loop_info), loop);
    3120                 :      21781 :   unsigned nlatch_def_loop_uses = 0;
    3121                 :      21781 :   auto_vec<gphi *, 3> lcphis;
    3122                 :      21781 :   bool inner_loop_of_double_reduc = false;
    3123                 :      86847 :   FOR_EACH_IMM_USE_FAST (use_p, imm_iter, latch_def)
    3124                 :            :     {
    3125                 :      65066 :       gimple *use_stmt = USE_STMT (use_p);
    3126                 :      65066 :       if (is_gimple_debug (use_stmt))
    3127                 :      21928 :         continue;
    3128                 :      43138 :       if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
    3129                 :      22157 :         nlatch_def_loop_uses++;
    3130                 :            :       else
    3131                 :            :         {
    3132                 :            :           /* We can have more than one loop-closed PHI.  */
    3133                 :      20981 :           lcphis.safe_push (as_a <gphi *> (use_stmt));
    3134                 :      20981 :           if (nested_in_vect_loop
    3135                 :      20981 :               && (STMT_VINFO_DEF_TYPE (loop_info->lookup_stmt (use_stmt))
    3136                 :            :                   == vect_double_reduction_def))
    3137                 :            :             inner_loop_of_double_reduc = true;
    3138                 :            :         }
    3139                 :            :     }
    3140                 :            : 
    3141                 :            :   /* If we are vectorizing an inner reduction, we execute it in the
    3142                 :            :      original order only when we are not dealing with a double
    3143                 :            :      reduction.  */
    3144                 :      21781 :   if (nested_in_vect_loop && !inner_loop_of_double_reduc)
    3145                 :            :     {
    3146                 :        509 :       if (dump_enabled_p ())
    3147                 :        240 :         report_vect_op (MSG_NOTE, def_stmt_info->stmt,
    3148                 :            :                         "detected nested cycle: ");
    3149                 :        509 :       return def_stmt_info;
    3150                 :            :     }
    3151                 :            : 
    3152                 :            :   /* If this isn't a nested cycle or if the nested cycle reduction value
    3153                 :            :      is used outside of the inner loop, we cannot handle uses of the reduction
    3154                 :            :      value.  */
    3155                 :      21272 :   if (nlatch_def_loop_uses > 1 || nphi_def_loop_uses > 1)
    3156                 :            :     {
    3157                 :       1180 :       if (dump_enabled_p ())
    3158                 :        132 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    3159                 :            :                          "reduction used in loop.\n");
    3160                 :       1180 :       return NULL;
    3161                 :            :     }
    3162                 :            : 
    3163                 :            :   /* If DEF_STMT is a phi node itself, we expect it to have a single argument
    3164                 :            :      defined in the inner loop.  */
    3165                 :      20092 :   if (gphi *def_stmt = dyn_cast <gphi *> (def_stmt_info->stmt))
    3166                 :            :     {
    3167                 :        206 :       tree op1 = PHI_ARG_DEF (def_stmt, 0);
    3168                 :        206 :       if (gimple_phi_num_args (def_stmt) != 1
    3169                 :        206 :           || TREE_CODE (op1) != SSA_NAME)
    3170                 :            :         {
    3171                 :          9 :           if (dump_enabled_p ())
    3172                 :          2 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    3173                 :            :                              "unsupported phi node definition.\n");
    3174                 :            : 
    3175                 :          9 :           return NULL;
    3176                 :            :         }
    3177                 :            : 
    3178                 :        197 :       gimple *def1 = SSA_NAME_DEF_STMT (op1);
    3179                 :        197 :       if (gimple_bb (def1)
    3180                 :        197 :           && flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
    3181                 :        197 :           && loop->inner
    3182                 :        197 :           && flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
    3183                 :        197 :           && is_gimple_assign (def1)
    3184                 :        191 :           && is_a <gphi *> (phi_use_stmt)
    3185                 :        385 :           && flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
    3186                 :            :         {
    3187                 :        188 :           if (dump_enabled_p ())
    3188                 :         96 :             report_vect_op (MSG_NOTE, def_stmt,
    3189                 :            :                             "detected double reduction: ");
    3190                 :            : 
    3191                 :        188 :           *double_reduc = true;
    3192                 :        188 :           return def_stmt_info;
    3193                 :            :         }
    3194                 :            : 
    3195                 :          9 :       return NULL;
    3196                 :            :     }
    3197                 :            : 
    3198                 :            :   /* Look for the expression computing latch_def from the loop PHI result.  */
    3199                 :      41667 :   auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
    3200                 :      19886 :   enum tree_code code;
    3201                 :      19886 :   if (check_reduction_path (vect_location, loop, phi, latch_def, &code,
    3202                 :            :                             path))
    3203                 :            :     {
    3204                 :      19644 :       STMT_VINFO_REDUC_CODE (phi_info) = code;
    3205                 :      19644 :       if (code == COND_EXPR && !nested_in_vect_loop)
    3206                 :       1449 :         STMT_VINFO_REDUC_TYPE (phi_info) = COND_REDUCTION;
    3207                 :            : 
    3208                 :            :       /* Fill in STMT_VINFO_REDUC_IDX and gather stmts for an SLP
    3209                 :            :          reduction chain for which the additional restriction is that
    3210                 :            :          all operations in the chain are the same.  */
    3211                 :      39288 :       auto_vec<stmt_vec_info, 8> reduc_chain;
    3212                 :      19644 :       unsigned i;
    3213                 :      19644 :       bool is_slp_reduc = !nested_in_vect_loop && code != COND_EXPR;
    3214                 :      65724 :       for (i = path.length () - 1; i >= 1; --i)
    3215                 :            :         {
    3216                 :      26436 :           gimple *stmt = USE_STMT (path[i].second);
    3217                 :      26436 :           stmt_vec_info stmt_info = loop_info->lookup_stmt (stmt);
    3218                 :      26436 :           STMT_VINFO_REDUC_IDX (stmt_info)
    3219                 :      26436 :             = path[i].second->use - gimple_assign_rhs1_ptr (stmt);
    3220                 :      26436 :           enum tree_code stmt_code = gimple_assign_rhs_code (stmt);
    3221                 :      26436 :           bool leading_conversion = (CONVERT_EXPR_CODE_P (stmt_code)
    3222                 :      26436 :                                      && (i == 1 || i == path.length () - 1));
    3223                 :       6444 :           if ((stmt_code != code && !leading_conversion)
    3224                 :            :               /* We can only handle the final value in epilogue
    3225                 :            :                  generation for reduction chains.  */
    3226                 :      32322 :               || (i != 1 && !has_single_use (gimple_assign_lhs (stmt))))
    3227                 :            :             is_slp_reduc = false;
    3228                 :            :           /* For reduction chains we support trailing/leading
    3229                 :            :              conversions.  We do not store those in the actual chain.  */
    3230                 :      26436 :           if (leading_conversion)
    3231                 :       5886 :             continue;
    3232                 :      20550 :           reduc_chain.safe_push (stmt_info);
    3233                 :            :         }
    3234                 :      19644 :       if (is_slp_reduc && reduc_chain.length () > 1)
    3235                 :            :         {
    3236                 :        920 :           for (unsigned i = 0; i < reduc_chain.length () - 1; ++i)
    3237                 :            :             {
    3238                 :        715 :               REDUC_GROUP_FIRST_ELEMENT (reduc_chain[i]) = reduc_chain[0];
    3239                 :        715 :               REDUC_GROUP_NEXT_ELEMENT (reduc_chain[i]) = reduc_chain[i+1];
    3240                 :            :             }
    3241                 :        205 :           REDUC_GROUP_FIRST_ELEMENT (reduc_chain.last ()) = reduc_chain[0];
    3242                 :        205 :           REDUC_GROUP_NEXT_ELEMENT (reduc_chain.last ()) = NULL;
    3243                 :            : 
    3244                 :            :           /* Save the chain for further analysis in SLP detection.  */
    3245                 :        205 :           LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (reduc_chain[0]);
    3246                 :        205 :           REDUC_GROUP_SIZE (reduc_chain[0]) = reduc_chain.length ();
    3247                 :            : 
    3248                 :        205 :           *reduc_chain_p = true;
    3249                 :        205 :           if (dump_enabled_p ())
    3250                 :         76 :             dump_printf_loc (MSG_NOTE, vect_location,
    3251                 :            :                             "reduction: detected reduction chain\n");
    3252                 :            :         }
    3253                 :      19439 :       else if (dump_enabled_p ())
    3254                 :       1887 :         dump_printf_loc (MSG_NOTE, vect_location,
    3255                 :            :                          "reduction: detected reduction\n");
    3256                 :            : 
    3257                 :      19644 :       return def_stmt_info;
    3258                 :            :     }
    3259                 :            : 
    3260                 :        242 :   if (dump_enabled_p ())
    3261                 :         30 :     dump_printf_loc (MSG_NOTE, vect_location,
    3262                 :            :                      "reduction: unknown pattern\n");
    3263                 :            : 
    3264                 :            :   return NULL;
    3265                 :            : }
    3266                 :            : 
    3267                 :            : /* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.  */
    3268                 :            : int
    3269                 :      39664 : vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
    3270                 :            :                              int *peel_iters_epilogue,
    3271                 :            :                              stmt_vector_for_cost *scalar_cost_vec,
    3272                 :            :                              stmt_vector_for_cost *prologue_cost_vec,
    3273                 :            :                              stmt_vector_for_cost *epilogue_cost_vec)
    3274                 :            : {
    3275                 :      39664 :   int retval = 0;
    3276                 :      39664 :   int assumed_vf = vect_vf_for_cost (loop_vinfo);
    3277                 :            : 
    3278                 :      39664 :   if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
    3279                 :            :     {
    3280                 :      23597 :       *peel_iters_epilogue = assumed_vf / 2;
    3281                 :      23597 :       if (dump_enabled_p ())
    3282                 :       1158 :         dump_printf_loc (MSG_NOTE, vect_location,
    3283                 :            :                          "cost model: epilogue peel iters set to vf/2 "
    3284                 :            :                          "because loop iterations are unknown.\n");
    3285                 :            : 
    3286                 :            :       /* If peeled iterations are known but the number of scalar loop
    3287                 :            :          iterations is unknown, count a taken branch per peeled loop.  */
    3288                 :      23597 :       retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
    3289                 :            :                                  NULL, 0, vect_prologue);
    3290                 :      23597 :       retval += record_stmt_cost (epilogue_cost_vec, 1, cond_branch_taken,
    3291                 :            :                                   NULL, 0, vect_epilogue);
    3292                 :            :     }
    3293                 :            :   else
    3294                 :            :     {
    3295                 :      16067 :       int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
    3296                 :      16067 :       peel_iters_prologue = niters < peel_iters_prologue ?
    3297                 :            :                             niters : peel_iters_prologue;
    3298                 :      16067 :       *peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
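                         :            :       /* Worked example (purely illustrative numbers): with niters == 100,
                         :            :          peel_iters_prologue == 3 and assumed_vf == 8 this leaves
                         :            :          (100 - 3) % 8 == 1 epilogue iteration.  */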
    3299                 :            :       /* If we need to peel for gaps, but no peeling is required, we have to
    3300                 :            :          peel VF iterations.  */
    3301                 :      16067 :       if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
    3302                 :         13 :         *peel_iters_epilogue = assumed_vf;
    3303                 :            :     }
    3304                 :            : 
    3305                 :      39664 :   stmt_info_for_cost *si;
    3306                 :      39664 :   int j;
    3307                 :      39664 :   if (peel_iters_prologue)
    3308                 :      64401 :     FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
    3309                 :      54403 :       retval += record_stmt_cost (prologue_cost_vec,
    3310                 :      54403 :                                   si->count * peel_iters_prologue,
    3311                 :            :                                   si->kind, si->stmt_info, si->misalign,
    3312                 :            :                                   vect_prologue);
    3313                 :      39664 :   if (*peel_iters_epilogue)
    3314                 :     204374 :     FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
    3315                 :     174329 :       retval += record_stmt_cost (epilogue_cost_vec,
    3316                 :     174329 :                                   si->count * *peel_iters_epilogue,
    3317                 :            :                                   si->kind, si->stmt_info, si->misalign,
    3318                 :            :                                   vect_epilogue);
    3319                 :            : 
    3320                 :      39664 :   return retval;
    3321                 :            : }
    3322                 :            : 
    3323                 :            : /* Function vect_estimate_min_profitable_iters
    3324                 :            : 
    3325                 :            :    Return the number of iterations required for the vector version of the
    3326                 :            :    loop to be profitable relative to the cost of the scalar version of the
    3327                 :            :    loop.
    3328                 :            : 
    3329                 :            :    *RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
    3330                 :            :    of iterations for vectorization.  A value of -1 means loop vectorization
    3331                 :            :    is not profitable.  This returned value may be used for a dynamic
    3332                 :            :    profitability check.
    3333                 :            : 
    3334                 :            :    *RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
    3335                 :            :    for a static check against the estimated number of iterations.  */
    3336                 :            : 
    3337                 :            : static void
    3338                 :      26225 : vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
    3339                 :            :                                     int *ret_min_profitable_niters,
    3340                 :            :                                     int *ret_min_profitable_estimate)
    3341                 :            : {
    3342                 :      26225 :   int min_profitable_iters;
    3343                 :      26225 :   int min_profitable_estimate;
    3344                 :      26225 :   int peel_iters_prologue;
    3345                 :      26225 :   int peel_iters_epilogue;
    3346                 :      26225 :   unsigned vec_inside_cost = 0;
    3347                 :      26225 :   int vec_outside_cost = 0;
    3348                 :      26225 :   unsigned vec_prologue_cost = 0;
    3349                 :      26225 :   unsigned vec_epilogue_cost = 0;
    3350                 :      26225 :   int scalar_single_iter_cost = 0;
    3351                 :      26225 :   int scalar_outside_cost = 0;
    3352                 :      26225 :   int assumed_vf = vect_vf_for_cost (loop_vinfo);
    3353                 :      26225 :   int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
    3354                 :      26225 :   void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
    3355                 :            : 
    3356                 :            :   /* Cost model disabled.  */
    3357                 :      52450 :   if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
    3358                 :            :     {
    3359                 :       9828 :       if (dump_enabled_p ())
    3360                 :       6926 :         dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
    3361                 :       9828 :       *ret_min_profitable_niters = 0;
    3362                 :       9828 :       *ret_min_profitable_estimate = 0;
    3363                 :      12842 :       return;
    3364                 :            :     }
    3365                 :            : 
    3366                 :            :   /* Requires loop versioning tests to handle misalignment.  */
    3367                 :      16397 :   if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
    3368                 :            :     {
    3369                 :            :       /*  FIXME: Make cost depend on complexity of individual check.  */
    3370                 :          0 :       unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
    3371                 :          0 :       (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
    3372                 :            :                             vect_prologue);
    3373                 :          0 :       if (dump_enabled_p ())
    3374                 :          0 :         dump_printf (MSG_NOTE,
    3375                 :            :                      "cost model: Adding cost of checks for loop "
    3376                 :            :                      "versioning to treat misalignment.\n");
    3377                 :            :     }
    3378                 :            : 
    3379                 :            :   /* Requires loop versioning with alias checks.  */
    3380                 :      16397 :   if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    3381                 :            :     {
    3382                 :            :       /*  FIXME: Make cost depend on complexity of individual check.  */
    3383                 :       2362 :       unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
    3384                 :       2362 :       (void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
    3385                 :            :                             vect_prologue);
    3386                 :       2362 :       len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
    3387                 :          0 :       if (len)
    3388                 :            :         /* Count LEN - 1 ANDs and LEN comparisons.  */
    3389                 :          0 :         (void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
    3390                 :            :                               NULL, 0, vect_prologue);
    3391                 :       2362 :       len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
    3392                 :        475 :       if (len)
    3393                 :            :         {
    3394                 :            :           /* Count LEN - 1 ANDs and LEN comparisons.  */
    3395                 :        475 :           unsigned int nstmts = len * 2 - 1;
    3396                 :            :           /* +1 for each bias that needs adding.  */
    3397                 :        950 :           for (unsigned int i = 0; i < len; ++i)
    3398                 :        475 :             if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
    3399                 :         62 :               nstmts += 1;
    3400                 :        475 :           (void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
    3401                 :            :                                 NULL, 0, vect_prologue);
    3402                 :            :         }
    3403                 :       2362 :       if (dump_enabled_p ())
    3404                 :          9 :         dump_printf (MSG_NOTE,
    3405                 :            :                      "cost model: Adding cost of checks for loop "
    3406                 :            :                      "versioning aliasing.\n");
    3407                 :            :     }
    3408                 :            : 
    3409                 :            :   /* Requires loop versioning with niter checks.  */
    3410                 :      16397 :   if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
    3411                 :            :     {
    3412                 :            :       /*  FIXME: Make cost depend on complexity of individual check.  */
    3413                 :         21 :       (void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
    3414                 :            :                             vect_prologue);
    3415                 :         21 :       if (dump_enabled_p ())
    3416                 :          1 :         dump_printf (MSG_NOTE,
    3417                 :            :                      "cost model: Adding cost of checks for loop "
    3418                 :            :                      "versioning niters.\n");
    3419                 :            :     }
    3420                 :            : 
    3421                 :      16397 :   if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    3422                 :       2382 :     (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
    3423                 :            :                           vect_prologue);
    3424                 :            : 
    3425                 :            :   /* Count statements in the scalar loop.  Use this as the scalar cost
    3426                 :            :      for a single iteration for now.
    3427                 :            : 
    3428                 :            :      TODO: Add outer loop support.
    3429                 :            : 
    3430                 :            :      TODO: Consider assigning different costs to different scalar
    3431                 :            :      statements.  */
    3432                 :            : 
    3433                 :      16397 :   scalar_single_iter_cost
    3434                 :      16397 :     = LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
    3435                 :            : 
    3436                 :            :   /* Add additional cost for the peeled instructions in the prologue and
    3437                 :            :      epilogue loops.  (For fully-masked loops there will be no peeling.)
    3438                 :            : 
    3439                 :            :      FORNOW: If we don't know the value of peel_iters for prologue or epilogue
    3440                 :            :      at compile-time - we assume it's vf/2 (the worst would be vf-1).
    3441                 :            : 
    3442                 :            :      TODO: Build an expression that represents peel_iters for prologue and
    3443                 :            :      epilogue to be used in a run-time test.  */
    3444                 :            : 
    3445                 :      16397 :   if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    3446                 :            :     {
    3447                 :          0 :       peel_iters_prologue = 0;
    3448                 :          0 :       peel_iters_epilogue = 0;
    3449                 :            : 
    3450                 :          0 :       if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    3451                 :            :         {
    3452                 :            :           /* We need to peel exactly one iteration.  */
    3453                 :          0 :           peel_iters_epilogue += 1;
    3454                 :          0 :           stmt_info_for_cost *si;
    3455                 :          0 :           int j;
    3456                 :          0 :           FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
    3457                 :            :                             j, si)
    3458                 :          0 :             (void) add_stmt_cost (target_cost_data, si->count,
    3459                 :            :                                   si->kind, si->stmt_info, si->misalign,
    3460                 :            :                                   vect_epilogue);
    3461                 :            :         }
    3462                 :            : 
    3463                 :            :       /* Calculate how many masks we need to generate.  */
    3464                 :            :       unsigned int num_masks = 0;
    3465                 :            :       rgroup_masks *rgm;
    3466                 :            :       unsigned int num_vectors_m1;
    3467                 :          0 :       FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), num_vectors_m1, rgm)
    3468                 :          0 :         if (rgm->mask_type)
    3469                 :          0 :           num_masks += num_vectors_m1 + 1;
    3470                 :          0 :       gcc_assert (num_masks > 0);
    3471                 :            : 
    3472                 :            :       /* In the worst case, we need to generate each mask in the prologue
    3473                 :            :          and in the loop body.  One of the loop body mask instructions
    3474                 :            :          replaces the comparison in the scalar loop, and since we don't
    3475                 :            :          count the scalar comparison against the scalar body, we shouldn't
    3476                 :            :          count that vector instruction against the vector body either.
    3477                 :            : 
    3478                 :            :          Sometimes we can use unpacks instead of generating prologue
    3479                 :            :          masks and sometimes the prologue mask will fold to a constant,
    3480                 :            :          so the actual prologue cost might be smaller.  However, it's
    3481                 :            :          simpler and safer to use the worst-case cost; if this ends up
    3482                 :            :          being the tie-breaker between vectorizing or not, then it's
    3483                 :            :          probably better not to vectorize.  */
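                         :            :       /* Purely illustrative instance: three mask rgroups needing 1, 2 and
                         :            :          4 masks give num_masks == 7, so 7 prologue statements and
                         :            :          7 - 1 == 6 loop body statements are costed below.  */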
    3484                 :          0 :       (void) add_stmt_cost (target_cost_data, num_masks, vector_stmt,
    3485                 :            :                             NULL, 0, vect_prologue);
    3486                 :          0 :       (void) add_stmt_cost (target_cost_data, num_masks - 1, vector_stmt,
    3487                 :            :                             NULL, 0, vect_body);
    3488                 :            :     }
    3489                 :      16397 :   else if (npeel < 0)
    3490                 :            :     {
    3491                 :          5 :       peel_iters_prologue = assumed_vf / 2;
    3492                 :          5 :       if (dump_enabled_p ())
    3493                 :          1 :         dump_printf (MSG_NOTE, "cost model: "
    3494                 :            :                      "prologue peel iters set to vf/2.\n");
    3495                 :            : 
    3496                 :            :       /* If peeling for alignment is unknown, the loop bound of the main loop
    3497                 :            :          becomes unknown.  */
    3498                 :          5 :       peel_iters_epilogue = assumed_vf / 2;
    3499                 :          5 :       if (dump_enabled_p ())
    3500                 :          1 :         dump_printf (MSG_NOTE, "cost model: "
    3501                 :            :                      "epilogue peel iters set to vf/2 because "
    3502                 :            :                      "peeling for alignment is unknown.\n");
    3503                 :            : 
    3504                 :            :       /* If peeled iterations are unknown, count a taken branch and a not taken
    3505                 :            :          branch per peeled loop. Even if scalar loop iterations are known,
    3506                 :            :          vector iterations are not known since peeled prologue iterations are
    3507                 :            :          not known. Hence guards remain the same.  */
    3508                 :          5 :       (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
    3509                 :            :                             NULL, 0, vect_prologue);
    3510                 :          5 :       (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
    3511                 :            :                             NULL, 0, vect_prologue);
    3512                 :          5 :       (void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
    3513                 :            :                             NULL, 0, vect_epilogue);
    3514                 :          5 :       (void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
    3515                 :            :                             NULL, 0, vect_epilogue);
    3516                 :          5 :       stmt_info_for_cost *si;
    3517                 :          5 :       int j;
    3518                 :         30 :       FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
    3519                 :            :         {
    3520                 :         25 :           (void) add_stmt_cost (target_cost_data,
    3521                 :         25 :                                 si->count * peel_iters_prologue,
    3522                 :            :                                 si->kind, si->stmt_info, si->misalign,
    3523                 :            :                                 vect_prologue);
    3524                 :         25 :           (void) add_stmt_cost (target_cost_data,
    3525                 :         25 :                                 si->count * peel_iters_epilogue,
    3526                 :            :                                 si->kind, si->stmt_info, si->misalign,
    3527                 :            :                                 vect_epilogue);
    3528                 :            :         }
    3529                 :            :     }
    3530                 :            :   else
    3531                 :            :     {
    3532                 :      16392 :       stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
    3533                 :      16392 :       stmt_info_for_cost *si;
    3534                 :      16392 :       int j;
    3535                 :      16392 :       void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
    3536                 :            : 
    3537                 :      16392 :       prologue_cost_vec.create (2);
    3538                 :      16392 :       epilogue_cost_vec.create (2);
    3539                 :      16392 :       peel_iters_prologue = npeel;
    3540                 :            : 
    3541                 :      16392 :       (void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
    3542                 :            :                                           &peel_iters_epilogue,
    3543                 :            :                                           &LOOP_VINFO_SCALAR_ITERATION_COST
    3544                 :            :                                             (loop_vinfo),
    3545                 :            :                                           &prologue_cost_vec,
    3546                 :            :                                           &epilogue_cost_vec);
    3547                 :            : 
    3548                 :      27070 :       FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
    3549                 :      10678 :         (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
    3550                 :            :                               si->misalign, vect_prologue);
    3551                 :            : 
    3552                 :     108395 :       FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
    3553                 :      92003 :         (void) add_stmt_cost (data, si->count, si->kind, si->stmt_info,
    3554                 :            :                               si->misalign, vect_epilogue);
    3555                 :            : 
    3556                 :      16392 :       prologue_cost_vec.release ();
    3557                 :      32784 :       epilogue_cost_vec.release ();
    3558                 :            :     }
    3559                 :            : 
    3560                 :            :   /* FORNOW: The scalar outside cost is incremented in one of the
    3561                 :            :      following ways:
    3562                 :            : 
    3563                 :            :      1. The vectorizer checks for alignment and aliasing and generates
    3564                 :            :      a condition that allows dynamic vectorization.  A cost model
    3565                 :            :      check is ANDed with the versioning condition.  Hence the scalar code
    3566                 :            :      path now has the added cost of the versioning check.
    3567                 :            : 
    3568                 :            :        if (cost > th & versioning_check)
    3569                 :            :          jmp to vector code
    3570                 :            : 
    3571                 :            :      Hence run-time scalar is incremented by not-taken branch cost.
    3572                 :            : 
    3573                 :            :      2. The vectorizer then checks if a prologue is required.  If the
    3574                 :            :      cost model check was not done before during versioning, it has to
    3575                 :            :      be done before the prologue check.
    3576                 :            : 
    3577                 :            :        if (cost <= th)
    3578                 :            :          prologue = scalar_iters
    3579                 :            :        if (prologue == 0)
    3580                 :            :          jmp to vector code
    3581                 :            :        else
    3582                 :            :          execute prologue
    3583                 :            :        if (prologue == num_iters)
    3584                 :            :          go to exit
    3585                 :            : 
    3586                 :            :      Hence the run-time scalar cost is incremented by a taken branch,
    3587                 :            :      plus a not-taken branch, plus a taken branch cost.
    3588                 :            : 
    3589                 :            :      3. The vectorizer then checks if an epilogue is required.  If the
    3590                 :            :      cost model check was not done before during prologue check, it
    3591                 :            :      has to be done with the epilogue check.
    3592                 :            : 
    3593                 :            :        if (prologue == 0)
    3594                 :            :          jmp to vector code
    3595                 :            :        else
    3596                 :            :          execute prologue
    3597                 :            :        if (prologue == num_iters)
    3598                 :            :          go to exit
    3599                 :            :        vector code:
    3600                 :            :          if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
    3601                 :            :            jmp to epilogue
    3602                 :            : 
    3603                 :            :      Hence the run-time scalar cost should be incremented by 2 taken
    3604                 :            :      branches.
    3605                 :            : 
    3606                 :            :      TODO: The back end may reorder the BBs differently and reverse
    3607                 :            :      conditions/branch directions.  Change the estimates below to
    3608                 :            :      something more reasonable.  */
    3609                 :            : 
    3610                 :            :   /* If the number of iterations is known and we do not do versioning, we can
    3611                 :            :      decide whether to vectorize at compile time.  Hence the scalar version
    3612                 :            :      does not carry cost model guard costs.  */
    3613                 :       5721 :   if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
    3614                 :      22118 :       || LOOP_REQUIRES_VERSIONING (loop_vinfo))
    3615                 :            :     {
    3616                 :            :       /* Cost model check occurs at versioning.  */
    3617                 :      10958 :       if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    3618                 :       2382 :         scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
    3619                 :            :       else
    3620                 :            :         {
    3621                 :            :           /* Cost model check occurs at prologue generation.  */
    3622                 :       8576 :           if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
    3623                 :          0 :             scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
    3624                 :          0 :               + vect_get_stmt_cost (cond_branch_not_taken); 
    3625                 :            :           /* Cost model check occurs at epilogue generation.  */
    3626                 :            :           else
    3627                 :       8576 :             scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken); 
    3628                 :            :         }
    3629                 :            :     }
    3630                 :            : 
    3631                 :            :   /* Complete the target-specific cost calculations.  */
    3632                 :      16397 :   finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
    3633                 :            :                &vec_inside_cost, &vec_epilogue_cost);
    3634                 :            : 
    3635                 :      16397 :   vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
    3636                 :            : 
    3637                 :            :   /* Stash the costs so that we can compare two loop_vec_infos.  */
    3638                 :      16397 :   loop_vinfo->vec_inside_cost = vec_inside_cost;
    3639                 :      16397 :   loop_vinfo->vec_outside_cost = vec_outside_cost;
    3640                 :            : 
    3641                 :      16397 :   if (dump_enabled_p ())
    3642                 :            :     {
    3643                 :        198 :       dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
    3644                 :        198 :       dump_printf (MSG_NOTE, "  Vector inside of loop cost: %d\n",
    3645                 :            :                    vec_inside_cost);
    3646                 :        198 :       dump_printf (MSG_NOTE, "  Vector prologue cost: %d\n",
    3647                 :            :                    vec_prologue_cost);
    3648                 :        198 :       dump_printf (MSG_NOTE, "  Vector epilogue cost: %d\n",
    3649                 :            :                    vec_epilogue_cost);
    3650                 :        198 :       dump_printf (MSG_NOTE, "  Scalar iteration cost: %d\n",
    3651                 :            :                    scalar_single_iter_cost);
    3652                 :        198 :       dump_printf (MSG_NOTE, "  Scalar outside cost: %d\n",
    3653                 :            :                    scalar_outside_cost);
    3654                 :        198 :       dump_printf (MSG_NOTE, "  Vector outside cost: %d\n",
    3655                 :            :                    vec_outside_cost);
    3656                 :        198 :       dump_printf (MSG_NOTE, "  prologue iterations: %d\n",
    3657                 :            :                    peel_iters_prologue);
    3658                 :        198 :       dump_printf (MSG_NOTE, "  epilogue iterations: %d\n",
    3659                 :            :                    peel_iters_epilogue);
    3660                 :            :     }
    3661                 :            : 
    3662                 :            :   /* Calculate number of iterations required to make the vector version
    3663                 :            :      profitable, relative to the loop bodies only.  The following condition
    3664                 :            :      must hold true:
    3665                 :            :      SIC * niters + SOC > VIC * ((niters - NPEEL) / VF) + VOC
    3666                 :            :      where
    3667                 :            :      SIC = scalar iteration cost, VIC = vector iteration cost,
    3668                 :            :      VOC = vector outside cost, VF = vectorization factor,
    3669                 :            :      NPEEL = prologue iterations + epilogue iterations,
    3670                 :            :      SOC = scalar outside cost for run time cost model check.  */
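                         :            :   /* Purely illustrative instance of the condition: with SIC = 4, VIC = 6,
                         :            :      VOC = 40, VF = 4, NPEEL = 2 and SOC = 0, the smallest niters with
                         :            :      4 * niters > 6 * ((niters - 2) / 4) + 40 is niters = 15.  */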
    3671                 :            : 
    3672                 :      16397 :   int saving_per_viter = (scalar_single_iter_cost * assumed_vf
    3673                 :      16397 :                           - vec_inside_cost);
    3674                 :      16397 :   if (saving_per_viter <= 0)
    3675                 :            :     {
    3676                 :       3014 :       if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
    3677                 :          0 :         warning_at (vect_location.get_location_t (), OPT_Wopenmp_simd,
    3678                 :            :                     "vectorization did not happen for a simd loop");
    3679                 :            : 
    3680                 :       3014 :       if (dump_enabled_p ())
    3681                 :          1 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    3682                 :            :                          "cost model: the vector iteration cost = %d "
    3683                 :            :                          "divided by the scalar iteration cost = %d "
     3684                 :            :                          "is greater than or equal to the vectorization factor = %d"
    3685                 :            :                          ".\n",
    3686                 :            :                          vec_inside_cost, scalar_single_iter_cost, assumed_vf);
    3687                 :       3014 :       *ret_min_profitable_niters = -1;
    3688                 :       3014 :       *ret_min_profitable_estimate = -1;
    3689                 :       3014 :       return;
    3690                 :            :     }
    3691                 :            : 
    3692                 :            :   /* ??? The "if" arm is written to handle all cases; see below for what
    3693                 :            :      we would do for !LOOP_VINFO_FULLY_MASKED_P.  */
    3694                 :      13383 :   if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    3695                 :            :     {
    3696                 :            :       /* Rewriting the condition above in terms of the number of
    3697                 :            :          vector iterations (vniters) rather than the number of
    3698                 :            :          scalar iterations (niters) gives:
    3699                 :            : 
    3700                 :            :          SIC * (vniters * VF + NPEEL) + SOC > VIC * vniters + VOC
    3701                 :            : 
    3702                 :            :          <==> vniters * (SIC * VF - VIC) > VOC - SIC * NPEEL - SOC
    3703                 :            : 
     3704                 :            :          For integers N, X and Y with X > 0:
    3705                 :            : 
    3706                 :            :          N * X > Y <==> N >= (Y /[floor] X) + 1.  */
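      /* A quick check of that identity with hypothetical numbers: for X = 6
         and Y = 26, (26 /[floor] 6) + 1 = 5, and indeed 4 * 6 = 24 <= 26
         while 5 * 6 = 30 > 26.  The code below only applies the identity
         when the numerator (outside_overhead) is positive and otherwise
         falls back to a single vector iteration, which also sidesteps C's
         truncating division for non-positive numerators.  */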
    3707                 :          0 :       int outside_overhead = (vec_outside_cost
    3708                 :          0 :                               - scalar_single_iter_cost * peel_iters_prologue
    3709                 :          0 :                               - scalar_single_iter_cost * peel_iters_epilogue
    3710                 :            :                               - scalar_outside_cost);
    3711                 :            :       /* We're only interested in cases that require at least one
    3712                 :            :          vector iteration.  */
    3713                 :          0 :       int min_vec_niters = 1;
    3714                 :          0 :       if (outside_overhead > 0)
    3715                 :          0 :         min_vec_niters = outside_overhead / saving_per_viter + 1;
    3716                 :            : 
    3717                 :          0 :       if (dump_enabled_p ())
    3718                 :          0 :         dump_printf (MSG_NOTE, "  Minimum number of vector iterations: %d\n",
    3719                 :            :                      min_vec_niters);
    3720                 :            : 
    3721                 :          0 :       if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    3722                 :            :         {
    3723                 :            :           /* Now that we know the minimum number of vector iterations,
    3724                 :            :              find the minimum niters for which the scalar cost is larger:
    3725                 :            : 
    3726                 :            :              SIC * niters > VIC * vniters + VOC - SOC
    3727                 :            : 
    3728                 :            :              We know that the minimum niters is no more than
    3729                 :            :              vniters * VF + NPEEL, but it might be (and often is) less
    3730                 :            :              than that if a partial vector iteration is cheaper than the
    3731                 :            :              equivalent scalar code.  */
    3732                 :          0 :           int threshold = (vec_inside_cost * min_vec_niters
    3733                 :          0 :                            + vec_outside_cost
    3734                 :          0 :                            - scalar_outside_cost);
    3735                 :          0 :           if (threshold <= 0)
    3736                 :            :             min_profitable_iters = 1;
    3737                 :            :           else
    3738                 :          0 :             min_profitable_iters = threshold / scalar_single_iter_cost + 1;
    3739                 :            :         }
    3740                 :            :       else
    3741                 :            :         /* Convert the number of vector iterations into a number of
    3742                 :            :            scalar iterations.  */
    3743                 :          0 :         min_profitable_iters = (min_vec_niters * assumed_vf
    3744                 :          0 :                                 + peel_iters_prologue
    3745                 :          0 :                                 + peel_iters_epilogue);
    3746                 :            :     }
    3747                 :            :   else
    3748                 :            :     {
    3749                 :      13383 :       min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
    3750                 :      13383 :                               * assumed_vf
    3751                 :      13383 :                               - vec_inside_cost * peel_iters_prologue
    3752                 :      13383 :                               - vec_inside_cost * peel_iters_epilogue);
    3753                 :      13383 :       if (min_profitable_iters <= 0)
    3754                 :            :         min_profitable_iters = 0;
    3755                 :            :       else
    3756                 :            :         {
    3757                 :      12138 :           min_profitable_iters /= saving_per_viter;
    3758                 :            : 
    3759                 :      12138 :           if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
    3760                 :      12138 :               <= (((int) vec_inside_cost * min_profitable_iters)
    3761                 :      12138 :                   + (((int) vec_outside_cost - scalar_outside_cost)
    3762                 :            :                      * assumed_vf)))
    3763                 :      12138 :             min_profitable_iters++;
    3764                 :            :         }
    3765                 :            :     }
    3766                 :            : 
    3767                 :      13383 :   if (dump_enabled_p ())
    3768                 :        197 :     dump_printf (MSG_NOTE,
    3769                 :            :                  "  Calculated minimum iters for profitability: %d\n",
    3770                 :            :                  min_profitable_iters);
    3771                 :            : 
    3772                 :      13383 :   if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
    3773                 :      13383 :       && min_profitable_iters < (assumed_vf + peel_iters_prologue))
    3774                 :            :     /* We want the vectorized loop to execute at least once.  */
    3775                 :            :     min_profitable_iters = assumed_vf + peel_iters_prologue;
    3776                 :            : 
    3777                 :      13383 :   if (dump_enabled_p ())
    3778                 :        197 :     dump_printf_loc (MSG_NOTE, vect_location,
    3779                 :            :                      "  Runtime profitability threshold = %d\n",
    3780                 :            :                      min_profitable_iters);
    3781                 :            : 
    3782                 :      13383 :   *ret_min_profitable_niters = min_profitable_iters;
    3783                 :            : 
    3784                 :            :   /* Calculate number of iterations required to make the vector version
    3785                 :            :      profitable, relative to the loop bodies only.
    3786                 :            : 
     3787                 :            :      The cost of the non-vectorized variant is SIC * niters; it must beat the
     3788                 :            :      vector variant at the expected loop trip count, i.e. the following must hold:
    3789                 :            :      SIC * niters > VIC * ((niters - NPEEL) / VF) + VOC + SOC  */
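  /* Compared with the runtime threshold computed above, SOC is now added to
     the vector variant's overhead rather than credited to the scalar side.
     Reusing the hypothetical numbers from the earlier illustration
     (SIC = 4, VIC = 10, VOC = 20, SOC = 6, VF = 4, NPEEL = 3), the condition
     simplifies to 6 * niters > 74, i.e. an estimated trip count of roughly
     13 or more would be needed before vectorizing looks like a win.  */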
    3790                 :            : 
    3791                 :      13383 :   if (vec_outside_cost <= 0)
    3792                 :            :     min_profitable_estimate = 0;
    3793                 :      12177 :   else if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    3794                 :            :     {
    3795                 :            :       /* This is a repeat of the code above, but with + SOC rather
    3796                 :            :          than - SOC.  */
    3797                 :          0 :       int outside_overhead = (vec_outside_cost
    3798                 :          0 :                               - scalar_single_iter_cost * peel_iters_prologue
    3799                 :          0 :                               - scalar_single_iter_cost * peel_iters_epilogue
    3800                 :            :                               + scalar_outside_cost);
    3801                 :          0 :       int min_vec_niters = 1;
    3802                 :          0 :       if (outside_overhead > 0)
    3803                 :          0 :         min_vec_niters = outside_overhead / saving_per_viter + 1;
    3804                 :            : 
    3805                 :          0 :       if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    3806                 :            :         {
    3807                 :          0 :           int threshold = (vec_inside_cost * min_vec_niters
    3808                 :          0 :                            + vec_outside_cost
    3809                 :          0 :                            + scalar_outside_cost);
    3810                 :          0 :           min_profitable_estimate = threshold / scalar_single_iter_cost + 1;
    3811                 :            :         }
    3812                 :            :       else
    3813                 :            :         min_profitable_estimate = (min_vec_niters * assumed_vf
    3814                 :            :                                    + peel_iters_prologue
    3815                 :            :                                    + peel_iters_epilogue);
    3816                 :            :     }
    3817                 :            :   else
    3818                 :            :     {
    3819                 :      12177 :       min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
    3820                 :      12177 :                                  * assumed_vf
    3821                 :      12177 :                                  - vec_inside_cost * peel_iters_prologue
    3822                 :      12177 :                                  - vec_inside_cost * peel_iters_epilogue)
    3823                 :      12177 :                                  / ((scalar_single_iter_cost * assumed_vf)
    3824                 :      12177 :                                    - vec_inside_cost);
    3825                 :            :     }
    3826                 :      13383 :   min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
    3827                 :      13383 :   if (dump_enabled_p ())
    3828                 :        197 :     dump_printf_loc (MSG_NOTE, vect_location,
    3829                 :            :                      "  Static estimate profitability threshold = %d\n",
    3830                 :            :                      min_profitable_estimate);
    3831                 :            : 
    3832                 :      13383 :   *ret_min_profitable_estimate = min_profitable_estimate;
    3833                 :            : }
    3834                 :            : 
    3835                 :            : /* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
    3836                 :            :    vector elements (not bits) for a vector with NELT elements.  */
    3837                 :            : static void
    3838                 :      12706 : calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
    3839                 :            :                               vec_perm_builder *sel)
    3840                 :            : {
    3841                 :            :   /* The encoding is a single stepped pattern.  Any wrap-around is handled
    3842                 :            :      by vec_perm_indices.  */
    3843                 :      12706 :   sel->new_vector (nelt, 1, 3);
    3844                 :      50824 :   for (unsigned int i = 0; i < 3; i++)
    3845                 :      38118 :     sel->quick_push (i + offset);
    3846                 :      12706 : }
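/* For example (illustrative only), OFFSET = 2 and NELT = 8 produce the
   stepped encoding {2, 3, 4}, which vec_perm_indices expands to the selector
   {2, 3, 4, 5, 6, 7, 8, 9}.  Indices 8 and 9 select from the second permute
   operand, so with (for instance) a zero vector as the second operand the
   permutation acts as a whole-vector shift right by two elements.  */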
    3847                 :            : 
    3848                 :            : /* Checks whether the target supports whole-vector shifts for vectors of mode
    3849                 :            :    MODE.  This is the case if _either_ the platform handles vec_shr_optab, _or_
    3850                 :            :    it supports vec_perm_const with masks for all necessary shift amounts.  */
    3851                 :            : static bool
    3852                 :      11000 : have_whole_vector_shift (machine_mode mode)
    3853                 :            : {
    3854                 :      11000 :   if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
    3855                 :            :     return true;
    3856                 :            : 
    3857                 :            :   /* Variable-length vectors should be handled via the optab.  */
    3858                 :       3045 :   unsigned int nelt;
    3859                 :       6090 :   if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
    3860                 :            :     return false;
    3861                 :            : 
    3862                 :       3045 :   vec_perm_builder sel;
    3863                 :       6090 :   vec_perm_indices indices;
    3864                 :       6098 :   for (unsigned int i = nelt / 2; i >= 1; i /= 2)
    3865                 :            :     {
    3866                 :       5119 :       calc_vec_perm_mask_for_shift (i, nelt, &sel);
    3867                 :       5119 :       indices.new_vector (sel, 2, nelt);
    3868                 :       5119 :       if (!can_vec_perm_const_p (mode, indices, false))
    3869                 :            :         return false;
    3870                 :            :     }
    3871                 :            :   return true;
    3872                 :            : }
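/* Illustrative sketch only, not part of the vectorizer: the halving offsets
   checked above (NELT/2, NELT/4, ..., 1) are the shift amounts needed by a
   log2-style shift-based reduction epilogue, which is why they are the ones
   tested here.  The helper below simulates that pattern in plain C, with an
   array standing in for vector lanes, assuming an integer add reduction and
   a power-of-two number of lanes.  */

static int
whole_vector_shift_reduce_sketch (int *lanes, unsigned int nelt)
{
  /* Each step folds the upper half of the live lanes into the lower half,
     mirroring a whole-vector shift by HALF elements followed by a vector
     add.  */
  for (unsigned int half = nelt / 2; half >= 1; half /= 2)
    for (unsigned int i = 0; i < half; i++)
      lanes[i] += lanes[i + half];

  /* The reduced value ends up in lane 0; extracting it corresponds to the
     final vec_to_scalar statement costed in vect_model_reduction_cost.  */
  return lanes[0];
}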
    3873                 :            : 
     3874                 :            : /* TODO: The vect_model_*_cost functions are tightly coupled to the
     3875                 :            :    corresponding vectorizable_* functions.  Redesign to ease maintenance.  */
    3876                 :            : 
    3877                 :            : /* Function vect_model_reduction_cost.
    3878                 :            : 
    3879                 :            :    Models cost for a reduction operation, including the vector ops
    3880                 :            :    generated within the strip-mine loop, the initial definition before
    3881                 :            :    the loop, and the epilogue code that must be generated.  */
    3882                 :            : 
    3883                 :            : static void
    3884                 :      17105 : vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
    3885                 :            :                            vect_reduction_type reduction_type,
    3886                 :            :                            int ncopies, stmt_vector_for_cost *cost_vec)
    3887                 :            : {
    3888                 :      17105 :   int prologue_cost = 0, epilogue_cost = 0, inside_cost;
    3889                 :      17105 :   enum tree_code code;
    3890                 :      17105 :   optab optab;
    3891                 :      17105 :   tree vectype;
    3892                 :      17105 :   machine_mode mode;
    3893                 :      17105 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    3894                 :      17105 :   class loop *loop = NULL;
    3895                 :            : 
    3896                 :      17105 :   if (loop_vinfo)
    3897                 :      17105 :     loop = LOOP_VINFO_LOOP (loop_vinfo);
    3898                 :            : 
    3899                 :            :   /* Condition reductions generate two reductions in the loop.  */
    3900                 :      17105 :   if (reduction_type == COND_REDUCTION)
    3901                 :         97 :     ncopies *= 2;
    3902                 :            : 
    3903                 :      17105 :   vectype = STMT_VINFO_VECTYPE (stmt_info);
    3904                 :      17105 :   mode = TYPE_MODE (vectype);
    3905                 :      17105 :   stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
    3906                 :            : 
    3907                 :      17105 :   code = gimple_assign_rhs_code (orig_stmt_info->stmt);
    3908                 :            : 
    3909                 :      17105 :   if (reduction_type == EXTRACT_LAST_REDUCTION)
    3910                 :            :     /* No extra instructions are needed in the prologue.  The loop body
    3911                 :            :        operations are costed in vectorizable_condition.  */
    3912                 :            :     inside_cost = 0;
    3913                 :      17105 :   else if (reduction_type == FOLD_LEFT_REDUCTION)
    3914                 :            :     {
    3915                 :            :       /* No extra instructions needed in the prologue.  */
    3916                 :        783 :       prologue_cost = 0;
    3917                 :            : 
    3918                 :        783 :       if (reduc_fn != IFN_LAST)
    3919                 :            :         /* Count one reduction-like operation per vector.  */
    3920                 :          0 :         inside_cost = record_stmt_cost (cost_vec, ncopies, vec_to_scalar,
    3921                 :            :                                         stmt_info, 0, vect_body);
    3922                 :            :       else
    3923                 :            :         {
    3924                 :            :           /* Use NELEMENTS extracts and NELEMENTS scalar ops.  */
    3925                 :        783 :           unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
    3926                 :        783 :           inside_cost = record_stmt_cost (cost_vec, nelements,
    3927                 :            :                                           vec_to_scalar, stmt_info, 0,
    3928                 :            :                                           vect_body);
    3929                 :        783 :           inside_cost += record_stmt_cost (cost_vec, nelements,
    3930                 :            :                                            scalar_stmt, stmt_info, 0,
    3931                 :            :                                            vect_body);
    3932                 :            :         }
    3933                 :            :     }
    3934                 :            :   else
    3935                 :            :     {
    3936                 :            :       /* Add in cost for initial definition.
    3937                 :            :          For cond reduction we have four vectors: initial index, step,
    3938                 :            :          initial result of the data reduction, initial value of the index
    3939                 :            :          reduction.  */
    3940                 :      16322 :       int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
    3941                 :      16322 :       prologue_cost += record_stmt_cost (cost_vec, prologue_stmts,
    3942                 :            :                                          scalar_to_vec, stmt_info, 0,
    3943                 :            :                                          vect_prologue);
    3944                 :            : 
    3945                 :            :       /* Cost of reduction op inside loop.  */
    3946                 :      16322 :       inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
    3947                 :            :                                       stmt_info, 0, vect_body);
    3948                 :            :     }
    3949                 :            : 
    3950                 :            :   /* Determine cost of epilogue code.
    3951                 :            : 
    3952                 :            :      We have a reduction operator that will reduce the vector in one statement.
    3953                 :            :      Also requires scalar extract.  */
    3954                 :            : 
    3955                 :      17105 :   if (!loop || !nested_in_vect_loop_p (loop, orig_stmt_info))
    3956                 :            :     {
    3957                 :      17062 :       if (reduc_fn != IFN_LAST)
    3958                 :            :         {
    3959                 :       8184 :           if (reduction_type == COND_REDUCTION)
    3960                 :            :             {
     3961                 :            :               /* An EQ stmt and a COND_EXPR stmt.  */
    3962                 :          8 :               epilogue_cost += record_stmt_cost (cost_vec, 2,
    3963                 :            :                                                  vector_stmt, stmt_info, 0,
    3964                 :            :                                                  vect_epilogue);
    3965                 :            :               /* Reduction of the max index and a reduction of the found
    3966                 :            :                  values.  */
    3967                 :          8 :               epilogue_cost += record_stmt_cost (cost_vec, 2,
    3968                 :            :                                                  vec_to_scalar, stmt_info, 0,
    3969                 :            :                                                  vect_epilogue);
    3970                 :            :               /* A broadcast of the max value.  */
    3971                 :          8 :               epilogue_cost += record_stmt_cost (cost_vec, 1,
    3972                 :            :                                                  scalar_to_vec, stmt_info, 0,
    3973                 :            :                                                  vect_epilogue);
    3974                 :            :             }
    3975                 :            :           else
    3976                 :            :             {
    3977                 :       8176 :               epilogue_cost += record_stmt_cost (cost_vec, 1, vector_stmt,
    3978                 :            :                                                  stmt_info, 0, vect_epilogue);
    3979                 :       8176 :               epilogue_cost += record_stmt_cost (cost_vec, 1,
    3980                 :            :                                                  vec_to_scalar, stmt_info, 0,
    3981                 :            :                                                  vect_epilogue);
    3982                 :            :             }
    3983                 :            :         }
    3984                 :       8878 :       else if (reduction_type == COND_REDUCTION)
    3985                 :            :         {
    3986                 :         89 :           unsigned estimated_nunits = vect_nunits_for_cost (vectype);
    3987                 :            :           /* Extraction of scalar elements.  */
    3988                 :        178 :           epilogue_cost += record_stmt_cost (cost_vec,
    3989                 :         89 :                                              2 * estimated_nunits,
    3990                 :            :                                              vec_to_scalar, stmt_info, 0,
    3991                 :            :                                              vect_epilogue);
    3992                 :            :           /* Scalar max reductions via COND_EXPR / MAX_EXPR.  */
    3993                 :         89 :           epilogue_cost += record_stmt_cost (cost_vec,
    3994                 :         89 :                                              2 * estimated_nunits - 3,
    3995                 :            :                                              scalar_stmt, stmt_info, 0,
    3996                 :            :                                              vect_epilogue);
    3997                 :            :         }
    3998                 :       8789 :       else if (reduction_type == EXTRACT_LAST_REDUCTION
    3999                 :       8789 :                || reduction_type == FOLD_LEFT_REDUCTION)
     4000                 :            :         /* No extra instructions are needed in the epilogue.  */
    4001                 :            :         ;
    4002                 :            :       else
    4003                 :            :         {
    4004                 :       8006 :           int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
    4005                 :       8006 :           tree bitsize =
    4006                 :       8006 :             TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt_info->stmt)));
    4007                 :       8006 :           int element_bitsize = tree_to_uhwi (bitsize);
    4008                 :       8006 :           int nelements = vec_size_in_bits / element_bitsize;
    4009                 :            : 
    4010                 :       8006 :           if (code == COND_EXPR)
    4011                 :        416 :             code = MAX_EXPR;
    4012                 :            : 
    4013                 :       8006 :           optab = optab_for_tree_code (code, vectype, optab_default);
    4014                 :            : 
    4015                 :            :           /* We have a whole vector shift available.  */
    4016                 :       8006 :           if (optab != unknown_optab
    4017                 :       8006 :               && VECTOR_MODE_P (mode)
    4018                 :       8006 :               && optab_handler (optab, mode) != CODE_FOR_nothing
    4019                 :      14731 :               && have_whole_vector_shift (mode))
    4020                 :            :             {
    4021                 :            :               /* Final reduction via vector shifts and the reduction operator.
    4022                 :            :                  Also requires scalar extract.  */
    4023                 :      15468 :               epilogue_cost += record_stmt_cost (cost_vec,
    4024                 :      10312 :                                                  exact_log2 (nelements) * 2,
    4025                 :            :                                                  vector_stmt, stmt_info, 0,
    4026                 :            :                                                  vect_epilogue);
    4027                 :       5156 :               epilogue_cost += record_stmt_cost (cost_vec, 1,
    4028                 :            :                                                  vec_to_scalar, stmt_info, 0,
    4029                 :            :                                                  vect_epilogue);
    4030                 :            :             }     
    4031                 :            :           else
    4032                 :            :             /* Use extracts and reduction op for final reduction.  For N
    4033                 :            :                elements, we have N extracts and N-1 reduction ops.  */
    4034                 :       2850 :             epilogue_cost += record_stmt_cost (cost_vec, 
    4035                 :       2850 :                                                nelements + nelements - 1,
    4036                 :            :                                                vector_stmt, stmt_info, 0,
    4037                 :            :                                                vect_epilogue);
    4038                 :            :         }
    4039                 :            :     }
    4040                 :            : 
    4041                 :      17105 :   if (dump_enabled_p ())
    4042                 :       1679 :     dump_printf (MSG_NOTE, 
    4043                 :            :                  "vect_model_reduction_cost: inside_cost = %d, "
    4044                 :            :                  "prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
    4045                 :            :                  prologue_cost, epilogue_cost);
    4046                 :      17105 : }
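/* Worked example (hypothetical, for illustration only): a plain add
   reduction over a 4-element vector with ncopies = 1, no target reduc_fn,
   but whole-vector shifts available would be costed as one scalar_to_vec in
   the prologue, one vector_stmt in the body, and in the epilogue
   2 * log2 (4) = 4 vector_stmts for the shift/add steps plus one
   vec_to_scalar for the final extract.  The actual numbers depend entirely
   on the target's cost hooks.  */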
    4047                 :            : 
    4048                 :            : 
    4049                 :            : /* Function vect_model_induction_cost.
    4050                 :            : 
    4051                 :            :    Models cost for induction operations.  */
    4052                 :            : 
    4053                 :            : static void
    4054                 :       6405 : vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies,
    4055                 :            :                            stmt_vector_for_cost *cost_vec)
    4056                 :            : {
    4057                 :       6405 :   unsigned inside_cost, prologue_cost;
    4058                 :            : 
    4059                 :       6405 :   if (PURE_SLP_STMT (stmt_info))
    4060                 :            :     return;
    4061                 :            : 
    4062                 :            :   /* loop cost for vec_loop.  */
    4063                 :       6180 :   inside_cost = record_stmt_cost (cost_vec, ncopies, vector_stmt,
    4064                 :            :                                   stmt_info, 0, vect_body);
    4065                 :            : 
    4066                 :            :   /* prologue cost for vec_init and vec_step.  */
    4067                 :       6180 :   prologue_cost = record_stmt_cost (cost_vec, 2, scalar_to_vec,
    4068                 :            :                                     stmt_info, 0, vect_prologue);
    4069                 :            : 
    4070                 :       6180 :   if (dump_enabled_p ())
    4071                 :       2236 :     dump_printf_loc (MSG_NOTE, vect_location,
    4072                 :            :                      "vect_model_induction_cost: inside_cost = %d, "
    4073                 :            :                      "prologue_cost = %d .\n", inside_cost, prologue_cost);
    4074                 :            : }
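/* For instance (illustrative only), an induction vectorized with
   ncopies = 2 is costed here as two vector_stmts in the loop body plus two
   scalar_to_vec statements in the prologue, covering the initial vector and
   the step vector.  */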
    4075                 :            : 
    4076                 :            : 
    4077                 :            : 
    4078                 :            : /* Function get_initial_def_for_reduction
    4079                 :            : 
    4080                 :            :    Input:
    4081                 :            :    STMT_VINFO - a stmt that performs a reduction operation in the loop.
    4082                 :            :    INIT_VAL - the initial value of the reduction variable
    4083                 :            : 
    4084                 :            :    Output:
    4085                 :            :    ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
    4086                 :            :         of the reduction (used for adjusting the epilog - see below).
    4087                 :            :    Return a vector variable, initialized according to the operation that
    4088                 :            :         STMT_VINFO performs. This vector will be used as the initial value
    4089                 :            :         of the vector of partial results.
    4090                 :            : 
    4091                 :            :    Option1 (adjust in epilog): Initialize the vector as follows:
    4092                 :            :      add/bit or/xor:    [0,0,...,0,0]
    4093                 :            :      mult/bit and:      [1,1,...,1,1]
    4094                 :            :      min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
    4095                 :            :    and when necessary (e.g. add/mult case) let the caller know
    4096                 :            :    that it needs to adjust the result by init_val.
    4097                 :            : 
    4098                 :            :    Option2: Initialize the vector as follows:
    4099                 :            :      add/bit or/xor:    [init_val,0,0,...,0]
    4100                 :            :      mult/bit and:      [init_val,1,1,...,1]
    4101                 :            :      min/max/cond_expr: [init_val,init_val,...,init_val]
    4102                 :            :    and no adjustments are needed.
    4103                 :            : 
    4104                 :            :    For example, for the following code:
    4105                 :            : 
    4106                 :            :    s = init_val;
    4107                 :            :    for (i=0;i<n;i++)
    4108                 :            :      s = s + a[i];
    4109                 :            : 
    4110                 :            :    STMT_VINFO is 's = s + a[i]', and the reduction variable is 's'.
    4111                 :            :    For a vector of 4 units, we want to return either [0,0,0,init_val],
    4112                 :            :    or [0,0,0,0] and let the caller know that it needs to adjust
    4113                 :            :    the result at the end by 'init_val'.
    4114                 :            : 
     4115                 :            :    FORNOW, we use Option1 (the 'adjust in epilog' scheme) when ADJUSTMENT_DEF
     4116                 :            :    is not NULL, because its initialization vector is simpler (the same
     4117                 :            :    element in all entries), and Option2 otherwise.
    4118                 :            : 
    4119                 :            :    A cost model should help decide between these two schemes.  */
    4120                 :            : 
    4121                 :            : static tree
    4122                 :      10677 : get_initial_def_for_reduction (stmt_vec_info stmt_vinfo,
    4123                 :            :                                enum tree_code code, tree init_val,
    4124                 :            :                                tree *adjustment_def)
    4125                 :            : {
    4126                 :      10677 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
    4127                 :      10677 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    4128                 :      10677 :   tree scalar_type = TREE_TYPE (init_val);
    4129                 :      10677 :   tree vectype = get_vectype_for_scalar_type (loop_vinfo, scalar_type);
    4130                 :      10677 :   tree def_for_init;
    4131                 :      10677 :   tree init_def;
    4132                 :      10677 :   REAL_VALUE_TYPE real_init_val = dconst0;
    4133                 :      10677 :   int int_init_val = 0;
    4134                 :      10677 :   gimple_seq stmts = NULL;
    4135                 :            : 
    4136                 :      10677 :   gcc_assert (vectype);
    4137                 :            : 
    4138                 :      10677 :   gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
    4139                 :            :               || SCALAR_FLOAT_TYPE_P (scalar_type));
    4140                 :            : 
    4141                 :      10743 :   gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
    4142                 :            :               || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);
    4143                 :            : 
    4144                 :            :   /* ADJUSTMENT_DEF is NULL when called from
    4145                 :            :      vect_create_epilog_for_reduction to vectorize double reduction.  */
    4146                 :      10677 :   if (adjustment_def)
    4147                 :      10638 :     *adjustment_def = NULL;
    4148                 :            : 
    4149                 :      10677 :   switch (code)
    4150                 :            :     {
    4151                 :       9965 :     case WIDEN_SUM_EXPR:
    4152                 :       9965 :     case DOT_PROD_EXPR:
    4153                 :       9965 :     case SAD_EXPR:
    4154                 :       9965 :     case PLUS_EXPR:
    4155                 :       9965 :     case MINUS_EXPR:
    4156                 :       9965 :     case BIT_IOR_EXPR:
    4157                 :       9965 :     case BIT_XOR_EXPR:
    4158                 :       9965 :     case MULT_EXPR:
    4159                 :       9965 :     case BIT_AND_EXPR:
    4160                 :       9965 :       {
    4161                 :       9965 :         if (code == MULT_EXPR)
    4162                 :            :           {
    4163                 :         56 :             real_init_val = dconst1;
    4164                 :         56 :             int_init_val = 1;
    4165                 :            :           }
    4166                 :            : 
    4167                 :       9965 :         if (code == BIT_AND_EXPR)
    4168                 :         83 :           int_init_val = -1;
    4169                 :            : 
    4170                 :       9965 :         if (SCALAR_FLOAT_TYPE_P (scalar_type))
    4171                 :       5536 :           def_for_init = build_real (scalar_type, real_init_val);
    4172                 :            :         else
    4173                 :       4429 :           def_for_init = build_int_cst (scalar_type, int_init_val);
    4174                 :            : 
    4175                 :       9965 :         if (adjustment_def || operand_equal_p (def_for_init, init_val, 0))
    4176                 :            :           {
    4177                 :            :             /* Option1: the first element is '0' or '1' as well.  */
    4178                 :       9949 :             if (!operand_equal_p (def_for_init, init_val, 0))
    4179                 :       8899 :               *adjustment_def = init_val;
    4180                 :       9949 :             init_def = gimple_build_vector_from_val (&stmts, vectype,
    4181                 :            :                                                      def_for_init);
    4182                 :            :           }
    4183                 :         16 :         else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
    4184                 :            :           {
    4185                 :            :             /* Option2 (variable length): the first element is INIT_VAL.  */
    4186                 :            :             init_def = gimple_build_vector_from_val (&stmts, vectype,
    4187                 :            :                                                      def_for_init);
    4188                 :            :             init_def = gimple_build (&stmts, CFN_VEC_SHL_INSERT,
    4189                 :            :                                      vectype, init_def, init_val);
    4190                 :            :           }
    4191                 :            :         else
    4192                 :            :           {
    4193                 :            :             /* Option2: the first element is INIT_VAL.  */
    4194                 :         32 :             tree_vector_builder elts (vectype, 1, 2);
    4195                 :         16 :             elts.quick_push (init_val);
    4196                 :         16 :             elts.quick_push (def_for_init);
    4197                 :         16 :             init_def = gimple_build_vector (&stmts, &elts);
    4198                 :            :           }
    4199                 :            :       }
    4200                 :            :       break;
    4201                 :            : 
    4202                 :        712 :     case MIN_EXPR:
    4203                 :        712 :     case MAX_EXPR:
    4204                 :        712 :     case COND_EXPR:
    4205                 :        712 :       {
    4206                 :        712 :         init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
    4207                 :        712 :         init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
    4208                 :            :       }
    4209                 :        712 :       break;
    4210                 :            : 
    4211                 :          0 :     default:
    4212                 :          0 :       gcc_unreachable ();
    4213                 :            :     }
    4214                 :            : 
    4215                 :      10677 :   if (stmts)
    4216                 :        183 :     gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
    4217                 :      10677 :   return init_def;
    4218                 :            : }
    4219                 :            : 
    4220                 :            : /* Get at the initial defs for the reduction PHIs in SLP_NODE.
    4221                 :            :    NUMBER_OF_VECTORS is the number of vector defs to create.
    4222                 :            :    If NEUTRAL_OP is nonnull, introducing extra elements of that
    4223                 :            :    value will not change the result.  */
    4224                 :            : 
    4225                 :            : static void
    4226                 :        427 : get_initial_defs_for_reduction (slp_tree slp_node,
    4227                 :            :                                 vec<tree> *vec_oprnds,
    4228                 :            :                                 unsigned int number_of_vectors,
    4229                 :            :                                 bool reduc_chain, tree neutral_op)
    4230                 :            : {
    4231                 :        427 :   vec<stmt_vec_info> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
    4232                 :        427 :   stmt_vec_info stmt_vinfo = stmts[0];
    4233                 :        427 :   vec_info *vinfo = stmt_vinfo->vinfo;
    4234                 :        427 :   unsigned HOST_WIDE_INT nunits;
    4235                 :        427 :   unsigned j, number_of_places_left_in_vector;
    4236                 :        427 :   tree vector_type;
    4237                 :        427 :   unsigned int group_size = stmts.length ();
    4238                 :        427 :   unsigned int i;
    4239                 :        427 :   class loop *loop;
    4240                 :            : 
    4241                 :        427 :   vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
    4242                 :            : 
    4243                 :        427 :   gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
    4244                 :            : 
    4245                 :        427 :   loop = (gimple_bb (stmt_vinfo->stmt))->loop_father;
    4246                 :        427 :   gcc_assert (loop);
    4247                 :        427 :   edge pe = loop_preheader_edge (loop);
    4248                 :            : 
    4249                 :        427 :   gcc_assert (!reduc_chain || neutral_op);
    4250                 :            : 
    4251                 :            :   /* NUMBER_OF_COPIES is the number of times we need to use the same values in
    4252                 :            :      created vectors. It is greater than 1 if unrolling is performed.
    4253                 :            : 
    4254                 :            :      For example, we have two scalar operands, s1 and s2 (e.g., group of
    4255                 :            :      strided accesses of size two), while NUNITS is four (i.e., four scalars
    4256                 :            :      of this type can be packed in a vector).  The output vector will contain
    4257                 :            :      two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
    4258                 :            :      will be 2).
    4259                 :            : 
    4260                 :            :      If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
    4261                 :            :      vectors containing the operands.
    4262                 :            : 
    4263                 :            :      For example, NUNITS is four as before, and the group size is 8
    4264                 :            :      (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
    4265                 :            :      {s5, s6, s7, s8}.  */
    4266                 :            : 
    4267                 :        427 :   if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
    4268                 :            :     nunits = group_size;
    4269                 :            : 
    4270                 :        427 :   number_of_places_left_in_vector = nunits;
    4271                 :        427 :   bool constant_p = true;
    4272                 :        427 :   tree_vector_builder elts (vector_type, nunits, 1);
    4273                 :        427 :   elts.quick_grow (nunits);
    4274                 :        427 :   gimple_seq ctor_seq = NULL;
    4275                 :       3305 :   for (j = 0; j < nunits * number_of_vectors; ++j)
    4276                 :            :     {
    4277                 :       2878 :       tree op;
    4278                 :       2878 :       i = j % group_size;
    4279                 :       2878 :       stmt_vinfo = stmts[i];
    4280                 :            : 
     4281                 :            :       /* Get the def before the loop.  In a reduction chain we have only
     4282                 :            :          one initial value; otherwise we have one per PHI in the group.  */
    4283                 :       2878 :       if (reduc_chain)
    4284                 :        736 :         op = j != 0 ? neutral_op : PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
    4285                 :       2142 :       else if (((vec_oprnds->length () + 1) * nunits
    4286                 :       2142 :                 - number_of_places_left_in_vector >= group_size)
    4287                 :       2142 :                && neutral_op)
    4288                 :            :         op = neutral_op;
    4289                 :            :       else
    4290                 :        783 :         op = PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt, pe);
    4291                 :            : 
    4292                 :            :       /* Create 'vect_ = {op0,op1,...,opn}'.  */
    4293                 :       2878 :       number_of_places_left_in_vector--;
    4294                 :       2878 :       elts[nunits - number_of_places_left_in_vector - 1] = op;
    4295                 :       2878 :       if (!CONSTANT_CLASS_P (op))
    4296                 :        358 :         constant_p = false;
    4297                 :            : 
    4298                 :       2878 :       if (number_of_places_left_in_vector == 0)
    4299                 :            :         {
    4300                 :        590 :           tree init;
    4301                 :       1180 :           if (constant_p && !neutral_op
    4302                 :       1180 :               ? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
    4303                 :        590 :               : known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
    4304                 :            :             /* Build the vector directly from ELTS.  */
    4305                 :        590 :             init = gimple_build_vector (&ctor_seq, &elts);
    4306                 :          0 :           else if (neutral_op)
    4307                 :            :             {
    4308                 :            :               /* Build a vector of the neutral value and shift the
    4309                 :            :                  other elements into place.  */
    4310                 :          0 :               init = gimple_build_vector_from_val (&ctor_seq, vector_type,
    4311                 :            :                                                    neutral_op);
    4312                 :          0 :               int k = nunits;
    4313                 :          0 :               while (k > 0 && elts[k - 1] == neutral_op)
    4314                 :            :                 k -= 1;
    4315                 :          0 :               while (k > 0)
    4316                 :            :                 {
    4317                 :          0 :                   k -= 1;
    4318                 :          0 :                   init = gimple_build (&ctor_seq, CFN_VEC_SHL_INSERT,
    4319                 :          0 :                                        vector_type, init, elts[k]);
    4320                 :            :                 }
    4321                 :            :             }
    4322                 :            :           else
    4323                 :            :             {
    4324                 :            :               /* First time round, duplicate ELTS to fill the
    4325                 :            :                  required number of vectors.  */
    4326                 :          0 :               duplicate_and_interleave (vinfo, &ctor_seq, vector_type, elts,
    4327                 :            :                                         number_of_vectors, *vec_oprnds);
    4328                 :          0 :               break;
    4329                 :            :             }
    4330                 :        590 :           vec_oprnds->quick_push (init);
    4331                 :            : 
    4332                 :        590 :           number_of_places_left_in_vector = nunits;
    4333                 :        590 :           elts.new_vector (vector_type, nunits, 1);
    4334                 :        590 :           elts.quick_grow (nunits);
    4335                 :        590 :           constant_p = true;
    4336                 :            :         }
    4337                 :            :     }
    4338                 :        427 :   if (ctor_seq != NULL)
    4339                 :        177 :     gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
    4340                 :        427 : }
    4341                 :            : 
     4342                 :            : /* For a statement STMT_INFO taking part in a reduction operation, return
     4343                 :            :    the stmt_vec_info on which its meta information is stored.  */
    4344                 :            : 
    4345                 :            : stmt_vec_info
    4346                 :      79566 : info_for_reduction (stmt_vec_info stmt_info)
    4347                 :            : {
    4348                 :      79566 :   stmt_info = vect_orig_stmt (stmt_info);
    4349                 :      79566 :   gcc_assert (STMT_VINFO_REDUC_DEF (stmt_info));
    4350                 :      79566 :   if (!is_a <gphi *> (stmt_info->stmt))
    4351                 :      37602 :     stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
    4352                 :      79566 :   gphi *phi = as_a <gphi *> (stmt_info->stmt);
    4353                 :      79566 :   if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
    4354                 :            :     {
    4355                 :        302 :       if (gimple_phi_num_args (phi) == 1)
    4356                 :        117 :         stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
    4357                 :            :     }
    4358                 :      79264 :   else if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
    4359                 :            :     {
    4360                 :       1428 :       edge pe = loop_preheader_edge (gimple_bb (phi)->loop_father);
    4361                 :       1428 :       stmt_vec_info info
    4362                 :       1428 :           = stmt_info->vinfo->lookup_def (PHI_ARG_DEF_FROM_EDGE (phi, pe));
    4363                 :       1428 :       if (info && STMT_VINFO_DEF_TYPE (info) == vect_double_reduction_def)
    4364                 :        158 :         stmt_info = info;
    4365                 :            :     }
    4366                 :      79566 :   return stmt_info;
    4367                 :            : }
    4368                 :            : 
    4369                 :            : /* Function vect_create_epilog_for_reduction
    4370                 :            : 
    4371                 :            :    Create code at the loop-epilog to finalize the result of a reduction
    4372                 :            :    computation. 
    4373                 :            :   
    4374                 :            :    STMT_INFO is the scalar reduction stmt that is being vectorized.
    4375                 :            :    SLP_NODE is an SLP node containing a group of reduction statements. The 
    4376                 :            :      first one in this group is STMT_INFO.
    4377                 :            :    SLP_NODE_INSTANCE is the SLP node instance containing SLP_NODE
    4378                 :            :    REDUC_INDEX says which rhs operand of the STMT_INFO is the reduction phi
    4379                 :            :      (counting from 0)
    4380                 :            : 
    4381                 :            :    This function:
    4382                 :            :    1. Completes the reduction def-use cycles.
    4383                 :            :    2. "Reduces" each vector of partial results VECT_DEFS into a single result,
    4384                 :            :       by calling the function specified by REDUC_FN if available, or by
    4385                 :            :       other means (whole-vector shifts or a scalar loop).
    4386                 :            :       The function also creates a new phi node at the loop exit to preserve
    4387                 :            :       loop-closed form, as illustrated below.
    4388                 :            : 
    4389                 :            :      The flow at the entry to this function:
    4390                 :            : 
    4391                 :            :         loop:
    4392                 :            :           vec_def = phi <vec_init, null>        # REDUCTION_PHI
    4393                 :            :           VECT_DEF = vector_stmt                # vectorized form of STMT_INFO
    4394                 :            :           s_loop = scalar_stmt                  # (scalar) STMT_INFO
    4395                 :            :         loop_exit:
    4396                 :            :           s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
    4397                 :            :           use <s_out0>
    4398                 :            :           use <s_out0>
    4399                 :            : 
    4400                 :            :      The above is transformed by this function into:
    4401                 :            : 
    4402                 :            :         loop:
    4403                 :            :           vec_def = phi <vec_init, VECT_DEF>    # REDUCTION_PHI
    4404                 :            :           VECT_DEF = vector_stmt                # vectorized form of STMT_INFO
    4405                 :            :           s_loop = scalar_stmt                  # (scalar) STMT_INFO
    4406                 :            :         loop_exit:
    4407                 :            :           s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
    4408                 :            :           v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
    4409                 :            :           v_out2 = reduce <v_out1>
    4410                 :            :           s_out3 = extract_field <v_out2, 0>
    4411                 :            :           s_out4 = adjust_result <s_out3>
    4412                 :            :           use <s_out4>
    4413                 :            :           use <s_out4>
    4414                 :            : */
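                         :            : 
                         :            : /* For illustration (a hypothetical scalar source loop, not taken from
                         :            :    this file): a reduction such as
                         :            : 
                         :            :      int sum = 0;
                         :            :      for (int i = 0; i < n; ++i)
                         :            :        sum += a[i];
                         :            : 
                         :            :    is vectorized into a vector accumulator of partial sums; the epilog
                         :            :    code created below collapses that accumulator into the single scalar
                         :            :    result (v_out2/s_out3/s_out4 above) after the loop exit.  */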
    4415                 :            : 
    4416                 :            : static void
    4417                 :      11196 : vect_create_epilog_for_reduction (stmt_vec_info stmt_info,
    4418                 :            :                                   slp_tree slp_node,
    4419                 :            :                                   slp_instance slp_node_instance)
    4420                 :            : {
    4421                 :      11196 :   stmt_vec_info reduc_info = info_for_reduction (stmt_info);
    4422                 :      11196 :   gcc_assert (reduc_info->is_reduc_info);
    4423                 :      11196 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    4424                 :            :   /* For double reductions we need to get at the inner loop reduction
    4425                 :            :      stmt which has the meta info attached.  Our stmt_info is that of the
    4426                 :            :      loop-closed PHI of the inner loop which we remember as
    4427                 :            :      def for the reduction PHI generation.  */
    4428                 :      11196 :   bool double_reduc = false;
    4429                 :      11196 :   stmt_vec_info rdef_info = stmt_info;
    4430                 :      11196 :   if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
    4431                 :            :     {
    4432                 :         39 :       gcc_assert (!slp_node);
    4433                 :         39 :       double_reduc = true;
    4434                 :         39 :       stmt_info = loop_vinfo->lookup_def (gimple_phi_arg_def
    4435                 :            :                                             (stmt_info->stmt, 0));
    4436                 :         39 :       stmt_info = vect_stmt_to_vectorize (stmt_info);
    4437                 :            :     }
    4438                 :      11196 :   gphi *reduc_def_stmt
    4439                 :      11196 :     = as_a <gphi *> (STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info))->stmt);
    4440                 :      11196 :   enum tree_code code = STMT_VINFO_REDUC_CODE (reduc_info);
    4441                 :      11196 :   internal_fn reduc_fn = STMT_VINFO_REDUC_FN (reduc_info);
    4442                 :      11196 :   stmt_vec_info prev_phi_info;
    4443                 :      11196 :   tree vectype;
    4444                 :      11196 :   machine_mode mode;
    4445                 :      11196 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
    4446                 :      11196 :   basic_block exit_bb;
    4447                 :      11196 :   tree scalar_dest;
    4448                 :      11196 :   tree scalar_type;
    4449                 :      11196 :   gimple *new_phi = NULL, *phi;
    4450                 :      11196 :   stmt_vec_info phi_info;
    4451                 :      11196 :   gimple_stmt_iterator exit_gsi;
    4452                 :      11196 :   tree new_temp = NULL_TREE, new_name, new_scalar_dest;
    4453                 :      11196 :   gimple *epilog_stmt = NULL;
    4454                 :      11196 :   gimple *exit_phi;
    4455                 :      11196 :   tree bitsize;
    4456                 :      11196 :   tree def;
    4457                 :      11196 :   tree orig_name, scalar_result;
    4458                 :      11196 :   imm_use_iterator imm_iter, phi_imm_iter;
    4459                 :      11196 :   use_operand_p use_p, phi_use_p;
    4460                 :      11196 :   gimple *use_stmt;
    4461                 :      11196 :   bool nested_in_vect_loop = false;
    4462                 :      11196 :   auto_vec<gimple *> new_phis;
    4463                 :      11196 :   int j, i;
    4464                 :      11196 :   auto_vec<tree> scalar_results;
    4465                 :      11196 :   unsigned int group_size = 1, k;
    4466                 :      11196 :   auto_vec<gimple *> phis;
    4467                 :      11196 :   bool slp_reduc = false;
    4468                 :      11196 :   bool direct_slp_reduc;
    4469                 :      11196 :   tree new_phi_result;
    4470                 :      11196 :   tree induction_index = NULL_TREE;
    4471                 :            : 
    4472                 :      11196 :   if (slp_node)
    4473                 :        442 :     group_size = SLP_TREE_SCALAR_STMTS (slp_node).length (); 
    4474                 :            : 
    4475                 :      11196 :   if (nested_in_vect_loop_p (loop, stmt_info))
    4476                 :            :     {
    4477                 :         39 :       outer_loop = loop;
    4478                 :         39 :       loop = loop->inner;
    4479                 :         39 :       nested_in_vect_loop = true;
    4480                 :         39 :       gcc_assert (!slp_node);
    4481                 :            :     }
    4482                 :      11196 :   gcc_assert (!nested_in_vect_loop || double_reduc);
    4483                 :            : 
    4484                 :      11196 :   vectype = STMT_VINFO_REDUC_VECTYPE (reduc_info);
    4485                 :      11196 :   gcc_assert (vectype);
    4486                 :      11196 :   mode = TYPE_MODE (vectype);
    4487                 :            : 
    4488                 :      11196 :   tree initial_def = NULL;
    4489                 :      11196 :   tree induc_val = NULL_TREE;
    4490                 :      11196 :   tree adjustment_def = NULL;
    4491                 :      11196 :   if (slp_node)
    4492                 :            :     ;
    4493                 :            :   else
    4494                 :            :     {
    4495                 :            :       /* Get at the scalar def before the loop, that defines the initial value
    4496                 :            :          of the reduction variable.  */
    4497                 :      10754 :       initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
    4498                 :            :                                            loop_preheader_edge (loop));
    4499                 :            :       /* Optimize: for induction condition reduction, if we can't use zero
    4500                 :            :          for induc_val, use initial_def.  */
    4501                 :      10754 :       if (STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
    4502                 :         63 :         induc_val = STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info);
    4503                 :      10691 :       else if (double_reduc)
    4504                 :            :         ;
    4505                 :      10652 :       else if (nested_in_vect_loop)
    4506                 :            :         ;
    4507                 :            :       else
    4508                 :      10652 :         adjustment_def = STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT (reduc_info);
    4509                 :            :     }
    4510                 :            : 
    4511                 :      11196 :   unsigned vec_num;
    4512                 :      11196 :   int ncopies;
    4513                 :      11196 :   if (slp_node)
    4514                 :            :     {
    4515                 :        442 :       vec_num = SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis).length ();
    4516                 :            :       ncopies = 1;
    4517                 :            :     }
    4518                 :            :   else
    4519                 :            :     {
    4520                 :      10754 :       vec_num = 1;
    4521                 :      10754 :       ncopies = 0;
    4522                 :      10754 :       phi_info = STMT_VINFO_VEC_STMT (loop_vinfo->lookup_stmt (reduc_def_stmt));
    4523                 :      10782 :       do
    4524                 :            :         {
    4525                 :      10782 :           ncopies++;
    4526                 :      10782 :           phi_info = STMT_VINFO_RELATED_STMT (phi_info);
    4527                 :            :         }
    4528                 :      10782 :       while (phi_info);
    4529                 :            :     }
    4530                 :            : 
    4531                 :            :   /* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
    4532                 :            :      which is updated with the current index of the loop for every match of
    4533                 :            :      the original loop's cond_expr (VEC_STMT).  This results in a vector
    4534                 :            :      containing the last time the condition passed for that vector lane.
    4535                 :            :      The first match will be a 1 to allow 0 to be used for non-matching
    4536                 :            :      indexes.  If there are no matches at all then the vector will be all
    4537                 :            :      zeroes.
    4538                 :            :    
    4539                 :            :      PR92772: This algorithm is broken for architectures that support
    4540                 :            :      masked vectors, but do not provide fold_extract_last.  */
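                         :            : 
                         :            :   /* For instance (a hypothetical loop, not from this file):
                         :            : 
                         :            :        for (i = 0; i < n; ++i)
                         :            :          if (a[i] < b[i])
                         :            :            last = c[i];
                         :            : 
                         :            :      here each lane of INDEX_COND_EXPR ends up holding 1 plus the
                         :            :      iteration number of the last time the comparison held in that lane,
                         :            :      or 0 if it never held there, so the lane with the largest index
                         :            :      carries the final scalar result.  */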
    4541                 :      11196 :   if (STMT_VINFO_REDUC_TYPE (reduc_info) == COND_REDUCTION)
    4542                 :            :     {
    4543                 :         94 :       auto_vec<std::pair<tree, bool>, 2> ccompares;
    4544                 :         47 :       stmt_vec_info cond_info = STMT_VINFO_REDUC_DEF (reduc_info);
    4545                 :         47 :       cond_info = vect_stmt_to_vectorize (cond_info);
    4546                 :        105 :       while (cond_info != reduc_info)
    4547                 :            :         {
    4548                 :         58 :           if (gimple_assign_rhs_code (cond_info->stmt) == COND_EXPR)
    4549                 :            :             {
    4550                 :         50 :               gimple *vec_stmt = STMT_VINFO_VEC_STMT (cond_info)->stmt;
    4551                 :         50 :               gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
    4552                 :         50 :               ccompares.safe_push
    4553                 :         50 :                 (std::make_pair (unshare_expr (gimple_assign_rhs1 (vec_stmt)),
    4554                 :        100 :                                  STMT_VINFO_REDUC_IDX (cond_info) == 2));
    4555                 :            :             }
    4556                 :         58 :           cond_info
    4557                 :         58 :             = loop_vinfo->lookup_def (gimple_op (cond_info->stmt,
    4558                 :         58 :                                                  1 + STMT_VINFO_REDUC_IDX
    4559                 :            :                                                         (cond_info)));
    4560                 :        163 :           cond_info = vect_stmt_to_vectorize (cond_info);
    4561                 :            :         }
    4562                 :         47 :       gcc_assert (ccompares.length () != 0);
    4563                 :            : 
    4564                 :         47 :       tree indx_before_incr, indx_after_incr;
    4565                 :         47 :       poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
    4566                 :         47 :       int scalar_precision
    4567                 :         47 :         = GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
    4568                 :         47 :       tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
    4569                 :         47 :       tree cr_index_vector_type = get_related_vectype_for_scalar_type
    4570                 :         47 :         (TYPE_MODE (vectype), cr_index_scalar_type,
    4571                 :            :          TYPE_VECTOR_SUBPARTS (vectype));
    4572                 :            : 
    4573                 :            :       /* First we create a simple vector induction variable which starts
    4574                 :            :          with the values {1,2,3,...} (SERIES_VECT) and increments by the
    4575                 :            :          vector size (STEP).  */
    4576                 :            : 
    4577                 :            :       /* Create a {1,2,3,...} vector.  */
    4578                 :         47 :       tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
    4579                 :            : 
    4580                 :            :       /* Create a vector of the step value.  */
    4581                 :         47 :       tree step = build_int_cst (cr_index_scalar_type, nunits_out);
    4582                 :         47 :       tree vec_step = build_vector_from_val (cr_index_vector_type, step);
    4583                 :            : 
    4584                 :            :       /* Create an induction variable.  */
    4585                 :         47 :       gimple_stmt_iterator incr_gsi;
    4586                 :         47 :       bool insert_after;
    4587                 :         47 :       standard_iv_increment_position (loop, &incr_gsi, &insert_after);
    4588                 :         47 :       create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
    4589                 :            :                  insert_after, &indx_before_incr, &indx_after_incr);
    4590                 :            : 
    4591                 :            :       /* Next create a new phi node vector (NEW_PHI_TREE) which starts
    4592                 :            :          filled with zeros (VEC_ZERO).  */
    4593                 :            : 
    4594                 :            :       /* Create a vector of 0s.  */
    4595                 :         47 :       tree zero = build_zero_cst (cr_index_scalar_type);
    4596                 :         47 :       tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
    4597                 :            : 
    4598                 :            :       /* Create a vector phi node.  */
    4599                 :         47 :       tree new_phi_tree = make_ssa_name (cr_index_vector_type);
    4600                 :         47 :       new_phi = create_phi_node (new_phi_tree, loop->header);
    4601                 :         47 :       loop_vinfo->add_stmt (new_phi);
    4602                 :         47 :       add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
    4603                 :            :                    loop_preheader_edge (loop), UNKNOWN_LOCATION);
    4604                 :            : 
    4605                 :            :       /* Now take the condition from the loop's original cond_exprs
    4606                 :            :          and produce a new cond_expr (INDEX_COND_EXPR) which for
    4607                 :            :          every match uses values from the induction variable
    4608                 :            :          (INDEX_BEFORE_INCR) otherwise uses values from the phi node
    4609                 :            :          (NEW_PHI_TREE).
    4610                 :            :          Finally, we update the phi (NEW_PHI_TREE) to take the value of
    4611                 :            :          the new cond_expr (INDEX_COND_EXPR).  */
    4612                 :         47 :       gimple_seq stmts = NULL;
    4613                 :        144 :       for (int i = ccompares.length () - 1; i != -1; --i)
    4614                 :            :         {
    4615                 :         50 :           tree ccompare = ccompares[i].first;
    4616                 :         50 :           if (ccompares[i].second)
    4617                 :         41 :             new_phi_tree = gimple_build (&stmts, VEC_COND_EXPR,
    4618                 :            :                                          cr_index_vector_type,
    4619                 :            :                                          ccompare,
    4620                 :            :                                          indx_before_incr, new_phi_tree);
    4621                 :            :           else
    4622                 :          9 :             new_phi_tree = gimple_build (&stmts, VEC_COND_EXPR,
    4623                 :            :                                          cr_index_vector_type,
    4624                 :            :                                          ccompare,
    4625                 :            :                                          new_phi_tree, indx_before_incr);
    4626                 :            :         }
    4627                 :         47 :       gsi_insert_seq_before (&incr_gsi, stmts, GSI_SAME_STMT);
    4628                 :         47 :       stmt_vec_info index_vec_info
    4629                 :         47 :         = loop_vinfo->add_stmt (SSA_NAME_DEF_STMT (new_phi_tree));
    4630                 :         47 :       STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
    4631                 :            : 
    4632                 :            :       /* Update the phi with the vec cond.  */
    4633                 :         47 :       induction_index = new_phi_tree;
    4634                 :         47 :       add_phi_arg (as_a <gphi *> (new_phi), induction_index,
    4635                 :            :                    loop_latch_edge (loop), UNKNOWN_LOCATION);
    4636                 :            :     }
    4637                 :            : 
    4638                 :            :   /* 2. Create epilog code.
    4639                 :            :         The reduction epilog code operates across the elements of the vector
    4640                 :            :         of partial results computed by the vectorized loop.
    4641                 :            :         The reduction epilog code consists of:
    4642                 :            : 
    4643                 :            :         step 1: compute the scalar result in a vector (v_out2)
    4644                 :            :         step 2: extract the scalar result (s_out3) from the vector (v_out2)
    4645                 :            :         step 3: adjust the scalar result (s_out3) if needed.
    4646                 :            : 
    4647                 :            :         Step 1 can be accomplished using one of the following three schemes:
    4648                 :            :           (scheme 1) using reduc_fn, if available.
    4649                 :            :           (scheme 2) using whole-vector shifts, if available.
    4650                 :            :           (scheme 3) using a scalar loop. In this case steps 1+2 above are
    4651                 :            :                      combined.
    4652                 :            : 
    4653                 :            :           The overall epilog code looks like this:
    4654                 :            : 
    4655                 :            :           s_out0 = phi <s_loop>         # original EXIT_PHI
    4656                 :            :           v_out1 = phi <VECT_DEF>       # NEW_EXIT_PHI
    4657                 :            :           v_out2 = reduce <v_out1>              # step 1
    4658                 :            :           s_out3 = extract_field <v_out2, 0>    # step 2
    4659                 :            :           s_out4 = adjust_result <s_out3>       # step 3
    4660                 :            : 
    4661                 :            :           (step 3 is optional, and steps 1 and 2 may be combined).
    4662                 :            :           Lastly, the uses of s_out0 are replaced by s_out4.  */
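                         :            : 
                         :            :   /* A sketch of scheme 2 (illustrative only, assuming a 4-element
                         :            :      integer vector and a sum reduction): with v_out1 = {a,b,c,d},
                         :            :      whole-vector shifts reduce it as
                         :            : 
                         :            :        t1 = shift v_out1 right by 2 elements    # {c,d,_,_}
                         :            :        t2 = v_out1 + t1                         # {a+c,b+d,_,_}
                         :            :        t3 = shift t2 right by 1 element         # {b+d,_,_,_}
                         :            :        t4 = t2 + t3                             # {a+b+c+d,_,_,_}
                         :            :        s_out3 = extract_field <t4, 0>
                         :            : 
                         :            :      while scheme 3 would instead extract and accumulate the elements
                         :            :      with a scalar loop.  */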
    4663                 :            : 
    4664                 :            : 
    4665                 :            :   /* 2.1 Create new loop-exit-phis to preserve loop-closed form:
    4666                 :            :          v_out1 = phi <VECT_DEF> 
    4667                 :            :          Store them in NEW_PHIS.  */
    4668                 :      11196 :   if (double_reduc)
    4669                 :         39 :     loop = outer_loop;
    4670                 :      11196 :   exit_bb = single_exit (loop)->dest;
    4671                 :      11196 :   prev_phi_info = NULL;
    4672                 :      11196 :   new_phis.create (slp_node ? vec_num : ncopies);
    4673                 :      22556 :   for (unsigned i = 0; i < vec_num; i++)
    4674                 :            :     {
    4675                 :      11360 :       if (slp_node)
    4676                 :        606 :         def = gimple_get_lhs (SLP_TREE_VEC_STMTS (slp_node)[i]->stmt);
    4677                 :            :       else
    4678                 :      10754 :         def = gimple_get_lhs (STMT_VINFO_VEC_STMT (rdef_info)->stmt);
    4679                 :      22748 :       for (j = 0; j < ncopies; j++)
    4680                 :            :         {
    4681                 :      11388 :           tree new_def = copy_ssa_name (def);
    4682                 :      11388 :           phi = create_phi_node (new_def, exit_bb);
    4683                 :      11388 :           stmt_vec_info phi_info = loop_vinfo->add_stmt (phi);
    4684                 :      11388 :           if (j == 0)
    4685                 :      11360 :             new_phis.quick_push (phi);
    4686                 :            :           else
    4687                 :            :             {
    4688                 :         28 :               def = vect_get_vec_def_for_stmt_copy (loop_vinfo, def);
    4689                 :         28 :               STMT_VINFO_RELATED_STMT (prev_phi_info) = phi_info;
    4690                 :            :             }
    4691                 :            : 
    4692                 :      11388 :           SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
    4693                 :      11388 :           prev_phi_info = phi_info;
    4694                 :            :         }
    4695                 :            :     }
    4696                 :            : 
    4697                 :      11196 :   exit_gsi = gsi_after_labels (exit_bb);
    4698                 :            : 
    4699                 :            :   /* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
    4700                 :            :          (i.e. when reduc_fn is not available) and in the final adjustment
    4701                 :            :          code (if needed).  Also get the original scalar reduction variable as
    4702                 :            :          defined in the loop.  In case STMT is a "pattern-stmt" (i.e. - it
    4703                 :            :          represents a reduction pattern), the tree-code and scalar-def are
    4704                 :            :          taken from the original stmt that the pattern-stmt (STMT) replaces.
    4705                 :            :          Otherwise (it is a regular reduction) - the tree-code and scalar-def
    4706                 :            :          are taken from STMT.  */
    4707                 :            : 
    4708                 :      11196 :   stmt_vec_info orig_stmt_info = vect_orig_stmt (stmt_info);
    4709                 :      11196 :   if (orig_stmt_info != stmt_info)
    4710                 :            :     {
    4711                 :            :       /* Reduction pattern  */
    4712                 :        290 :       gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
    4713                 :        290 :       gcc_assert (STMT_VINFO_RELATED_STMT (orig_stmt_info) == stmt_info);
    4714                 :            :     }
    4715                 :            :   
    4716                 :      11196 :   scalar_dest = gimple_assign_lhs (orig_stmt_info->stmt);
    4717                 :      11196 :   scalar_type = TREE_TYPE (scalar_dest);
    4718                 :      11196 :   scalar_results.create (group_size); 
    4719                 :      11196 :   new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
    4720                 :      11196 :   bitsize = TYPE_SIZE (scalar_type);
    4721                 :            : 
    4722                 :            :   /* SLP reduction without reduction chain, e.g.,
    4723                 :            :      # a1 = phi <a2, a0>
    4724                 :            :      # b1 = phi <b2, b0>
    4725                 :            :      a2 = operation (a1)
    4726                 :            :      b2 = operation (b1)  */
    4727                 :      11196 :   slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (stmt_info));
    4728                 :            : 
    4729                 :            :   /* True if we should implement SLP_REDUC using native reduction operations
    4730                 :            :      instead of scalar operations.  */
    4731                 :      22392 :   direct_slp_reduc = (reduc_fn != IFN_LAST
    4732                 :      11196 :                       && slp_reduc
    4733                 :      11196 :                       && !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
    4734                 :            : 
    4735                 :            :   /* In case of reduction chain, e.g.,
    4736                 :            :      # a1 = phi <a3, a0>
    4737                 :            :      a2 = operation (a1)
    4738                 :            :      a3 = operation (a2),
    4739                 :            : 
    4740                 :            :      we may end up with more than one vector result.  Here we reduce them to
    4741                 :            :      one vector.  */
    4742                 :      11196 :   if (REDUC_GROUP_FIRST_ELEMENT (stmt_info) || direct_slp_reduc)
    4743                 :            :     {
    4744                 :         93 :       gimple_seq stmts = NULL;
    4745                 :         93 :       tree first_vect = PHI_RESULT (new_phis[0]);
    4746                 :         93 :       first_vect = gimple_convert (&stmts, vectype, first_vect);
    4747                 :        404 :       for (k = 1; k < new_phis.length (); k++)
    4748                 :            :         {
    4749                 :        109 :           gimple *next_phi = new_phis[k];
    4750                 :        109 :           tree second_vect = PHI_RESULT (next_phi);
    4751                 :        109 :           second_vect = gimple_convert (&stmts, vectype, second_vect);
    4752                 :        109 :           first_vect = gimple_build (&stmts, code, vectype,
    4753                 :            :                                      first_vect, second_vect);
    4754                 :            :         }
    4755                 :         93 :       gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
    4756                 :            : 
    4757                 :         93 :       new_phi_result = first_vect;
    4758                 :         93 :       new_phis.truncate (0);
    4759                 :         93 :       new_phis.safe_push (SSA_NAME_DEF_STMT (first_vect));
    4760                 :            :     }
    4761                 :            :   /* Likewise if we couldn't use a single def-use cycle.  */
    4762                 :      11103 :   else if (ncopies > 1)
    4763                 :            :     {
    4764                 :         14 :       gcc_assert (new_phis.length () == 1);
    4765                 :         14 :       gimple_seq stmts = NULL;
    4766                 :         14 :       tree first_vect = PHI_RESULT (new_phis[0]);
    4767                 :         14 :       first_vect = gimple_convert (&stmts, vectype, first_vect);
    4768                 :         14 :       stmt_vec_info next_phi_info = loop_vinfo->lookup_stmt (new_phis[0]);
    4769                 :         42 :       for (int k = 1; k < ncopies; ++k)
    4770                 :            :         {
    4771                 :         28 :           next_phi_info = STMT_VINFO_RELATED_STMT (next_phi_info);
    4772                 :         28 :           tree second_vect = PHI_RESULT (next_phi_info->stmt);
    4773                 :         28 :           second_vect = gimple_convert (&stmts, vectype, second_vect);
    4774                 :         28 :           first_vect = gimple_build (&stmts, code, vectype,
    4775                 :            :                                      first_vect, second_vect);
    4776                 :            :         }
    4777                 :         14 :       gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
    4778                 :         14 :       new_phi_result = first_vect;
    4779                 :         14 :       new_phis.truncate (0);
    4780                 :         14 :       new_phis.safe_push (SSA_NAME_DEF_STMT (first_vect));
    4781                 :            :     }
    4782                 :            :   else
    4783                 :      11089 :     new_phi_result = PHI_RESULT (new_phis[0]);
    4784                 :            : 
    4785                 :      11196 :   if (STMT_VINFO_REDUC_TYPE (reduc_info) == COND_REDUCTION
    4786                 :         47 :       && reduc_fn != IFN_LAST)
    4787                 :            :     {
    4788                 :            :       /* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
    4789                 :            :          various data values where the condition matched and another vector
    4790                 :            :          (INDUCTION_INDEX) containing all the indexes of those matches.  We
    4791                 :            :          need to extract the last matching index (which will be the index with
    4792                 :            :          highest value) and use this to index into the data vector.
    4793                 :            :          For the case where there were no matches, the data vector will contain
    4794                 :            :          all default values and the index vector will be all zeros.  */
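                         :            : 
                         :            :       /* A hypothetical worked example (values invented for clarity):
                         :            :          with NEW_PHI_RESULT = {7, 3, 0, 9} and INDUCTION_INDEX
                         :            :          = {2, 5, 0, 4}, the code below computes max_index = 5, keeps
                         :            :          only the data lane whose index equals 5 (giving {0, 3, 0, 0})
                         :            :          and reduces that vector down to the scalar result 3.  */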
    4795                 :            : 
    4796                 :            :       /* Get various versions of the type of the vector of indexes.  */
    4797                 :          5 :       tree index_vec_type = TREE_TYPE (induction_index);
    4798                 :          5 :       gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
    4799                 :          5 :       tree index_scalar_type = TREE_TYPE (index_vec_type);
    4800                 :          5 :       tree index_vec_cmp_type = truth_type_for (index_vec_type);
    4801                 :            : 
    4802                 :            :       /* Get an unsigned integer version of the type of the data vector.  */
    4803                 :          5 :       int scalar_precision
    4804                 :          5 :         = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
    4805                 :          5 :       tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
    4806                 :          5 :       tree vectype_unsigned = build_vector_type
    4807                 :          5 :         (scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
    4808                 :            : 
    4809                 :            :       /* First we need to create a vector (ZERO_VEC) of zeros and another
    4810                 :            :          vector (MAX_INDEX_VEC) filled with the last matching index, which we
    4811                 :            :          can create using a MAX reduction and then expanding.
    4812                 :            :          In the case where the loop never made any matches, the max index will
    4813                 :            :          be zero.  */
    4814                 :            : 
    4815                 :            :       /* Vector of {0, 0, 0,...}.  */
    4816                 :          5 :       tree zero_vec = build_zero_cst (vectype);
    4817                 :            : 
    4818                 :          5 :       gimple_seq stmts = NULL;
    4819                 :          5 :       new_phi_result = gimple_convert (&stmts, vectype, new_phi_result);
    4820                 :          5 :       gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
    4821                 :            : 
    4822                 :            :       /* Find maximum value from the vector of found indexes.  */
    4823                 :          5 :       tree max_index = make_ssa_name (index_scalar_type);
    4824                 :          5 :       gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
    4825                 :            :                                                           1, induction_index);
    4826                 :          5 :       gimple_call_set_lhs (max_index_stmt, max_index);
    4827                 :          5 :       gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
    4828                 :            : 
    4829                 :            :       /* Vector of {max_index, max_index, max_index,...}.  */
    4830                 :          5 :       tree max_index_vec = make_ssa_name (index_vec_type);
    4831                 :          5 :       tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
    4832                 :            :                                                       max_index);
    4833                 :          5 :       gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
    4834                 :            :                                                         max_index_vec_rhs);
    4835                 :          5 :       gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
    4836                 :            : 
    4837                 :            :       /* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
    4838                 :            :          with the vector (INDUCTION_INDEX) of found indexes, choosing values
    4839                 :            :          from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
    4840                 :            :          otherwise.  Only one value should match, resulting in a vector
    4841                 :            :          (VEC_COND) with one data value and the rest zeros.
    4842                 :            :          In the case where the loop never made any matches, every index will
    4843                 :            :          match, resulting in a vector with all data values (which will all be
    4844                 :            :          the default value).  */
    4845                 :            : 
    4846                 :            :       /* Compare the max index vector to the vector of found indexes to find
    4847                 :            :          the position of the max value.  */
    4848                 :          5 :       tree vec_compare = make_ssa_name (index_vec_cmp_type);
    4849                 :          5 :       gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
    4850                 :            :                                                       induction_index,
    4851                 :            :                                                       max_index_vec);
    4852                 :          5 :       gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
    4853                 :            : 
    4854                 :            :       /* Use the compare to choose either values from the data vector or
    4855                 :            :          zero.  */
    4856                 :          5 :       tree vec_cond = make_ssa_name (vectype);
    4857                 :          5 :       gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
    4858                 :            :                                                    vec_compare, new_phi_result,
    4859                 :            :                                                    zero_vec);
    4860                 :          5 :       gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
    4861                 :            : 
    4862                 :            :       /* Finally we need to extract the data value from the vector (VEC_COND)
    4863                 :            :          into a scalar (MATCHED_DATA_REDUC).  Logically we want to do an OR
    4864                 :            :          reduction, but because this doesn't exist, we can use a MAX reduction
    4865                 :            :          instead.  The data value might be signed or a float so we need to cast
    4866                 :            :          it first.
    4867                 :            :          In the case where the loop never made any matches, the data values are
    4868                 :            :          all identical, and so will reduce down correctly.  */
    4869                 :            : 
    4870                 :            :       /* Make the matched data values unsigned.  */
    4871                 :          5 :       tree vec_cond_cast = make_ssa_name (vectype_unsigned);
    4872                 :          5 :       tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
    4873                 :            :                                        vec_cond);
    4874                 :          5 :       gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
    4875                 :            :                                                         VIEW_CONVERT_EXPR,
    4876                 :            :                                                         vec_cond_cast_rhs);
    4877                 :          5 :       gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
    4878                 :            : 
    4879                 :            :       /* Reduce down to a scalar value.  */
    4880                 :          5 :       tree data_reduc = make_ssa_name (scalar_type_unsigned);
    4881                 :          5 :       gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
    4882                 :            :                                                            1, vec_cond_cast);
    4883                 :          5 :       gimple_call_set_lhs (data_reduc_stmt, data_reduc);
    4884                 :          5 :       gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
    4885                 :            : 
    4886                 :            :       /* Convert the reduced value back to the result type and set as the
    4887                 :            :          result.  */
    4888                 :          5 :       stmts = NULL;
    4889                 :          5 :       new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
    4890                 :            :                                data_reduc);
    4891                 :          5 :       gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
    4892                 :          5 :       scalar_results.safe_push (new_temp);
    4893                 :            :     }
    4894                 :      11191 :   else if (STMT_VINFO_REDUC_TYPE (reduc_info) == COND_REDUCTION
    4895                 :         42 :            && reduc_fn == IFN_LAST)
    4896                 :            :     {
    4897                 :            :       /* Condition reduction without supported IFN_REDUC_MAX.  Generate
    4898                 :            :          idx = 0;
    4899                 :            :          idx_val = induction_index[0];
    4900                 :            :          val = data_reduc[0];
    4901                 :            :          for (idx = 0, val = init, i = 0; i < nelts; ++i)
    4902                 :            :            if (induction_index[i] > idx_val)
    4903                 :            :              val = data_reduc[i], idx_val = induction_index[i];
    4904                 :            :          return val;  */
    4905                 :            : 
    4906                 :         42 :       tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
    4907                 :         42 :       tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
    4908                 :         42 :       unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
    4909                 :         42 :       poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
    4910                 :            :       /* Enforced by vectorizable_reduction, which ensures we have target
    4911                 :            :          support before allowing a conditional reduction on variable-length
    4912                 :            :          vectors.  */
    4913                 :         42 :       unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
    4914                 :         42 :       tree idx_val = NULL_TREE, val = NULL_TREE;
    4915                 :        318 :       for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
    4916                 :            :         {
    4917                 :        276 :           tree old_idx_val = idx_val;
    4918                 :        276 :           tree old_val = val;
    4919                 :        276 :           idx_val = make_ssa_name (idx_eltype);
    4920                 :        276 :           epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
    4921                 :            :                                              build3 (BIT_FIELD_REF, idx_eltype,
    4922                 :            :                                                      induction_index,
    4923                 :            :                                                      bitsize_int (el_size),
    4924                 :            :                                                      bitsize_int (off)));
    4925                 :        276 :           gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    4926                 :        276 :           val = make_ssa_name (data_eltype);
    4927                 :        276 :           epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
    4928                 :            :                                              build3 (BIT_FIELD_REF,
    4929                 :            :                                                      data_eltype,
    4930                 :            :                                                      new_phi_result,
    4931                 :            :                                                      bitsize_int (el_size),
    4932                 :            :                                                      bitsize_int (off)));
    4933                 :        276 :           gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    4934                 :        276 :           if (off != 0)
    4935                 :            :             {
    4936                 :        234 :               tree new_idx_val = idx_val;
    4937                 :        234 :               if (off != v_size - el_size)
    4938                 :            :                 {
    4939                 :        192 :                   new_idx_val = make_ssa_name (idx_eltype);
    4940                 :        192 :                   epilog_stmt = gimple_build_assign (new_idx_val,
    4941                 :            :                                                      MAX_EXPR, idx_val,
    4942                 :            :                                                      old_idx_val);
    4943                 :        192 :                   gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    4944                 :            :                 }
    4945                 :        234 :               tree new_val = make_ssa_name (data_eltype);
    4946                 :        234 :               epilog_stmt = gimple_build_assign (new_val,
    4947                 :            :                                                  COND_EXPR,
    4948                 :            :                                                  build2 (GT_EXPR,
    4949                 :            :                                                          boolean_type_node,
    4950                 :            :                                                          idx_val,
    4951                 :            :                                                          old_idx_val),
    4952                 :            :                                                  val, old_val);
    4953                 :        234 :               gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    4954                 :        234 :               idx_val = new_idx_val;
    4955                 :        234 :               val = new_val;
    4956                 :            :             }
    4957                 :            :         }
    4958                 :            :       /* Convert the reduced value back to the result type and set as the
    4959                 :            :          result.  */
    4960                 :         42 :       gimple_seq stmts = NULL;
    4961                 :         42 :       val = gimple_convert (&stmts, scalar_type, val);
    4962                 :         42 :       gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
    4963                 :         42 :       scalar_results.safe_push (val);
    4964                 :            :     }
    4965                 :            : 
    4966                 :            :   /* 2.3 Create the reduction code, using one of the three schemes described
    4967                 :            :          above. In SLP we simply need to extract all the elements from the 
    4968                 :            :          vector (without reducing them), so we use scalar shifts.  */
    4969                 :      11149 :   else if (reduc_fn != IFN_LAST && !slp_reduc)
    4970                 :            :     {
    4971                 :       6874 :       tree tmp;
    4972                 :       6874 :       tree vec_elem_type;
    4973                 :            : 
    4974                 :            :       /* Case 1:  Create:
    4975                 :            :          v_out2 = reduc_expr <v_out1>  */
    4976                 :            : 
    4977                 :       6874 :       if (dump_enabled_p ())
    4978                 :        216 :         dump_printf_loc (MSG_NOTE, vect_location,
    4979                 :            :                          "Reduce using direct vector reduction.\n");
    4980                 :            : 
    4981                 :       6874 :       gimple_seq stmts = NULL;
    4982                 :       6874 :       new_phi_result = gimple_convert (&stmts, vectype, new_phi_result);
    4983                 :       6874 :       vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
    4984                 :       6874 :       new_temp = gimple_build (&stmts, as_combined_fn (reduc_fn),
    4985                 :            :                                vec_elem_type, new_phi_result);
    4986                 :       6874 :       new_temp = gimple_convert (&stmts, scalar_type, new_temp);
    4987                 :       6874 :       gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
    4988                 :            : 
    4989                 :       6874 :       if ((STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
    4990                 :         63 :           && induc_val)
    4991                 :            :         {
    4992                 :            :           /* Earlier we set the initial value to be a vector of induc_val
    4993                 :            :              values.  Check the result and if it is induc_val then replace
    4994                 :            :              with the original initial value, unless induc_val is
    4995                 :            :              the same as initial_def already.  */
    4996                 :         61 :           tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
    4997                 :            :                                   induc_val);
    4998                 :            : 
    4999                 :         61 :           tmp = make_ssa_name (new_scalar_dest);
    5000                 :         61 :           epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
    5001                 :            :                                              initial_def, new_temp);
    5002                 :         61 :           gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    5003                 :         61 :           new_temp = tmp;
    5004                 :            :         }
    5005                 :            : 
    5006                 :       6874 :       scalar_results.safe_push (new_temp);
    5007                 :            :     }
    5008                 :       4275 :   else if (direct_slp_reduc)
    5009                 :            :     {
    5010                 :            :       /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
    5011                 :            :          with the elements for other SLP statements replaced with the
    5012                 :            :          neutral value.  We can then do a normal reduction on each vector.  */
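                         :            : 
                         :            :       /* Sketch of the idea (hypothetical values, group size 2): if the
                         :            :          SLP group computes two sums A and B and the vector of partial
                         :            :          results is {a0, b0, a1, b1}, then A is obtained by reducing
                         :            :          {a0, N, a1, N} and B by reducing {N, b0, N, b1}, where N is the
                         :            :          neutral value for the operation (0 for a sum).  */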
    5013                 :            : 
    5014                 :            :       /* Enforced by vectorizable_reduction.  */
    5015                 :            :       gcc_assert (new_phis.length () == 1);
    5016                 :            :       gcc_assert (pow2p_hwi (group_size));
    5017                 :            : 
    5018                 :            :       slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
    5019                 :            :       vec<stmt_vec_info> orig_phis
    5020                 :            :         = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
    5021                 :            :       gimple_seq seq = NULL;
    5022                 :            : 
    5023                 :            :       /* Build a vector {0, 1, 2, ...}, with the same number of elements
    5024                 :            :          and the same element size as VECTYPE.  */
    5025                 :            :       tree index = build_index_vector (vectype, 0, 1);
    5026                 :            :       tree index_type = TREE_TYPE (index);
    5027                 :            :       tree index_elt_type = TREE_TYPE (index_type);
    5028                 :            :       tree mask_type = truth_type_for (index_type);
    5029                 :            : 
    5030                 :            :       /* Create a vector that, for each element, identifies which of
    5031                 :            :          the REDUC_GROUP_SIZE results should use it.  */
    5032                 :            :       tree index_mask = build_int_cst (index_elt_type, group_size - 1);
    5033                 :            :       index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
    5034                 :            :                             build_vector_from_val (index_type, index_mask));
    5035                 :            : 
    5036                 :            :       /* Get a neutral vector value.  This is simply a splat of the neutral
    5037                 :            :          scalar value if we have one, otherwise the initial scalar value
    5038                 :            :          is itself a neutral value.  */
    5039                 :            :       tree vector_identity = NULL_TREE;
    5040                 :            :       tree neutral_op = NULL_TREE;
    5041                 :            :       if (slp_node)
    5042                 :            :         {
    5043                 :            :           stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (stmt_info);
    5044                 :            :           neutral_op
    5045                 :            :             = neutral_op_for_slp_reduction (slp_node_instance->reduc_phis,
    5046                 :            :                                             vectype, code, first != NULL);
    5047                 :            :         }
    5048                 :            :       if (neutral_op)
    5049                 :            :         vector_identity = gimple_build_vector_from_val (&seq, vectype,
    5050                 :            :                                                         neutral_op);
    5051                 :            :       for (unsigned int i = 0; i < group_size; ++i)
    5052                 :            :         {
    5053                 :            :           /* If there's no universal neutral value, we can use the
    5054                 :            :              initial scalar value from the original PHI.  This is used
    5055                 :            :              for MIN and MAX reduction, for example.  */
    5056                 :            :           if (!neutral_op)
    5057                 :            :             {
    5058                 :            :               tree scalar_value
    5059                 :            :                 = PHI_ARG_DEF_FROM_EDGE (orig_phis[i]->stmt,
    5060                 :            :                                          loop_preheader_edge (loop));
    5061                 :            :               scalar_value = gimple_convert (&seq, TREE_TYPE (vectype),
    5062                 :            :                                              scalar_value);
    5063                 :            :               vector_identity = gimple_build_vector_from_val (&seq, vectype,
    5064                 :            :                                                               scalar_value);
    5065                 :            :             }
    5066                 :            : 
    5067                 :            :           /* Calculate the equivalent of:
    5068                 :            : 
    5069                 :            :              sel[j] = (index[j] == i);
    5070                 :            : 
    5071                 :            :              which selects the elements of NEW_PHI_RESULT that should
    5072                 :            :              be included in the result.  */
    5073                 :            :           tree compare_val = build_int_cst (index_elt_type, i);
    5074                 :            :           compare_val = build_vector_from_val (index_type, compare_val);
    5075                 :            :           tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
    5076                 :            :                                    index, compare_val);
    5077                 :            : 
    5078                 :            :           /* Calculate the equivalent of:
    5079                 :            : 
    5080                 :            :              vec = sel ? new_phi_result : vector_identity;
    5081                 :            : 
    5082                 :            :              VEC is now suitable for a full vector reduction.  */
    5083                 :            :           tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
    5084                 :            :                                    sel, new_phi_result, vector_identity);
    5085                 :            : 
    5086                 :            :           /* Do the reduction and convert it to the appropriate type.  */
    5087                 :            :           tree scalar = gimple_build (&seq, as_combined_fn (reduc_fn),
    5088                 :            :                                       TREE_TYPE (vectype), vec);
    5089                 :            :           scalar = gimple_convert (&seq, scalar_type, scalar);
    5090                 :            :           scalar_results.safe_push (scalar);
    5091                 :            :         }
    5092                 :            :       gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
    5093                 :            :     }
    5094                 :            :   else
    5095                 :            :     {
    5096                 :       4275 :       bool reduce_with_shift;
    5097                 :       4275 :       tree vec_temp;
    5098                 :            : 
    5099                 :       4275 :       gcc_assert (slp_reduc || new_phis.length () == 1);
    5100                 :            : 
    5101                 :            :       /* See if the target wants to do the final (shift) reduction
    5102                 :            :          in a vector mode of smaller size and first reduce upper/lower
    5103                 :            :          halves against each other.  */
    5104                 :       4275 :       enum machine_mode mode1 = mode;
    5105                 :       4275 :       tree stype = TREE_TYPE (vectype);
    5106                 :       4275 :       unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
    5107                 :       4275 :       unsigned nunits1 = nunits;
    5108                 :       4275 :       if ((mode1 = targetm.vectorize.split_reduction (mode)) != mode
    5109                 :       4275 :           && new_phis.length () == 1)
    5110                 :            :         {
    5111                 :       2332 :           nunits1 = GET_MODE_NUNITS (mode1).to_constant ();
    5112                 :            :           /* For SLP reductions we have to make sure lanes match up, but
    5113                 :            :              since we're doing an individual-element final reduction,
    5114                 :            :              reducing the vector width here is even more important.
    5115                 :            :              ???  We could also separate lanes with permutes; for the common
    5116                 :            :              case of a power-of-two group size, odd/even extracts would work.  */
    5117                 :       1166 :           if (slp_reduc && nunits != nunits1)
    5118                 :            :             {
    5119                 :        156 :               nunits1 = least_common_multiple (nunits1, group_size);
    5120                 :        312 :               gcc_assert (exact_log2 (nunits1) != -1 && nunits1 <= nunits);
    5121                 :            :             }
    5122                 :            :         }
    5123                 :       4275 :       if (!slp_reduc
    5124                 :       4275 :           && (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
    5125                 :       2020 :         nunits1 = GET_MODE_NUNITS (mode1).to_constant ();
    5126                 :            : 
    5127                 :       4275 :       tree vectype1 = get_related_vectype_for_scalar_type (TYPE_MODE (vectype),
    5128                 :            :                                                            stype, nunits1);
    5129                 :       4275 :       reduce_with_shift = have_whole_vector_shift (mode1);
    5130                 :       4275 :       if (!VECTOR_MODE_P (mode1))
    5131                 :            :         reduce_with_shift = false;
    5132                 :            :       else
    5133                 :            :         {
    5134                 :       4275 :           optab optab = optab_for_tree_code (code, vectype1, optab_default);
    5135                 :       4275 :           if (optab_handler (optab, mode1) == CODE_FOR_nothing)
    5136                 :         38 :             reduce_with_shift = false;
    5137                 :            :         }
    5138                 :            : 
    5139                 :            :       /* First reduce the vector to the vector size we should do the
    5140                 :            :          shift reduction on, by combining upper and lower halves.  */
    5141                 :       4275 :       new_temp = new_phi_result;
    5142                 :       5693 :       while (nunits > nunits1)
    5143                 :            :         {
    5144                 :       1418 :           nunits /= 2;
    5145                 :       1418 :           vectype1 = get_related_vectype_for_scalar_type (TYPE_MODE (vectype),
    5146                 :            :                                                           stype, nunits);
    5147                 :       1418 :           unsigned int bitsize = tree_to_uhwi (TYPE_SIZE (vectype1));
    5148                 :            : 
    5149                 :            :           /* The target has to make sure we support lowpart/highpart
    5150                 :            :              extraction, either via a direct vector extract or through
    5151                 :            :              punning to an integer mode.  */
    5152                 :       1418 :           tree dst1, dst2;
    5153                 :       2836 :           if (convert_optab_handler (vec_extract_optab,
    5154                 :       1418 :                                      TYPE_MODE (TREE_TYPE (new_temp)),
    5155                 :       1418 :                                      TYPE_MODE (vectype1))
    5156                 :            :               != CODE_FOR_nothing)
    5157                 :            :             {
    5158                 :            :               /* Extract sub-vectors directly once vec_extract becomes
    5159                 :            :                  a conversion optab.  */
    5160                 :       1418 :               dst1 = make_ssa_name (vectype1);
    5161                 :       1418 :               epilog_stmt
    5162                 :       2836 :                   = gimple_build_assign (dst1, BIT_FIELD_REF,
    5163                 :            :                                          build3 (BIT_FIELD_REF, vectype1,
    5164                 :       1418 :                                                  new_temp, TYPE_SIZE (vectype1),
    5165                 :       1418 :                                                  bitsize_int (0)));
    5166                 :       1418 :               gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    5167                 :       1418 :               dst2 = make_ssa_name (vectype1);
    5168                 :       1418 :               epilog_stmt
    5169                 :       2836 :                   = gimple_build_assign (dst2, BIT_FIELD_REF,
    5170                 :            :                                          build3 (BIT_FIELD_REF, vectype1,
    5171                 :       1418 :                                                  new_temp, TYPE_SIZE (vectype1),
    5172                 :            :                                                  bitsize_int (bitsize)));
    5173                 :       1418 :               gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    5174                 :            :             }
    5175                 :            :           else
    5176                 :            :             {
    5177                 :            :               /* Extract via punning to appropriately sized integer mode
    5178                 :            :                  vector.  */
    5179                 :          0 :               tree eltype = build_nonstandard_integer_type (bitsize, 1);
    5180                 :          0 :               tree etype = build_vector_type (eltype, 2);
    5181                 :          0 :               gcc_assert (convert_optab_handler (vec_extract_optab,
    5182                 :            :                                                  TYPE_MODE (etype),
    5183                 :            :                                                  TYPE_MODE (eltype))
    5184                 :            :                           != CODE_FOR_nothing);
    5185                 :          0 :               tree tem = make_ssa_name (etype);
    5186                 :          0 :               epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
    5187                 :            :                                                  build1 (VIEW_CONVERT_EXPR,
    5188                 :            :                                                          etype, new_temp));
    5189                 :          0 :               gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    5190                 :          0 :               new_temp = tem;
    5191                 :          0 :               tem = make_ssa_name (eltype);
    5192                 :          0 :               epilog_stmt
    5193                 :          0 :                   = gimple_build_assign (tem, BIT_FIELD_REF,
    5194                 :            :                                          build3 (BIT_FIELD_REF, eltype,
    5195                 :          0 :                                                  new_temp, TYPE_SIZE (eltype),
    5196                 :          0 :                                                  bitsize_int (0)));
    5197                 :          0 :               gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    5198                 :          0 :               dst1 = make_ssa_name (vectype1);
    5199                 :          0 :               epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
    5200                 :            :                                                  build1 (VIEW_CONVERT_EXPR,
    5201                 :            :                                                          vectype1, tem));
    5202                 :          0 :               gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    5203                 :          0 :               tem = make_ssa_name (eltype);
    5204                 :          0 :               epilog_stmt
    5205                 :          0 :                   = gimple_build_assign (tem, BIT_FIELD_REF,
    5206                 :            :                                          build3 (BIT_FIELD_REF, eltype,
    5207                 :          0 :                                                  new_temp, TYPE_SIZE (eltype),
    5208                 :            :                                                  bitsize_int (bitsize)));
    5209                 :          0 :               gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    5210                 :          0 :               dst2 = make_ssa_name (vectype1);
    5211                 :          0 :               epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
    5212                 :            :                                                  build1 (VIEW_CONVERT_EXPR,
    5213                 :            :                                                          vectype1, tem));
    5214                 :          0 :               gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    5215                 :            :             }
    5216                 :            : 
    5217                 :       1418 :           new_temp = make_ssa_name (vectype1);
    5218                 :       1418 :           epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
    5219                 :       1418 :           gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    5220                 :       1418 :           new_phis[0] = epilog_stmt;
    5221                 :            :         }
    5222                 :            : 
    5223                 :       4275 :       if (reduce_with_shift && !slp_reduc)
    5224                 :            :         {
    5225                 :       3432 :           int element_bitsize = tree_to_uhwi (bitsize);
    5226                 :            :           /* Enforced by vectorizable_reduction, which disallows SLP reductions
    5227                 :            :              for variable-length vectors and also requires direct target support
    5228                 :            :              for loop reductions.  */
    5229                 :       3432 :           int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
    5230                 :       3432 :           int nelements = vec_size_in_bits / element_bitsize;
    5231                 :       6864 :           vec_perm_builder sel;
    5232                 :       6864 :           vec_perm_indices indices;
    5233                 :            : 
    5234                 :       3432 :           int elt_offset;
    5235                 :            : 
    5236                 :       3432 :           tree zero_vec = build_zero_cst (vectype1);
    5237                 :            :           /* Case 2: Create:
    5238                 :            :              for (offset = nelements/2; offset >= 1; offset/=2)
    5239                 :            :                 {
    5240                 :            :                   Create:  va' = vec_shift <va, offset>
    5241                 :            :                   Create:  va = vop <va, va'>
    5242                 :            :                 }  */
    5243                 :            : 
    5244                 :       3432 :           tree rhs;
    5245                 :            : 
    5246                 :       3432 :           if (dump_enabled_p ())
    5247                 :        722 :             dump_printf_loc (MSG_NOTE, vect_location,
    5248                 :            :                              "Reduce using vector shifts\n");
    5249                 :            : 
    5250                 :       3432 :           gimple_seq stmts = NULL;
    5251                 :       3432 :           new_temp = gimple_convert (&stmts, vectype1, new_temp);
    5252                 :       3432 :           for (elt_offset = nelements / 2;
    5253                 :      11019 :                elt_offset >= 1;
    5254                 :       7587 :                elt_offset /= 2)
    5255                 :            :             {
    5256                 :       7587 :               calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
    5257                 :       7587 :               indices.new_vector (sel, 2, nelements);
    5258                 :       7587 :               tree mask = vect_gen_perm_mask_any (vectype1, indices);
    5259                 :       7587 :               new_name = gimple_build (&stmts, VEC_PERM_EXPR, vectype1,
    5260                 :            :                                        new_temp, zero_vec, mask);
    5261                 :       7587 :               new_temp = gimple_build (&stmts, code,
    5262                 :            :                                        vectype1, new_name, new_temp);
    5263                 :            :             }
    5264                 :       3432 :           gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
    5265                 :            : 
    5266                 :            :           /* 2.4  Extract the final scalar result.  Create:
    5267                 :            :              s_out3 = extract_field <v_out2, bitpos>  */
    5268                 :            : 
    5269                 :       3432 :           if (dump_enabled_p ())
    5270                 :        722 :             dump_printf_loc (MSG_NOTE, vect_location,
    5271                 :            :                              "extract scalar result\n");
    5272                 :            : 
    5273                 :       3432 :           rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
    5274                 :            :                         bitsize, bitsize_zero_node);
    5275                 :       3432 :           epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
    5276                 :       3432 :           new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
    5277                 :       3432 :           gimple_assign_set_lhs (epilog_stmt, new_temp);
    5278                 :       3432 :           gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    5279                 :       3432 :           scalar_results.safe_push (new_temp);
    5280                 :            :         }
    5281                 :            :       else
    5282                 :            :         {
    5283                 :            :           /* Case 3: Create:
    5284                 :            :              s = extract_field <v_out2, 0>
    5285                 :            :              for (offset = element_size;
    5286                 :            :                   offset < vector_size;
    5287                 :            :                   offset += element_size;)
    5288                 :            :                {
    5289                 :            :                  Create:  s' = extract_field <v_out2, offset>
    5290                 :            :                  Create:  s = op <s, s'>  // For non SLP cases
    5291                 :            :                }  */
    5292                 :            : 
    5293                 :        843 :           if (dump_enabled_p ())
    5294                 :        205 :             dump_printf_loc (MSG_NOTE, vect_location,
    5295                 :            :                              "Reduce using scalar code.\n");
    5296                 :            : 
    5297                 :        843 :           int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
    5298                 :        843 :           int element_bitsize = tree_to_uhwi (bitsize);
    5299                 :        843 :           tree compute_type = TREE_TYPE (vectype);
    5300                 :        843 :           gimple_seq stmts = NULL;
    5301                 :       1741 :           FOR_EACH_VEC_ELT (new_phis, i, new_phi)
    5302                 :            :             {
    5303                 :        898 :               int bit_offset;
    5304                 :        898 :               if (gimple_code (new_phi) == GIMPLE_PHI)
    5305                 :        725 :                 vec_temp = PHI_RESULT (new_phi);
    5306                 :            :               else
    5307                 :        173 :                 vec_temp = gimple_assign_lhs (new_phi);
    5308                 :        898 :               new_temp = gimple_build (&stmts, BIT_FIELD_REF, compute_type,
    5309                 :            :                                        vec_temp, bitsize, bitsize_zero_node);
    5310                 :            : 
    5311                 :            :               /* In SLP we don't need to apply the reduction operation, so we
    5312                 :            :                  just collect the s' values in SCALAR_RESULTS.  */
    5313                 :        898 :               if (slp_reduc)
    5314                 :        404 :                 scalar_results.safe_push (new_temp);
    5315                 :            : 
    5316                 :       1953 :               for (bit_offset = element_bitsize;
    5317                 :       2851 :                    bit_offset < vec_size_in_bits;
    5318                 :       1953 :                    bit_offset += element_bitsize)
    5319                 :            :                 {
    5320                 :       1953 :                   tree bitpos = bitsize_int (bit_offset);
    5321                 :       1953 :                   new_name = gimple_build (&stmts, BIT_FIELD_REF,
    5322                 :            :                                            compute_type, vec_temp,
    5323                 :            :                                            bitsize, bitpos);
    5324                 :       1953 :                   if (slp_reduc)
    5325                 :            :                     {
    5326                 :            :                       /* In SLP we don't need to apply the reduction operation,
    5327                 :            :                          so we just collect the s' values in SCALAR_RESULTS.  */
    5328                 :       1020 :                       new_temp = new_name;
    5329                 :       1020 :                       scalar_results.safe_push (new_name);
    5330                 :            :                     }
    5331                 :            :                   else
    5332                 :        933 :                     new_temp = gimple_build (&stmts, code, compute_type,
    5333                 :            :                                              new_name, new_temp);
    5334                 :            :                 }
    5335                 :            :             }
    5336                 :            : 
    5337                 :            :           /* The only case where we need to reduce scalar results in SLP is
    5338                 :            :              unrolling.  If the size of SCALAR_RESULTS is greater than
    5339                 :            :              REDUC_GROUP_SIZE, we reduce them by combining elements modulo
    5340                 :            :              REDUC_GROUP_SIZE.  */
    5341                 :        843 :           if (slp_reduc)
    5342                 :            :             {
    5343                 :        349 :               tree res, first_res, new_res;
    5344                 :            :             
    5345                 :            :               /* Reduce multiple scalar results in case of SLP unrolling.  */
    5346                 :       1013 :               for (j = group_size; scalar_results.iterate (j, &res);
    5347                 :            :                    j++)
    5348                 :            :                 {
    5349                 :        664 :                   first_res = scalar_results[j % group_size];
    5350                 :        664 :                   new_res = gimple_build (&stmts, code, compute_type,
    5351                 :            :                                           first_res, res);
    5352                 :        664 :                   scalar_results[j % group_size] = new_res;
    5353                 :            :                 }
    5354                 :       1109 :               for (k = 0; k < group_size; k++)
    5355                 :        760 :                 scalar_results[k] = gimple_convert (&stmts, scalar_type,
    5356                 :        760 :                                                     scalar_results[k]);
    5357                 :            :             }
    5358                 :            :           else
    5359                 :            :             {
    5360                 :            :               /* Not SLP - we have one scalar to keep in SCALAR_RESULTS.  */
    5361                 :        494 :               new_temp = gimple_convert (&stmts, scalar_type, new_temp);
    5362                 :        494 :               scalar_results.safe_push (new_temp);
    5363                 :            :             }
    5364                 :            : 
    5365                 :        843 :           gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
    5366                 :            :         }
    5367                 :            : 
    5368                 :       4275 :       if ((STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
    5369                 :          0 :           && induc_val)
    5370                 :            :         {
    5371                 :            :           /* Earlier we set the initial value to be a vector of induc_val
    5372                 :            :              values.  Check the result and if it is induc_val then replace
    5373                 :            :              it with the original initial value, unless induc_val is
    5374                 :            :              the same as initial_def already.  */
    5375                 :          0 :           tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
    5376                 :            :                                   induc_val);
    5377                 :            : 
    5378                 :          0 :           tree tmp = make_ssa_name (new_scalar_dest);
    5379                 :          0 :           epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
    5380                 :            :                                              initial_def, new_temp);
    5381                 :          0 :           gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
    5382                 :          0 :           scalar_results[0] = tmp;
    5383                 :            :         }
    5384                 :            :     }
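                         :            : 
                         :            :   /* Editor's note: the worked example below is an illustration added for
                         :            :      this report; it is not part of the original source.  It traces the
                         :            :      "reduce using vector shifts" strategy (Case 2 above) for a
                         :            :      hypothetical 4-lane integer PLUS reduction with va = {1, 2, 3, 4}:
                         :            : 
                         :            :        offset = 2:  va' = vec_shift <va, 2> = {3, 4, 0, 0}
                         :            :                     va  = va + va'          = {4, 6, 3, 4}
                         :            :        offset = 1:  va' = vec_shift <va, 1> = {6, 3, 4, 0}
                         :            :                     va  = va + va'          = {10, 9, 7, 4}
                         :            : 
                         :            :      s_out3 = extract_field <va, 0> = 10 = 1 + 2 + 3 + 4.  The lanes
                         :            :      shifted in are zero, the neutral value for PLUS, so lane 0 holds the
                         :            :      full reduction after log2(nelements) steps.  */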
    5385                 :            :  
    5386                 :            :   /* 2.5 Adjust the final result by the initial value of the reduction
    5387                 :            :          variable. (When such adjustment is not needed, then
    5388                 :            :          'adjustment_def' is zero).  For example, if code is PLUS we create:
    5389                 :            :          new_temp = loop_exit_def + adjustment_def  */
    5390                 :            : 
    5391                 :      11196 :   if (adjustment_def)
    5392                 :            :     {
    5393                 :       8907 :       gcc_assert (!slp_reduc);
    5394                 :       8907 :       gimple_seq stmts = NULL;
    5395                 :       8907 :       if (nested_in_vect_loop)
    5396                 :            :         {
    5397                 :          0 :           new_phi = new_phis[0];
    5398                 :          0 :           gcc_assert (VECTOR_TYPE_P (TREE_TYPE (adjustment_def)));
    5399                 :          0 :           adjustment_def = gimple_convert (&stmts, vectype, adjustment_def);
    5400                 :          0 :           new_temp = gimple_build (&stmts, code, vectype,
    5401                 :            :                                    PHI_RESULT (new_phi), adjustment_def);
    5402                 :            :         }
    5403                 :            :       else
    5404                 :            :         {
    5405                 :       8907 :           new_temp = scalar_results[0];
    5406                 :       8907 :           gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
    5407                 :       8907 :           adjustment_def = gimple_convert (&stmts, scalar_type, adjustment_def);
    5408                 :       8907 :           new_temp = gimple_build (&stmts, code, scalar_type,
    5409                 :            :                                    new_temp, adjustment_def);
    5410                 :            :         }
    5411                 :            : 
    5412                 :       8907 :       epilog_stmt = gimple_seq_last_stmt (stmts);
    5413                 :       8907 :       gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
    5414                 :       8907 :       if (nested_in_vect_loop)
    5415                 :            :         {
    5416                 :          0 :           stmt_vec_info epilog_stmt_info = loop_vinfo->add_stmt (epilog_stmt);
    5417                 :          0 :           STMT_VINFO_RELATED_STMT (epilog_stmt_info)
    5418                 :          0 :             = STMT_VINFO_RELATED_STMT (loop_vinfo->lookup_stmt (new_phi));
    5419                 :            : 
    5420                 :          0 :           if (!double_reduc)
    5421                 :          0 :             scalar_results.quick_push (new_temp);
    5422                 :            :           else
    5423                 :          0 :             scalar_results[0] = new_temp;
    5424                 :            :         }
    5425                 :            :       else
    5426                 :       8907 :         scalar_results[0] = new_temp;
    5427                 :            : 
    5428                 :       8907 :       new_phis[0] = epilog_stmt;
    5429                 :            :     }
    5430                 :            : 
    5431                 :      11196 :   if (double_reduc)
    5432                 :         39 :     loop = loop->inner;
    5433                 :            : 
    5434                 :            :   /* 2.6  Handle the loop-exit phis.  Replace the uses of scalar loop-exit
    5435                 :            :           phis with new adjusted scalar results, i.e., replace use <s_out0>
    5436                 :            :           with use <s_out4>.        
    5437                 :            : 
    5438                 :            :      Transform:
    5439                 :            :         loop_exit:
    5440                 :            :           s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
    5441                 :            :           v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
    5442                 :            :           v_out2 = reduce <v_out1>
    5443                 :            :           s_out3 = extract_field <v_out2, 0>
    5444                 :            :           s_out4 = adjust_result <s_out3>
    5445                 :            :           use <s_out0>
    5446                 :            :           use <s_out0>
    5447                 :            : 
    5448                 :            :      into:
    5449                 :            : 
    5450                 :            :         loop_exit:
    5451                 :            :           s_out0 = phi <s_loop>                 # (scalar) EXIT_PHI
    5452                 :            :           v_out1 = phi <VECT_DEF>               # NEW_EXIT_PHI
    5453                 :            :           v_out2 = reduce <v_out1>
    5454                 :            :           s_out3 = extract_field <v_out2, 0>
    5455                 :            :           s_out4 = adjust_result <s_out3>
    5456                 :            :           use <s_out4>  
    5457                 :            :           use <s_out4> */
    5458                 :            : 
    5459                 :            : 
    5460                 :            :   /* In an SLP reduction chain we reduce the vector results into one vector
    5461                 :            :      if necessary, hence we set REDUC_GROUP_SIZE to 1 here.  SCALAR_DEST is
    5462                 :            :      the LHS of the last stmt in the reduction chain, since we are looking
    5463                 :            :      for the loop exit phi node.  */
    5464                 :      11196 :   if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
    5465                 :            :     {
    5466                 :         93 :       stmt_vec_info dest_stmt_info
    5467                 :         93 :         = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1]);
    5468                 :         93 :       scalar_dest = gimple_assign_lhs (dest_stmt_info->stmt);
    5469                 :         93 :       group_size = 1;
    5470                 :            :     }
    5471                 :            : 
    5472                 :            :   /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS
    5473                 :            :      (in case REDUC_GROUP_SIZE is greater than the vectorization factor).
    5474                 :            :      Therefore, we need to match SCALAR_RESULTS with the corresponding
    5475                 :            :      statements.  The first (REDUC_GROUP_SIZE / number of new vector stmts)
    5476                 :            :      scalar results correspond to the first vector stmt, etc.
    5477                 :            :      (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)).  */
    5478                 :      22392 :   if (group_size > new_phis.length ())
    5479                 :        332 :     gcc_assert (!(group_size % new_phis.length ()));
    5480                 :            : 
    5481                 :      22803 :   for (k = 0; k < group_size; k++)
    5482                 :            :     {
    5483                 :      11607 :       if (slp_reduc)
    5484                 :            :         {
    5485                 :        760 :           stmt_vec_info scalar_stmt_info = SLP_TREE_SCALAR_STMTS (slp_node)[k];
    5486                 :            : 
    5487                 :        760 :           orig_stmt_info = STMT_VINFO_RELATED_STMT (scalar_stmt_info);
    5488                 :            :           /* SLP statements can't participate in patterns.  */
    5489                 :        760 :           gcc_assert (!orig_stmt_info);
    5490                 :        760 :           scalar_dest = gimple_assign_lhs (scalar_stmt_info->stmt);
    5491                 :            :         }
    5492                 :            : 
    5493                 :      11607 :       if (nested_in_vect_loop)
    5494                 :            :         {
    5495                 :         39 :           if (double_reduc)
    5496                 :            :             loop = outer_loop;
    5497                 :            :           else
    5498                 :          0 :             gcc_unreachable ();
    5499                 :            :         }
    5500                 :            : 
    5501                 :      11607 :       phis.create (3);
    5502                 :            :       /* Find the loop-closed-use at the loop exit of the original scalar
    5503                 :            :          result.  (The reduction result is expected to have two immediate uses,
    5504                 :            :          one at the latch block, and one at the loop exit).  For double
    5505                 :            :          reductions we are looking for exit phis of the outer loop.  */
    5506                 :      48977 :       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
    5507                 :            :         {
    5508                 :      37370 :           if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
    5509                 :            :             {
    5510                 :      11563 :               if (!is_gimple_debug (USE_STMT (use_p)))
    5511                 :      11563 :                 phis.safe_push (USE_STMT (use_p));
    5512                 :            :             }
    5513                 :            :           else
    5514                 :            :             {
    5515                 :      25807 :               if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
    5516                 :            :                 {
    5517                 :         78 :                   tree phi_res = PHI_RESULT (USE_STMT (use_p));
    5518                 :            : 
    5519                 :        195 :                   FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
    5520                 :            :                     {
    5521                 :        117 :                       if (!flow_bb_inside_loop_p (loop,
    5522                 :        117 :                                              gimple_bb (USE_STMT (phi_use_p)))
    5523                 :        117 :                           && !is_gimple_debug (USE_STMT (phi_use_p)))
    5524                 :         39 :                         phis.safe_push (USE_STMT (phi_use_p));
    5525                 :            :                     }
    5526                 :            :                 }
    5527                 :            :             }
    5528                 :            :         }
    5529                 :            : 
    5530                 :      23209 :       FOR_EACH_VEC_ELT (phis, i, exit_phi)
    5531                 :            :         {
    5532                 :            :           /* Replace the uses:  */
    5533                 :      11602 :           orig_name = PHI_RESULT (exit_phi);
    5534                 :      11602 :           scalar_result = scalar_results[k];
    5535                 :      32939 :           FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
    5536                 :            :             {
    5537                 :      64015 :               FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
    5538                 :      21339 :                 SET_USE (use_p, scalar_result);
    5539                 :      42674 :               update_stmt (use_stmt);
    5540                 :            :             }
    5541                 :            :         }
    5542                 :            : 
    5543                 :      23214 :       phis.release ();
    5544                 :            :     }
    5545                 :      11196 : }
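                         :            : 
                         :            : /* Editor's note: the function below is an illustrative sketch added for
                         :            :    this report; it is not part of the original GCC source.  It models, on
                         :            :    plain C arrays, the lane-selection step performed near the top of the
                         :            :    epilogue above for SLP and COND reductions: for each group position I,
                         :            :    lanes whose INDEX equals I are kept and every other lane is replaced by
                         :            :    the identity value, after which a full-vector reduction yields the I-th
                         :            :    scalar result.  All names are hypothetical; '+' stands for the reduction
                         :            :    code and IDENTITY for its neutral value (0 for PLUS).  */
                         :            : 
                         :            : static void
                         :            : model_group_extraction (const int *new_phi_result, const int *index,
                         :            :                         int nlanes, int group_size, int identity,
                         :            :                         int *scalar_results)
                         :            : {
                         :            :   for (int i = 0; i < group_size; ++i)
                         :            :     {
                         :            :       int scalar = identity;
                         :            :       for (int j = 0; j < nlanes; ++j)
                         :            :         {
                         :            :           /* sel[j] = (index[j] == i);
                         :            :              vec[j] = sel[j] ? new_phi_result[j] : vector_identity;  */
                         :            :           int lane = (index[j] == i) ? new_phi_result[j] : identity;
                         :            :           scalar += lane;   /* REDUC_FN applied over VEC, lane by lane.  */
                         :            :         }
                         :            :       scalar_results[i] = scalar;
                         :            :     }
                         :            : }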
    5546                 :            : 
    5547                 :            : /* Return a vector of type VECTYPE that is equal to the vector select
    5548                 :            :    operation "MASK ? VEC : IDENTITY".  Insert the select statements
    5549                 :            :    before GSI.  */
    5550                 :            : 
    5551                 :            : static tree
    5552                 :          0 : merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
    5553                 :            :                      tree vec, tree identity)
    5554                 :            : {
    5555                 :          0 :   tree cond = make_temp_ssa_name (vectype, NULL, "cond");
    5556                 :          0 :   gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
    5557                 :            :                                           mask, vec, identity);
    5558                 :          0 :   gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
    5559                 :          0 :   return cond;
    5560                 :            : }
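                         :            : 
                         :            : /* Editor's note: illustrative sketch only, not part of the original
                         :            :    source.  It spells out the per-lane semantics of the VEC_COND_EXPR
                         :            :    built by merge_with_identity above.  The element type and the names
                         :            :    are assumptions made for the example.  */
                         :            : 
                         :            : static void
                         :            : model_merge_with_identity (const unsigned char *mask, const double *vec,
                         :            :                            double identity, double *cond, int nlanes)
                         :            : {
                         :            :   for (int i = 0; i < nlanes; ++i)
                         :            :     cond[i] = mask[i] ? vec[i] : identity;  /* cond = mask ? vec : identity */
                         :            : }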
    5561                 :            : 
    5562                 :            : /* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
    5563                 :            :    order, starting with LHS.  Insert the extraction statements before GSI and
    5564                 :            :    associate the new scalar SSA names with variable SCALAR_DEST.
    5565                 :            :    Return the SSA name for the result.  */
    5566                 :            : 
    5567                 :            : static tree
    5568                 :        497 : vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
    5569                 :            :                        tree_code code, tree lhs, tree vector_rhs)
    5570                 :            : {
    5571                 :        497 :   tree vectype = TREE_TYPE (vector_rhs);
    5572                 :        497 :   tree scalar_type = TREE_TYPE (vectype);
    5573                 :        497 :   tree bitsize = TYPE_SIZE (scalar_type);
    5574                 :        497 :   unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
    5575                 :        497 :   unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
    5576                 :            : 
    5577                 :        497 :   for (unsigned HOST_WIDE_INT bit_offset = 0;
    5578                 :       2171 :        bit_offset < vec_size_in_bits;
    5579                 :       1674 :        bit_offset += element_bitsize)
    5580                 :            :     {
    5581                 :       1674 :       tree bitpos = bitsize_int (bit_offset);
    5582                 :       1674 :       tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
    5583                 :            :                          bitsize, bitpos);
    5584                 :            : 
    5585                 :       1674 :       gassign *stmt = gimple_build_assign (scalar_dest, rhs);
    5586                 :       1674 :       rhs = make_ssa_name (scalar_dest, stmt);
    5587                 :       1674 :       gimple_assign_set_lhs (stmt, rhs);
    5588                 :       1674 :       gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
    5589                 :            : 
    5590                 :       1674 :       stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
    5591                 :       1674 :       tree new_name = make_ssa_name (scalar_dest, stmt);
    5592                 :       1674 :       gimple_assign_set_lhs (stmt, new_name);
    5593                 :       1674 :       gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
    5594                 :       1674 :       lhs = new_name;
    5595                 :            :     }
    5596                 :        497 :   return lhs;
    5597                 :            : }
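                         :            : 
                         :            : /* Editor's note: illustrative sketch only, not part of the original
                         :            :    source.  It shows the scalar sequence that vect_expand_fold_left above
                         :            :    emits for an N-element vector: the accumulator LHS is combined with
                         :            :    every element strictly left to right, preserving the evaluation order
                         :            :    of the original scalar loop.  '+' stands for CODE and the double
                         :            :    element type is an assumption for the example.  */
                         :            : 
                         :            : static double
                         :            : model_expand_fold_left (double lhs, const double *vector_rhs, int nlanes)
                         :            : {
                         :            :   for (int i = 0; i < nlanes; ++i)
                         :            :     lhs = lhs + vector_rhs[i];   /* s = op <s, extract_field <v, i>>  */
                         :            :   return lhs;
                         :            : }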
    5598                 :            : 
    5599                 :            : /* Get a masked internal function equivalent to REDUC_FN.  VECTYPE_IN is the
    5600                 :            :    type of the vector input.  */
    5601                 :            : 
    5602                 :            : static internal_fn
    5603                 :        410 : get_masked_reduction_fn (internal_fn reduc_fn, tree vectype_in)
    5604                 :            : {
    5605                 :        410 :   internal_fn mask_reduc_fn;
    5606                 :            : 
    5607                 :          0 :   switch (reduc_fn)
    5608                 :            :     {
    5609                 :          0 :     case IFN_FOLD_LEFT_PLUS:
    5610                 :          0 :       mask_reduc_fn = IFN_MASK_FOLD_LEFT_PLUS;
    5611                 :          0 :       break;
    5612                 :            : 
    5613                 :            :     default:
    5614                 :            :       return IFN_LAST;
    5615                 :            :     }
    5616                 :            : 
    5617                 :          0 :   if (direct_internal_fn_supported_p (mask_reduc_fn, vectype_in,
    5618                 :            :                                       OPTIMIZE_FOR_SPEED))
    5619                 :          0 :     return mask_reduc_fn;
    5620                 :            :   return IFN_LAST;
    5621                 :            : }
    5622                 :            : 
    5623                 :            : /* Perform an in-order reduction (FOLD_LEFT_REDUCTION).  STMT_INFO is the
    5624                 :            :    statement that sets the live-out value.  REDUC_DEF_STMT is the phi
    5625                 :            :    statement.  CODE is the operation performed by STMT_INFO and OPS are
    5626                 :            :    its scalar operands.  REDUC_INDEX is the index of the operand in
    5627                 :            :    OPS that is set by REDUC_DEF_STMT.  REDUC_FN is the function that
    5628                 :            :    implements in-order reduction, or IFN_LAST if we should open-code it.
    5629                 :            :    VECTYPE_IN is the type of the vector input.  MASKS specifies the masks
    5630                 :            :    that should be used to control the operation in a fully-masked loop.  */
    5631                 :            : 
    5632                 :            : static bool
    5633                 :        410 : vectorize_fold_left_reduction (stmt_vec_info stmt_info,
    5634                 :            :                                gimple_stmt_iterator *gsi,
    5635                 :            :                                stmt_vec_info *vec_stmt, slp_tree slp_node,
    5636                 :            :                                gimple *reduc_def_stmt,
    5637                 :            :                                tree_code code, internal_fn reduc_fn,
    5638                 :            :                                tree ops[3], tree vectype_in,
    5639                 :            :                                int reduc_index, vec_loop_masks *masks)
    5640                 :            : {
    5641                 :        410 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    5642                 :        410 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    5643                 :        410 :   tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
    5644                 :        410 :   stmt_vec_info new_stmt_info = NULL;
    5645                 :        410 :   internal_fn mask_reduc_fn = get_masked_reduction_fn (reduc_fn, vectype_in);
    5646                 :            : 
    5647                 :        410 :   int ncopies;
    5648                 :        410 :   if (slp_node)
    5649                 :            :     ncopies = 1;
    5650                 :            :   else
    5651                 :        372 :     ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
    5652                 :            : 
    5653                 :        410 :   gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
    5654                 :        410 :   gcc_assert (ncopies == 1);
    5655                 :        410 :   gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
    5656                 :            : 
    5657                 :        410 :   if (slp_node)
    5658                 :         38 :     gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
    5659                 :            :                           TYPE_VECTOR_SUBPARTS (vectype_in)));
    5660                 :            : 
    5661                 :        410 :   tree op0 = ops[1 - reduc_index];
    5662                 :            : 
    5663                 :        410 :   int group_size = 1;
    5664                 :        410 :   stmt_vec_info scalar_dest_def_info;
    5665                 :        410 :   auto_vec<tree> vec_oprnds0;
    5666                 :        410 :   if (slp_node)
    5667                 :            :     {
    5668                 :         76 :       auto_vec<vec<tree> > vec_defs (2);
    5669                 :         38 :       vect_get_slp_defs (slp_node, &vec_defs);
    5670                 :         38 :       vec_oprnds0.safe_splice (vec_defs[1 - reduc_index]);
    5671                 :         38 :       vec_defs[0].release ();
    5672                 :         38 :       vec_defs[1].release ();
    5673                 :         38 :       group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
    5674                 :         38 :       scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
    5675                 :            :     }
    5676                 :            :   else
    5677                 :            :     {
    5678                 :        372 :       tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt_info);
    5679                 :        372 :       vec_oprnds0.create (1);
    5680                 :        372 :       vec_oprnds0.quick_push (loop_vec_def0);
    5681                 :        372 :       scalar_dest_def_info = stmt_info;
    5682                 :            :     }
    5683                 :            : 
    5684                 :        410 :   tree scalar_dest = gimple_assign_lhs (scalar_dest_def_info->stmt);
    5685                 :        410 :   tree scalar_type = TREE_TYPE (scalar_dest);
    5686                 :        410 :   tree reduc_var = gimple_phi_result (reduc_def_stmt);
    5687                 :            : 
    5688                 :        410 :   int vec_num = vec_oprnds0.length ();
    5689                 :        410 :   gcc_assert (vec_num == 1 || slp_node);
    5690                 :        410 :   tree vec_elem_type = TREE_TYPE (vectype_out);
    5691                 :        410 :   gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
    5692                 :            : 
    5693                 :        410 :   tree vector_identity = NULL_TREE;
    5694                 :        410 :   if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    5695                 :          0 :     vector_identity = build_zero_cst (vectype_out);
    5696                 :            : 
    5697                 :        410 :   tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
    5698                 :        410 :   int i;
    5699                 :        410 :   tree def0;
    5700                 :        907 :   FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
    5701                 :            :     {
    5702                 :        497 :       gimple *new_stmt;
    5703                 :        497 :       tree mask = NULL_TREE;
    5704                 :        497 :       if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
    5705                 :          0 :         mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
    5706                 :            : 
    5707                 :            :       /* Handle MINUS by adding the negative.  */
    5708                 :        497 :       if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
    5709                 :            :         {
    5710                 :          0 :           tree negated = make_ssa_name (vectype_out);
    5711                 :          0 :           new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
    5712                 :          0 :           gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
    5713                 :          0 :           def0 = negated;
    5714                 :            :         }
    5715                 :            : 
    5716                 :        497 :       if (mask && mask_reduc_fn == IFN_LAST)
    5717                 :          0 :         def0 = merge_with_identity (gsi, mask, vectype_out, def0,
    5718                 :            :                                     vector_identity);
    5719                 :            : 
    5720                 :            :       /* On the first iteration the input is simply the scalar phi
    5721                 :            :          result, and for subsequent iterations it is the output of
    5722                 :            :          the preceding operation.  */
    5723                 :        497 :       if (reduc_fn != IFN_LAST || (mask && mask_reduc_fn != IFN_LAST))
    5724                 :            :         {
    5725                 :          0 :           if (mask && mask_reduc_fn != IFN_LAST)
    5726                 :          0 :             new_stmt = gimple_build_call_internal (mask_reduc_fn, 3, reduc_var,
    5727                 :            :                                                    def0, mask);
    5728                 :            :           else
    5729                 :          0 :             new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var,
    5730                 :            :                                                    def0);
    5731                 :            :           /* For chained SLP reductions the output of the previous reduction
    5732                 :            :              operation serves as the input of the next. For the final statement
    5733                 :            :              the output cannot be a temporary - we reuse the original
    5734                 :            :              scalar destination of the last statement.  */
    5735                 :          0 :           if (i != vec_num - 1)
    5736                 :            :             {
    5737                 :          0 :               gimple_set_lhs (new_stmt, scalar_dest_var);
    5738                 :          0 :               reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
    5739                 :          0 :               gimple_set_lhs (new_stmt, reduc_var);
    5740                 :            :             }
    5741                 :            :         }
    5742                 :            :       else
    5743                 :            :         {
    5744                 :        497 :           reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
    5745                 :            :                                              reduc_var, def0);
    5746                 :        497 :           new_stmt = SSA_NAME_DEF_STMT (reduc_var);
    5747                 :            :           /* Remove the statement, so that we can use the same code paths
    5748                 :            :              as for statements that we've just created.  */
    5749                 :        497 :           gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
    5750                 :        497 :           gsi_remove (&tmp_gsi, true);
    5751                 :            :         }
    5752                 :            : 
    5753                 :        497 :       if (i == vec_num - 1)
    5754                 :            :         {
    5755                 :        410 :           gimple_set_lhs (new_stmt, scalar_dest);
    5756                 :        410 :           new_stmt_info = vect_finish_replace_stmt (scalar_dest_def_info,
    5757                 :            :                                                     new_stmt);
    5758                 :            :         }
    5759                 :            :       else
    5760                 :         87 :         new_stmt_info = vect_finish_stmt_generation (scalar_dest_def_info,
    5761                 :            :                                                      new_stmt, gsi);
    5762                 :            : 
    5763                 :        497 :       if (slp_node)
    5764                 :        622 :         SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
    5765                 :            :     }
    5766                 :            : 
    5767                 :        410 :   if (!slp_node)
    5768                 :        372 :     STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
    5769                 :            : 
    5770                 :        410 :   return true;
    5771                 :            : }
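                         :            : 
                         :            : /* Editor's note: illustrative sketch only, not part of the original
                         :            :    source.  It models the open-coded, fully-masked path of
                         :            :    vectorize_fold_left_reduction above: inactive lanes are first replaced
                         :            :    by the identity value (see merge_with_identity) so that folding them
                         :            :    into the accumulator is a no-op, and the lanes are folded strictly in
                         :            :    order, which keeps the FP result identical to the scalar loop.  The
                         :            :    names, the '+' operation and 0.0 as its identity are assumptions.  */
                         :            : 
                         :            : static double
                         :            : model_masked_fold_left (double reduc_var, const double *def0,
                         :            :                         const unsigned char *mask, int nlanes)
                         :            : {
                         :            :   for (int i = 0; i < nlanes; ++i)
                         :            :     {
                         :            :       double lane = mask[i] ? def0[i] : 0.0;  /* mask ? vec : identity  */
                         :            :       reduc_var = reduc_var + lane;           /* folded strictly in order  */
                         :            :     }
                         :            :   return reduc_var;
                         :            : }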
    5772                 :            : 
    5773                 :            : /* Function is_nonwrapping_integer_induction.
    5774                 :            : 
    5775                 :            :    Check if STMT_VINFO (which is part of loop LOOP) defines an induction
    5776                 :            :    that both increments and does not cause overflow.  */
    5777                 :            : 
    5778                 :            : static bool
    5779                 :         82 : is_nonwrapping_integer_induction (stmt_vec_info stmt_vinfo, class loop *loop)
    5780                 :            : {
    5781                 :         82 :   gphi *phi = as_a <gphi *> (stmt_vinfo->stmt);
    5782                 :         82 :   tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
    5783                 :         82 :   tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
    5784                 :         82 :   tree lhs_type = TREE_TYPE (gimple_phi_result (phi));
    5785                 :         82 :   widest_int ni, max_loop_value, lhs_max;
    5786                 :         82 :   wi::overflow_type overflow = wi::OVF_NONE;
    5787                 :            : 
    5788                 :            :   /* Make sure the loop is integer based.  */
    5789                 :         82 :   if (TREE_CODE (base) != INTEGER_CST
    5790                 :         72 :       || TREE_CODE (step) != INTEGER_CST)
    5791                 :            :     return false;
    5792                 :            : 
    5793                 :            :   /* Check that the max size of the loop will not wrap.  */
    5794                 :            : 
    5795                 :         72 :   if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
    5796                 :            :     return true;
    5797                 :            : 
    5798                 :          0 :   if (! max_stmt_executions (loop, &ni))
    5799                 :            :     return false;
    5800                 :            : 
    5801                 :          0 :   max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
    5802                 :          0 :                             &overflow);
    5803                 :          0 :   if (overflow)
    5804                 :            :     return false;
    5805                 :            : 
    5806                 :          0 :   max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
    5807                 :          0 :                             TYPE_SIGN (lhs_type), &overflow);
    5808                 :          0 :   if (overflow)
    5809                 :            :     return false;
    5810                 :            : 
    5811                 :          0 :   return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
    5812                 :          0 :           <= TYPE_PRECISION (lhs_type));
    5813                 :            : }
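                         :            : 
                         :            : /* Worked example (values are illustrative, not from the source): for an
                         :            :    unsigned char induction with base 250 and step 1 in a loop executing at
                         :            :    most 10 times, the largest value reached is 250 + 1 * 10 = 260, which
                         :            :    needs 9 bits; unsigned char has only 8 bits of precision, so the
                         :            :    induction may wrap and the function above returns false.  */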
    5814                 :            : 
    5815                 :            : /* Check if masking can be supported by inserting a conditional expression.
    5816                 :            :    CODE is the code for the operation.  COND_FN is the conditional internal
    5817                 :            :    function, if it exists.  VECTYPE_IN is the type of the vector input.  */
    5818                 :            : static bool
    5819                 :       1268 : use_mask_by_cond_expr_p (enum tree_code code, internal_fn cond_fn,
    5820                 :            :                          tree vectype_in)
    5821                 :            : {
    5822                 :       1268 :   if (cond_fn != IFN_LAST
    5823                 :       1268 :       && direct_internal_fn_supported_p (cond_fn, vectype_in,
    5824                 :            :                                          OPTIMIZE_FOR_SPEED))
    5825                 :            :     return false;
    5826                 :            : 
    5827                 :       1268 :   switch (code)
    5828                 :            :     {
    5829                 :            :     case DOT_PROD_EXPR:
    5830                 :            :     case SAD_EXPR:
    5831                 :            :       return true;
    5832                 :            : 
    5833                 :       1153 :     default:
    5834                 :       1153 :       return false;
    5835                 :            :     }
    5836                 :            : }
    5837                 :            : 
    5838                 :            : /* Insert a conditional expression to enable masked vectorization.  CODE is the
    5839                 :            :    code for the operation.  VOP is the array of operands.  MASK is the loop
    5840                 :            :    mask.  GSI is a statement iterator used to place the new conditional
    5841                 :            :    expression.  */
    5842                 :            : static void
    5843                 :          0 : build_vect_cond_expr (enum tree_code code, tree vop[3], tree mask,
    5844                 :            :                       gimple_stmt_iterator *gsi)
    5845                 :            : {
    5846                 :          0 :   switch (code)
    5847                 :            :     {
    5848                 :          0 :     case DOT_PROD_EXPR:
    5849                 :          0 :       {
    5850                 :          0 :         tree vectype = TREE_TYPE (vop[1]);
    5851                 :          0 :         tree zero = build_zero_cst (vectype);
    5852                 :          0 :         tree masked_op1 = make_temp_ssa_name (vectype, NULL, "masked_op1");
    5853                 :          0 :         gassign *select = gimple_build_assign (masked_op1, VEC_COND_EXPR,
    5854                 :            :                                                mask, vop[1], zero);
    5855                 :          0 :         gsi_insert_before (gsi, select, GSI_SAME_STMT);
    5856                 :          0 :         vop[1] = masked_op1;
    5857                 :          0 :         break;
    5858                 :            :       }
    5859                 :            : 
    5860                 :          0 :     case SAD_EXPR:
    5861                 :          0 :       {
    5862                 :          0 :         tree vectype = TREE_TYPE (vop[1]);
    5863                 :          0 :         tree masked_op1 = make_temp_ssa_name (vectype, NULL, "masked_op1");
    5864                 :          0 :         gassign *select = gimple_build_assign (masked_op1, VEC_COND_EXPR,
    5865                 :            :                                                mask, vop[1], vop[0]);
    5866                 :          0 :         gsi_insert_before (gsi, select, GSI_SAME_STMT);
    5867                 :          0 :         vop[1] = masked_op1;
    5868                 :          0 :         break;
    5869                 :            :       }
    5870                 :            : 
    5871                 :          0 :     default:
    5872                 :          0 :       gcc_unreachable ();
    5873                 :            :     }
    5874                 :          0 : }
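                         :            : 
                         :            : /* For example (illustrative gimple, not from the source): for a masked
                         :            :    dot-product the select built above yields
                         :            : 
                         :            :      masked_op1 = VEC_COND_EXPR <mask, op1, { 0, ... }>;
                         :            :      acc_1 = DOT_PROD_EXPR <op0, masked_op1, acc_0>;
                         :            : 
                         :            :    so inactive lanes contribute a zero product.  For SAD_EXPR the inactive
                         :            :    lanes of operand 1 are instead replaced by the matching lanes of
                         :            :    operand 0, making their absolute difference zero.  */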
    5875                 :            : 
    5876                 :            : /* Function vectorizable_reduction.
    5877                 :            : 
    5878                 :            :    Check if STMT_INFO performs a reduction operation that can be vectorized.
    5879                 :            :    This function performs the analysis only; the actual transformation is
    5880                 :            :    done later, during the transform phase (see vect_transform_reduction).
    5881                 :            :    Return true if STMT_INFO is vectorizable in this way.
    5882                 :            : 
    5883                 :            :    This function also handles reduction idioms (patterns) that have been
    5884                 :            :    recognized in advance during vect_pattern_recog.  In this case, STMT_INFO
    5885                 :            :    may be of this form:
    5886                 :            :      X = pattern_expr (arg0, arg1, ..., X)
    5887                 :            :    and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
    5888                 :            :    sequence that had been detected and replaced by the pattern-stmt
    5889                 :            :    (STMT_INFO).
    5890                 :            : 
    5891                 :            :    This function also handles reduction of condition expressions, for example:
    5892                 :            :      for (int i = 0; i < N; i++)
    5893                 :            :        if (a[i] < value)
    5894                 :            :          last = a[i];
    5895                 :            :    This is handled by vectorising the loop and creating an additional vector
    5896                 :            :    containing the loop indexes for which "a[i] < value" was true.  In the
    5897                 :            :    function epilogue this is reduced to a single max value and then used to
    5898                 :            :    index into the vector of results.
    5899                 :            : 
    5900                 :            :    In some cases of reduction patterns, the type of the reduction variable X is
    5901                 :            :    different than the type of the other arguments of STMT_INFO.
    5902                 :            :    In such cases, the vectype that is used when transforming STMT_INFO into
    5903                 :            :    a vector stmt is different than the vectype that is used to determine the
    5904                 :            :    vectorization factor, because it consists of a different number of elements
    5905                 :            :    than the actual number of elements that are being operated upon in parallel.
    5906                 :            : 
    5907                 :            :    For example, consider an accumulation of shorts into an int accumulator.
    5908                 :            :    On some targets it's possible to vectorize this pattern operating on 8
    5909                 :            :    shorts at a time (hence, the vectype for purposes of determining the
    5910                 :            :    vectorization factor should be V8HI); on the other hand, the vectype that
    5911                 :            :    is used to create the vector form is actually V4SI (the type of the result).
    5912                 :            : 
    5913                 :            :    Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
    5914                 :            :    indicates what is the actual level of parallelism (V8HI in the example), so
    5915                 :            :    that the right vectorization factor would be derived.  This vectype
    5916                 :            :    corresponds to the type of arguments to the reduction stmt, and should *NOT*
    5917                 :            :    be used to create the vectorized stmt.  The right vectype for the vectorized
    5918                 :            :    stmt is obtained from the type of the result X:
    5919                 :            :       get_vectype_for_scalar_type (vinfo, TREE_TYPE (X))
    5920                 :            : 
    5921                 :            :    This means that, contrary to "regular" reductions (or "regular" stmts in
    5922                 :            :    general), the following equation:
    5923                 :            :       STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (vinfo, TREE_TYPE (X))
    5924                 :            :    does *NOT* necessarily hold for reduction patterns.  */
    5925                 :            : 
    5926                 :            : bool
    5927                 :      32273 : vectorizable_reduction (stmt_vec_info stmt_info, slp_tree slp_node,
    5928                 :            :                         slp_instance slp_node_instance,
    5929                 :            :                         stmt_vector_for_cost *cost_vec)
    5930                 :            : {
    5931                 :      32273 :   tree scalar_dest;
    5932                 :      32273 :   tree vectype_in = NULL_TREE;
    5933                 :      32273 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    5934                 :      32273 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    5935                 :      32273 :   enum vect_def_type cond_reduc_dt = vect_unknown_def_type;
    5936                 :      32273 :   stmt_vec_info cond_stmt_vinfo = NULL;
    5937                 :      32273 :   tree scalar_type;
    5938                 :      32273 :   int i;
    5939                 :      32273 :   int ncopies;
    5940                 :      32273 :   bool single_defuse_cycle = false;
    5941                 :      32273 :   bool nested_cycle = false;
    5942                 :      32273 :   bool double_reduc = false;
    5943                 :      32273 :   int vec_num;
    5944                 :      32273 :   tree tem;
    5945                 :      32273 :   tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
    5946                 :      32273 :   tree cond_reduc_val = NULL_TREE;
    5947                 :            : 
    5948                 :            :   /* Make sure it was already recognized as a reduction computation.  */
    5949                 :      32273 :   if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_reduction_def
    5950                 :            :       && STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def
    5951                 :      32273 :       && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
    5952                 :            :     return false;
    5953                 :            : 
    5954                 :            :   /* The stmt we store reduction analysis meta on.  */
    5955                 :      19156 :   stmt_vec_info reduc_info = info_for_reduction (stmt_info);
    5956                 :      19156 :   reduc_info->is_reduc_info = true;
    5957                 :            : 
    5958                 :      19156 :   if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
    5959                 :            :     {
    5960                 :        402 :       if (is_a <gphi *> (stmt_info->stmt))
    5961                 :            :         /* Analysis for double-reduction is done on the outer
    5962                 :            :            loop PHI, nested cycles have no further restrictions.  */
    5963                 :        402 :         STMT_VINFO_TYPE (stmt_info) = cycle_phi_info_type;
    5964                 :            :       else
    5965                 :          0 :         STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
    5966                 :        402 :       return true;
    5967                 :            :     }
    5968                 :            : 
    5969                 :      18754 :   stmt_vec_info orig_stmt_of_analysis = stmt_info;
    5970                 :      18754 :   stmt_vec_info phi_info = stmt_info;
    5971                 :      18754 :   if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
    5972                 :      18754 :       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
    5973                 :            :     {
    5974                 :      18754 :       if (!is_a <gphi *> (stmt_info->stmt))
    5975                 :            :         {
    5976                 :       1083 :           STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
    5977                 :       1083 :           return true;
    5978                 :            :         }
    5979                 :      17671 :       if (slp_node)
    5980                 :            :         {
    5981                 :        643 :           slp_node_instance->reduc_phis = slp_node;
    5982                 :            :           /* ???  We're leaving slp_node to point to the PHIs; we only
    5983                 :            :              need it to get at the number of vector stmts, which wasn't
    5984                 :            :              yet initialized for the instance root.  */
    5985                 :            :         }
    5986                 :      17671 :       if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
    5987                 :      17564 :         stmt_info = vect_stmt_to_vectorize (STMT_VINFO_REDUC_DEF (stmt_info));
    5988                 :            :       else /* STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def */
    5989                 :            :         {
    5990                 :        107 :           use_operand_p use_p;
    5991                 :        107 :           gimple *use_stmt;
    5992                 :        107 :           bool res = single_imm_use (gimple_phi_result (stmt_info->stmt),
    5993                 :            :                                      &use_p, &use_stmt);
    5994                 :        107 :           gcc_assert (res);
    5995                 :        107 :           phi_info = loop_vinfo->lookup_stmt (use_stmt);
    5996                 :        107 :           stmt_info = vect_stmt_to_vectorize (STMT_VINFO_REDUC_DEF (phi_info));
    5997                 :            :         }
    5998                 :            :     }
    5999                 :            : 
    6000                 :            :   /* PHIs should not participate in patterns.  */
    6001                 :      17671 :   gcc_assert (!STMT_VINFO_RELATED_STMT (phi_info));
    6002                 :      17671 :   gphi *reduc_def_phi = as_a <gphi *> (phi_info->stmt);
    6003                 :            : 
    6004                 :            :   /* Verify following REDUC_IDX from the latch def leads us back to the PHI
    6005                 :            :      and compute the reduction chain length.  */
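                         :            :   /* For instance (illustrative): for the plain reduction
                         :            :        sum_1 = sum_0 + a[i];
                         :            :      the walk below visits a single statement, so reduc_chain_length is 1,
                         :            :      whereas an unrolled chain such as
                         :            :        sum_1 = sum_0 + a[i];  sum_2 = sum_1 + b[i];
                         :            :      gives reduc_chain_length == 2.  */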
    6006                 :      17671 :   tree reduc_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi,
    6007                 :            :                                           loop_latch_edge (loop));
    6008                 :      17671 :   unsigned reduc_chain_length = 0;
    6009                 :      17671 :   bool only_slp_reduc_chain = true;
    6010                 :      17671 :   stmt_info = NULL;
    6011                 :      42051 :   while (reduc_def != PHI_RESULT (reduc_def_phi))
    6012                 :            :     {
    6013                 :      24432 :       stmt_vec_info def = loop_vinfo->lookup_def (reduc_def);
    6014                 :      24432 :       stmt_vec_info vdef = vect_stmt_to_vectorize (def);
    6015                 :      24432 :       if (STMT_VINFO_REDUC_IDX (vdef) == -1)
    6016                 :            :         {
    6017                 :         24 :           if (dump_enabled_p ())
    6018                 :          6 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6019                 :            :                              "reduction chain broken by patterns.\n");
    6020                 :         24 :           return false;
    6021                 :            :         }
    6022                 :      24408 :       if (!REDUC_GROUP_FIRST_ELEMENT (vdef))
    6023                 :      23671 :         only_slp_reduc_chain = false;
    6024                 :            :       /* ???  For epilogue generation live members of the chain need
    6025                 :            :          to point back to the PHI via their original stmt for
    6026                 :            :          info_for_reduction to work.  */
    6027                 :      24408 :       if (STMT_VINFO_LIVE_P (vdef))
    6028                 :      17586 :         STMT_VINFO_REDUC_DEF (def) = phi_info;
    6029                 :      24408 :       gassign *assign = dyn_cast <gassign *> (vdef->stmt);
    6030                 :      24408 :       if (!assign)
    6031                 :            :         {
    6032                 :          0 :           if (dump_enabled_p ())
    6033                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6034                 :            :                              "reduction chain includes calls.\n");
    6035                 :          0 :           return false;
    6036                 :            :         }
    6037                 :      24408 :       if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
    6038                 :            :         {
    6039                 :       5954 :           if (!tree_nop_conversion_p (TREE_TYPE (gimple_assign_lhs (assign)),
    6040                 :       5954 :                                       TREE_TYPE (gimple_assign_rhs1 (assign))))
    6041                 :            :             {
    6042                 :         28 :               if (dump_enabled_p ())
    6043                 :         14 :                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6044                 :            :                                  "conversion in the reduction chain.\n");
    6045                 :         28 :               return false;
    6046                 :            :             }
    6047                 :            :         }
    6048                 :      18454 :       else if (!stmt_info)
    6049                 :            :         /* First non-conversion stmt.  */
    6050                 :      17619 :         stmt_info = vdef;
    6051                 :      24380 :       reduc_def = gimple_op (vdef->stmt, 1 + STMT_VINFO_REDUC_IDX (vdef));
    6052                 :      24380 :       reduc_chain_length++;
    6053                 :            :     }
    6054                 :            :   /* PHIs should not participate in patterns.  */
    6055                 :      17619 :   gcc_assert (!STMT_VINFO_RELATED_STMT (phi_info));
    6056                 :            : 
    6057                 :      17619 :   if (nested_in_vect_loop_p (loop, stmt_info))
    6058                 :            :     {
    6059                 :         89 :       loop = loop->inner;
    6060                 :         89 :       nested_cycle = true;
    6061                 :            :     }
    6062                 :            : 
    6063                 :            :   /* STMT_VINFO_REDUC_DEF doesn't point to the first but the last
    6064                 :            :      element.  */
    6065                 :      17619 :   if (slp_node && REDUC_GROUP_FIRST_ELEMENT (stmt_info))
    6066                 :            :     {
    6067                 :        143 :       gcc_assert (!REDUC_GROUP_NEXT_ELEMENT (stmt_info));
    6068                 :            :       stmt_info = REDUC_GROUP_FIRST_ELEMENT (stmt_info);
    6069                 :            :     }
    6070                 :      17619 :   if (REDUC_GROUP_FIRST_ELEMENT (stmt_info))
    6071                 :        143 :     gcc_assert (slp_node
    6072                 :            :                 && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
    6073                 :            : 
    6074                 :            :   /* 1. Is vectorizable reduction?  */
    6075                 :            :   /* Not supportable if the reduction variable is used in the loop, unless
    6076                 :            :      it's a reduction chain.  */
    6077                 :      17619 :   if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
    6078                 :      17619 :       && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
    6079                 :            :     return false;
    6080                 :            : 
    6081                 :            :   /* Reductions that are not used even in an enclosing outer-loop,
    6082                 :            :      are expected to be "live" (used out of the loop).  */
    6083                 :      17619 :   if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
    6084                 :          0 :       && !STMT_VINFO_LIVE_P (stmt_info))
    6085                 :            :     return false;
    6086                 :            : 
    6087                 :            :   /* 2. Has this been recognized as a reduction pattern?
    6088                 :            : 
    6089                 :            :      Check if STMT represents a pattern that has been recognized
    6090                 :            :      in earlier analysis stages.  For stmts that represent a pattern,
    6091                 :            :      the STMT_VINFO_RELATED_STMT field records the last stmt in
    6092                 :            :      the original sequence that constitutes the pattern.  */
    6093                 :            : 
    6094                 :      17619 :   stmt_vec_info orig_stmt_info = STMT_VINFO_RELATED_STMT (stmt_info);
    6095                 :      17619 :   if (orig_stmt_info)
    6096                 :            :     {
    6097                 :        621 :       gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
    6098                 :        621 :       gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
    6099                 :            :     }
    6100                 :            : 
    6101                 :            :   /* 3. Check the operands of the operation.  The first operands are defined
    6102                 :            :         inside the loop body. The last operand is the reduction variable,
    6103                 :            :         which is defined by the loop-header-phi.  */
    6104                 :            : 
    6105                 :      17619 :   tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
    6106                 :      17619 :   STMT_VINFO_REDUC_VECTYPE (reduc_info) = vectype_out;
    6107                 :      17619 :   gassign *stmt = as_a <gassign *> (stmt_info->stmt);
    6108                 :      17619 :   enum tree_code code = gimple_assign_rhs_code (stmt);
    6109                 :      17619 :   bool lane_reduc_code_p
    6110                 :      17619 :     = (code == DOT_PROD_EXPR || code == WIDEN_SUM_EXPR || code == SAD_EXPR);
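                         :            :   /* Example (illustrative): DOT_PROD_EXPR is lane-reducing because, on a
                         :            :      target with a V8HI -> V4SI dot product, each 32-bit accumulator lane
                         :            :      sums a pair of 16-bit products, so there are fewer reduction lanes
                         :            :      than input lanes.  */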
    6111                 :      17619 :   int op_type = TREE_CODE_LENGTH (code);
    6112                 :            : 
    6113                 :      17619 :   scalar_dest = gimple_assign_lhs (stmt);
    6114                 :      17619 :   scalar_type = TREE_TYPE (scalar_dest);
    6115                 :      17619 :   if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
    6116                 :       7320 :       && !SCALAR_FLOAT_TYPE_P (scalar_type))
    6117                 :            :     return false;
    6118                 :            : 
    6119                 :            :   /* Do not try to vectorize bit-precision reductions.  */
    6120                 :      17619 :   if (!type_has_mode_precision_p (scalar_type))
    6121                 :            :     return false;
    6122                 :            : 
    6123                 :            :   /* For lane-reducing ops we're reducing the number of reduction PHIs,
    6124                 :            :      so the only use of the PHI result may be in the lane-reducing operation.  */
    6125                 :      17471 :   if (lane_reduc_code_p
    6126                 :      17471 :       && reduc_chain_length != 1
    6127                 :         12 :       && !only_slp_reduc_chain)
    6128                 :            :     {
    6129                 :          1 :       if (dump_enabled_p ())
    6130                 :          0 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6131                 :            :                          "lane-reducing reduction with extra stmts.\n");
    6132                 :          1 :       return false;
    6133                 :            :     }
    6134                 :            : 
    6135                 :            :   /* All uses but the last are expected to be defined in the loop.
    6136                 :            :      The last use is the reduction variable.  In the case of a nested cycle
    6137                 :            :      this assumption does not hold: we use reduc_index to record the index
    6138                 :            :      of the reduction variable.  */
    6139                 :      53348 :   reduc_def = PHI_RESULT (reduc_def_phi);
    6140                 :      53348 :   for (i = 0; i < op_type; i++)
    6141                 :            :     {
    6142                 :      35885 :       tree op = gimple_op (stmt, i + 1);
    6143                 :            :       /* The condition of COND_EXPR is checked in vectorizable_condition().  */
    6144                 :      35885 :       if (i == 0 && code == COND_EXPR)
    6145                 :      18341 :         continue;
    6146                 :            : 
    6147                 :      35012 :       stmt_vec_info def_stmt_info;
    6148                 :      35012 :       enum vect_def_type dt;
    6149                 :      35012 :       if (!vect_is_simple_use (op, loop_vinfo, &dt, &tem,
    6150                 :            :                                &def_stmt_info))
    6151                 :            :         {
    6152                 :          0 :           if (dump_enabled_p ())
    6153                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6154                 :            :                              "use not simple.\n");
    6155                 :          7 :           return false;
    6156                 :            :         }
    6157                 :      35012 :       if (i == STMT_VINFO_REDUC_IDX (stmt_info))
    6158                 :      17468 :         continue;
    6159                 :            : 
    6160                 :            :       /* There should be only one cycle def in the stmt, the one
    6161                 :            :          leading to reduc_def.  */
    6162                 :      17544 :       if (VECTORIZABLE_CYCLE_DEF (dt))
    6163                 :            :         return false;
    6164                 :            : 
    6165                 :            :       /* To properly compute ncopies we are interested in the widest
    6166                 :            :          non-reduction input type in case we're looking at a widening
    6167                 :            :          accumulation that we later handle in vect_transform_reduction.  */
    6168                 :      17537 :       if (lane_reduc_code_p
    6169                 :        148 :           && tem
    6170                 :      17677 :           && (!vectype_in
    6171                 :         66 :               || (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
    6172                 :        132 :                   < GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem))))))
    6173                 :         74 :         vectype_in = tem;
    6174                 :            : 
    6175                 :      17537 :       if (code == COND_EXPR)
    6176                 :            :         {
    6177                 :            :           /* Record how the non-reduction-def value of COND_EXPR is defined.  */
    6178                 :        873 :           if (dt == vect_constant_def)
    6179                 :            :             {
    6180                 :        680 :               cond_reduc_dt = dt;
    6181                 :        680 :               cond_reduc_val = op;
    6182                 :            :             }
    6183                 :        873 :           if (dt == vect_induction_def
    6184                 :         82 :               && def_stmt_info
    6185                 :        955 :               && is_nonwrapping_integer_induction (def_stmt_info, loop))
    6186                 :            :             {
    6187                 :         72 :               cond_reduc_dt = dt;
    6188                 :         72 :               cond_stmt_vinfo = def_stmt_info;
    6189                 :            :             }
    6190                 :            :         }
    6191                 :            :     }
    6192                 :      17463 :   if (!vectype_in)
    6193                 :      17389 :     vectype_in = STMT_VINFO_VECTYPE (phi_info);
    6194                 :      17463 :   STMT_VINFO_REDUC_VECTYPE_IN (reduc_info) = vectype_in;
    6195                 :            : 
    6196                 :      17463 :   enum vect_reduction_type v_reduc_type = STMT_VINFO_REDUC_TYPE (phi_info);
    6197                 :      17463 :   STMT_VINFO_REDUC_TYPE (reduc_info) = v_reduc_type;
    6198                 :            :   /* If we have a condition reduction, see if we can simplify it further.  */
    6199                 :      17463 :   if (v_reduc_type == COND_REDUCTION)
    6200                 :            :     {
    6201                 :        873 :       if (slp_node)
    6202                 :            :         return false;
    6203                 :            : 
    6204                 :            :       /* If the condition itself uses the reduction value, fail.  */
    6205                 :        865 :       if (STMT_VINFO_REDUC_IDX (stmt_info) == 0)
    6206                 :            :         {
    6207                 :          0 :           if (dump_enabled_p ())
    6208                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6209                 :            :                              "condition depends on previous iteration\n");
    6210                 :          0 :           return false;
    6211                 :            :         }
    6212                 :            : 
    6213                 :        865 :       if (reduc_chain_length == 1
    6214                 :        865 :           && direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
    6215                 :            :                                              vectype_in, OPTIMIZE_FOR_SPEED))
    6216                 :            :         {
    6217                 :          0 :           if (dump_enabled_p ())
    6218                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6219                 :            :                              "optimizing condition reduction with"
    6220                 :            :                              " FOLD_EXTRACT_LAST.\n");
    6221                 :          0 :           STMT_VINFO_REDUC_TYPE (reduc_info) = EXTRACT_LAST_REDUCTION;
    6222                 :            :         }
    6223                 :        865 :       else if (cond_reduc_dt == vect_induction_def)
    6224                 :            :         {
    6225                 :         72 :           tree base
    6226                 :            :             = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
    6227                 :         72 :           tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
    6228                 :            : 
    6229                 :         72 :           gcc_assert (TREE_CODE (base) == INTEGER_CST
    6230                 :            :                       && TREE_CODE (step) == INTEGER_CST);
    6231                 :         72 :           cond_reduc_val = NULL_TREE;
    6232                 :         72 :           enum tree_code cond_reduc_op_code = ERROR_MARK;
    6233                 :         72 :           tree res = PHI_RESULT (STMT_VINFO_STMT (cond_stmt_vinfo));
    6234                 :         72 :           if (!types_compatible_p (TREE_TYPE (res), TREE_TYPE (base)))
    6235                 :            :             ;
    6236                 :            :           /* Find a suitable sentinel value: below base for MAX_EXPR and
    6237                 :            :              above base for MIN_EXPR; for now punt if base is already the
    6238                 :            :              type's minimum (for MAX_EXPR) or maximum (for MIN_EXPR).  */
    6239                 :         68 :           else if (tree_int_cst_sgn (step) == -1)
    6240                 :            :             {
    6241                 :         20 :               cond_reduc_op_code = MIN_EXPR;
    6242                 :         20 :               if (tree_int_cst_sgn (base) == -1)
    6243                 :          0 :                 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
    6244                 :         40 :               else if (tree_int_cst_lt (base,
    6245                 :         20 :                                         TYPE_MAX_VALUE (TREE_TYPE (base))))
    6246                 :         20 :                 cond_reduc_val
    6247                 :         20 :                   = int_const_binop (PLUS_EXPR, base, integer_one_node);
    6248                 :            :             }
    6249                 :            :           else
    6250                 :            :             {
    6251                 :         48 :               cond_reduc_op_code = MAX_EXPR;
    6252                 :         48 :               if (tree_int_cst_sgn (base) == 1)
    6253                 :         16 :                 cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
    6254                 :         32 :               else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
    6255                 :            :                                         base))
    6256                 :         32 :                 cond_reduc_val
    6257                 :         32 :                   = int_const_binop (MINUS_EXPR, base, integer_one_node);
    6258                 :            :             }
    6259                 :         68 :           if (cond_reduc_val)
    6260                 :            :             {
    6261                 :         68 :               if (dump_enabled_p ())
    6262                 :         61 :                 dump_printf_loc (MSG_NOTE, vect_location,
    6263                 :            :                                  "condition expression based on "
    6264                 :            :                                  "integer induction.\n");
    6265                 :         68 :               STMT_VINFO_REDUC_CODE (reduc_info) = cond_reduc_op_code;
    6266                 :         68 :               STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info)
    6267                 :         68 :                 = cond_reduc_val;
    6268                 :         68 :               STMT_VINFO_REDUC_TYPE (reduc_info) = INTEGER_INDUC_COND_REDUCTION;
    6269                 :            :             }
    6270                 :            :         }
    6271                 :        793 :       else if (cond_reduc_dt == vect_constant_def)
    6272                 :            :         {
    6273                 :        672 :           enum vect_def_type cond_initial_dt;
    6274                 :        672 :           tree cond_initial_val
    6275                 :        672 :             = PHI_ARG_DEF_FROM_EDGE (reduc_def_phi, loop_preheader_edge (loop));
    6276                 :            : 
    6277                 :        672 :           gcc_assert (cond_reduc_val != NULL_TREE);
    6278                 :        672 :           vect_is_simple_use (cond_initial_val, loop_vinfo, &cond_initial_dt);
    6279                 :        672 :           if (cond_initial_dt == vect_constant_def
    6280                 :       1304 :               && types_compatible_p (TREE_TYPE (cond_initial_val),
    6281                 :        632 :                                      TREE_TYPE (cond_reduc_val)))
    6282                 :            :             {
    6283                 :        632 :               tree e = fold_binary (LE_EXPR, boolean_type_node,
    6284                 :            :                                     cond_initial_val, cond_reduc_val);
    6285                 :        632 :               if (e && (integer_onep (e) || integer_zerop (e)))
    6286                 :            :                 {
    6287                 :        632 :                   if (dump_enabled_p ())
    6288                 :         12 :                     dump_printf_loc (MSG_NOTE, vect_location,
    6289                 :            :                                      "condition expression based on "
    6290                 :            :                                      "compile time constant.\n");
    6291                 :            :                   /* Record reduction code at analysis stage.  */
    6292                 :        632 :                   STMT_VINFO_REDUC_CODE (reduc_info)
    6293                 :        632 :                     = integer_onep (e) ? MAX_EXPR : MIN_EXPR;
    6294                 :        632 :                   STMT_VINFO_REDUC_TYPE (reduc_info) = CONST_COND_REDUCTION;
    6295                 :            :                 }
    6296                 :            :             }
    6297                 :            :         }
    6298                 :            :     }
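                         :            : 
                         :            :   /* Worked example for the integer-induction case above (illustrative):
                         :            :      in
                         :            :        for (i = 0; i < n; i++)
                         :            :          if (a[i] < val)
                         :            :            last = i;
                         :            :      the stored value is a signed induction with base 0 and step 1, so the
                         :            :      code above records MAX_EXPR as the reduction code and base - 1 = -1 as
                         :            :      the initial value; -1 is never taken by the induction, hence the final
                         :            :      maximum over the conditionally selected indexes is the last iteration
                         :            :      for which the condition held, or -1 if it never held.  */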
    6299                 :            : 
    6300                 :      17455 :   if (STMT_VINFO_LIVE_P (phi_info))
    6301                 :            :     return false;
    6302                 :            : 
    6303                 :      17455 :   if (slp_node)
    6304                 :            :     ncopies = 1;
    6305                 :            :   else
    6306                 :      16820 :     ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
    6307                 :            : 
    6308                 :      16820 :   gcc_assert (ncopies >= 1);
    6309                 :            : 
    6310                 :      17455 :   poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
    6311                 :            : 
    6312                 :      17455 :   if (nested_cycle)
    6313                 :            :     {
    6314                 :         89 :       gcc_assert (STMT_VINFO_DEF_TYPE (reduc_info)
    6315                 :            :                   == vect_double_reduction_def);
    6316                 :            :       double_reduc = true;
    6317                 :            :     }
    6318                 :            : 
    6319                 :            :   /* 4.2. Check support for the epilog operation.
    6320                 :            : 
    6321                 :            :           If STMT represents a reduction pattern, then the type of the
    6322                 :            :           reduction variable may be different than the type of the rest
    6323                 :            :           of the arguments.  For example, consider the case of accumulation
    6324                 :            :           of shorts into an int accumulator; The original code:
    6325                 :            :                         S1: int_a = (int) short_a;
    6326                 :            :           orig_stmt->   S2: int_acc = plus <int_a ,int_acc>;
    6327                 :            : 
    6328                 :            :           was replaced with:
    6329                 :            :                         STMT: int_acc = widen_sum <short_a, int_acc>
    6330                 :            : 
    6331                 :            :           This means that:
    6332                 :            :           1. The tree-code that is used to create the vector operation in the
    6333                 :            :              epilog code (that reduces the partial results) is not the
    6334                 :            :              tree-code of STMT, but is rather the tree-code of the original
    6335                 :            :              stmt from the pattern that STMT is replacing.  I.e, in the example
    6336                 :            :              above we want to use 'widen_sum' in the loop, but 'plus' in the
    6337                 :            :              epilog.
    6338                 :            :           2. The type (mode) we use to check available target support
    6339                 :            :              for the vector operation to be created in the *epilog*, is
    6340                 :            :              determined by the type of the reduction variable (in the example
    6341                 :            :              above we'd check this: optab_handler (plus_optab, vect_int_mode])).
    6342                 :            :              However the type (mode) we use to check available target support
    6343                 :            :              for the vector operation to be created *inside the loop*, is
    6344                 :            :              determined by the type of the other arguments to STMT (in the
    6345                 :            :              example we'd check this: optab_handler (widen_sum_optab,
    6346                 :            :              vect_short_mode)).
    6347                 :            : 
    6348                 :            :           This is contrary to "regular" reductions, in which the types of all
    6349                 :            :           the arguments are the same as the type of the reduction variable.
    6350                 :            :           For "regular" reductions we can therefore use the same vector type
    6351                 :            :           (and also the same tree-code) when generating the epilog code and
    6352                 :            :           when generating the code inside the loop.  */
    6353                 :            : 
    6354                 :      17455 :   enum tree_code orig_code = STMT_VINFO_REDUC_CODE (phi_info);
    6355                 :      17455 :   STMT_VINFO_REDUC_CODE (reduc_info) = orig_code;
    6356                 :            : 
    6357                 :      17455 :   vect_reduction_type reduction_type = STMT_VINFO_REDUC_TYPE (reduc_info);
    6358                 :      17455 :   if (reduction_type == TREE_CODE_REDUCTION)
    6359                 :            :     {
    6360                 :            :       /* Check whether it's ok to change the order of the computation.
    6361                 :            :          Generally, when vectorizing a reduction we change the order of the
    6362                 :            :          computation.  This may change the behavior of the program in some
    6363                 :            :          cases, so we need to check that this is ok.  One exception is when
    6364                 :            :          vectorizing an outer-loop: the inner-loop is executed sequentially,
    6365                 :            :          and therefore vectorizing reductions in the inner-loop during
    6366                 :            :          outer-loop vectorization is safe.  */
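                         :            :       /* For example (illustrative): a double-precision sum  s += a[i]  must
                         :            :          not be reassociated unless -fassociative-math (e.g. via -ffast-math)
                         :            :          is in effect, so it becomes a FOLD_LEFT_REDUCTION below and is
                         :            :          computed strictly in order, whereas a plain integer sum can be
                         :            :          reassociated freely and remains a TREE_CODE_REDUCTION.  */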
    6367                 :      16577 :       if (needs_fold_left_reduction_p (scalar_type, orig_code))
    6368                 :            :         {
    6369                 :            :           /* When vectorizing a reduction chain w/o SLP the reduction PHI
    6370                 :            :              is not directly used in the stmt.  */
    6371                 :       1004 :           if (!only_slp_reduc_chain
    6372                 :       1004 :               && reduc_chain_length != 1)
    6373                 :            :             {
    6374                 :         16 :               if (dump_enabled_p ())
    6375                 :          4 :                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6376                 :            :                                  "in-order reduction chain without SLP.\n");
    6377                 :         16 :               return false;
    6378                 :            :             }
    6379                 :        988 :           STMT_VINFO_REDUC_TYPE (reduc_info)
    6380                 :        988 :             = reduction_type = FOLD_LEFT_REDUCTION;
    6381                 :            :         }
    6382                 :      15573 :       else if (!commutative_tree_code (orig_code)
    6383                 :      15573 :                || !associative_tree_code (orig_code))
    6384                 :            :         {
    6385                 :         38 :           if (dump_enabled_p ())
    6386                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6387                 :            :                             "reduction: not commutative/associative");
    6388                 :         38 :           return false;
    6389                 :            :         }
    6390                 :            :     }
    6391                 :            : 
    6392                 :      17401 :   if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
    6393                 :       1919 :       && ncopies > 1)
    6394                 :            :     {
    6395                 :        132 :       if (dump_enabled_p ())
    6396                 :         38 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6397                 :            :                          "multiple types in double reduction or condition "
    6398                 :            :                          "reduction or fold-left reduction.\n");
    6399                 :        132 :       return false;
    6400                 :            :     }
    6401                 :            : 
    6402                 :      17269 :   internal_fn reduc_fn = IFN_LAST;
    6403                 :      17269 :   if (reduction_type == TREE_CODE_REDUCTION
    6404                 :      17269 :       || reduction_type == FOLD_LEFT_REDUCTION
    6405                 :            :       || reduction_type == INTEGER_INDUC_COND_REDUCTION
    6406                 :        811 :       || reduction_type == CONST_COND_REDUCTION)
    6407                 :            :     {
    6408                 :      16225 :       if (reduction_type == FOLD_LEFT_REDUCTION
    6409                 :      18022 :           ? fold_left_reduction_fn (orig_code, &reduc_fn)
    6410                 :      16225 :           : reduction_fn_for_scalar_code (orig_code, &reduc_fn))
    6411                 :            :         {
    6412                 :      17089 :           if (reduc_fn != IFN_LAST
    6413                 :      17089 :               && !direct_internal_fn_supported_p (reduc_fn, vectype_out,
    6414                 :            :                                                   OPTIMIZE_FOR_SPEED))
    6415                 :            :             {
    6416                 :       8669 :               if (dump_enabled_p ())
    6417                 :       1336 :                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6418                 :            :                                  "reduc op not supported by target.\n");
    6419                 :            : 
    6420                 :       8669 :               reduc_fn = IFN_LAST;
    6421                 :            :             }
    6422                 :            :         }
    6423                 :            :       else
    6424                 :            :         {
    6425                 :         69 :           if (!nested_cycle || double_reduc)
    6426                 :            :             {
    6427                 :         69 :               if (dump_enabled_p ())
    6428                 :          6 :                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6429                 :            :                                  "no reduc code for scalar code.\n");
    6430                 :            : 
    6431                 :         69 :               return false;
    6432                 :            :             }
    6433                 :            :         }
    6434                 :            :     }
    6435                 :        111 :   else if (reduction_type == COND_REDUCTION)
    6436                 :            :     {
    6437                 :        111 :       int scalar_precision
    6438                 :        111 :         = GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
    6439                 :        111 :       cr_index_scalar_type = make_unsigned_type (scalar_precision);
    6440                 :        111 :       cr_index_vector_type = build_vector_type (cr_index_scalar_type,
    6441                 :            :                                                 nunits_out);
    6442                 :            : 
    6443                 :        111 :       if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
    6444                 :            :                                           OPTIMIZE_FOR_SPEED))
    6445                 :          8 :         reduc_fn = IFN_REDUC_MAX;
    6446                 :            :     }
    6447                 :      17200 :   STMT_VINFO_REDUC_FN (reduc_info) = reduc_fn;
    6448                 :            : 
    6449                 :      17200 :   if (reduction_type != EXTRACT_LAST_REDUCTION
    6450                 :            :       && (!nested_cycle || double_reduc)
    6451                 :            :       && reduc_fn == IFN_LAST
    6452                 :            :       && !nunits_out.is_constant ())
    6453                 :            :     {
    6454                 :            :       if (dump_enabled_p ())
    6455                 :            :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6456                 :            :                          "missing target support for reduction on"
    6457                 :            :                          " variable-length vectors.\n");
    6458                 :            :       return false;
    6459                 :            :     }
    6460                 :            : 
    6461                 :            :   /* For SLP reductions, see if there is a neutral value we can use.  */
    6462                 :      17200 :   tree neutral_op = NULL_TREE;
    6463                 :      17200 :   if (slp_node)
    6464                 :        633 :     neutral_op = neutral_op_for_slp_reduction
    6465                 :        633 :       (slp_node_instance->reduc_phis, vectype_out, orig_code,
    6466                 :        633 :        REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);
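                         :            :   /* A neutral value is one that leaves the reduction unchanged, e.g.
                         :            :      (illustrative) 0 for PLUS_EXPR, 1 for MULT_EXPR and an all-ones
                         :            :      constant for BIT_AND_EXPR.  */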
    6467                 :            : 
    6468                 :      17200 :   if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
    6469                 :            :     {
    6470                 :            :       /* We can't support in-order reductions of code such as this:
    6471                 :            : 
    6472                 :            :            for (int i = 0; i < n1; ++i)
    6473                 :            :              for (int j = 0; j < n2; ++j)
    6474                 :            :                l += a[j];
    6475                 :            : 
    6476                 :            :          since GCC effectively transforms the loop when vectorizing:
    6477                 :            : 
    6478                 :            :            for (int i = 0; i < n1 / VF; ++i)
    6479                 :            :              for (int j = 0; j < n2; ++j)
    6480                 :            :                for (int k = 0; k < VF; ++k)
    6481                 :            :                  l += a[j];
    6482                 :            : 
    6483                 :            :          which is a reassociation of the original operation.  */
    6484                 :         29 :       if (dump_enabled_p ())
    6485                 :          9 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6486                 :            :                          "in-order double reduction not supported.\n");
    6487                 :            : 
    6488                 :         29 :       return false;
    6489                 :            :     }
    6490                 :            : 
    6491                 :      17171 :   if (reduction_type == FOLD_LEFT_REDUCTION
    6492                 :      17171 :       && slp_node
    6493                 :      17171 :       && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
    6494                 :            :     {
    6495                 :            :       /* We cannot use in-order reductions in this case because there is
    6496                 :            :          an implicit reassociation of the operations involved.  */
    6497                 :         52 :       if (dump_enabled_p ())
    6498                 :          3 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6499                 :            :                          "in-order unchained SLP reductions not supported.\n");
    6500                 :         52 :       return false;
    6501                 :            :     }
    6502                 :            : 
    6503                 :            :   /* For double reductions, and for SLP reductions with a neutral value,
    6504                 :            :      we construct a variable-length initial vector by loading a vector
    6505                 :            :      full of the neutral value and then shift-and-inserting the start
    6506                 :            :      values into the low-numbered elements.  */
    6507                 :      17119 :   if ((double_reduc || neutral_op)
    6508                 :            :       && !nunits_out.is_constant ()
    6509                 :            :       && !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
    6510                 :            :                                           vectype_out, OPTIMIZE_FOR_SPEED))
    6511                 :            :     {
    6512                 :            :       if (dump_enabled_p ())
    6513                 :            :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6514                 :            :                          "reduction on variable-length vectors requires"
    6515                 :            :                          " target support for a vector-shift-and-insert"
    6516                 :            :                          " operation.\n");
    6517                 :            :       return false;
    6518                 :            :     }
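                         :            : 
                         :            :   /* A minimal sketch of the construction described above, assuming
                         :            :      IFN_VEC_SHL_INSERT shifts the existing elements away from element 0
                         :            :      and writes the scalar into the vacated slot (start0/start1 are
                         :            :      hypothetical start values):
                         :            : 
                         :            :        init = { neutral, neutral, neutral, ... }
                         :            :        init = VEC_SHL_INSERT (init, start1)  -> { start1, neutral, ... }
                         :            :        init = VEC_SHL_INSERT (init, start0)  -> { start0, start1, neutral, ... }
                         :            : 
                         :            :      leaving the start values in the low-numbered elements and the
                         :            :      variable-length tail filled with the neutral value.  */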
    6519                 :            : 
    6520                 :            :   /* Check extra constraints for variable-length unchained SLP reductions.  */
    6521                 :      17119 :   if (STMT_SLP_TYPE (stmt_info)
    6522                 :        581 :       && !REDUC_GROUP_FIRST_ELEMENT (stmt_info)
    6523                 :      17119 :       && !nunits_out.is_constant ())
    6524                 :            :     {
    6525                 :            :       /* We checked above that we could build the initial vector when
    6526                 :            :          there's a neutral element value.  Check here for the case in
    6527                 :            :          which each SLP statement has its own initial value and in which
    6528                 :            :          that value needs to be repeated for every instance of the
    6529                 :            :          statement within the initial vector.  */
    6530                 :            :       unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
    6531                 :            :       if (!neutral_op
    6532                 :            :           && !can_duplicate_and_interleave_p (loop_vinfo, group_size,
    6533                 :            :                                               TREE_TYPE (vectype_out)))
    6534                 :            :         {
    6535                 :            :           if (dump_enabled_p ())
    6536                 :            :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6537                 :            :                              "unsupported form of SLP reduction for"
    6538                 :            :                              " variable-length vectors: cannot build"
    6539                 :            :                              " initial vector.\n");
    6540                 :            :           return false;
    6541                 :            :         }
    6542                 :            :       /* The epilogue code relies on the number of elements being a multiple
    6543                 :            :          of the group size.  The duplicate-and-interleave approach to setting
    6544                 :            :          up the initial vector does too.  */
    6545                 :            :       if (!multiple_p (nunits_out, group_size))
    6546                 :            :         {
    6547                 :            :           if (dump_enabled_p ())
    6548                 :            :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6549                 :            :                              "unsupported form of SLP reduction for"
    6550                 :            :                              " variable-length vectors: the vector size"
    6551                 :            :                              " is not a multiple of the number of results.\n");
    6552                 :            :           return false;
    6553                 :            :         }
    6554                 :            :     }
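                         :            : 
                         :            :   /* A small worked instance of the multiple_p check above: with a
                         :            :      variable-length vector of 4 + 4*x elements (x the runtime factor),
                         :            :      a group size of 2 or 4 divides the element count for every x,
                         :            :      whereas a group size of 3 does not and is rejected.  */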
    6555                 :            : 
    6556                 :      17119 :   if (reduction_type == COND_REDUCTION)
    6557                 :            :     {
    6558                 :        111 :       widest_int ni;
    6559                 :            : 
    6560                 :        111 :       if (! max_loop_iterations (loop, &ni))
    6561                 :            :         {
    6562                 :          0 :           if (dump_enabled_p ())
    6563                 :          0 :             dump_printf_loc (MSG_NOTE, vect_location,
    6564                 :            :                              "loop count not known, cannot create cond "
    6565                 :            :                              "reduction.\n");
    6566                 :         14 :           return false;
    6567                 :            :         }
    6568                 :            :       /* Convert backedges to iterations.  */
    6569                 :        111 :       ni += 1;
    6570                 :            : 
    6571                 :            :       /* The additional index will be the same type as the condition.  Check
    6572                 :            :          that the loop can fit into this less one (because we'll use up the
    6573                 :            :          zero slot for when there are no matches).  */
    6574                 :        111 :       tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
    6575                 :        111 :       if (wi::geu_p (ni, wi::to_widest (max_index)))
    6576                 :            :         {
    6577                 :         14 :           if (dump_enabled_p ())
    6578                 :         14 :             dump_printf_loc (MSG_NOTE, vect_location,
    6579                 :            :                              "loop size is greater than data size.\n");
    6580                 :         14 :           return false;
    6581                 :            :         }
    6582                 :            :     }
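                         :            : 
                         :            :   /* A worked instance of the check above, assuming cr_index_scalar_type
                         :            :      is an 8-bit unsigned type: max_index is then 255, so a loop of up
                         :            :      to 254 iterations is accepted, while 255 or more iterations fail
                         :            :      the wi::geu_p test because the zero slot must stay reserved for
                         :            :      the "no match" case.  */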
    6583                 :            : 
    6584                 :            :   /* In case the vectorization factor (VF) is bigger than the number
    6585                 :            :      of elements that we can fit in a vectype (nunits), we have to generate
    6586                 :            :      more than one vector stmt - i.e. we need to "unroll" the
    6587                 :            :      vector stmt by a factor VF/nunits.  For more details see documentation
    6588                 :            :      in vectorizable_operation.  */
    6589                 :            : 
    6590                 :            :   /* If the reduction is used in an outer loop we need to generate
    6591                 :            :      VF intermediate results, like so (e.g. for ncopies=2):
    6592                 :            :         r0 = phi (init, r0)
    6593                 :            :         r1 = phi (init, r1)
    6594                 :            :         r0 = x0 + r0;
    6595                 :            :         r1 = x1 + r1;
    6596                 :            :     (i.e. we generate VF results in 2 registers).
    6597                 :            :     In this case we have a separate def-use cycle for each copy, and therefore
    6598                 :            :     for each copy we get the vector def for the reduction variable from the
    6599                 :            :     respective phi node created for this copy.
    6600                 :            : 
    6601                 :            :     Otherwise (the reduction is unused in the loop nest), we can combine
    6602                 :            :     together intermediate results, like so (e.g. for ncopies=2):
    6603                 :            :         r = phi (init, r)
    6604                 :            :         r = x0 + r;
    6605                 :            :         r = x1 + r;
    6606                 :            :    (i.e. we generate VF/2 results in a single register).
    6607                 :            :    In this case for each copy we get the vector def for the reduction variable
    6608                 :            :    from the vectorized reduction operation generated in the previous iteration.
    6609                 :            : 
    6610                 :            :    This only works when we see both the reduction PHI and its only consumer
    6611                 :            :    in vectorizable_reduction and there are no intermediate stmts
    6612                 :            :    participating.  */
    6613                 :      17105 :   if (ncopies > 1
    6614                 :        562 :       && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
    6615                 :        562 :       && reduc_chain_length == 1)
    6616                 :        534 :     single_defuse_cycle = true;
    6617                 :            : 
    6618                 :      17105 :   if (single_defuse_cycle || lane_reduc_code_p)
    6619                 :            :     {
    6620                 :        599 :       gcc_assert (code != COND_EXPR);
    6621                 :            : 
    6622                 :            :       /* 4. Supportable by target?  */
    6623                 :        599 :       bool ok = true;
    6624                 :            : 
    6625                 :            :       /* 4.1. check support for the operation in the loop  */
    6626                 :        599 :       optab optab = optab_for_tree_code (code, vectype_in, optab_vector);
    6627                 :        599 :       if (!optab)
    6628                 :            :         {
    6629                 :          0 :           if (dump_enabled_p ())
    6630                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6631                 :            :                              "no optab.\n");
    6632                 :            :           ok = false;
    6633                 :            :         }
    6634                 :            : 
    6635                 :        599 :       machine_mode vec_mode = TYPE_MODE (vectype_in);
    6636                 :        599 :       if (ok && optab_handler (optab, vec_mode) == CODE_FOR_nothing)
    6637                 :            :         {
    6638                 :         49 :           if (dump_enabled_p ())
    6639                 :          1 :             dump_printf (MSG_NOTE, "op not supported by target.\n");
    6640                 :         98 :           if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
    6641                 :         49 :               || !vect_worthwhile_without_simd_p (loop_vinfo, code))
    6642                 :            :             ok = false;
    6643                 :            :           else
    6644                 :         33 :             if (dump_enabled_p ())
    6645                 :          1 :               dump_printf (MSG_NOTE, "proceeding using word mode.\n");
    6646                 :            :         }
    6647                 :            : 
    6648                 :            :       /* Worthwhile without SIMD support?  */
    6649                 :        583 :       if (ok
    6650                 :       1179 :           && !VECTOR_MODE_P (TYPE_MODE (vectype_in))
    6651                 :        583 :           && !vect_worthwhile_without_simd_p (loop_vinfo, code))
    6652                 :            :         {
    6653                 :          0 :           if (dump_enabled_p ())
    6654                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6655                 :            :                              "not worthwhile without SIMD support.\n");
    6656                 :            :           ok = false;
    6657                 :            :         }
    6658                 :            : 
    6659                 :            :       /* lane-reducing operations have to go through vect_transform_reduction.
    6660                 :            :          For the other cases try without the single cycle optimization.  */
    6661                 :        599 :       if (!ok)
    6662                 :            :         {
    6663                 :         16 :           if (lane_reduc_code_p)
    6664                 :            :             return false;
    6665                 :            :           else
    6666                 :            :             single_defuse_cycle = false;
    6667                 :            :         }
    6668                 :            :     }
    6669                 :      17105 :   STMT_VINFO_FORCE_SINGLE_CYCLE (reduc_info) = single_defuse_cycle;
    6670                 :            : 
    6671                 :            :   /* If the reduction stmt is one of the patterns that have lane
    6672                 :            :      reduction embedded we cannot handle the case of ! single_defuse_cycle.  */
    6673                 :      17105 :   if ((ncopies > 1 && ! single_defuse_cycle)
    6674                 :         44 :       && lane_reduc_code_p)
    6675                 :            :     {
    6676                 :          0 :       if (dump_enabled_p ())
    6677                 :          0 :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6678                 :            :                          "multi def-use cycle not possible for lane-reducing "
    6679                 :            :                          "reduction operation\n");
    6680                 :          0 :       return false;
    6681                 :            :     }
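                         :            : 
                         :            :   /* For reference: lane_reduc_code_p covers DOT_PROD_EXPR,
                         :            :      WIDEN_SUM_EXPR and SAD_EXPR (the same codes singled out further
                         :            :      down); each of these combines several narrow input lanes into a
                         :            :      single wider lane of the accumulator per output element.  */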
    6682                 :            : 
    6683                 :      17105 :   if (slp_node)
    6684                 :        581 :     vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
    6685                 :            :   else
    6686                 :            :     vec_num = 1;
    6687                 :            : 
    6688                 :      17105 :   vect_model_reduction_cost (stmt_info, reduc_fn, reduction_type, ncopies,
    6689                 :            :                              cost_vec);
    6690                 :      17105 :   if (dump_enabled_p ()
    6691                 :      17105 :       && reduction_type == FOLD_LEFT_REDUCTION)
    6692                 :        108 :     dump_printf_loc (MSG_NOTE, vect_location,
    6693                 :            :                      "using an in-order (fold-left) reduction.\n");
    6694                 :      17105 :   STMT_VINFO_TYPE (orig_stmt_of_analysis) = cycle_phi_info_type;
    6695                 :            :   /* All but single def-use cycle optimized, lane-reducing and fold-left
    6696                 :            :      reductions go through their own vectorizable_* routines.  */
    6697                 :      17105 :   if (!single_defuse_cycle
    6698                 :      17105 :       && code != DOT_PROD_EXPR
    6699                 :            :       && code != WIDEN_SUM_EXPR
    6700                 :      16565 :       && code != SAD_EXPR
    6701                 :      16522 :       && reduction_type != FOLD_LEFT_REDUCTION)
    6702                 :            :     {
    6703                 :      15739 :       stmt_vec_info tem
    6704                 :      15739 :         = vect_stmt_to_vectorize (STMT_VINFO_REDUC_DEF (phi_info));
    6705                 :      15739 :       if (slp_node && REDUC_GROUP_FIRST_ELEMENT (tem))
    6706                 :            :         {
    6707                 :         94 :           gcc_assert (!REDUC_GROUP_NEXT_ELEMENT (tem));
    6708                 :            :           tem = REDUC_GROUP_FIRST_ELEMENT (tem);
    6709                 :            :         }
    6710                 :      15739 :       STMT_VINFO_DEF_TYPE (vect_orig_stmt (tem)) = vect_internal_def;
    6711                 :      15739 :       STMT_VINFO_DEF_TYPE (tem) = vect_internal_def;
    6712                 :            :     }
    6713                 :       1366 :   else if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
    6714                 :            :     {
    6715                 :       1299 :       vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
    6716                 :       1299 :       internal_fn cond_fn = get_conditional_internal_fn (code);
    6717                 :            : 
    6718                 :       1299 :       if (reduction_type != FOLD_LEFT_REDUCTION
    6719                 :        545 :           && !use_mask_by_cond_expr_p (code, cond_fn, vectype_in)
    6720                 :       1783 :           && (cond_fn == IFN_LAST
    6721                 :        484 :               || !direct_internal_fn_supported_p (cond_fn, vectype_in,
    6722                 :            :                                                   OPTIMIZE_FOR_SPEED)))
    6723                 :            :         {
    6724                 :        484 :           if (dump_enabled_p ())
    6725                 :        341 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6726                 :            :                              "can't use a fully-masked loop because no"
    6727                 :            :                              " conditional operation is available.\n");
    6728                 :        484 :           LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
    6729                 :            :         }
    6730                 :        815 :       else if (reduction_type == FOLD_LEFT_REDUCTION
    6731                 :        754 :                && reduc_fn == IFN_LAST
    6732                 :       1569 :                && !expand_vec_cond_expr_p (vectype_in,
    6733                 :            :                                            truth_type_for (vectype_in),
    6734                 :            :                                            SSA_NAME))
    6735                 :            :         {
    6736                 :          1 :           if (dump_enabled_p ())
    6737                 :          0 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    6738                 :            :                              "can't use a fully-masked loop because no"
    6739                 :            :                              " conditional operation is available.\n");
    6740                 :          1 :           LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
    6741                 :            :         }
    6742                 :            :       else
    6743                 :        814 :         vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
    6744                 :            :                                vectype_in, NULL);
    6745                 :            :     }
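                         :            : 
                         :            :   /* When the loop does remain fully masked, the reduction statement is
                         :            :      later emitted in conditional form; a sketch for a PLUS_EXPR
                         :            :      reduction, assuming the IFN_COND_ADD operand order visible in
                         :            :      vect_transform_reduction below (names are made up):
                         :            : 
                         :            :        vect_sum_new = .COND_ADD (loop_mask, vect_sum_old, vect_x, vect_sum_old);
                         :            : 
                         :            :      so inactive lanes simply keep the old accumulator value.  */
                         :            : 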
    6746                 :            :   return true;
    6747                 :            : }
    6748                 :            : 
    6749                 :            : /* Transform the definition stmt STMT_INFO of a reduction PHI backedge
    6750                 :            :    value.  */
    6751                 :            : 
    6752                 :            : bool
    6753                 :        723 : vect_transform_reduction (stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
    6754                 :            :                           stmt_vec_info *vec_stmt, slp_tree slp_node)
    6755                 :            : {
    6756                 :        723 :   tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
    6757                 :        723 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    6758                 :        723 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    6759                 :        723 :   int i;
    6760                 :        723 :   int ncopies;
    6761                 :        723 :   int j;
    6762                 :        723 :   int vec_num;
    6763                 :            : 
    6764                 :        723 :   stmt_vec_info reduc_info = info_for_reduction (stmt_info);
    6765                 :        723 :   gcc_assert (reduc_info->is_reduc_info);
    6766                 :            : 
    6767                 :        723 :   if (nested_in_vect_loop_p (loop, stmt_info))
    6768                 :            :     {
    6769                 :          0 :       loop = loop->inner;
    6770                 :          0 :       gcc_assert (STMT_VINFO_DEF_TYPE (reduc_info) == vect_double_reduction_def);
    6771                 :            :     }
    6772                 :            : 
    6773                 :        723 :   gassign *stmt = as_a <gassign *> (stmt_info->stmt);
    6774                 :        723 :   enum tree_code code = gimple_assign_rhs_code (stmt);
    6775                 :        723 :   int op_type = TREE_CODE_LENGTH (code);
    6776                 :            : 
    6777                 :            :   /* Flatten RHS.  */
    6778                 :        723 :   tree ops[3];
    6779                 :        723 :   switch (get_gimple_rhs_class (code))
    6780                 :            :     {
    6781                 :         54 :     case GIMPLE_TERNARY_RHS:
    6782                 :        108 :       ops[2] = gimple_assign_rhs3 (stmt);
    6783                 :            :       /* Fall thru.  */
    6784                 :        723 :     case GIMPLE_BINARY_RHS:
    6785                 :        723 :       ops[0] = gimple_assign_rhs1 (stmt);
    6786                 :        723 :       ops[1] = gimple_assign_rhs2 (stmt);
    6787                 :        723 :       break;
    6788                 :          0 :     default:
    6789                 :          0 :       gcc_unreachable ();
    6790                 :            :     }
    6791                 :            : 
    6792                 :            :   /* All uses but the last are expected to be defined in the loop.
    6793                 :            :      The last use is the reduction variable.  In case of nested cycle this
    6794                 :            :      assumption is not true: we use reduc_index to record the index of the
    6795                 :            :      reduction variable.  */
    6796                 :        723 :   stmt_vec_info phi_info = STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info));
    6797                 :        723 :   gphi *reduc_def_phi = as_a <gphi *> (phi_info->stmt);
    6798                 :        723 :   int reduc_index = STMT_VINFO_REDUC_IDX (stmt_info);
    6799                 :        723 :   tree vectype_in = STMT_VINFO_REDUC_VECTYPE_IN (reduc_info);
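                         :            :   /* Illustration with hypothetical SSA names: for a reduction statement
                         :            :      sum_1 = _5 + sum_0, the value carried around the cycle is operand 1,
                         :            :      so reduc_index is 1; with the operands swapped it would be 0.  */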
    6800                 :            : 
    6801                 :        723 :   if (slp_node)
    6802                 :            :     {
    6803                 :         49 :       ncopies = 1;
    6804                 :         49 :       vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
    6805                 :            :     }
    6806                 :            :   else
    6807                 :            :     {
    6808                 :        674 :       ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
    6809                 :        674 :       vec_num = 1;
    6810                 :            :     }
    6811                 :            : 
    6812                 :        723 :   internal_fn cond_fn = get_conditional_internal_fn (code);
    6813                 :        723 :   vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
    6814                 :        723 :   bool mask_by_cond_expr = use_mask_by_cond_expr_p (code, cond_fn, vectype_in);
    6815                 :            : 
    6816                 :            :   /* Transform.  */
    6817                 :        723 :   stmt_vec_info new_stmt_info = NULL;
    6818                 :        723 :   stmt_vec_info prev_stmt_info;
    6819                 :        723 :   tree new_temp = NULL_TREE;
    6820                 :        723 :   auto_vec<tree> vec_oprnds0;
    6821                 :        723 :   auto_vec<tree> vec_oprnds1;
    6822                 :        723 :   auto_vec<tree> vec_oprnds2;
    6823                 :        723 :   tree def0;
    6824                 :            : 
    6825                 :        723 :   if (dump_enabled_p ())
    6826                 :        353 :     dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
    6827                 :            : 
    6828                 :            :   /* FORNOW: Multiple types are not supported for condition.  */
    6829                 :        723 :   if (code == COND_EXPR)
    6830                 :          0 :     gcc_assert (ncopies == 1);
    6831                 :            : 
    6832                 :        723 :   bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
    6833                 :            : 
    6834                 :        723 :   vect_reduction_type reduction_type = STMT_VINFO_REDUC_TYPE (reduc_info);
    6835                 :        723 :   if (reduction_type == FOLD_LEFT_REDUCTION)
    6836                 :            :     {
    6837                 :        410 :       internal_fn reduc_fn = STMT_VINFO_REDUC_FN (reduc_info);
    6838                 :        410 :       return vectorize_fold_left_reduction
    6839                 :        410 :           (stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
    6840                 :        410 :            reduc_fn, ops, vectype_in, reduc_index, masks);
    6841                 :            :     }
    6842                 :            : 
    6843                 :        313 :   bool single_defuse_cycle = STMT_VINFO_FORCE_SINGLE_CYCLE (reduc_info);
    6844                 :        313 :   gcc_assert (single_defuse_cycle
    6845                 :            :               || code == DOT_PROD_EXPR
    6846                 :            :               || code == WIDEN_SUM_EXPR
    6847                 :            :               || code == SAD_EXPR);
    6848                 :            : 
    6849                 :            :   /* Create the destination vector  */
    6850                 :        313 :   tree scalar_dest = gimple_assign_lhs (stmt);
    6851                 :        313 :   tree vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
    6852                 :            : 
    6853                 :        313 :   prev_stmt_info = NULL;
    6854                 :        313 :   if (!slp_node)
    6855                 :            :     {
    6856                 :        302 :       vec_oprnds0.create (1);
    6857                 :        302 :       vec_oprnds1.create (1);
    6858                 :        302 :       if (op_type == ternary_op)
    6859                 :         43 :         vec_oprnds2.create (1);
    6860                 :            :     }
    6861                 :            : 
    6862                 :       1095 :   for (j = 0; j < ncopies; j++)
    6863                 :            :     {
    6864                 :            :       /* Handle uses.  */
    6865                 :        782 :       if (j == 0)
    6866                 :            :         {
    6867                 :        313 :           if (slp_node)
    6868                 :            :             {
    6869                 :            :               /* Get vec defs for all the operands except the reduction index,
    6870                 :            :                  ensuring the ordering of the ops in the vector is kept.  */
    6871                 :         22 :               auto_vec<vec<tree>, 3> vec_defs;
    6872                 :         11 :               vect_get_slp_defs (slp_node, &vec_defs);
    6873                 :         11 :               vec_oprnds0.safe_splice (vec_defs[0]);
    6874                 :         11 :               vec_defs[0].release ();
    6875                 :         11 :               vec_oprnds1.safe_splice (vec_defs[1]);
    6876                 :         11 :               vec_defs[1].release ();
    6877                 :         11 :               if (op_type == ternary_op)
    6878                 :            :                 {
    6879                 :         11 :                   vec_oprnds2.safe_splice (vec_defs[2]);
    6880                 :         22 :                   vec_defs[2].release ();
    6881                 :            :                 }
    6882                 :            :             }
    6883                 :            :           else
    6884                 :            :             {
    6885                 :        302 :               vec_oprnds0.quick_push
    6886                 :        302 :                 (vect_get_vec_def_for_operand (ops[0], stmt_info));
    6887                 :        302 :               vec_oprnds1.quick_push
    6888                 :        302 :                 (vect_get_vec_def_for_operand (ops[1], stmt_info));
    6889                 :        302 :               if (op_type == ternary_op)
    6890                 :         43 :                 vec_oprnds2.quick_push 
    6891                 :         43 :                   (vect_get_vec_def_for_operand (ops[2], stmt_info));
    6892                 :            :             }
    6893                 :            :         }
    6894                 :            :       else
    6895                 :            :         {
    6896                 :        469 :           if (!slp_node)
    6897                 :            :             {
    6898                 :        469 :               gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
    6899                 :            : 
    6900                 :        469 :               if (single_defuse_cycle && reduc_index == 0)
    6901                 :         35 :                 vec_oprnds0[0] = gimple_get_lhs (new_stmt_info->stmt);
    6902                 :            :               else
    6903                 :        868 :                 vec_oprnds0[0]
    6904                 :        434 :                   = vect_get_vec_def_for_stmt_copy (loop_vinfo,
    6905                 :        434 :                                                     vec_oprnds0[0]);
    6906                 :        469 :               if (single_defuse_cycle && reduc_index == 1)
    6907                 :        428 :                 vec_oprnds1[0] = gimple_get_lhs (new_stmt_info->stmt);
    6908                 :            :               else
    6909                 :         82 :                 vec_oprnds1[0]
    6910                 :         41 :                   = vect_get_vec_def_for_stmt_copy (loop_vinfo,
    6911                 :         41 :                                                     vec_oprnds1[0]);
    6912                 :        469 :               if (op_type == ternary_op)
    6913                 :            :                 {
    6914                 :          6 :                   if (single_defuse_cycle && reduc_index == 2)
    6915                 :          6 :                     vec_oprnds2[0] = gimple_get_lhs (new_stmt_info->stmt);
    6916                 :            :                   else
    6917                 :          0 :                     vec_oprnds2[0] 
    6918                 :          0 :                       = vect_get_vec_def_for_stmt_copy (loop_vinfo,
    6919                 :          0 :                                                         vec_oprnds2[0]);
    6920                 :            :                 }
    6921                 :            :             }
    6922                 :            :         }
    6923                 :            : 
    6924                 :       1564 :       FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
    6925                 :            :         {
    6926                 :        782 :           tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
    6927                 :        782 :           if (masked_loop_p && !mask_by_cond_expr)
    6928                 :            :             {
    6929                 :            :               /* Make sure that the reduction accumulator is vop[0].  */
    6930                 :          0 :               if (reduc_index == 1)
    6931                 :            :                 {
    6932                 :          0 :                   gcc_assert (commutative_tree_code (code));
    6933                 :          0 :                   std::swap (vop[0], vop[1]);
    6934                 :            :                 }
    6935                 :          0 :               tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
    6936                 :          0 :                                               vectype_in, i * ncopies + j);
    6937                 :          0 :               gcall *call = gimple_build_call_internal (cond_fn, 4, mask,
    6938                 :            :                                                         vop[0], vop[1],
    6939                 :            :                                                         vop[0]);
    6940                 :          0 :               new_temp = make_ssa_name (vec_dest, call);
    6941                 :          0 :               gimple_call_set_lhs (call, new_temp);
    6942                 :          0 :               gimple_call_set_nothrow (call, true);
    6943                 :          0 :               new_stmt_info
    6944                 :          0 :                 = vect_finish_stmt_generation (stmt_info, call, gsi);
    6945                 :            :             }
    6946                 :            :           else
    6947                 :            :             {
    6948                 :        782 :               if (op_type == ternary_op)
    6949                 :         60 :                 vop[2] = vec_oprnds2[i];
    6950                 :            : 
    6951                 :        782 :               if (masked_loop_p && mask_by_cond_expr)
    6952                 :            :                 {
    6953                 :          0 :                   tree mask = vect_get_loop_mask (gsi, masks,
    6954                 :          0 :                                                   vec_num * ncopies,
    6955                 :          0 :                                                   vectype_in, i * ncopies + j);
    6956                 :          0 :                   build_vect_cond_expr (code, vop, mask, gsi);
    6957                 :            :                 }
    6958                 :            : 
    6959                 :        782 :               gassign *new_stmt = gimple_build_assign (vec_dest, code,
    6960                 :            :                                                        vop[0], vop[1], vop[2]);
    6961                 :        782 :               new_temp = make_ssa_name (vec_dest, new_stmt);
    6962                 :        782 :               gimple_assign_set_lhs (new_stmt, new_temp);
    6963                 :        782 :               new_stmt_info
    6964                 :        782 :                 = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
    6965                 :            :             }
    6966                 :            : 
    6967                 :        782 :           if (slp_node)
    6968                 :        793 :             SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
    6969                 :            :         }
    6970                 :            : 
    6971                 :        782 :       if (slp_node || single_defuse_cycle)
    6972                 :        745 :         continue;
    6973                 :            : 
    6974                 :         37 :       if (j == 0)
    6975                 :         37 :         STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
    6976                 :            :       else
    6977                 :          0 :         STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt_info;
    6978                 :            : 
    6979                 :            :       prev_stmt_info = new_stmt_info;
    6980                 :            :     }
    6981                 :            : 
    6982                 :        313 :   if (single_defuse_cycle && !slp_node)
    6983                 :        265 :     STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
    6984                 :            : 
    6985                 :            :   return true;
    6986                 :            : }
    6987                 :            : 
    6988                 :            : /* Transform phase of a cycle PHI.  */
    6989                 :            : 
    6990                 :            : bool
    6991                 :      11887 : vect_transform_cycle_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt,
    6992                 :            :                           slp_tree slp_node, slp_instance slp_node_instance)
    6993                 :            : {
    6994                 :      11887 :   tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
    6995                 :      11887 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    6996                 :      11887 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    6997                 :      11887 :   int i;
    6998                 :      11887 :   int ncopies;
    6999                 :      11887 :   stmt_vec_info prev_phi_info;
    7000                 :      11887 :   int j;
    7001                 :      11887 :   bool nested_cycle = false;
    7002                 :      11887 :   int vec_num;
    7003                 :            : 
    7004                 :      11887 :   if (nested_in_vect_loop_p (loop, stmt_info))
    7005                 :            :     {
    7006                 :        310 :       loop = loop->inner;
    7007                 :        310 :       nested_cycle = true;
    7008                 :            :     }
    7009                 :            : 
    7010                 :      11887 :   stmt_vec_info reduc_stmt_info = STMT_VINFO_REDUC_DEF (stmt_info);
    7011                 :      11887 :   reduc_stmt_info = vect_stmt_to_vectorize (reduc_stmt_info);
    7012                 :      11887 :   stmt_vec_info reduc_info = info_for_reduction (stmt_info);
    7013                 :      11887 :   gcc_assert (reduc_info->is_reduc_info);
    7014                 :            : 
    7015                 :      11887 :   if (STMT_VINFO_REDUC_TYPE (reduc_info) == EXTRACT_LAST_REDUCTION
    7016                 :      11887 :       || STMT_VINFO_REDUC_TYPE (reduc_info) == FOLD_LEFT_REDUCTION)
    7017                 :            :     /* Leave the scalar phi in place.  */
    7018                 :            :     return true;
    7019                 :            : 
    7020                 :      11477 :   tree vectype_in = STMT_VINFO_REDUC_VECTYPE_IN (reduc_info);
    7021                 :            :   /* For a nested cycle we do not fill the above.  */
    7022                 :      11477 :   if (!vectype_in)
    7023                 :        271 :     vectype_in = STMT_VINFO_VECTYPE (stmt_info);
    7024                 :      11477 :   gcc_assert (vectype_in);
    7025                 :            : 
    7026                 :      11477 :   if (slp_node)
    7027                 :            :     {
    7028                 :            :       /* The size vect_schedule_slp_instance computes is off for us.  */
    7029                 :        854 :       vec_num = vect_get_num_vectors
    7030                 :        427 :           (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
    7031                 :        427 :            * SLP_TREE_SCALAR_STMTS (slp_node).length (), vectype_in);
    7032                 :        427 :       ncopies = 1;
    7033                 :            :     }
    7034                 :            :   else
    7035                 :            :     {
    7036                 :      11050 :       vec_num = 1;
    7037                 :      11050 :       ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
    7038                 :            :     }
    7039                 :            : 
    7040                 :            :   /* Check whether we should use a single PHI node and accumulate
    7041                 :            :      vectors to one before the backedge.  */
    7042                 :      11477 :   if (STMT_VINFO_FORCE_SINGLE_CYCLE (reduc_info))
    7043                 :        265 :     ncopies = 1;
    7044                 :            : 
    7045                 :            :   /* Create the destination vector  */
    7046                 :      11477 :   gphi *phi = as_a <gphi *> (stmt_info->stmt);
    7047                 :      11477 :   tree vec_dest = vect_create_destination_var (gimple_phi_result (phi),
    7048                 :            :                                                vectype_out);
    7049                 :            : 
    7050                 :            :   /* Get the loop-entry arguments.  */
    7051                 :      11477 :   tree vec_initial_def;
    7052                 :      11477 :   auto_vec<tree> vec_initial_defs;
    7053                 :      11477 :   if (slp_node)
    7054                 :            :     {
    7055                 :        427 :       vec_initial_defs.reserve (vec_num);
    7056                 :        427 :       gcc_assert (slp_node == slp_node_instance->reduc_phis);
    7057                 :        427 :       stmt_vec_info first = REDUC_GROUP_FIRST_ELEMENT (reduc_stmt_info);
    7058                 :        427 :       tree neutral_op
    7059                 :        427 :         = neutral_op_for_slp_reduction (slp_node, vectype_out,
    7060                 :            :                                         STMT_VINFO_REDUC_CODE (reduc_info),
    7061                 :            :                                         first != NULL);
    7062                 :        427 :       get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
    7063                 :            :                                       &vec_initial_defs, vec_num,
    7064                 :            :                                       first != NULL, neutral_op);
    7065                 :            :     }
    7066                 :            :   else
    7067                 :            :     {
    7068                 :            :       /* Get at the scalar def before the loop, that defines the initial
    7069                 :            :          value of the reduction variable.  */
    7070                 :      11050 :       tree initial_def = PHI_ARG_DEF_FROM_EDGE (phi,
    7071                 :            :                                                 loop_preheader_edge (loop));
    7072                 :            :       /* Optimize: if initial_def is for REDUC_MAX smaller than the base
    7073                 :            :          and we can't use zero for induc_val, use initial_def.  Similarly
    7074                 :            :          for REDUC_MIN and initial_def larger than the base.  */
    7075                 :      11050 :       if (STMT_VINFO_REDUC_TYPE (reduc_info) == INTEGER_INDUC_COND_REDUCTION)
    7076                 :            :         {
    7077                 :         63 :           tree induc_val = STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info);
    7078                 :         63 :           if (TREE_CODE (initial_def) == INTEGER_CST
    7079                 :         42 :               && !integer_zerop (induc_val)
    7080                 :        105 :               && ((STMT_VINFO_REDUC_CODE (reduc_info) == MAX_EXPR
    7081                 :         27 :                    && tree_int_cst_lt (initial_def, induc_val))
    7082                 :         40 :                   || (STMT_VINFO_REDUC_CODE (reduc_info) == MIN_EXPR
    7083                 :         15 :                       && tree_int_cst_lt (induc_val, initial_def))))
    7084                 :            :             {
    7085                 :          2 :               induc_val = initial_def;
    7086                 :            :               /* Communicate we used the initial_def to epilogue
    7087                 :            :                  generation.  */
    7088                 :          2 :               STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL (reduc_info) = NULL_TREE;
    7089                 :            :             }
    7090                 :         63 :           vec_initial_def = build_vector_from_val (vectype_out, induc_val);
    7091                 :            :         }
    7092                 :      10987 :       else if (nested_cycle)
    7093                 :            :         {
    7094                 :            :           /* Do not use an adjustment def as that case is not supported
    7095                 :            :              correctly if ncopies is not one.  */
    7096                 :        310 :           vec_initial_def = vect_get_vec_def_for_operand (initial_def,
    7097                 :            :                                                           reduc_stmt_info);
    7098                 :            :         }
    7099                 :            :       else
    7100                 :            :         {
    7101                 :      10677 :           tree adjustment_def = NULL_TREE;
    7102                 :      10677 :           tree *adjustment_defp = &adjustment_def;
    7103                 :      10677 :           enum tree_code code = STMT_VINFO_REDUC_CODE (reduc_info);
    7104                 :      10677 :           if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
    7105                 :         39 :             adjustment_defp = NULL;
    7106                 :      10677 :           vec_initial_def
    7107                 :      10677 :             = get_initial_def_for_reduction (reduc_stmt_info, code,
    7108                 :            :                                              initial_def, adjustment_defp);
    7109                 :      10677 :           STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT (reduc_info) = adjustment_def;
    7110                 :            :         }
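                         :            : 
                         :            :       /* A sketch of the adjustment-def scheme, assuming the classical
                         :            :          behaviour of get_initial_def_for_reduction: for a plain sum the
                         :            :          vector accumulator may simply start from the neutral value,
                         :            :          e.g. { 0, 0, 0, 0 }, while adjustment_def records the scalar
                         :            :          initial value so that the epilogue can fold it back into the
                         :            :          final reduced scalar.  */
                         :            : 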
    7111                 :      11050 :       vec_initial_defs.create (1);
    7112                 :      11050 :       vec_initial_defs.quick_push (vec_initial_def);
    7113                 :            :     }
    7114                 :            : 
    7115                 :            :   /* Generate the reduction PHIs upfront.  */
    7116                 :            :   prev_phi_info = NULL;
    7117                 :      23117 :   for (i = 0; i < vec_num; i++)
    7118                 :            :     {
    7119                 :      11640 :       tree vec_init_def = vec_initial_defs[i];
    7120                 :      23407 :       for (j = 0; j < ncopies; j++)
    7121                 :            :         {
    7122                 :            :           /* Create the reduction-phi that defines the reduction
    7123                 :            :              operand.  */
    7124                 :      11767 :           gphi *new_phi = create_phi_node (vec_dest, loop->header);
    7125                 :      11767 :           stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
    7126                 :            : 
    7127                 :            :           /* Set the loop-entry arg of the reduction-phi.  */
    7128                 :      11767 :           if (j != 0 && nested_cycle)
    7129                 :         99 :             vec_init_def = vect_get_vec_def_for_stmt_copy (loop_vinfo,
    7130                 :            :                                                            vec_init_def);
    7131                 :      11767 :           add_phi_arg (new_phi, vec_init_def, loop_preheader_edge (loop),
    7132                 :            :                        UNKNOWN_LOCATION);
    7133                 :            : 
    7134                 :            :           /* The loop-latch arg is set in epilogue processing.  */
    7135                 :            : 
    7136                 :      11767 :           if (slp_node)
    7137                 :      12357 :             SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi_info);
    7138                 :            :           else
    7139                 :            :             {
    7140                 :      11177 :               if (j == 0)
    7141                 :      11050 :                 STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_phi_info;
    7142                 :            :               else
    7143                 :        127 :                 STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi_info;
    7144                 :            :               prev_phi_info = new_phi_info;
    7145                 :            :             }
    7146                 :            :         }
    7147                 :            :     }
    7148                 :            : 
    7149                 :      11477 :   return true;
    7150                 :            : }
    7151                 :            : 
    7152                 :            : /* Vectorizes LC PHIs.  */
    7153                 :            : 
    7154                 :            : bool
    7155                 :       6550 : vectorizable_lc_phi (stmt_vec_info stmt_info, stmt_vec_info *vec_stmt,
    7156                 :            :                      slp_tree slp_node)
    7157                 :            : {
    7158                 :       6550 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    7159                 :       6550 :   if (!loop_vinfo
    7160                 :       6550 :       || !is_a <gphi *> (stmt_info->stmt)
    7161                 :        536 :       || gimple_phi_num_args (stmt_info->stmt) != 1)
    7162                 :            :     return false;
    7163                 :            : 
    7164                 :        474 :   if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_internal_def
    7165                 :         80 :       && STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
    7166                 :            :     return false;
    7167                 :            : 
    7168                 :        474 :   if (!vec_stmt) /* transformation not required.  */
    7169                 :            :     {
    7170                 :        244 :       STMT_VINFO_TYPE (stmt_info) = lc_phi_info_type;
    7171                 :        244 :       return true;
    7172                 :            :     }
    7173                 :            : 
    7174                 :        230 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    7175                 :        230 :   tree scalar_dest = gimple_phi_result (stmt_info->stmt);
    7176                 :        230 :   basic_block bb = gimple_bb (stmt_info->stmt);
    7177                 :        230 :   edge e = single_pred_edge (bb);
    7178                 :        230 :   tree vec_dest = vect_create_destination_var (scalar_dest, vectype);
    7179                 :        230 :   vec<tree> vec_oprnds = vNULL;
    7180                 :        230 :   vect_get_vec_defs (gimple_phi_arg_def (stmt_info->stmt, 0), NULL_TREE,
    7181                 :            :                      stmt_info, &vec_oprnds, NULL, slp_node);
    7182                 :        230 :   if (slp_node)
    7183                 :            :     {
    7184                 :          0 :       unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
    7185                 :          0 :       gcc_assert (vec_oprnds.length () == vec_num);
    7186                 :          0 :       for (unsigned i = 0; i < vec_num; i++)
    7187                 :            :         {
    7188                 :            :           /* Create the vectorized LC PHI node.  */
    7189                 :          0 :           gphi *new_phi = create_phi_node (vec_dest, bb);
    7190                 :          0 :           add_phi_arg (new_phi, vec_oprnds[i], e, UNKNOWN_LOCATION);
    7191                 :          0 :           stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
    7192                 :          0 :           SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi_info);
    7193                 :            :         }
    7194                 :            :     }
    7195                 :            :   else
    7196                 :            :     {
    7197                 :        230 :       unsigned ncopies = vect_get_num_copies (loop_vinfo, vectype);
    7198                 :        230 :       stmt_vec_info prev_phi_info = NULL;
    7199                 :        495 :       for (unsigned i = 0; i < ncopies; i++)
    7200                 :            :         {
    7201                 :        265 :           if (i != 0)
    7202                 :         35 :             vect_get_vec_defs_for_stmt_copy (loop_vinfo, &vec_oprnds, NULL);
    7203                 :            :           /* Create the vectorized LC PHI node.  */
    7204                 :        265 :           gphi *new_phi = create_phi_node (vec_dest, bb);
    7205                 :        265 :           add_phi_arg (new_phi, vec_oprnds[0], e, UNKNOWN_LOCATION);
    7206                 :        265 :           stmt_vec_info new_phi_info = loop_vinfo->add_stmt (new_phi);
    7207                 :        265 :           if (i == 0)
    7208                 :        230 :             STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_phi_info;
    7209                 :            :           else
    7210                 :         35 :             STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi_info;
    7211                 :        265 :           prev_phi_info = new_phi_info;
    7212                 :            :         }
    7213                 :            :     }
    7214                 :        230 :   vec_oprnds.release ();
    7215                 :            : 
    7216                 :            :   return true;
    7217                 :            : }
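                         :            : 
                         :            : /* Illustration: the loop-closed (LC) PHIs handled above are the
                         :            :    single-argument PHIs that loop-closed SSA form keeps in the block
                         :            :    directly after the loop, e.g. (hypothetical names)
                         :            : 
                         :            :      x_2 = PHI <x_1(E)>       where E is the single exit edge
                         :            : 
                         :            :    and vectorizing one amounts to creating an analogous single-argument
                         :            :    PHI per vector copy, fed by the corresponding vectorized def.  */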
    7218                 :            : 
    7219                 :            : 
    7220                 :            : /* Function vect_min_worthwhile_factor.
    7221                 :            : 
    7222                 :            :    For a loop where we could vectorize the operation indicated by CODE,
    7223                 :            :    return the minimum vectorization factor that makes it worthwhile
    7224                 :            :    to use generic vectors.  */
    7225                 :            : static unsigned int
    7226                 :        860 : vect_min_worthwhile_factor (enum tree_code code)
    7227                 :            : {
    7228                 :        860 :   switch (code)
    7229                 :            :     {
    7230                 :            :     case PLUS_EXPR:
    7231                 :            :     case MINUS_EXPR:
    7232                 :            :     case NEGATE_EXPR:
    7233                 :            :       return 4;
    7234                 :            : 
    7235                 :         92 :     case BIT_AND_EXPR:
    7236                 :         92 :     case BIT_IOR_EXPR:
    7237                 :         92 :     case BIT_XOR_EXPR:
    7238                 :         92 :     case BIT_NOT_EXPR:
    7239                 :         92 :       return 2;
    7240                 :            : 
    7241                 :        702 :     default:
    7242                 :        702 :       return INT_MAX;
    7243                 :            :     }
    7244                 :            : }
    7245                 :            : 
    7246                 :            : /* Return true if VINFO indicates we are doing loop vectorization and if
    7247                 :            :    it is worth decomposing CODE operations into scalar operations for
    7248                 :            :    that loop's vectorization factor.  */
    7249                 :            : 
    7250                 :            : bool
    7251                 :       6855 : vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
    7252                 :            : {
    7253                 :       6855 :   loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
    7254                 :        860 :   unsigned HOST_WIDE_INT value;
    7255                 :        860 :   return (loop_vinfo
    7256                 :        860 :           && LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
    7257                 :        860 :           && value >= vect_min_worthwhile_factor (code));
    7258                 :            : }
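/* Expository sketch, not part of tree-vect-loop.c or of its coverage data;
   the names demo_min_factor and demo_worthwhile are hypothetical.  The two
   helpers above combine into one decision: emulate CODE with generic
   (non-SIMD) vectors only when the loop's vectorization factor is a
   compile-time constant at least as large as the per-operation threshold.
   Restated standalone:  */

static unsigned int
demo_min_factor (char op)
{
  switch (op)
    {
    case '+': case '-':           return 4;    /* add/sub/negate */
    case '&': case '|': case '^': return 2;    /* bitwise operations */
    default:                      return ~0U;  /* never worthwhile */
    }
}

static int
demo_worthwhile (unsigned int vf, char op)
{
  /* Mirrors vect_worthwhile_without_simd_p for a known-constant VF.  */
  return vf >= demo_min_factor (op);
}

/* demo_worthwhile (8, '+') == 1, demo_worthwhile (2, '+') == 0,
   demo_worthwhile (2, '&') == 1, demo_worthwhile (16, '/') == 0.  */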
    7259                 :            : 
    7260                 :            : /* Function vectorizable_induction
    7261                 :            : 
    7262                 :            :    Check if STMT_INFO performs an induction computation that can be vectorized.
    7263                 :            :    If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
    7264                 :            :    phi to replace it, put it in VEC_STMT, and add it to the same basic block.
    7265                 :            :    Return true if STMT_INFO is vectorizable in this way.  */
    7266                 :            : 
    7267                 :            : bool
    7268                 :      23025 : vectorizable_induction (stmt_vec_info stmt_info,
    7269                 :            :                         gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
    7270                 :            :                         stmt_vec_info *vec_stmt, slp_tree slp_node,
    7271                 :            :                         stmt_vector_for_cost *cost_vec)
    7272                 :            : {
    7273                 :      23025 :   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
    7274                 :      23025 :   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
    7275                 :      23025 :   unsigned ncopies;
    7276                 :      23025 :   bool nested_in_vect_loop = false;
    7277                 :      23025 :   class loop *iv_loop;
    7278                 :      23025 :   tree vec_def;
    7279                 :      23025 :   edge pe = loop_preheader_edge (loop);
    7280                 :      23025 :   basic_block new_bb;
    7281                 :      23025 :   tree new_vec, vec_init, vec_step, t;
    7282                 :      23025 :   tree new_name;
    7283                 :      23025 :   gimple *new_stmt;
    7284                 :      23025 :   gphi *induction_phi;
    7285                 :      23025 :   tree induc_def, vec_dest;
    7286                 :      23025 :   tree init_expr, step_expr;
    7287                 :      23025 :   poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
    7288                 :      23025 :   unsigned i;
    7289                 :      23025 :   tree expr;
    7290                 :      23025 :   gimple_seq stmts;
    7291                 :      23025 :   imm_use_iterator imm_iter;
    7292                 :      23025 :   use_operand_p use_p;
    7293                 :      23025 :   gimple *exit_phi;
    7294                 :      23025 :   edge latch_e;
    7295                 :      23025 :   tree loop_arg;
    7296                 :      23025 :   gimple_stmt_iterator si;
    7297                 :            : 
    7298                 :      23025 :   gphi *phi = dyn_cast <gphi *> (stmt_info->stmt);
    7299                 :      10152 :   if (!phi)
    7300                 :            :     return false;
    7301                 :            : 
    7302                 :      10152 :   if (!STMT_VINFO_RELEVANT_P (stmt_info))
    7303                 :            :     return false;
    7304                 :            : 
    7305                 :            :   /* Make sure it was recognized as induction computation.  */
    7306                 :      10152 :   if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
    7307                 :            :     return false;
    7308                 :            : 
    7309                 :      10090 :   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
    7310                 :      10090 :   poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
    7311                 :            : 
    7312                 :      10090 :   if (slp_node)
    7313                 :            :     ncopies = 1;
    7314                 :            :   else
    7315                 :       9742 :     ncopies = vect_get_num_copies (loop_vinfo, vectype);
    7316                 :       9742 :   gcc_assert (ncopies >= 1);
    7317                 :            : 
    7318                 :            :   /* FORNOW. These restrictions should be relaxed.  */
    7319                 :      10090 :   if (nested_in_vect_loop_p (loop, stmt_info))
    7320                 :            :     {
    7321                 :        162 :       imm_use_iterator imm_iter;
    7322                 :        162 :       use_operand_p use_p;
    7323                 :        162 :       gimple *exit_phi;
    7324                 :        162 :       edge latch_e;
    7325                 :        162 :       tree loop_arg;
    7326                 :            : 
    7327                 :        162 :       if (ncopies > 1)
    7328                 :            :         {
    7329                 :          6 :           if (dump_enabled_p ())
    7330                 :          2 :             dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    7331                 :            :                              "multiple types in nested loop.\n");
    7332                 :          8 :           return false;
    7333                 :            :         }
    7334                 :            : 
    7335                 :            :       /* FORNOW: outer loop induction with SLP not supported.  */
    7336                 :        156 :       if (STMT_SLP_TYPE (stmt_info))
    7337                 :            :         return false;
    7338                 :            : 
    7339                 :        156 :       exit_phi = NULL;
    7340                 :        156 :       latch_e = loop_latch_edge (loop->inner);
    7341                 :        156 :       loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
    7342                 :        324 :       FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
    7343                 :            :         {
    7344                 :        172 :           gimple *use_stmt = USE_STMT (use_p);
    7345                 :        172 :           if (is_gimple_debug (use_stmt))
    7346                 :          2 :             continue;
    7347                 :            : 
    7348                 :        170 :           if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
    7349                 :            :             {
    7350                 :            :               exit_phi = use_stmt;
    7351                 :            :               break;
    7352                 :            :             }
    7353                 :            :         }
    7354                 :        156 :       if (exit_phi)
    7355                 :            :         {
    7356                 :          4 :           stmt_vec_info exit_phi_vinfo = loop_vinfo->lookup_stmt (exit_phi);
    7357                 :          4 :           if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
    7358                 :          2 :                 && !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
    7359                 :            :             {
    7360                 :          2 :               if (dump_enabled_p ())
    7361                 :          2 :                 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    7362                 :            :                                  "inner-loop induction only used outside "
    7363                 :            :                                  "of the outer vectorized loop.\n");
    7364                 :          2 :               return false;
    7365                 :            :             }
    7366                 :            :         }
    7367                 :            : 
    7368                 :        154 :       nested_in_vect_loop = true;
    7369                 :        154 :       iv_loop = loop->inner;
    7370                 :            :     }
    7371                 :            :   else
    7372                 :            :     iv_loop = loop;
    7373                 :      10082 :   gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
    7374                 :            : 
    7375                 :      10082 :   if (slp_node && !nunits.is_constant ())
    7376                 :            :     {
    7377                 :            :       /* The current SLP code creates the initial value element-by-element.  */
    7378                 :            :       if (dump_enabled_p ())
    7379                 :            :         dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
    7380                 :            :                          "SLP induction not supported for variable-length"
    7381                 :            :                          " vectors.\n");
    7382                 :            :       return false;
    7383                 :            :     }
    7384                 :            : 
    7385                 :      10082 :   if (!vec_stmt) /* transformation not required.  */
    7386                 :            :     {
    7387                 :       6405 :       STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
    7388                 :       6405 :       DUMP_VECT_SCOPE ("vectorizable_induction");
    7389                 :       6405 :       vect_model_induction_cost (stmt_info, ncopies, cost_vec);
    7390                 :       6405 :       return true;
    7391                 :            :     }
    7392                 :            : 
    7393                 :            :   /* Transform.  */
    7394                 :            : 
    7395                 :            :   /* Compute a vector variable, initialized with the first VF values of
    7396                 :            :      the induction variable.  E.g., for an iv with IV_PHI='X' and
    7397                 :            :      evolution S, for a vector of 4 units, we want to compute:
    7398                 :            :      [X, X + S, X + 2*S, X + 3*S].  */
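/* Worked instance of the comment above (exposition only, not in the GCC
   sources): for a scalar iv with initial value X = 5 and step S = 3,
   vectorized with 4-unit vectors and a single vector copy (VF = 4), the
   code below materializes
     vec_init = { 5, 5+3, 5+2*3, 5+3*3 } = { 5, 8, 11, 14 }
   and a loop-carried step of VF*S:
     vec_step = { 12, 12, 12, 12 }
   so after one vector iteration the IV vector holds { 17, 20, 23, 26 }.  */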
    7399                 :            : 
    7400                 :       3677 :   if (dump_enabled_p ())
    7401                 :       1648 :     dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
    7402                 :            : 
    7403                 :       3677 :   latch_e = loop_latch_edge (iv_loop);
    7404                 :       3677 :   loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
    7405                 :            : 
    7406                 :       3677 :   step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
    7407                 :       3677 :   gcc_assert (step_expr != NULL_TREE);
    7408                 :       3677 :   tree step_vectype = get_same_sized_vectype (TREE_TYPE (step_expr), vectype);
    7409                 :            : 
    7410                 :       3677 :   pe = loop_preheader_edge (iv_loop);
    7411                 :       3677 :   init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
    7412                 :            :                                      loop_preheader_edge (iv_loop));
    7413                 :            : 
    7414                 :       3677 :   stmts = NULL;
    7415                 :       3677 :   if (!nested_in_vect_loop)
    7416                 :            :     {
    7417                 :            :       /* Convert the initial value to the IV update type.  */
    7418                 :       3611 :       tree new_type = TREE_TYPE (step_expr);
    7419                 :       3611 :       init_expr = gimple_convert (&stmts, new_type, init_expr);
    7420                 :            : 
    7421                 :            :       /* If we are using the loop mask to "peel" for alignment then we need
    7422                 :            :          to adjust the start value here.  */
    7423                 :       3611 :       tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
    7424                 :       3611 :       if (skip_niters != NULL_TREE)
    7425                 :            :         {
    7426                 :          0 :           if (FLOAT_TYPE_P (vectype))
    7427                 :          0 :             skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
    7428                 :            :                                         skip_niters);
    7429                 :            :           else
    7430                 :          0 :             skip_niters = gimple_convert (&stmts, new_type, skip_niters);
    7431                 :          0 :           tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
    7432                 :            :                                          skip_niters, step_expr);
    7433                 :          0 :           init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
    7434                 :            :                                     init_expr, skip_step);
    7435                 :            :         }
    7436                 :            :     }
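/* Worked instance of the skip_niters adjustment above (exposition only):
   if the loop mask "peels" the first 3 scalar iterations, lanes 0..2 of
   the first vector iteration are inactive, so the start value is shifted
   back by 3*S.  With init_expr = 100 and S = 2 this yields 100 - 3*2 = 94
   and a first IV vector of { 94, 96, 98, 100, ... }, putting the original
   initial value 100 in lane 3, the first active lane.  */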
    7437                 :            : 
    7438                 :       3677 :   if (stmts)
    7439                 :            :     {
    7440                 :         32 :       new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
    7441                 :         32 :       gcc_assert (!new_bb);
    7442                 :            :     }
    7443                 :            : 
    7444                 :            :   /* Find the first insertion point in the BB.  */
    7445                 :       3677 :   basic_block bb = gimple_bb (phi);
    7446                 :       3677 :   si = gsi_after_labels (bb);
    7447                 :            : 
    7448                 :            :   /* For SLP induction we have to generate several IVs as for example
    7449                 :            :      with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
    7450                 :            :      [i + 2*S, i + 3*S, i + 3*S, i + 3*S].  The step is the same uniform
    7451                 :            :      [VF*S, VF*S, VF*S, VF*S] for all.  */
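/* Numeric instance of the comment above (exposition only): with group
   size 3, 4-unit vectors, i = 0 and S = 1 we have VF = 4 and the three IVs
     { 0, 0, 0, 1 }  { 1, 1, 2, 2 }  { 2, 3, 3, 3 }
   each advanced by the uniform step { 4, 4, 4, 4 } every vector iteration,
   giving { 4, 4, 4, 5 }  { 5, 5, 6, 6 }  { 6, 7, 7, 7 } next.  */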
    7452                 :       3677 :   if (slp_node)
    7453                 :            :     {
    7454                 :            :       /* Enforced above.  */
    7455                 :        104 :       unsigned int const_nunits = nunits.to_constant ();
    7456                 :            : 
    7457                 :            :       /* Generate [VF*S, VF*S, ... ].  */
    7458                 :        104 :       if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
    7459                 :            :         {
    7460                 :          3 :           expr = build_int_cst (integer_type_node, vf);
    7461                 :          3 :           expr = fold_convert (TREE_TYPE (step_expr), expr);
    7462                 :            :         }
    7463                 :            :       else
    7464                 :        101 :         expr = build_int_cst (TREE_TYPE (step_expr), vf);
    7465                 :        104 :       new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
    7466                 :            :                               expr, step_expr);
    7467                 :        104 :       if (! CONSTANT_CLASS_P (new_name))
    7468                 :          3 :         new_name = vect_init_vector (stmt_info, new_name,
    7469                 :          3 :                                      TREE_TYPE (step_e