File: build/gcc/combine.c
Warning: line 8206, column 13: Although the value stored to 'i' is used in the enclosing expression, the value is never actually read from 'i'
1 | /* Optimize by combining instructions for GNU compiler. |
2 | Copyright (C) 1987-2021 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | /* This module is essentially the "combiner" phase of the U. of Arizona |
21 | Portable Optimizer, but redone to work on our list-structured |
22 | representation for RTL instead of their string representation. |
23 | |
24 | The LOG_LINKS of each insn identify the most recent assignment |
25 | to each REG used in the insn. It is a list of previous insns, |
26 | each of which contains a SET for a REG that is used in this insn |
27 | and not used or set in between. LOG_LINKs never cross basic blocks. |
28 | They were set up by the preceding pass (lifetime analysis). |
29 | |
30 | We try to combine each pair of insns joined by a logical link. |
31 | We also try to combine triplets of insns A, B and C when C has |
32 | a link back to B and B has a link back to A. Likewise for a |
33 | small number of quadruplets of insns A, B, C and D for which |
34 | there's high likelihood of success. |
35 | |
36 | LOG_LINKS does not have links for use of the CC0. They don't |
37 | need to, because the insn that sets the CC0 is always immediately |
38 | before the insn that tests it. So we always regard a branch |
39 | insn as having a logical link to the preceding insn. The same is true |
40 | for an insn explicitly using CC0. |
41 | |
42 | We check (with modified_between_p) to avoid combining in such a way |
43 | as to move a computation to a place where its value would be different. |
44 | |
45 | Combination is done by mathematically substituting the previous |
46 | insn(s) values for the regs they set into the expressions in |
47 | the later insns that refer to these regs. If the result is a valid insn |
48 | for our target machine, according to the machine description, |
49 | we install it, delete the earlier insns, and update the data flow |
50 | information (LOG_LINKS and REG_NOTES) for what we did. |
51 | |
52 | There are a few exceptions where the dataflow information isn't |
53 | completely updated (however this is only a local issue since it is |
54 | regenerated before the next pass that uses it): |
55 | |
56 | - reg_live_length is not updated |
57 | - reg_n_refs is not adjusted in the rare case when a register is |
58 | no longer required in a computation |
59 | - there are extremely rare cases (see distribute_notes) when a |
60 | REG_DEAD note is lost |
61 | - a LOG_LINKS entry that refers to an insn with multiple SETs may be |
62 | removed because there is no way to know which register it was |
63 | linking |
64 | |
65 | To simplify substitution, we combine only when the earlier insn(s) |
66 | consist of only a single assignment. To simplify updating afterward, |
67 | we never combine when a subroutine call appears in the middle. |
68 | |
69 | Since we do not represent assignments to CC0 explicitly except when that |
70 | is all an insn does, there is no LOG_LINKS entry in an insn that uses |
71 | the condition code for the insn that set the condition code. |
72 | Fortunately, these two insns must be consecutive. |
73 | Therefore, every JUMP_INSN is taken to have an implicit logical link |
74 | to the preceding insn. This is not quite right, since non-jumps can |
75 | also use the condition code; but in practice such insns would not |
76 | combine anyway. */ |
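
As a concrete illustration of the substitution just described (a sketch with invented pseudo-register numbers, not an excerpt from this file):

/* Before combine, with B holding a LOG_LINK back to A:

     A: (set (reg 100) (plus (reg 99) (const_int 4)))
     B: (set (reg 101) (mult (reg 100) (reg 98)))

   combine substitutes A's SET_SRC for (reg 100) in B, producing

     B': (set (reg 101) (mult (plus (reg 99) (const_int 4)) (reg 98)))

   If the machine description recognizes B', insn A is deleted and the
   LOG_LINKS and REG_NOTES are updated; otherwise the attempt is undone.  */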
77 | |
78 | #include "config.h" |
79 | #include "system.h" |
80 | #include "coretypes.h" |
81 | #include "backend.h" |
82 | #include "target.h" |
83 | #include "rtl.h" |
84 | #include "tree.h" |
85 | #include "cfghooks.h" |
86 | #include "predict.h" |
87 | #include "df.h" |
88 | #include "memmodel.h" |
89 | #include "tm_p.h" |
90 | #include "optabs.h" |
91 | #include "regs.h" |
92 | #include "emit-rtl.h" |
93 | #include "recog.h" |
94 | #include "cgraph.h" |
95 | #include "stor-layout.h" |
96 | #include "cfgrtl.h" |
97 | #include "cfgcleanup.h" |
98 | /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */ |
99 | #include "explow.h" |
100 | #include "insn-attr.h" |
101 | #include "rtlhooks-def.h" |
102 | #include "expr.h" |
103 | #include "tree-pass.h" |
104 | #include "valtrack.h" |
105 | #include "rtl-iter.h" |
106 | #include "print-rtl.h" |
107 | #include "function-abi.h" |
108 | |
109 | /* Number of attempts to combine instructions in this function. */ |
110 | |
111 | static int combine_attempts; |
112 | |
113 | /* Number of attempts that got as far as substitution in this function. */ |
114 | |
115 | static int combine_merges; |
116 | |
117 | /* Number of instructions combined with added SETs in this function. */ |
118 | |
119 | static int combine_extras; |
120 | |
121 | /* Number of instructions combined in this function. */ |
122 | |
123 | static int combine_successes; |
124 | |
125 | /* Totals over entire compilation. */ |
126 | |
127 | static int total_attempts, total_merges, total_extras, total_successes; |
128 | |
129 | /* combine_instructions may try to replace the right hand side of the |
130 | second instruction with the value of an associated REG_EQUAL note |
131 | before throwing it at try_combine. That is problematic when there |
132 | is a REG_DEAD note for a register used in the old right hand side |
133 | and can cause distribute_notes to do wrong things. This is the |
134 | second instruction if it has been so modified, null otherwise. */ |
135 | |
136 | static rtx_insn *i2mod; |
137 | |
138 | /* When I2MOD is nonnull, this is a copy of the old right hand side. */ |
139 | |
140 | static rtx i2mod_old_rhs; |
141 | |
142 | /* When I2MOD is nonnull, this is a copy of the new right hand side. */ |
143 | |
144 | static rtx i2mod_new_rhs; |
145 | |
146 | struct reg_stat_type { |
147 | /* Record last point of death of (hard or pseudo) register n. */ |
148 | rtx_insn *last_death; |
149 | |
150 | /* Record last point of modification of (hard or pseudo) register n. */ |
151 | rtx_insn *last_set; |
152 | |
153 | /* The next group of fields allows the recording of the last value assigned |
154 | to (hard or pseudo) register n. We use this information to see if an |
155 | operation being processed is redundant given a prior operation performed |
156 | on the register. For example, an `and' with a constant is redundant if |
157 | all the zero bits are already known to be turned off. |
158 | |
159 | We use an approach similar to that used by cse, but change it in the |
160 | following ways: |
161 | |
162 | (1) We do not want to reinitialize at each label. |
163 | (2) It is useful, but not critical, to know the actual value assigned |
164 | to a register. Often just its form is helpful. |
165 | |
166 | Therefore, we maintain the following fields: |
167 | |
168 | last_set_value the last value assigned |
169 | last_set_label records the value of label_tick when the |
170 | register was assigned |
171 | last_set_table_tick records the value of label_tick when a |
172 | value using the register is assigned |
173 | last_set_invalid set to nonzero when it is not valid |
174 | to use the value of this register in some |
175 | register's value |
176 | |
177 | To understand the usage of these tables, it is important to understand |
178 | the distinction between the value in last_set_value being valid and |
179 | the register being validly contained in some other expression in the |
180 | table. |
181 | |
182 | (The next two parameters are out of date). |
183 | |
184 | reg_stat[i].last_set_value is valid if it is nonzero, and either |
185 | reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick. |
186 | |
187 | Register I may validly appear in any expression returned for the value |
188 | of another register if reg_n_sets[i] is 1. It may also appear in the |
189 | value for register J if reg_stat[j].last_set_invalid is zero, or |
190 | reg_stat[i].last_set_label < reg_stat[j].last_set_label. |
191 | |
192 | If an expression is found in the table containing a register which may |
193 | not validly appear in an expression, the register is replaced by |
194 | something that won't match, (clobber (const_int 0)). */ |
195 | |
196 | /* Record last value assigned to (hard or pseudo) register n. */ |
197 | |
198 | rtx last_set_value; |
199 | |
200 | /* Record the value of label_tick when an expression involving register n |
201 | is placed in last_set_value. */ |
202 | |
203 | int last_set_table_tick; |
204 | |
205 | /* Record the value of label_tick when the value for register n is placed in |
206 | last_set_value. */ |
207 | |
208 | int last_set_label; |
209 | |
210 | /* These fields are maintained in parallel with last_set_value and are |
211 | used to store the mode in which the register was last set, the bits |
212 | that were known to be zero when it was last set, and the number of |
213 | sign bits copies it was known to have when it was last set. */ |
214 | |
215 |   unsigned HOST_WIDE_INT last_set_nonzero_bits; |
216 |   char last_set_sign_bit_copies; |
217 |   ENUM_BITFIELD(machine_mode) last_set_mode : 8; |
218 | |
219 | /* Set nonzero if references to register n in expressions should not be |
220 | used. last_set_invalid is set nonzero when this register is being |
221 | assigned to and last_set_table_tick == label_tick. */ |
222 | |
223 | char last_set_invalid; |
224 | |
225 | /* Some registers that are set more than once and used in more than one |
226 | basic block are nevertheless always set in similar ways. For example, |
227 | a QImode register may be loaded from memory in two places on a machine |
228 | where byte loads zero extend. |
229 | |
230 | We record in the following fields if a register has some leading bits |
231 | that are always equal to the sign bit, and what we know about the |
232 | nonzero bits of a register, specifically which bits are known to be |
233 | zero. |
234 | |
235 | If an entry is zero, it means that we don't know anything special. */ |
236 | |
237 | unsigned char sign_bit_copies; |
238 | |
239 |   unsigned HOST_WIDE_INT nonzero_bits; |
240 | |
241 | /* Record the value of the label_tick when the last truncation |
242 | happened. The field truncated_to_mode is only valid if |
243 | truncation_label == label_tick. */ |
244 | |
245 | int truncation_label; |
246 | |
247 | /* Record the last truncation seen for this register. If truncation |
248 | is not a nop to this mode we might be able to save an explicit |
249 | truncation if we know that value already contains a truncated |
250 | value. */ |
251 | |
252 |   ENUM_BITFIELD(machine_mode) truncated_to_mode : 8; |
253 | }; |
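
As a worked instance of the redundancy test described in the struct comment above (invented values, assuming a machine where byte loads zero extend):

/* Sketch: if reg_stat[n].nonzero_bits == 0xff for register n, only the
   low eight bits can ever be nonzero.  A later (and (reg n) (const_int 255))
   therefore clears no bit that is not already known to be zero, and
   combine may simplify the whole expression to just (reg n).  */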
254 | |
255 | |
256 | static vec<reg_stat_type> reg_stat; |
257 | |
258 | /* One plus the highest pseudo for which we track REG_N_SETS. |
259 | regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once, |
260 | but during combine_split_insns new pseudos can be created. As we don't have |
261 | updated DF information in that case, it is hard to initialize the array |
262 | after growing. The combiner only cares about REG_N_SETS (regno) == 1, |
263 | so instead of growing the arrays, just assume all newly created pseudos |
264 | during combine might be set multiple times. */ |
265 | |
266 | static unsigned int reg_n_sets_max; |
267 | |
268 | /* Record the luid of the last insn that invalidated memory |
269 | (anything that writes memory, and subroutine calls, but not pushes). */ |
270 | |
271 | static int mem_last_set; |
272 | |
273 | /* Record the luid of the last CALL_INSN |
274 | so we can tell whether a potential combination crosses any calls. */ |
275 | |
276 | static int last_call_luid; |
277 | |
278 | /* When `subst' is called, this is the insn that is being modified |
279 | (by combining in a previous insn). The PATTERN of this insn |
280 | is still the old pattern partially modified and it should not be |
281 | looked at, but this may be used to examine the successors of the insn |
282 | to judge whether a simplification is valid. */ |
283 | |
284 | static rtx_insn *subst_insn; |
285 | |
286 | /* This is the lowest LUID that `subst' is currently dealing with. |
287 | get_last_value will not return a value if the register was set at or |
288 | after this LUID. If not for this mechanism, we could get confused if |
289 | I2 or I1 in try_combine were an insn that used the old value of a register |
290 | to obtain a new value. In that case, we might erroneously get the |
291 | new value of the register when we wanted the old one. */ |
292 | |
293 | static int subst_low_luid; |
294 | |
295 | /* This contains any hard registers that are used in newpat; reg_dead_at_p |
296 | must consider all these registers to be always live. */ |
297 | |
298 | static HARD_REG_SET newpat_used_regs; |
299 | |
300 | /* This is an insn to which a LOG_LINKS entry has been added. If this |
301 |    insn is earlier than I2 or I3, combine should rescan starting at |
302 | that location. */ |
303 | |
304 | static rtx_insn *added_links_insn; |
305 | |
306 | /* And similarly, for notes. */ |
307 | |
308 | static rtx_insn *added_notes_insn; |
309 | |
310 | /* Basic block in which we are performing combines. */ |
311 | static basic_block this_basic_block; |
312 | static bool optimize_this_for_speed_p; |
313 | |
314 | |
315 | /* Length of the currently allocated uid_insn_cost array. */ |
316 | |
317 | static int max_uid_known; |
318 | |
319 | /* The following array records the insn_cost for every insn |
320 | in the instruction stream. */ |
321 | |
322 | static int *uid_insn_cost; |
323 | |
324 | /* The following array records the LOG_LINKS for every insn in the |
325 | instruction stream as struct insn_link pointers. */ |
326 | |
327 | struct insn_link { |
328 | rtx_insn *insn; |
329 | unsigned int regno; |
330 | struct insn_link *next; |
331 | }; |
332 | |
333 | static struct insn_link **uid_log_links; |
334 | |
335 | static inline int |
336 | insn_uid_check (const_rtx insn) |
337 | { |
338 | int uid = INSN_UID (insn); |
339 |   gcc_checking_assert (uid <= max_uid_known); |
340 | return uid; |
341 | } |
342 | |
343 | #define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)]) |
344 | #define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)]) |
345 | |
346 | #define FOR_EACH_LOG_LINK(L, INSN)				\ |
347 |   for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next) |
348 | |
349 | /* Links for LOG_LINKS are allocated from this obstack. */ |
350 | |
351 | static struct obstack insn_link_obstack; |
352 | |
353 | /* Allocate a link. */ |
354 | |
355 | static inline struct insn_link * |
356 | alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next) |
357 | { |
358 | struct insn_link *l |
359 |     = (struct insn_link *) obstack_alloc (&insn_link_obstack, |
360 | 					   sizeof (struct insn_link)); |
361 | l->insn = insn; |
362 | l->regno = regno; |
363 | l->next = next; |
364 | return l; |
365 | } |
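
A minimal usage sketch of the primitives just defined; count_log_links is a hypothetical helper for illustration, not part of combine.c:

/* Count the LOG_LINKS recorded for INSN (hypothetical example).  */
static int
count_log_links (rtx_insn *insn)
{
  struct insn_link *l;
  int n = 0;
  FOR_EACH_LOG_LINK (l, insn)
    n++;
  return n;
}

Because every link comes from alloc_insn_link on insn_link_obstack, the
whole list is freed in bulk when the obstack is released; no per-link
free is ever needed.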
366 | |
367 | /* Incremented for each basic block. */ |
368 | |
369 | static int label_tick; |
370 | |
371 | /* Reset to label_tick for each extended basic block in scanning order. */ |
372 | |
373 | static int label_tick_ebb_start; |
374 | |
375 | /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the |
376 | largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */ |
377 | |
378 | static scalar_int_mode nonzero_bits_mode; |
379 | |
380 | /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can |
381 | be safely used. It is zero while computing them and after combine has |
382 | completed. This former test prevents propagating values based on |
383 | previously set values, which can be incorrect if a variable is modified |
384 | in a loop. */ |
385 | |
386 | static int nonzero_sign_valid; |
387 | |
388 | |
389 | /* Record one modification to rtl structure |
390 | to be undone by storing old_contents into *where. */ |
391 | |
392 | enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS }; |
393 | |
394 | struct undo |
395 | { |
396 | struct undo *next; |
397 | enum undo_kind kind; |
398 | union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents; |
399 | union { rtx *r; int *i; struct insn_link **l; } where; |
400 | }; |
401 | |
402 | /* Record a bunch of changes to be undone, up to MAX_UNDO of them. |
403 | num_undo says how many are currently recorded. |
404 | |
405 | other_insn is nonzero if we have modified some other insn in the process |
406 | of working on subst_insn. It must be verified too. */ |
407 | |
408 | struct undobuf |
409 | { |
410 | struct undo *undos; |
411 | struct undo *frees; |
412 | rtx_insn *other_insn; |
413 | }; |
414 | |
415 | static struct undobuf undobuf; |
416 | |
417 | /* Number of times the pseudo being substituted for |
418 | was found and replaced. */ |
419 | |
420 | static int n_occurrences; |
421 | |
422 | static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode, |
423 | scalar_int_mode, |
424 | 					  unsigned HOST_WIDE_INT *); |
425 | static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode, |
426 | scalar_int_mode, |
427 | unsigned int *); |
428 | static void do_SUBST (rtx *, rtx); |
429 | static void do_SUBST_INT (int *, int); |
430 | static void init_reg_last (void); |
431 | static void setup_incoming_promotions (rtx_insn *); |
432 | static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *); |
433 | static int cant_combine_insn_p (rtx_insn *); |
434 | static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *, |
435 | rtx_insn *, rtx_insn *, rtx *, rtx *); |
436 | static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *); |
437 | static int contains_muldiv (rtx); |
438 | static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *, |
439 | int *, rtx_insn *); |
440 | static void undo_all (void); |
441 | static void undo_commit (void); |
442 | static rtx *find_split_point (rtx *, rtx_insn *, bool); |
443 | static rtx subst (rtx, rtx, rtx, int, int, int); |
444 | static rtx combine_simplify_rtx (rtx, machine_mode, int, int); |
445 | static rtx simplify_if_then_else (rtx); |
446 | static rtx simplify_set (rtx); |
447 | static rtx simplify_logical (rtx); |
448 | static rtx expand_compound_operation (rtx); |
449 | static const_rtx expand_field_assignment (const_rtx); |
450 | static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT, |
451 | 			    rtx, unsigned HOST_WIDE_INT, int, int, int); |
452 | static int get_pos_from_mask (unsigned HOST_WIDE_INT, |
453 | 			      unsigned HOST_WIDE_INT *); |
454 | static rtx canon_reg_for_combine (rtx, rtx); |
455 | static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode, |
456 | 			      scalar_int_mode, unsigned HOST_WIDE_INT, int); |
457 | static rtx force_to_mode (rtx, machine_mode, |
458 | 			  unsigned HOST_WIDE_INT, int); |
459 | static rtx if_then_else_cond (rtx, rtx *, rtx *); |
460 | static rtx known_cond (rtx, enum rtx_code, rtx, rtx); |
461 | static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false); |
462 | static rtx make_field_assignment (rtx); |
463 | static rtx apply_distributive_law (rtx); |
464 | static rtx distribute_and_simplify_rtx (rtx, int); |
465 | static rtx simplify_and_const_int_1 (scalar_int_mode, rtx, |
466 | 				     unsigned HOST_WIDE_INT); |
467 | static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx, |
468 | 				   unsigned HOST_WIDE_INT); |
469 | static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code, |
470 | 			    HOST_WIDE_INT, machine_mode, int *); |
471 | static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int); |
472 | static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx, |
473 | int); |
474 | static int recog_for_combine (rtx *, rtx_insn *, rtx *); |
475 | static rtx gen_lowpart_for_combine (machine_mode, rtx); |
476 | static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode, |
477 | rtx, rtx *); |
478 | static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *); |
479 | static void update_table_tick (rtx); |
480 | static void record_value_for_reg (rtx, rtx_insn *, rtx); |
481 | static void check_promoted_subreg (rtx_insn *, rtx); |
482 | static void record_dead_and_set_regs_1 (rtx, const_rtx, void *); |
483 | static void record_dead_and_set_regs (rtx_insn *); |
484 | static int get_last_value_validate (rtx *, rtx_insn *, int, int); |
485 | static rtx get_last_value (const_rtx); |
486 | static void reg_dead_at_p_1 (rtx, const_rtx, void *); |
487 | static int reg_dead_at_p (rtx, rtx_insn *); |
488 | static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *); |
489 | static int reg_bitfield_target_p (rtx, rtx); |
490 | static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx); |
491 | static void distribute_links (struct insn_link *); |
492 | static void mark_used_regs_combine (rtx); |
493 | static void record_promoted_value (rtx_insn *, rtx); |
494 | static bool unmentioned_reg_p (rtx, rtx); |
495 | static void record_truncated_values (rtx *, void *); |
496 | static bool reg_truncated_to_mode (machine_mode, const_rtx); |
497 | static rtx gen_lowpart_or_truncate (machine_mode, rtx); |
498 | |
499 | |
500 | /* It is not safe to use ordinary gen_lowpart in combine. |
501 | See comments in gen_lowpart_for_combine. */ |
502 | #undef RTL_HOOKS_GEN_LOWPART |
503 | #define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine |
504 |  |
505 | /* Our implementation of gen_lowpart never emits a new pseudo. */ |
506 | #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT |
507 | #define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine |
508 |  |
509 | #undef RTL_HOOKS_REG_NONZERO_REG_BITS |
510 | #define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine |
511 |  |
512 | #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES |
513 | #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine |
514 |  |
515 | #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE |
516 | #define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode |
517 |  |
518 | static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER; |
519 | |
520 | |
521 | /* Convenience wrapper for the canonicalize_comparison target hook. |
522 | Target hooks cannot use enum rtx_code. */ |
523 | static inline void |
524 | target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1, |
525 | bool op0_preserve_value) |
526 | { |
527 | int code_int = (int)*code; |
528 | targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value); |
529 | *code = (enum rtx_code)code_int; |
530 | } |
531 | |
532 | /* Try to split PATTERN found in INSN. This returns NULL_RTX if |
533 | PATTERN cannot be split. Otherwise, it returns an insn sequence. |
534 | This is a wrapper around split_insns which ensures that the |
535 | reg_stat vector is made larger if the splitter creates a new |
536 | register. */ |
537 | |
538 | static rtx_insn * |
539 | combine_split_insns (rtx pattern, rtx_insn *insn) |
540 | { |
541 | rtx_insn *ret; |
542 | unsigned int nregs; |
543 | |
544 | ret = split_insns (pattern, insn); |
545 | nregs = max_reg_num (); |
546 | if (nregs > reg_stat.length ()) |
547 | reg_stat.safe_grow_cleared (nregs, true); |
548 | return ret; |
549 | } |
550 | |
551 | /* This is used by find_single_use to locate an rtx in LOC that |
552 | contains exactly one use of DEST, which is typically either a REG |
553 | or CC0. It returns a pointer to the innermost rtx expression |
554 | containing DEST. Appearances of DEST that are being used to |
555 | totally replace it are not counted. */ |
556 | |
557 | static rtx * |
558 | find_single_use_1 (rtx dest, rtx *loc) |
559 | { |
560 | rtx x = *loc; |
561 |   enum rtx_code code = GET_CODE (x); |
562 |   rtx *result = NULL; |
563 | rtx *this_result; |
564 | int i; |
565 | const char *fmt; |
566 | |
567 | switch (code) |
568 | { |
569 | case CONST: |
570 | case LABEL_REF: |
571 | case SYMBOL_REF: |
572 |     CASE_CONST_ANY: |
573 | case CLOBBER: |
574 | return 0; |
575 | |
576 | case SET: |
577 | /* If the destination is anything other than CC0, PC, a REG or a SUBREG |
578 | of a REG that occupies all of the REG, the insn uses DEST if |
579 | it is mentioned in the destination or the source. Otherwise, we |
580 | need just check the source. */ |
581 |       if (GET_CODE (SET_DEST (x)) != CC0 |
582 | 	  && GET_CODE (SET_DEST (x)) != PC |
583 | 	  && !REG_P (SET_DEST (x)) |
584 | 	  && ! (GET_CODE (SET_DEST (x)) == SUBREG |
585 | 		&& REG_P (SUBREG_REG (SET_DEST (x))) |
586 | 		&& !read_modify_subreg_p (SET_DEST (x)))) |
587 | break; |
588 | |
589 |       return find_single_use_1 (dest, &SET_SRC (x)); |
590 | |
591 | case MEM: |
592 | case SUBREG: |
593 |       return find_single_use_1 (dest, &XEXP (x, 0)); |
594 | |
595 | default: |
596 | break; |
597 | } |
598 | |
599 | /* If it wasn't one of the common cases above, check each expression and |
600 | vector of this code. Look for a unique usage of DEST. */ |
601 | |
602 |   fmt = GET_RTX_FORMAT (code); |
603 |   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
604 | { |
605 | if (fmt[i] == 'e') |
606 | { |
607 | 	  if (dest == XEXP (x, i) |
608 | 	      || (REG_P (dest) && REG_P (XEXP (x, i)) |
609 | 		  && REGNO (dest) == REGNO (XEXP (x, i)))) |
610 | 	    this_result = loc; |
611 | 	  else |
612 | 	    this_result = find_single_use_1 (dest, &XEXP (x, i)); |
613 | |
614 | 	  if (result == NULL) |
615 | result = this_result; |
616 | else if (this_result) |
617 | /* Duplicate usage. */ |
618 | 	    return NULL; |
619 | } |
620 | else if (fmt[i] == 'E') |
621 | { |
622 | int j; |
623 | |
624 | 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
625 | { |
626 | 	      if (XVECEXP (x, i, j) == dest |
627 | 		  || (REG_P (dest) |
628 | 		      && REG_P (XVECEXP (x, i, j)) |
629 | 		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest))) |
630 | 		this_result = loc; |
631 | 	      else |
632 | 		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j)); |
633 | |
634 | 	      if (result == NULL) |
635 | result = this_result; |
636 | else if (this_result) |
637 | 		return NULL; |
638 | } |
639 | } |
640 | } |
641 | |
642 | return result; |
643 | } |
644 | |
645 | |
646 | /* See if DEST, produced in INSN, is used only a single time in the |
647 | sequel. If so, return a pointer to the innermost rtx expression in which |
648 | it is used. |
649 | |
650 | If PLOC is nonzero, *PLOC is set to the insn containing the single use. |
651 | |
652 | If DEST is cc0_rtx, we look only at the next insn. In that case, we don't |
653 | care about REG_DEAD notes or LOG_LINKS. |
654 | |
655 | Otherwise, we find the single use by finding an insn that has a |
656 | LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is |
657 | only referenced once in that insn, we know that it must be the first |
658 | and last insn referencing DEST. */ |
659 | |
660 | static rtx * |
661 | find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc) |
662 | { |
663 | basic_block bb; |
664 | rtx_insn *next; |
665 | rtx *result; |
666 | struct insn_link *link; |
667 | |
668 | if (dest == cc0_rtx) |
669 | { |
670 | next = NEXT_INSN (insn); |
671 | if (next == 0 |
672 | 	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next))) |
673 | return 0; |
674 | |
675 | result = find_single_use_1 (dest, &PATTERN (next)); |
676 | if (result && ploc) |
677 | *ploc = next; |
678 | return result; |
679 | } |
680 | |
681 |   if (!REG_P (dest)) |
682 | return 0; |
683 | |
684 | bb = BLOCK_FOR_INSN (insn); |
685 | for (next = NEXT_INSN (insn); |
686 | next && BLOCK_FOR_INSN (next) == bb; |
687 | next = NEXT_INSN (next)) |
688 |     if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest)) |
689 | { |
690 | 	FOR_EACH_LOG_LINK (link, next) |
691 | 	  if (link->insn == insn && link->regno == REGNO (dest)) |
692 | break; |
693 | |
694 | if (link) |
695 | { |
696 | result = find_single_use_1 (dest, &PATTERN (next)); |
697 | if (ploc) |
698 | *ploc = next; |
699 | return result; |
700 | } |
701 | } |
702 | |
703 | return 0; |
704 | } |
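
A sketch of how a caller can use the result (hypothetical fragment; the real callers appear later in this file):

/* If DEST, set in INSN, is read exactly once afterwards, the single
   use can be rewritten in place:

     rtx_insn *use_insn;
     rtx *usep = find_single_use (dest, insn, &use_insn);
     if (usep)
       SUBST (*usep, replacement);   // only one reader observes this

   where SUBST and "replacement" stand in for the substitution
   machinery defined below.  */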
705 | |
706 | /* Substitute NEWVAL, an rtx expression, into INTO, a place in some |
707 | insn. The substitution can be undone by undo_all. If INTO is already |
708 | set to NEWVAL, do not record this change. Because computing NEWVAL might |
709 | also call SUBST, we have to compute it before we put anything into |
710 | the undo table. */ |
711 | |
712 | static void |
713 | do_SUBST (rtx *into, rtx newval) |
714 | { |
715 | struct undo *buf; |
716 | rtx oldval = *into; |
717 | |
718 | if (oldval == newval) |
719 | return; |
720 | |
721 | /* We'd like to catch as many invalid transformations here as |
722 | possible. Unfortunately, there are way too many mode changes |
723 | that are perfectly valid, so we'd waste too much effort for |
724 | little gain doing the checks here. Focus on catching invalid |
725 | transformations involving integer constants. */ |
726 |   if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT |
727 |       && CONST_INT_P (newval)) |
728 | { |
729 | /* Sanity check that we're replacing oldval with a CONST_INT |
730 | that is a valid sign-extension for the original mode. */ |
731 |       gcc_assert (INTVAL (newval) |
732 | 		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval))); |
733 | |
734 | /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a |
735 | CONST_INT is not valid, because after the replacement, the |
736 | original mode would be gone. Unfortunately, we can't tell |
737 | when do_SUBST is called to replace the operand thereof, so we |
738 | perform this test on oldval instead, checking whether an |
739 | invalid replacement took place before we got here. */ |
740 |       gcc_assert (!(GET_CODE (oldval) == SUBREG |
741 | 		    && CONST_INT_P (SUBREG_REG (oldval)))); |
742 |       gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND |
743 | 		    && CONST_INT_P (XEXP (oldval, 0)))); |
744 | } |
745 | |
746 | if (undobuf.frees) |
747 | buf = undobuf.frees, undobuf.frees = buf->next; |
748 | else |
749 |     buf = XNEW (struct undo); |
750 | |
751 | buf->kind = UNDO_RTX; |
752 | buf->where.r = into; |
753 | buf->old_contents.r = oldval; |
754 | *into = newval; |
755 | |
756 | buf->next = undobuf.undos, undobuf.undos = buf; |
757 | } |
758 | |
759 | #define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL)) |
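
A sketch of the undo discipline that SUBST enables (recog_ok and new_rtx are invented names for illustration):

/* Every tentative rewrite goes through SUBST so it can be rolled back:

     SUBST (XEXP (x, 0), new_rtx);  // old value recorded in undobuf
     ...
     if (!recog_ok)
       undo_all ();     // restore each recorded *where from old_contents
     else
       undo_commit ();  // accept the changes, recycle the undo entries
*/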
760 | |
761 | /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution |
762 | for the value of a HOST_WIDE_INT value (including CONST_INT) is |
763 | not safe. */ |
764 | |
765 | static void |
766 | do_SUBST_INT (int *into, int newval) |
767 | { |
768 | struct undo *buf; |
769 | int oldval = *into; |
770 | |
771 | if (oldval == newval) |
772 | return; |
773 | |
774 | if (undobuf.frees) |
775 | buf = undobuf.frees, undobuf.frees = buf->next; |
776 | else |
777 |     buf = XNEW (struct undo); |
778 | |
779 | buf->kind = UNDO_INT; |
780 | buf->where.i = into; |
781 | buf->old_contents.i = oldval; |
782 | *into = newval; |
783 | |
784 | buf->next = undobuf.undos, undobuf.undos = buf; |
785 | } |
786 | |
787 | #define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL)) |
788 | |
789 | /* Similar to SUBST, but just substitute the mode. This is used when |
790 | changing the mode of a pseudo-register, so that any other |
791 | references to the entry in the regno_reg_rtx array will change as |
792 | well. */ |
793 | |
794 | static void |
795 | do_SUBST_MODE (rtx *into, machine_mode newval) |
796 | { |
797 | struct undo *buf; |
798 |   machine_mode oldval = GET_MODE (*into); |
799 | |
800 | if (oldval == newval) |
801 | return; |
802 | |
803 | if (undobuf.frees) |
804 | buf = undobuf.frees, undobuf.frees = buf->next; |
805 | else |
806 |     buf = XNEW (struct undo); |
807 | |
808 | buf->kind = UNDO_MODE; |
809 | buf->where.r = into; |
810 | buf->old_contents.m = oldval; |
811 | adjust_reg_mode (*into, newval); |
812 | |
813 | buf->next = undobuf.undos, undobuf.undos = buf; |
814 | } |
815 | |
816 | #define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL)) |
817 | |
818 | /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */ |
819 | |
820 | static void |
821 | do_SUBST_LINK (struct insn_link **into, struct insn_link *newval) |
822 | { |
823 | struct undo *buf; |
824 | struct insn_link * oldval = *into; |
825 | |
826 | if (oldval == newval) |
827 | return; |
828 | |
829 | if (undobuf.frees) |
830 | buf = undobuf.frees, undobuf.frees = buf->next; |
831 | else |
832 |     buf = XNEW (struct undo); |
833 | |
834 | buf->kind = UNDO_LINKS; |
835 | buf->where.l = into; |
836 | buf->old_contents.l = oldval; |
837 | *into = newval; |
838 | |
839 | buf->next = undobuf.undos, undobuf.undos = buf; |
840 | } |
841 | |
842 | #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval) |
843 | |
844 | /* Subroutine of try_combine. Determine whether the replacement patterns |
845 | NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost |
846 | than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note |
847 | that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and |
848 | undobuf.other_insn may also both be NULL_RTX. Return false if the cost |
849 | of all the instructions can be estimated and the replacements are more |
850 | expensive than the original sequence. */ |
851 | |
852 | static bool |
853 | combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3, |
854 | rtx newpat, rtx newi2pat, rtx newotherpat) |
855 | { |
856 | int i0_cost, i1_cost, i2_cost, i3_cost; |
857 | int new_i2_cost, new_i3_cost; |
858 | int old_cost, new_cost; |
859 | |
860 | /* Lookup the original insn_costs. */ |
861 |   i2_cost = INSN_COST (i2); |
862 |   i3_cost = INSN_COST (i3); |
863 | |
864 | if (i1) |
865 | { |
866 |       i1_cost = INSN_COST (i1); |
867 | if (i0) |
868 | { |
869 | 	  i0_cost = INSN_COST (i0); |
870 | old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0 |
871 | ? i0_cost + i1_cost + i2_cost + i3_cost : 0); |
872 | } |
873 | else |
874 | { |
875 | old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0 |
876 | ? i1_cost + i2_cost + i3_cost : 0); |
877 | i0_cost = 0; |
878 | } |
879 | } |
880 | else |
881 | { |
882 | old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0; |
883 | i1_cost = i0_cost = 0; |
884 | } |
885 | |
886 | /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice; |
887 | correct that. */ |
888 | if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2)) |
889 | old_cost -= i1_cost; |
890 | |
891 | |
892 | /* Calculate the replacement insn_costs. */ |
893 | rtx tmp = PATTERN (i3); |
894 | PATTERN (i3) = newpat; |
895 |   int tmpi = INSN_CODE (i3); |
896 |   INSN_CODE (i3) = -1; |
897 | new_i3_cost = insn_cost (i3, optimize_this_for_speed_p); |
898 | PATTERN (i3) = tmp; |
899 |   INSN_CODE (i3) = tmpi; |
900 | if (newi2pat) |
901 | { |
902 | tmp = PATTERN (i2); |
903 | PATTERN (i2) = newi2pat; |
904 |       tmpi = INSN_CODE (i2); |
905 |       INSN_CODE (i2) = -1; |
906 | new_i2_cost = insn_cost (i2, optimize_this_for_speed_p); |
907 | PATTERN (i2) = tmp; |
908 |       INSN_CODE (i2) = tmpi; |
909 | new_cost = (new_i2_cost > 0 && new_i3_cost > 0) |
910 | ? new_i2_cost + new_i3_cost : 0; |
911 | } |
912 | else |
913 | { |
914 | new_cost = new_i3_cost; |
915 | new_i2_cost = 0; |
916 | } |
917 | |
918 | if (undobuf.other_insn) |
919 | { |
920 | int old_other_cost, new_other_cost; |
921 | |
922 |       old_other_cost = INSN_COST (undobuf.other_insn); |
923 | tmp = PATTERN (undobuf.other_insn); |
924 | PATTERN (undobuf.other_insn) = newotherpat; |
925 |       tmpi = INSN_CODE (undobuf.other_insn); |
926 |       INSN_CODE (undobuf.other_insn) = -1; |
927 | new_other_cost = insn_cost (undobuf.other_insn, |
928 | optimize_this_for_speed_p); |
929 | PATTERN (undobuf.other_insn) = tmp; |
930 |       INSN_CODE (undobuf.other_insn) = tmpi; |
931 | if (old_other_cost > 0 && new_other_cost > 0) |
932 | { |
933 | old_cost += old_other_cost; |
934 | new_cost += new_other_cost; |
935 | } |
936 | else |
937 | old_cost = 0; |
938 | } |
939 | |
940 | /* Disallow this combination if both new_cost and old_cost are greater than |
941 | zero, and new_cost is greater than old cost. */ |
942 | int reject = old_cost > 0 && new_cost > old_cost; |
943 | |
944 | if (dump_file) |
945 | { |
946 | fprintf (dump_file, "%s combination of insns ", |
947 | reject ? "rejecting" : "allowing"); |
948 | if (i0) |
949 | fprintf (dump_file, "%d, ", INSN_UID (i0)); |
950 | if (i1 && INSN_UID (i1) != INSN_UID (i2)) |
951 | fprintf (dump_file, "%d, ", INSN_UID (i1)); |
952 | fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3)); |
953 | |
954 | fprintf (dump_file, "original costs "); |
955 | if (i0) |
956 | fprintf (dump_file, "%d + ", i0_cost); |
957 | if (i1 && INSN_UID (i1) != INSN_UID (i2)) |
958 | fprintf (dump_file, "%d + ", i1_cost); |
959 | fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost); |
960 | |
961 | if (newi2pat) |
962 | fprintf (dump_file, "replacement costs %d + %d = %d\n", |
963 | new_i2_cost, new_i3_cost, new_cost); |
964 | else |
965 | fprintf (dump_file, "replacement cost %d\n", new_cost); |
966 | } |
967 | |
968 | if (reject) |
969 | return false; |
970 | |
971 | /* Update the uid_insn_cost array with the replacement costs. */ |
972 |   INSN_COST (i2) = new_i2_cost; |
973 |   INSN_COST (i3) = new_i3_cost; |
974 | if (i1) |
975 | { |
976 |       INSN_COST (i1) = 0; |
977 | if (i0) |
978 | 	INSN_COST (i0) = 0; |
979 | } |
980 | |
981 | return true; |
982 | } |
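
A worked example of the accept/reject rule above, using invented costs:

/* Suppose INSN_COST (i2) == 4 and INSN_COST (i3) == 4, so old_cost is 8.
   If the proposed NEWPAT costs 12, both totals are positive and 12 > 8,
   so the combination is rejected.  At a cost of 8 or less it is allowed,
   and uid_insn_cost is updated: i3 gets new_i3_cost, i2 gets new_i2_cost
   when NEWI2PAT exists (zero otherwise), and i1/i0 are zeroed if present.  */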
983 | |
984 | |
985 | /* Delete any insns that copy a register to itself. |
986 | Return true if the CFG was changed. */ |
987 | |
988 | static bool |
989 | delete_noop_moves (void) |
990 | { |
991 | rtx_insn *insn, *next; |
992 | basic_block bb; |
993 | |
994 | bool edges_deleted = false; |
995 | |
996 |   FOR_EACH_BB_FN (bb, cfun) |
997 | { |
998 |       for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next) |
999 | { |
1000 | next = NEXT_INSN (insn); |
1001 | 	  if (INSN_P (insn) && noop_move_p (insn)) |
1002 | { |
1003 | if (dump_file) |
1004 | fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn)); |
1005 | |
1006 | edges_deleted |= delete_insn_and_edges (insn); |
1007 | } |
1008 | } |
1009 | } |
1010 | |
1011 | return edges_deleted; |
1012 | } |
1013 | |
1014 | |
1015 | /* Return false if we do not want to (or cannot) combine DEF. */ |
1016 | static bool |
1017 | can_combine_def_p (df_ref def) |
1018 | { |
1019 | /* Do not consider if it is pre/post modification in MEM. */ |
1020 |   if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY) |
1021 | return false; |
1022 | |
1023 |   unsigned int regno = DF_REF_REGNO (def); |
1024 | |
1025 | /* Do not combine frame pointer adjustments. */ |
1026 |   if ((regno == FRAME_POINTER_REGNUM |
1027 |        && (!reload_completed || frame_pointer_needed)) |
1028 |       || (!HARD_FRAME_POINTER_IS_FRAME_POINTER |
1029 | 	  && regno == HARD_FRAME_POINTER_REGNUM |
1030 | 	  && (!reload_completed || frame_pointer_needed)) |
1031 |       || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM |
1032 | 	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])) |
1033 | return false; |
1034 | |
1035 | return true; |
1036 | } |
1037 | |
1038 | /* Return false if we do not want to (or cannot) combine USE. */ |
1039 | static bool |
1040 | can_combine_use_p (df_ref use) |
1041 | { |
1042 | /* Do not consider the usage of the stack pointer by function call. */ |
1043 |   if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE) |
1044 | return false; |
1045 | |
1046 | return true; |
1047 | } |
1048 | |
1049 | /* Fill in log links field for all insns. */ |
1050 | |
1051 | static void |
1052 | create_log_links (void) |
1053 | { |
1054 | basic_block bb; |
1055 | rtx_insn **next_use; |
1056 | rtx_insn *insn; |
1057 | df_ref def, use; |
1058 | |
1059 |   next_use = XCNEWVEC (rtx_insn *, max_reg_num ()); |
1060 | |
1061 | /* Pass through each block from the end, recording the uses of each |
1062 | register and establishing log links when def is encountered. |
1063 | Note that we do not clear next_use array in order to save time, |
1064 | so we have to test whether the use is in the same basic block as def. |
1065 | |
1066 | There are a few cases below when we do not consider the definition or |
1067 |      usage -- these are taken from what the original flow.c did.  Don't ask me why it is |
1068 | done this way; I don't know and if it works, I don't want to know. */ |
1069 | |
1070 |   FOR_EACH_BB_FN (bb, cfun) |
1071 | { |
1072 |       FOR_BB_INSNS_REVERSE (bb, insn) |
1073 | { |
1074 | 	  if (!NONDEBUG_INSN_P (insn)) |
1075 | continue; |
1076 | |
1077 | /* Log links are created only once. */ |
1078 | 	  gcc_assert (!LOG_LINKS (insn)); |
1079 | |
1080 | 	  FOR_EACH_INSN_DEF (def, insn) |
1081 | { |
1082 | 	      unsigned int regno = DF_REF_REGNO (def); |
1083 | rtx_insn *use_insn; |
1084 | |
1085 | if (!next_use[regno]) |
1086 | continue; |
1087 | |
1088 | if (!can_combine_def_p (def)) |
1089 | continue; |
1090 | |
1091 | use_insn = next_use[regno]; |
1092 | 	      next_use[regno] = NULL; |
1093 | |
1094 | if (BLOCK_FOR_INSN (use_insn) != bb) |
1095 | continue; |
1096 | |
1097 | /* flow.c claimed: |
1098 | |
1099 | We don't build a LOG_LINK for hard registers contained |
1100 | in ASM_OPERANDs. If these registers get replaced, |
1101 | we might wind up changing the semantics of the insn, |
1102 | even if reload can make what appear to be valid |
1103 | assignments later. */ |
1104 | 	      if (regno < FIRST_PSEUDO_REGISTER |
1105 | && asm_noperands (PATTERN (use_insn)) >= 0) |
1106 | continue; |
1107 | |
1108 | /* Don't add duplicate links between instructions. */ |
1109 | struct insn_link *links; |
1110 | 	      FOR_EACH_LOG_LINK (links, use_insn) |
1111 | if (insn == links->insn && regno == links->regno) |
1112 | break; |
1113 | |
1114 | if (!links) |
1115 | 		LOG_LINKS (use_insn) |
1116 | 		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn)); |
1117 | } |
1118 | |
1119 | 	  FOR_EACH_INSN_USE (use, insn) |
1120 | if (can_combine_use_p (use)) |
1121 | 	      next_use[DF_REF_REGNO (use)] = insn; |
1122 | } |
1123 | } |
1124 | |
1125 | free (next_use); |
1126 | } |
1127 | |
1128 | /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return |
1129 | true if we found a LOG_LINK that proves that A feeds B. This only works |
1130 | if there are no instructions between A and B which could have a link |
1131 | depending on A, since in that case we would not record a link for B. |
1132 | We also check the implicit dependency created by a cc0 setter/user |
1133 | pair. */ |
1134 | |
1135 | static bool |
1136 | insn_a_feeds_b (rtx_insn *a, rtx_insn *b) |
1137 | { |
1138 | struct insn_link *links; |
1139 |   FOR_EACH_LOG_LINK (links, b) |
1140 | if (links->insn == a) |
1141 | return true; |
1142 |   if (HAVE_cc0 && sets_cc0_p (a)) |
1143 | return true; |
1144 | return false; |
1145 | } |
1146 | |
1147 | /* Main entry point for combiner. F is the first insn of the function. |
1148 | NREGS is the first unused pseudo-reg number. |
1149 | |
1150 | Return nonzero if the CFG was changed (e.g. if the combiner has |
1151 | turned an indirect jump instruction into a direct jump). */ |
1152 | static int |
1153 | combine_instructions (rtx_insn *f, unsigned int nregs) |
1154 | { |
1155 | rtx_insn *insn, *next; |
1156 | rtx_insn *prev; |
1157 | struct insn_link *links, *nextlinks; |
1158 | rtx_insn *first; |
1159 | basic_block last_bb; |
1160 | |
1161 | int new_direct_jump_p = 0; |
1162 | |
1163 |   for (first = f; first && !NONDEBUG_INSN_P (first); ) |
1164 | first = NEXT_INSN (first); |
1165 | if (!first) |
1166 | return 0; |
1167 | |
1168 | combine_attempts = 0; |
1169 | combine_merges = 0; |
1170 | combine_extras = 0; |
1171 | combine_successes = 0; |
1172 | |
1173 | rtl_hooks = combine_rtl_hooks; |
1174 | |
1175 | reg_stat.safe_grow_cleared (nregs, true); |
1176 | |
1177 | init_recog_no_volatile (); |
1178 | |
1179 | /* Allocate array for insn info. */ |
1180 | max_uid_known = get_max_uid (); |
1181 |   uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1); |
1182 |   uid_insn_cost = XCNEWVEC (int, max_uid_known + 1); |
1183 |   gcc_obstack_init (&insn_link_obstack); |
1184 | |
1185 |   nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require (); |
1186 | |
1187 | /* Don't use reg_stat[].nonzero_bits when computing it. This can cause |
1188 | problems when, for example, we have j <<= 1 in a loop. */ |
1189 | |
1190 | nonzero_sign_valid = 0; |
1191 | label_tick = label_tick_ebb_start = 1; |
1192 | |
1193 | /* Scan all SETs and see if we can deduce anything about what |
1194 | bits are known to be zero for some registers and how many copies |
1195 | of the sign bit are known to exist for those registers. |
1196 | |
1197 | Also set any known values so that we can use it while searching |
1198 | for what bits are known to be set. */ |
1199 | |
1200 | setup_incoming_promotions (first); |
1201 | /* Allow the entry block and the first block to fall into the same EBB. |
1202 | Conceptually the incoming promotions are assigned to the entry block. */ |
1203 |   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun); |
1204 | |
1205 | create_log_links (); |
1206 |   FOR_EACH_BB_FN (this_basic_block, cfun) |
1207 | { |
1208 | optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block); |
1209 | last_call_luid = 0; |
1210 | mem_last_set = -1; |
1211 | |
1212 | label_tick++; |
1213 | if (!single_pred_p (this_basic_block) |
1214 | || single_pred (this_basic_block) != last_bb) |
1215 | label_tick_ebb_start = label_tick; |
1216 | last_bb = this_basic_block; |
1217 | |
1218 |       FOR_BB_INSNS (this_basic_block, insn) |
1219 | 	if (INSN_P (insn) && BLOCK_FOR_INSN (insn)) |
1220 | { |
1221 | rtx links; |
1222 | |
1223 | 	    subst_low_luid = DF_INSN_LUID (insn); |
1224 | subst_insn = insn; |
1225 | |
1226 | note_stores (insn, set_nonzero_bits_and_sign_copies, insn); |
1227 | record_dead_and_set_regs (insn); |
1228 | |
1229 | 	    if (AUTO_INC_DEC) |
1230 | 	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1)) |
1231 | 		if (REG_NOTE_KIND (links) == REG_INC) |
1232 | 		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX, |
1233 | insn); |
1234 | |
1235 | /* Record the current insn_cost of this instruction. */ |
1236 | 	    INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p); |
1237 | if (dump_file) |
1238 | { |
1239 | 		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn)); |
1240 | dump_insn_slim (dump_file, insn); |
1241 | } |
1242 | } |
1243 | } |
1244 | |
1245 | nonzero_sign_valid = 1; |
1246 | |
1247 | /* Now scan all the insns in forward order. */ |
1248 | label_tick = label_tick_ebb_start = 1; |
1249 | init_reg_last (); |
1250 | setup_incoming_promotions (first); |
1251 | last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr); |
1252 | int max_combine = param_max_combine_insnsglobal_options.x_param_max_combine_insns; |
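     |   /* max_combine bounds how many linked insns a single combination may
     |      merge below; the --param max-combine-insns value is restricted to
     |      the range 2 to 4 and defaults to 4, so the three- and four-insn
     |      attempts further down may be skipped entirely.  */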
1253 |
1254 |   FOR_EACH_BB_FN (this_basic_block, cfun)
1255 |     {
1256 |       rtx_insn *last_combined_insn = NULL;
1257 |
1258 |       /* Ignore instruction combination in basic blocks that are going to
1259 |          be removed as unreachable anyway.  See PR82386.  */
1260 |       if (EDGE_COUNT (this_basic_block->preds) == 0)
1261 |         continue;
1262 |
1263 |       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1264 |       last_call_luid = 0;
1265 |       mem_last_set = -1;
1266 |
1267 |       label_tick++;
1268 |       if (!single_pred_p (this_basic_block)
1269 |           || single_pred (this_basic_block) != last_bb)
1270 |         label_tick_ebb_start = label_tick;
1271 |       last_bb = this_basic_block;
1272 |
1273 |       rtl_profile_for_bb (this_basic_block);
1274 |       for (insn = BB_HEAD (this_basic_block);
1275 |            insn != NEXT_INSN (BB_END (this_basic_block));
1276 |            insn = next ? next : NEXT_INSN (insn))
1277 |         {
1278 |           next = 0;
1279 |           if (!NONDEBUG_INSN_P (insn))
1280 |             continue;
1281 |
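     |           /* Earlier combinations may have deleted the insn last used as
     |              a combination point, or replaced it with a note; back up to
     |              the closest preceding insn that is still valid.  */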
1282 |           while (last_combined_insn
1283 |                  && (!NONDEBUG_INSN_P (last_combined_insn)
1284 |                      || last_combined_insn->deleted ()))
1285 |             last_combined_insn = PREV_INSN (last_combined_insn);
1286 |           if (last_combined_insn == NULL_RTX
1287 |               || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1288 |               || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1289 |             last_combined_insn = insn;
1290 |
1291 |           /* See if we know about function return values before this
1292 |              insn based upon SUBREG flags.  */
1293 |           check_promoted_subreg (insn, PATTERN (insn));
1294 |
1295 |           /* See if we can find hardregs and subreg of pseudos in
1296 |              narrower modes.  This could help turning TRUNCATEs
1297 |              into SUBREGs.  */
1298 |           note_uses (&PATTERN (insn), record_truncated_values, NULL);
1299 |
1300 |           /* Try this insn with each insn it links back to.  */
1301 |
1302 |           FOR_EACH_LOG_LINK (links, insn)
1303 |             if ((next = try_combine (insn, links->insn, NULL,
1304 |                                      NULL, &new_direct_jump_p,
1305 |                                      last_combined_insn)) != 0)
1306 |               {
1307 |                 statistics_counter_event (cfun, "two-insn combine", 1);
1308 |                 goto retry;
1309 |               }
1310 |
1311 |           /* Try each sequence of three linked insns ending with this one.  */
1312 |
1313 |           if (max_combine >= 3)
1314 |             FOR_EACH_LOG_LINK (links, insn)
1315 |               {
1316 |                 rtx_insn *link = links->insn;
1317 |
1318 |                 /* If the linked insn has been replaced by a note, then there
1319 |                    is no point in pursuing this chain any further.  */
1320 |                 if (NOTE_P (link))
1321 |                   continue;
1322 |
1323 |                 FOR_EACH_LOG_LINK (nextlinks, link)
1324 |                   if ((next = try_combine (insn, link, nextlinks->insn,
1325 |                                            NULL, &new_direct_jump_p,
1326 |                                            last_combined_insn)) != 0)
1327 |                     {
1328 |                       statistics_counter_event (cfun, "three-insn combine", 1);
1329 |                       goto retry;
1330 |                     }
1331 |               }
1332 |
1333 |           /* Try to combine a jump insn that uses CC0
1334 |              with a preceding insn that sets CC0, and maybe with its
1335 |              logical predecessor as well.
1336 |              This is how we make decrement-and-branch insns.
1337 |              We need this special code because data flow connections
1338 |              via CC0 do not get entered in LOG_LINKS.  */
1339 |
1340 |           if (HAVE_cc0
1341 |               && JUMP_P (insn)
1342 |               && (prev = prev_nonnote_insn (insn)) != 0
1343 |               && NONJUMP_INSN_P (prev)
1344 |               && sets_cc0_p (PATTERN (prev)))
1345 |             {
1346 |               if ((next = try_combine (insn, prev, NULL, NULL,
1347 |                                        &new_direct_jump_p,
1348 |                                        last_combined_insn)) != 0)
1349 |                 goto retry;
1350 |
1351 |               FOR_EACH_LOG_LINK (nextlinks, prev)
1352 |                 if ((next = try_combine (insn, prev, nextlinks->insn,
1353 |                                          NULL, &new_direct_jump_p,
1354 |                                          last_combined_insn)) != 0)
1355 |                   goto retry;
1356 |             }
1357 |
1358 |           /* Do the same for an insn that explicitly references CC0.  */
1359 |           if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1360 |               && (prev = prev_nonnote_insn (insn)) != 0
1361 |               && NONJUMP_INSN_P (prev)
1362 |               && sets_cc0_p (PATTERN (prev))
1363 |               && GET_CODE (PATTERN (insn)) == SET
1364 |               && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1365 |             {
1366 |               if ((next = try_combine (insn, prev, NULL, NULL,
1367 |                                        &new_direct_jump_p,
1368 |                                        last_combined_insn)) != 0)
1369 |                 goto retry;
1370 |
1371 |               FOR_EACH_LOG_LINK (nextlinks, prev)
1372 |                 if ((next = try_combine (insn, prev, nextlinks->insn,
1373 |                                          NULL, &new_direct_jump_p,
1374 |                                          last_combined_insn)) != 0)
1375 |                   goto retry;
1376 |             }
1377 |
1378 |           /* Finally, see if any of the insns that this insn links to
1379 |              explicitly references CC0.  If so, try this insn, that insn,
1380 |              and its predecessor if it sets CC0.  */
1381 |           if (HAVE_cc0)
1382 |             {
1383 |               FOR_EACH_LOG_LINK (links, insn)
1384 |                 if (NONJUMP_INSN_P (links->insn)
1385 |                     && GET_CODE (PATTERN (links->insn)) == SET
1386 |                     && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1387 |                     && (prev = prev_nonnote_insn (links->insn)) != 0
1388 |                     && NONJUMP_INSN_P (prev)
1389 |                     && sets_cc0_p (PATTERN (prev))
1390 |                     && (next = try_combine (insn, links->insn,
1391 |                                             prev, NULL, &new_direct_jump_p,
1392 |                                             last_combined_insn)) != 0)
1393 |                   goto retry;
1394 |             }
1395 |
1396 |           /* Try combining an insn with two different insns whose results it
1397 |              uses.  */
1398 |           if (max_combine >= 3)
1399 |             FOR_EACH_LOG_LINK (links, insn)
1400 |               for (nextlinks = links->next; nextlinks;
1401 |                    nextlinks = nextlinks->next)
1402 |                 if ((next = try_combine (insn, links->insn,
1403 |                                          nextlinks->insn, NULL,
1404 |                                          &new_direct_jump_p,
1405 |                                          last_combined_insn)) != 0)
1406 |
1407 |                   {
1408 |                     statistics_counter_event (cfun, "three-insn combine", 1);
1409 |                     goto retry;
1410 |                   }
1411 |
1412 |           /* Try four-instruction combinations.  */
1413 |           if (max_combine >= 4)
1414 |             FOR_EACH_LOG_LINK (links, insn)
1415 |               {
1416 |                 struct insn_link *next1;
1417 |                 rtx_insn *link = links->insn;
1418 |
1419 |                 /* If the linked insn has been replaced by a note, then there
1420 |                    is no point in pursuing this chain any further.  */
1421 |                 if (NOTE_P (link))
1422 |                   continue;
1423 |
1424 |                 FOR_EACH_LOG_LINK (next1, link)
1425 |                   {
1426 |                     rtx_insn *link1 = next1->insn;
1427 |                     if (NOTE_P (link1))
1428 |                       continue;
1429 |                     /* I0 -> I1 -> I2 -> I3.  */
1430 |                     FOR_EACH_LOG_LINK (nextlinks, link1)
1431 |                       if ((next = try_combine (insn, link, link1,
1432 |                                                nextlinks->insn,
1433 |                                                &new_direct_jump_p,
1434 |                                                last_combined_insn)) != 0)
1435 |                         {
1436 |                           statistics_counter_event (cfun, "four-insn combine", 1);
1437 |                           goto retry;
1438 |                         }
1439 |                     /* I0, I1 -> I2, I2 -> I3.  */
1440 |                     for (nextlinks = next1->next; nextlinks;
1441 |                          nextlinks = nextlinks->next)
1442 |                       if ((next = try_combine (insn, link, link1,
1443 |                                                nextlinks->insn,
1444 |                                                &new_direct_jump_p,
1445 |                                                last_combined_insn)) != 0)
1446 |                         {
1447 |                           statistics_counter_event (cfun, "four-insn combine", 1);
1448 |                           goto retry;
1449 |                         }
1450 |                   }
1451 |
1452 |                 for (next1 = links->next; next1; next1 = next1->next)
1453 |                   {
1454 |                     rtx_insn *link1 = next1->insn;
1455 |                     if (NOTE_P (link1))
1456 |                       continue;
1457 |                     /* I0 -> I2; I1, I2 -> I3.  */
1458 |                     FOR_EACH_LOG_LINK (nextlinks, link)
1459 |                       if ((next = try_combine (insn, link, link1,
1460 |                                                nextlinks->insn,
1461 |                                                &new_direct_jump_p,
1462 |                                                last_combined_insn)) != 0)
1463 |                         {
1464 |                           statistics_counter_event (cfun, "four-insn combine", 1);
1465 |                           goto retry;
1466 |                         }
1467 |                     /* I0 -> I1; I1, I2 -> I3.  */
1468 |                     FOR_EACH_LOG_LINK (nextlinks, link1)
1469 |                       if ((next = try_combine (insn, link, link1,
1470 |                                                nextlinks->insn,
1471 |                                                &new_direct_jump_p,
1472 |                                                last_combined_insn)) != 0)
1473 |                         {
1474 |                           statistics_counter_event (cfun, "four-insn combine", 1);
1475 |                           goto retry;
1476 |                         }
1477 |                   }
1478 |               }
1479 |
1480 |           /* Try this insn with each REG_EQUAL note it links back to.  */
1481 |           FOR_EACH_LOG_LINK (links, insn)
1482 |             {
1483 |               rtx set, note;
1484 |               rtx_insn *temp = links->insn;
1485 |               if ((set = single_set (temp)) != 0
1486 |                   && (note = find_reg_equal_equiv_note (temp)) != 0
1487 |                   && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1488 |                   && ! side_effects_p (SET_SRC (set))
1489 |                   /* Avoid using a register that may already have been marked
1490 |                      dead by an earlier instruction.  */
1491 |                   && ! unmentioned_reg_p (note, SET_SRC (set))
1492 |                   && (GET_MODE (note) == VOIDmode
1493 |                       ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1494 |                       : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1495 |                          && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1496 |                              || (GET_MODE (XEXP (SET_DEST (set), 0))
1497 |                                  == GET_MODE (note))))))
1498 |                 {
1499 |                   /* Temporarily replace the set's source with the
1500 |                      contents of the REG_EQUAL note.  The insn will
1501 |                      be deleted or recognized by try_combine.  */
1502 |                   rtx orig_src = SET_SRC (set);
1503 |                   rtx orig_dest = SET_DEST (set);
1504 |                   if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1505 |                     SET_DEST (set) = XEXP (SET_DEST (set), 0);
1506 |                   SET_SRC (set) = note;
1507 |                   i2mod = temp;
1508 |                   i2mod_old_rhs = copy_rtx (orig_src);
1509 |                   i2mod_new_rhs = copy_rtx (note);
1510 |                   next = try_combine (insn, i2mod, NULL, NULL,
1511 |                                       &new_direct_jump_p,
1512 |                                       last_combined_insn);
1513 |                   i2mod = NULL;
1514 |                   if (next)
1515 |                     {
1516 |                       statistics_counter_event (cfun, "insn-with-note combine", 1);
1517 |                       goto retry;
1518 |                     }
1519 |                   SET_SRC (set) = orig_src;
1520 |                   SET_DEST (set) = orig_dest;
1521 |                 }
1522 |             }
1523 |
1524 |           if (!NOTE_P (insn))
1525 |             record_dead_and_set_regs (insn);
1526 |
1527 |         retry:
1528 |           ;
1529 |         }
1530 |     }
1531 |
1532 |   default_rtl_profile ();
1533 |   clear_bb_flags ();
1534 |   new_direct_jump_p |= purge_all_dead_edges ();
1535 |   new_direct_jump_p |= delete_noop_moves ();
1536 |
1537 |   /* Clean up.  */
1538 |   obstack_free (&insn_link_obstack, NULL);
1539 |   free (uid_log_links);
1540 |   free (uid_insn_cost);
1541 |   reg_stat.release ();
1542 |
1543 |   {
1544 |     struct undo *undo, *next;
1545 |     for (undo = undobuf.frees; undo; undo = next)
1546 |       {
1547 |         next = undo->next;
1548 |         free (undo);
1549 |       }
1550 |     undobuf.frees = 0;
1551 |   }
1552 |
1553 |   total_attempts += combine_attempts;
1554 |   total_merges += combine_merges;
1555 |   total_extras += combine_extras;
1556 |   total_successes += combine_successes;
1557 |
1558 |   nonzero_sign_valid = 0;
1559 |   rtl_hooks = general_rtl_hooks;
1560 |
1561 |   /* Make recognizer allow volatile MEMs again.  */
1562 |   init_recog ();
1563 |
1564 |   return new_direct_jump_p;
1565 | }
1566 |
1567 | /* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */
1568 |
1569 | static void
1570 | init_reg_last (void)
1571 | {
1572 |   unsigned int i;
1573 |   reg_stat_type *p;
1574 |
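     |   /* Clearing each element only up to the offset of sign_bit_copies
     |      wipes exactly the fields declared before it (the last_xxx fields)
     |      while preserving the accumulated nonzero-bits and sign-bit-copy
     |      information.  */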
1575 |   FOR_EACH_VEC_ELT (reg_stat, i, p)
1576 |     memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1577 | }
1578 |
1579 | /* Set up any promoted values for incoming argument registers.  */
1580 |
1581 | static void
1582 | setup_incoming_promotions (rtx_insn *first)
1583 | {
1584 |   tree arg;
1585 |   bool strictly_local = false;
1586 |
1587 |   for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1588 |        arg = DECL_CHAIN (arg))
1589 |     {
1590 |       rtx x, reg = DECL_INCOMING_RTL (arg);
1591 |       int uns1, uns3;
1592 |       machine_mode mode1, mode2, mode3, mode4;
1593 |
1594 |       /* Only continue if the incoming argument is in a register.  */
1595 |       if (!REG_P (reg))
1596 |         continue;
1597 |
1598 |       /* Determine, if possible, whether all call sites of the current
1599 |          function lie within the current compilation unit.  (This does
1600 |          take into account the exporting of a function via taking its
1601 |          address, and so forth.)  */
1602 |       strictly_local
1603 |         = cgraph_node::local_info_node (current_function_decl)->local;
1604 |
1605 |       /* The mode and signedness of the argument before any promotions happen
1606 |          (equal to the mode of the pseudo holding it at that stage).  */
1607 |       mode1 = TYPE_MODE (TREE_TYPE (arg));
1608 |       uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1609 |
1610 |       /* The mode and signedness of the argument after any source language and
1611 |          TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
1612 |       mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1613 |       uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1614 |
1615 |       /* The mode and signedness of the argument as it is actually passed,
1616 |          see assign_parm_setup_reg in function.c.  */
1617 |       mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1618 |                                      TREE_TYPE (cfun->decl), 0);
1619 |
1620 |       /* The mode of the register in which the argument is being passed.  */
1621 |       mode4 = GET_MODE (reg);
1622 |
1623 |       /* Eliminate sign extensions in the callee when:
1624 |          (a) A mode promotion has occurred;  */
1625 |       if (mode1 == mode3)
1626 |         continue;
1627 |       /* (b) The mode of the register is the same as the mode of
1628 |              the argument as it is passed;  */
1629 |       if (mode3 != mode4)
1630 |         continue;
1631 |       /* (c) There's no language level extension;  */
1632 |       if (mode1 == mode2)
1633 |         ;
1634 |       /* (c.1) All callers are from the current compilation unit.  If that's
1635 |          the case we don't have to rely on an ABI, we only have to know
1636 |          what we're generating right now, and we know that we will do the
1637 |          mode1 to mode2 promotion with the given sign.  */
1638 |       else if (!strictly_local)
1639 |         continue;
1640 |       /* (c.2) The combination of the two promotions is useful.  This is
1641 |          true when the signs match, or if the first promotion is unsigned.
1642 |          In the latter case, (sign_extend (zero_extend x)) is the same as
1643 |          (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
1644 |       else if (uns1)
1645 |         uns3 = true;
1646 |       else if (uns3)
1647 |         continue;
1648 |
1649 |       /* Record that the value was promoted from mode1 to mode3,
1650 |          so that any sign extension at the head of the current
1651 |          function may be eliminated.  */
1652 |       x = gen_rtx_CLOBBER (mode1, const0_rtx);
1653 |       x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
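     |       /* The recorded value has the shape (zero_extend:MODE3
     |          (clobber:MODE1 (const_int 0))) or its sign_extend counterpart;
     |          the CLOBBER stands in for the unknown incoming value, of which
     |          only the extension is known.  */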
1654 |       record_value_for_reg (reg, first, x);
1655 |     }
1656 | }
1657 |
1658 | /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1659 |    that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1660 |    because some machines (maybe most) will actually do the sign-extension and
1661 |    this is the conservative approach.
1662 |
1663 |    ??? For 2.5, try to tighten up the MD files in this regard instead of this
1664 |    kludge.  */
1665 |
1666 | static rtx
1667 | sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1668 | {
1669 |   scalar_int_mode int_mode;
1670 |   if (CONST_INT_P (src)
1671 |       && is_a <scalar_int_mode> (mode, &int_mode)
1672 |       && GET_MODE_PRECISION (int_mode) < prec
1673 |       && INTVAL (src) > 0
1674 |       && val_signbit_known_set_p (int_mode, INTVAL (src)))
1675 |     src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
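     |   /* E.g. for a 16-bit INT_MODE and a 64-bit HOST_WIDE_INT, the OR with
     |      ~GET_MODE_MASK (int_mode) turns 0x8000 into 0xffffffffffff8000,
     |      setting every bit above the mode's sign bit.  */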
1676 |
1677 |   return src;
1678 | }
1679 |
1680 | /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1681 |    and SET.  */
1682 |
1683 | static void
1684 | update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1685 |                            rtx x)
1686 | {
1687 |   rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1688 |   unsigned HOST_WIDE_INT bits = 0;
1689 |   rtx reg_equal = NULL, src = SET_SRC (set);
1690 |   unsigned int num = 0;
1691 |
1692 |   if (reg_equal_note)
1693 |     reg_equal = XEXP (reg_equal_note, 0);
1694 |
1695 |   if (SHORT_IMMEDIATES_SIGN_EXTEND)
1696 |     {
1697 |       src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1698 |       if (reg_equal)
1699 |         reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1700 |     }
1701 |
1702 |   /* Don't call nonzero_bits if it cannot change anything.  */
1703 |   if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1704 |     {
1705 |       machine_mode mode = GET_MODE (x);
1706 |       if (GET_MODE_CLASS (mode) == MODE_INT
1707 |           && HWI_COMPUTABLE_MODE_P (mode))
1708 |         mode = nonzero_bits_mode;
1709 |       bits = nonzero_bits (src, mode);
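     |       /* SRC and the REG_EQUAL value describe the same contents, so a
     |          bit can only really be nonzero if both analyses allow it;
     |          intersect the two masks.  */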
1710 |       if (reg_equal && bits)
1711 |         bits &= nonzero_bits (reg_equal, mode);
1712 |       rsp->nonzero_bits |= bits;
1713 |     }
1714 |
1715 |   /* Don't call num_sign_bit_copies if it cannot change anything.  */
1716 |   if (rsp->sign_bit_copies != 1)
1717 |     {
1718 |       num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1719 |       if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
1720 |         {
1721 |           unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
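     |           /* Again both expressions compute the same value, so whichever
     |              of the two guarantees more sign-bit copies may be used.  */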
1722 |           if (num == 0 || numeq > num)
1723 |             num = numeq;
1724 |         }
1725 |       if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1726 |         rsp->sign_bit_copies = num;
1727 |     }
1728 | }
1729 | |
1730 | /* Called via note_stores.  If X is a pseudo that is narrower than
1731 |    HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1732 |
1733 |    If we are setting only a portion of X and we can't figure out what
1734 |    portion, assume all bits will be used since we don't know what will
1735 |    be happening.
1736 |
1737 |    Similarly, set how many bits of X are known to be copies of the sign bit
1738 |    at all locations in the function.  This is the smallest number implied
1739 |    by any set of X.  */
1740 |
1741 | static void
1742 | set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1743 | {
1744 |   rtx_insn *insn = (rtx_insn *) data;
1745 |   scalar_int_mode mode;
1746 |
1747 |   if (REG_P (x)
1748 |       && REGNO (x) >= FIRST_PSEUDO_REGISTER
1749 |       /* If this register is undefined at the start of the function, we can't
1750 |          say what its contents were.  */
1751 |       && ! REGNO_REG_SET_P
1752 |            (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1753 |       && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1754 |       && HWI_COMPUTABLE_MODE_P (mode))
1755 |     {
1756 |       reg_stat_type *rsp = &reg_stat[REGNO (x)];
1757 |
1758 |       if (set == 0 || GET_CODE (set) == CLOBBER)
1759 |         {
1760 |           rsp->nonzero_bits = GET_MODE_MASK (mode);
1761 |           rsp->sign_bit_copies = 1;
1762 |           return;
1763 |         }
1764 |
1765 |       /* If this register is being initialized using itself, and the
1766 |          register is uninitialized in this basic block, and there are
1767 |          no LOG_LINKS which set the register, then part of the
1768 |          register is uninitialized.  In that case we can't assume
1769 |          anything about the number of nonzero bits.
1770 |
1771 |          ??? We could do better if we checked this in
1772 |          reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
1773 |          could avoid making assumptions about the insn which initially
1774 |          sets the register, while still using the information in other
1775 |          insns.  We would have to be careful to check every insn
1776 |          involved in the combination.  */
1777 |
1778 |       if (insn
1779 |           && reg_referenced_p (x, PATTERN (insn))
1780 |           && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1781 |                                REGNO (x)))
1782 |         {
1783 |           struct insn_link *link;
1784 |
1785 |           FOR_EACH_LOG_LINK (link, insn)
1786 |             if (dead_or_set_p (link->insn, x))
1787 |               break;
1788 |           if (!link)
1789 |             {
1790 |               rsp->nonzero_bits = GET_MODE_MASK (mode);
1791 |               rsp->sign_bit_copies = 1;
1792 |               return;
1793 |             }
1794 |         }
1795 |
1796 |       /* If this is a complex assignment, see if we can convert it into a
1797 |          simple assignment.  */
1798 |       set = expand_field_assignment (set);
1799 |
1800 |       /* If this is a simple assignment, or we have a paradoxical SUBREG,
1801 |          set what we know about X.  */
1802 |
1803 |       if (SET_DEST (set) == x
1804 |           || (paradoxical_subreg_p (SET_DEST (set))
1805 |               && SUBREG_REG (SET_DEST (set)) == x))
1806 |         update_rsp_from_reg_equal (rsp, insn, set, x);
1807 |       else
1808 |         {
1809 |           rsp->nonzero_bits = GET_MODE_MASK (mode);
1810 |           rsp->sign_bit_copies = 1;
1811 |         }
1812 |     }
1813 | }
1814 | |
1815 | /* See if INSN can be combined into I3.  PRED, PRED2, SUCC and SUCC2 are
1816 |    optionally insns that were previously combined into I3 or that will be
1817 |    combined into the merger of INSN and I3.  The order is PRED, PRED2,
1818 |    INSN, SUCC, SUCC2, I3.
1819 |
1820 |    Return 0 if the combination is not allowed for any reason.
1821 |
1822 |    If the combination is allowed, *PDEST will be set to the single
1823 |    destination of INSN and *PSRC to the single source, and this function
1824 |    will return 1.  */
1825 |
1826 | static int
1827 | can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1828 |                rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1829 |                rtx *pdest, rtx *psrc)
1830 | {
1831 |   int i;
1832 |   const_rtx set = 0;
1833 |   rtx src, dest;
1834 |   rtx_insn *p;
1835 |   rtx link;
1836 |   bool all_adjacent = true;
1837 |   int (*is_volatile_p) (const_rtx);
1838 |
1839 |   if (succ)
1840 |     {
1841 |       if (succ2)
1842 |         {
1843 |           if (next_active_insn (succ2) != i3)
1844 |             all_adjacent = false;
1845 |           if (next_active_insn (succ) != succ2)
1846 |             all_adjacent = false;
1847 |         }
1848 |       else if (next_active_insn (succ) != i3)
1849 |         all_adjacent = false;
1850 |       if (next_active_insn (insn) != succ)
1851 |         all_adjacent = false;
1852 |     }
1853 |   else if (next_active_insn (insn) != i3)
1854 |     all_adjacent = false;
1855 |
1856 |   /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1857 |      or a PARALLEL consisting of such a SET and CLOBBERs.
1858 |
1859 |      If INSN has CLOBBER parallel parts, ignore them for our processing.
1860 |      By definition, these happen during the execution of the insn.  When it
1861 |      is merged with another insn, all bets are off.  If they are, in fact,
1862 |      needed and aren't also supplied in I3, they may be added by
1863 |      recog_for_combine.  Otherwise, it won't match.
1864 |
1865 |      We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1866 |      note.
1867 |
1868 |      Get the source and destination of INSN.  If more than one, can't
1869 |      combine.  */
1870 |
1871 |   if (GET_CODE (PATTERN (insn)) == SET)
1872 |     set = PATTERN (insn);
1873 |   else if (GET_CODE (PATTERN (insn)) == PARALLEL
1874 |            && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1875 |     {
1876 |       for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1877 |         {
1878 |           rtx elt = XVECEXP (PATTERN (insn), 0, i);
1879 |
1880 |           switch (GET_CODE (elt))
1881 |             {
1882 |             /* This is important to combine floating point insns
1883 |                for the SH4 port.  */
1884 |             case USE:
1885 |               /* Combining an isolated USE doesn't make sense.
1886 |                  We depend here on combinable_i3pat to reject them.  */
1887 |               /* The code below this loop only verifies that the inputs of
1888 |                  the SET in INSN do not change.  We call reg_set_between_p
1889 |                  to verify that the REG in the USE does not change between
1890 |                  I3 and INSN.
1891 |                  If the USE in INSN was for a pseudo register, the matching
1892 |                  insn pattern will likely match any register; combining this
1893 |                  with any other USE would only be safe if we knew that the
1894 |                  used registers have identical values, or if there was
1895 |                  something to tell them apart, e.g. different modes.  For
1896 |                  now, we forgo such complicated tests and simply disallow
1897 |                  combining of USES of pseudo registers with any other USE.  */
1898 |               if (REG_P (XEXP (elt, 0))
1899 |                   && GET_CODE (PATTERN (i3)) == PARALLEL)
1900 |                 {
1901 |                   rtx i3pat = PATTERN (i3);
1902 |                   int i = XVECLEN (i3pat, 0) - 1;
1903 |                   unsigned int regno = REGNO (XEXP (elt, 0));
1904 |
1905 |                   do
1906 |                     {
1907 |                       rtx i3elt = XVECEXP (i3pat, 0, i);
1908 |
1909 |                       if (GET_CODE (i3elt) == USE
1910 |                           && REG_P (XEXP (i3elt, 0))
1911 |                           && (REGNO (XEXP (i3elt, 0)) == regno
1912 |                               ? reg_set_between_p (XEXP (elt, 0),
1913 |                                                    PREV_INSN (insn), i3)
1914 |                               : regno >= FIRST_PSEUDO_REGISTER))
1915 |                         return 0;
1916 |                     }
1917 |                   while (--i >= 0);
1918 |                 }
1919 |               break;
1920 |
1921 |             /* We can ignore CLOBBERs.  */
1922 |             case CLOBBER:
1923 |               break;
1924 |
1925 |             case SET:
1926 |               /* Ignore SETs whose result isn't used but not those that
1927 |                  have side-effects.  */
1928 |               if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1929 |                   && insn_nothrow_p (insn)
1930 |                   && !side_effects_p (elt))
1931 |                 break;
1932 |
1933 |               /* If we have already found a SET, this is a second one and
1934 |                  so we cannot combine with this insn.  */
1935 |               if (set)
1936 |                 return 0;
1937 |
1938 |               set = elt;
1939 |               break;
1940 |
1941 |             default:
1942 |               /* Anything else means we can't combine.  */
1943 |               return 0;
1944 |             }
1945 |         }
1946 |
1947 |       if (set == 0
1948 |           /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1949 |              so don't do anything with it.  */
1950 |           || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1951 |         return 0;
1952 |     }
1953 |   else
1954 |     return 0;
1955 |
1956 |   if (set == 0)
1957 |     return 0;
1958 |
1959 |   /* The simplification in expand_field_assignment may call back to
1960 |      get_last_value, so set safe guard here.  */
1961 |   subst_low_luid = DF_INSN_LUID (insn);
1962 |
1963 |   set = expand_field_assignment (set);
1964 |   src = SET_SRC (set), dest = SET_DEST (set);
1965 |
1966 |   /* Do not eliminate a user-specified register if it is in an
1967 |      asm input, because doing so may break the register asm usage
1968 |      described in the GCC manual.
1969 |      Be aware that this may cover more cases than we expect but this
1970 |      should be harmless.  */
1971 |   if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1972 |       && extract_asm_operands (PATTERN (i3)))
1973 |     return 0;
1974 |
1975 |   /* Don't eliminate a store in the stack pointer.  */
1976 |   if (dest == stack_pointer_rtx
1977 |       /* Don't combine with an insn that sets a register to itself if it has
1978 |          a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
1979 |       || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1980 |       /* Can't merge an ASM_OPERANDS.  */
1981 |       || GET_CODE (src) == ASM_OPERANDS
1982 |       /* Can't merge a function call.  */
1983 |       || GET_CODE (src) == CALL
1984 |       /* Don't eliminate a function call argument.  */
1985 |       || (CALL_P (i3)
1986 |           && (find_reg_fusage (i3, USE, dest)
1987 |               || (REG_P (dest)
1988 |                   && REGNO (dest) < FIRST_PSEUDO_REGISTER
1989 |                   && global_regs[REGNO (dest)])))
1990 |       /* Don't substitute into an incremented register.  */
1991 |       || FIND_REG_INC_NOTE (i3, dest)
1992 |       || (succ && FIND_REG_INC_NOTE (succ, dest))
1993 |       || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1994 |       /* Don't substitute into a non-local goto, this confuses CFG.  */
1995 |       || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1996 |       /* Make sure that DEST is not used after INSN but before SUCC, or
1997 |          after SUCC and before SUCC2, or after SUCC2 but before I3.  */
1998 |       || (!all_adjacent
1999 |           && ((succ2
2000 |                && (reg_used_between_p (dest, succ2, i3)
2001 |                    || reg_used_between_p (dest, succ, succ2)))
2002 |               || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
2003 |               || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
2004 |               || (succ
2005 |                   /* SUCC and SUCC2 can be split halves from a PARALLEL; in
2006 |                      that case SUCC is not in the insn stream, so use SUCC2
2007 |                      instead for this test.  */
2008 |                   && reg_used_between_p (dest, insn,
2009 |                                          succ2
2010 |                                          && INSN_UID (succ) == INSN_UID (succ2)
2011 |                                          ? succ2 : succ))))
2012 |       /* Make sure that the value that is to be substituted for the register
2013 |          does not use any registers whose values alter in between.  However,
2014 |          if the insns are adjacent, a use can't cross a set even though we
2015 |          think it might (this can happen for a sequence of insns each setting
2016 |          the same destination; last_set of that register might point to
2017 |          a NOTE).  If INSN has a REG_EQUIV note, the register is always
2018 |          equivalent to the memory so the substitution is valid even if there
2019 |          are intervening stores.  Also, don't move a volatile asm or
2020 |          UNSPEC_VOLATILE across any other insns.  */
2021 |       || (! all_adjacent
2022 |           && (((!MEM_P (src)
2023 |                 || ! find_reg_note (insn, REG_EQUIV, src))
2024 |                && modified_between_p (src, insn, i3))
2025 |               || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
2026 |               || GET_CODE (src) == UNSPEC_VOLATILE))
2027 |       /* Don't combine across a CALL_INSN, because that would possibly
2028 |          change whether the life span of some REGs crosses calls or not,
2029 |          and it is a pain to update that information.
2030 |          Exception: if source is a constant, moving it later can't hurt.
2031 |          Accept that as a special case.  */
2032 |       || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2033 |     return 0;
2034 |
2035 |   /* DEST must either be a REG or CC0.  */
2036 |   if (REG_P (dest))
2037 |     {
2038 |       /* If register alignment is being enforced for multi-word items in all
2039 |          cases except for parameters, it is possible to have a register copy
2040 |          insn referencing a hard register that is not allowed to contain the
2041 |          mode being copied and which would not be valid as an operand of most
2042 |          insns.  Eliminate this problem by not combining with such an insn.
2043 |
2044 |          Also, on some machines we don't want to extend the life of a hard
2045 |          register.  */
2046 |
2047 |       if (REG_P (src)
2048 |           && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2049 |                && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2050 |               /* Don't extend the life of a hard register unless it is
2051 |                  user variable (if we have few registers) or it can't
2052 |                  fit into the desired register (meaning something special
2053 |                  is going on).
2054 |                  Also avoid substituting a return register into I3, because
2055 |                  reload can't handle a conflict with constraints of other
2056 |                  inputs.  */
2057 |               || (REGNO (src) < FIRST_PSEUDO_REGISTER
2058 |                   && !targetm.hard_regno_mode_ok (REGNO (src),
2059 |                                                   GET_MODE (src)))))
2060 |         return 0;
2061 |     }
2062 |   else if (GET_CODE (dest) != CC0)
2063 |     return 0;
2064 |
2065 |
2066 |   if (GET_CODE (PATTERN (i3)) == PARALLEL)
2067 |     for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2068 |       if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2069 |         {
2070 |           rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2071 |
2072 |           /* If the clobber represents an earlyclobber operand, we must not
2073 |              substitute an expression containing the clobbered register.
2074 |              As we do not analyze the constraint strings here, we have to
2075 |              make the conservative assumption.  However, if the register is
2076 |              a fixed hard reg, the clobber cannot represent any operand;
2077 |              we leave it up to the machine description to either accept or
2078 |              reject use-and-clobber patterns.  */
2079 |           if (!REG_P (reg)
2080 |               || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2081 |               || !fixed_regs[REGNO (reg)])
2082 |             if (reg_overlap_mentioned_p (reg, src))
2083 |               return 0;
2084 |         }
2085 |
2086 |   /* If INSN contains anything volatile, or is an `asm' (whether volatile
2087 |      or not), reject, unless nothing volatile comes between it and I3.  */
2088 |
2089 |   if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2090 |     {
2091 |       /* Make sure neither succ nor succ2 contains a volatile reference.  */
2092 |       if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2093 |         return 0;
2094 |       if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2095 |         return 0;
2096 |       /* We'll check insns between INSN and I3 below.  */
2097 |     }
2098 |
2099 |   /* If INSN is an asm, and DEST is a hard register, reject, since it has
2100 |      to be an explicit register variable, and was chosen for a reason.  */
2101 |
2102 |   if (GET_CODE (src) == ASM_OPERANDS
2103 |       && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2104 |     return 0;
2105 |
2106 |   /* If INSN contains volatile references (specifically volatile MEMs),
2107 |      we cannot combine across any other volatile references.
2108 |      Even if INSN doesn't contain volatile references, any intervening
2109 |      volatile insn might affect machine state.  */
2110 |
2111 |   is_volatile_p = volatile_refs_p (PATTERN (insn))
2112 |                   ? volatile_refs_p
2113 |                   : volatile_insn_p;
2114 |
2115 |   for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2116 |     if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2117 |       return 0;
2118 |
2119 |   /* If INSN contains an autoincrement or autodecrement, make sure that
2120 |      register is not used between there and I3, and not already used in
2121 |      I3 either.  Neither must it be used in PRED or SUCC, if they exist.
2122 |      Also insist that I3 not be a jump if using LRA; if it were one
2123 |      and the incremented register were spilled, we would lose.
2124 |      Reload handles this correctly.  */
2125 |
2126 |   if (AUTO_INC_DEC)
2127 |     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2128 |       if (REG_NOTE_KIND (link) == REG_INC
2129 |           && ((JUMP_P (i3) && targetm.lra_p ())
2130 |               || reg_used_between_p (XEXP (link, 0), insn, i3)
2131 |               || (pred != NULL_RTX
2132 |                   && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2133 |               || (pred2 != NULL_RTX
2134 |                   && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2135 |               || (succ != NULL_RTX
2136 |                   && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2137 |               || (succ2 != NULL_RTX
2138 |                   && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2139 |               || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2140 |         return 0;
2141 |
2142 |   /* Don't combine an insn that follows a CC0-setting insn.
2143 |      An insn that uses CC0 must not be separated from the one that sets it.
2144 |      We do, however, allow I2 to follow a CC0-setting insn if that insn
2145 |      is passed as I1; in that case it will be deleted also.
2146 |      We also allow combining in this case if all the insns are adjacent
2147 |      because that would leave the two CC0 insns adjacent as well.
2148 |      It would be more logical to test whether CC0 occurs inside I1 or I2,
2149 |      but that would be much slower, and this ought to be equivalent.  */
2150 |
2151 |   if (HAVE_cc0)
2152 |     {
2153 |       p = prev_nonnote_insn (insn);
2154 |       if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2155 |           && ! all_adjacent)
2156 |         return 0;
2157 |     }
2158 |
2159 |   /* If we get here, we have passed all the tests and the combination is
2160 |      to be allowed.  */
2161 |
2162 |   *pdest = dest;
2163 |   *psrc = src;
2164 |
2165 |   return 1;
2166 | }
2167 | |
2168 | /* LOC is the location within I3 that contains its pattern or the component
2169 |    of a PARALLEL of the pattern.  We validate that it is valid for combining.
2170 |
2171 |    One problem is if I3 modifies its output, as opposed to replacing it
2172 |    entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2173 |    doing so would produce an insn that is not equivalent to the original insns.
2174 |
2175 |    Consider:
2176 |
2177 |    (set (reg:DI 101) (reg:DI 100))
2178 |    (set (subreg:SI (reg:DI 101) 0) <foo>)
2179 |
2180 |    This is NOT equivalent to:
2181 |
2182 |    (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2183 |               (set (reg:DI 101) (reg:DI 100))])
2184 |
2185 |    Not only does this modify 100 (in which case it might still be valid
2186 |    if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2187 |
2188 |    We can also run into a problem if I2 sets a register that I1
2189 |    uses and I1 gets directly substituted into I3 (not via I2).  In that
2190 |    case, we would be getting the wrong value of I2DEST into I3, so we
2191 |    must reject the combination.  This case occurs when I2 and I1 both
2192 |    feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2193 |    If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2194 |    of a SET must prevent combination from occurring.  The same situation
2195 |    can occur for I0, in which case I0_NOT_IN_SRC is set.
2196 |
2197 |    Before doing the above check, we first try to expand a field assignment
2198 |    into a set of logical operations.
2199 |
2200 |    If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2201 |    we place a register that is both set and used within I3.  If more than one
2202 |    such register is detected, we fail.
2203 |
2204 |    Return 1 if the combination is valid, zero otherwise.  */
2205 |
2206 | static int
2207 | combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2208 |                   int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2209 | {
2210 |   rtx x = *loc;
2211 |
2212 |   if (GET_CODE (x) == SET)
2213 |     {
2214 |       rtx set = x;
2215 |       rtx dest = SET_DEST (set);
2216 |       rtx src = SET_SRC (set);
2217 |       rtx inner_dest = dest;
2218 |       rtx subdest;
2219 |
2220 |       while (GET_CODE (inner_dest) == STRICT_LOW_PART
2221 |              || GET_CODE (inner_dest) == SUBREG
2222 |              || GET_CODE (inner_dest) == ZERO_EXTRACT)
2223 |         inner_dest = XEXP (inner_dest, 0);
2224 |
2225 |       /* Check for the case where I3 modifies its output, as discussed
2226 |          above.  We don't want to prevent pseudos from being combined
2227 |          into the address of a MEM, so only prevent the combination if
2228 |          i1 or i2 set the same MEM.  */
2229 |       if ((inner_dest != dest &&
2230 |            (!MEM_P (inner_dest)
2231 |             || rtx_equal_p (i2dest, inner_dest)
2232 |             || (i1dest && rtx_equal_p (i1dest, inner_dest))
2233 |             || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2234 |            && (reg_overlap_mentioned_p (i2dest, inner_dest)
2235 |                || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2236 |                || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2237 |
2238 |           /* This is the same test done in can_combine_p except we can't test
2239 |              all_adjacent; we don't have to, since this instruction will stay
2240 |              in place, thus we are not considering increasing the lifetime of
2241 |              INNER_DEST.
2242 |
2243 |              Also, if this insn sets a function argument, combining it with
2244 |              something that might need a spill could clobber a previous
2245 |              function argument; the all_adjacent test in can_combine_p also
2246 |              checks this; here, we do a more specific test for this case.  */
2247 |
2248 |           || (REG_P (inner_dest)
2249 |               && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2250 |               && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2251 |                                               GET_MODE (inner_dest)))
2252 |           || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2253 |           || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2254 |         return 0;
2255 |
2256 |       /* If DEST is used in I3, it is being killed in this insn, so
2257 |          record that for later.  We have to consider paradoxical
2258 |          subregs here, since they kill the whole register, but we
2259 |          ignore partial subregs, STRICT_LOW_PART, etc.
2260 |          Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2261 |          STACK_POINTER_REGNUM, since these are always considered to be
2262 |          live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
2263 |       subdest = dest;
2264 |       if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2265 |         subdest = SUBREG_REG (subdest);
2266 |       if (pi3dest_killed
2267 |           && REG_P (subdest)
2268 |           && reg_referenced_p (subdest, PATTERN (i3))
2269 |           && REGNO (subdest) != FRAME_POINTER_REGNUM
2270 |           && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2271 |               || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2272 |           && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2273 |               || (REGNO (subdest) != ARG_POINTER_REGNUM
2274 |                   || ! fixed_regs [REGNO (subdest)]))
2275 |           && REGNO (subdest) != STACK_POINTER_REGNUM)
2276 |         {
2277 |           if (*pi3dest_killed)
2278 |             return 0;
2279 |
2280 |           *pi3dest_killed = subdest;
2281 |         }
2282 |     }
2283 |
2284 |   else if (GET_CODE (x) == PARALLEL)
2285 |     {
2286 |       int i;
2287 |
2288 |       for (i = 0; i < XVECLEN (x, 0); i++)
2289 |         if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2290 |                                 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2291 |           return 0;
2292 |     }
2293 |
2294 |   return 1;
2295 | }
2296 | |
2297 | /* Return 1 if X is an arithmetic expression that contains a multiplication
2298 |    or division.  We don't count multiplications by powers of two here.  */
2299 |
2300 | static int
2301 | contains_muldiv (rtx x)
2302 | {
2303 |   switch (GET_CODE (x))
2304 |     {
2305 |     case MOD:  case DIV:  case UMOD:  case UDIV:
2306 |       return 1;
2307 |
2308 |     case MULT:
2309 |       return ! (CONST_INT_P (XEXP (x, 1))
2310 |                 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2311 |     default:
2312 |       if (BINARY_P (x))
2313 |         return contains_muldiv (XEXP (x, 0))
2314 |                || contains_muldiv (XEXP (x, 1));
2315 |
2316 |       if (UNARY_P (x))
2317 |         return contains_muldiv (XEXP (x, 0));
2318 |
2319 |       return 0;
2320 |     }
2321 | }
2322 | |
2323 | /* Determine whether INSN can be used in a combination. Return nonzero if |
2324 | not. This is used in try_combine to detect early some cases where we |
2325 | can't perform combinations. */ |
2326 | |
2327 | static int |
2328 | cant_combine_insn_p (rtx_insn *insn) |
2329 | { |
2330 | rtx set; |
2331 | rtx src, dest; |
2332 | |
2333 | /* If this isn't really an insn, we can't do anything. |
2334 | This can occur when flow deletes an insn that it has merged into an |
2335 | auto-increment address. */ |
2336 | if (!NONDEBUG_INSN_P (insn)((((enum rtx_code) (insn)->code) == INSN) || (((enum rtx_code ) (insn)->code) == JUMP_INSN) || (((enum rtx_code) (insn)-> code) == CALL_INSN))) |
2337 | return 1; |
2338 | |
2339 | /* Never combine loads and stores involving hard regs that are likely |
2340 | to be spilled. The register allocator can usually handle such |
2341 | reg-reg moves by tying. If we allow the combiner to make |
2342 | substitutions of likely-spilled regs, reload might die. |
2343 | As an exception, we allow combinations involving fixed regs; these are |
2344 | not available to the register allocator so there's no risk involved. */ |
2345 | |
2346 | set = single_set (insn); |
2347 | if (! set) |
2348 | return 0; |
2349 | src = SET_SRC (set);
2350 | dest = SET_DEST (set);
2351 | if (GET_CODE (src) == SUBREG)
2352 | src = SUBREG_REG (src);
2353 | if (GET_CODE (dest) == SUBREG)
2354 | dest = SUBREG_REG (dest);
2355 | if (REG_P (src) && REG_P (dest)
2356 | && ((HARD_REGISTER_P (src)
2357 | && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2358 | #ifdef LEAF_REGISTERS
2359 | && ! LEAF_REGISTERS [REGNO (src)])
2360 | #else
2361 | )
2362 | #endif
2363 | || (HARD_REGISTER_P (dest)
2364 | && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2365 | && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2366 | return 1; |
2367 | |
2368 | return 0; |
2369 | } |
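|
| /* Editorial illustration, not part of the original source: on x86 this
| rejects a copy such as (set (reg:SI 100) (reg:SI 1 dx)), because dx is
| a non-fixed hard register, while a copy from the fixed stack pointer,
| (set (reg:SI 100) (reg:SI 7 sp)), is still combinable. */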
2370 | |
2371 | struct likely_spilled_retval_info |
2372 | { |
2373 | unsigned regno, nregs; |
2374 | unsigned mask; |
2375 | }; |
2376 | |
2377 | /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask |
2378 | hard registers that are known to be written to / clobbered in full. */ |
2379 | static void |
2380 | likely_spilled_retval_1 (rtx x, const_rtx set, void *data) |
2381 | { |
2382 | struct likely_spilled_retval_info *const info = |
2383 | (struct likely_spilled_retval_info *) data; |
2384 | unsigned regno, nregs; |
2385 | unsigned new_mask; |
2386 | |
2387 | if (!REG_P (XEXP (set, 0)))
2388 | return;
2389 | regno = REGNO (x);
2390 | if (regno >= info->regno + info->nregs)
2391 | return;
2392 | nregs = REG_NREGS (x);
2393 | if (regno + nregs <= info->regno) |
2394 | return; |
2395 | new_mask = (2U << (nregs - 1)) - 1; |
2396 | if (regno < info->regno) |
2397 | new_mask >>= info->regno - regno; |
2398 | else |
2399 | new_mask <<= regno - info->regno; |
2400 | info->mask &= ~new_mask; |
2401 | } |
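|
| /* Editorial worked example, not part of the original source: for a
| return value in regs 0..1 (info->regno == 0, info->nregs == 2) the
| initial mask is (2U << 1) - 1 == 0b11. A later store to reg 1 alone
| reaches here with regno == 1 and nregs == 1, so new_mask == 0b1 is
| shifted left by 1 to 0b10 and only bit 1 is cleared; reg 0 remains
| flagged as live. */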
2402 | |
2403 | /* Return nonzero iff part of the return value is live during INSN, and |
2404 | it is likely spilled. This can happen when more than one insn is needed |
2405 | to copy the return value, e.g. when we consider combining into the
2406 | second copy insn for a complex value. */ |
2407 | |
2408 | static int |
2409 | likely_spilled_retval_p (rtx_insn *insn) |
2410 | { |
2411 | rtx_insn *use = BB_END (this_basic_block);
2412 | rtx reg; |
2413 | rtx_insn *p; |
2414 | unsigned regno, nregs; |
2415 | /* We assume here that no machine mode needs more than |
2416 | 32 hard registers when the value overlaps with a register |
2417 | for which TARGET_FUNCTION_VALUE_REGNO_P is true. */ |
2418 | unsigned mask; |
2419 | struct likely_spilled_retval_info info; |
2420 | |
2421 | if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2422 | return 0;
2423 | reg = XEXP (PATTERN (use), 0);
2424 | if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2425 | return 0;
2426 | regno = REGNO (reg);
2427 | nregs = REG_NREGS (reg);
2428 | if (nregs == 1) |
2429 | return 0; |
2430 | mask = (2U << (nregs - 1)) - 1; |
2431 | |
2432 | /* Disregard parts of the return value that are set later. */ |
2433 | info.regno = regno; |
2434 | info.nregs = nregs; |
2435 | info.mask = mask; |
2436 | for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p)) |
2437 | if (INSN_P (p))
2438 | note_stores (p, likely_spilled_retval_1, &info); |
2439 | mask = info.mask; |
2440 | |
2441 | /* Check if any of the (probably) live return value registers is |
2442 | likely spilled. */ |
2443 | nregs --; |
2444 | do |
2445 | { |
2446 | if ((mask & 1 << nregs) |
2447 | && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2448 | return 1; |
2449 | } while (nregs--); |
2450 | return 0; |
2451 | } |
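|
| /* Editorial note, not part of the original source: the situation this
| guards against looks roughly like
| (set (reg:SI 0) ...) ;; first half of the return value
| (set (reg:SI 1) ...) ;; INSN, the second copy insn
| (use (reg:DI 0)) ;; BB_END
| where regs 0 and 1 together hold a multi-register return value in a
| class for which targetm.class_likely_spilled_p is true. */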
2452 | |
2453 | /* Adjust INSN after we made a change to its destination. |
2454 | |
2455 | Changing the destination can invalidate notes that say something about |
2456 | the results of the insn and a LOG_LINK pointing to the insn. */ |
2457 | |
2458 | static void |
2459 | adjust_for_new_dest (rtx_insn *insn) |
2460 | { |
2461 | /* For notes, be conservative and simply remove them. */ |
2462 | remove_reg_equal_equiv_notes (insn, true); |
2463 | |
2464 | /* The new insn will have a destination that was previously the destination |
2465 | of an insn just above it. Call distribute_links to make a LOG_LINK from |
2466 | the next use of that destination. */ |
2467 | |
2468 | rtx set = single_set (insn); |
2469 | gcc_assert (set);
2470 |
2471 | rtx reg = SET_DEST (set);
2472 |
2473 | while (GET_CODE (reg) == ZERO_EXTRACT
2474 | || GET_CODE (reg) == STRICT_LOW_PART
2475 | || GET_CODE (reg) == SUBREG)
2476 | reg = XEXP (reg, 0);
2477 | gcc_assert (REG_P (reg));
2478 |
2479 | distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2480 | |
2481 | df_insn_rescan (insn); |
2482 | } |
2483 | |
2484 | /* Return TRUE if combine can reuse reg X in mode MODE. |
2485 | ADDED_SETS is nonzero if the original set is still required. */ |
2486 | static bool |
2487 | can_change_dest_mode (rtx x, int added_sets, machine_mode mode) |
2488 | { |
2489 | unsigned int regno; |
2490 | |
2491 | if (!REG_P (x))
2492 | return false;
2493 |
2494 | /* Don't change between modes with different underlying register sizes,
2495 | since this could lead to invalid subregs. */
2496 | if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
2497 | REGMODE_NATURAL_SIZE (GET_MODE (x))))
2498 | return false;
2499 |
2500 | regno = REGNO (x);
2501 | /* Allow hard registers if the new mode is legal, and occupies no more
2502 | registers than the old mode. */
2503 | if (regno < FIRST_PSEUDO_REGISTER)
2504 | return (targetm.hard_regno_mode_ok (regno, mode)
2505 | && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2506 |
2507 | /* Or a pseudo that is only used once. */
2508 | return (regno < reg_n_sets_max
2509 | && REG_N_SETS (regno) == 1
2510 | && !added_sets
2511 | && !REG_USERVAR_P (x));
2512 | } |
2513 | |
2514 | |
2515 | /* Check whether X, the destination of a set, refers to part of |
2516 | the register specified by REG. */ |
2517 | |
2518 | static bool |
2519 | reg_subword_p (rtx x, rtx reg) |
2520 | { |
2521 | /* Check that reg is an integer mode register. */ |
2522 | if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2523 | return false;
2524 |
2525 | if (GET_CODE (x) == STRICT_LOW_PART
2526 | || GET_CODE (x) == ZERO_EXTRACT)
2527 | x = XEXP (x, 0);
2528 |
2529 | return GET_CODE (x) == SUBREG
2530 | && SUBREG_REG (x) == reg
2531 | && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2532 | } |
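|
| /* Editorial illustration, not part of the original source: with
| reg == (reg:SI 100), this accepts destinations such as
| (subreg:HI (reg:SI 100) 0) or
| (strict_low_part (subreg:HI (reg:SI 100) 0)),
| but rejects (reg:SI 100) itself, which is not a SUBREG. */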
2533 | |
2534 | /* Return whether PAT is a PARALLEL of exactly N register SETs followed |
2535 | by an arbitrary number of CLOBBERs. */ |
2536 | static bool |
2537 | is_parallel_of_n_reg_sets (rtx pat, int n) |
2538 | { |
2539 | if (GET_CODE (pat) != PARALLEL)
2540 | return false;
2541 |
2542 | int len = XVECLEN (pat, 0);
2543 | if (len < n) |
2544 | return false; |
2545 | |
2546 | int i; |
2547 | for (i = 0; i < n; i++) |
2548 | if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2549 | || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2550 | return false;
2551 | for ( ; i < len; i++)
2552 | switch (GET_CODE (XVECEXP (pat, 0, i)))
2553 | {
2554 | case CLOBBER:
2555 | if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2556 | return false; |
2557 | break; |
2558 | default: |
2559 | return false; |
2560 | } |
2561 | return true; |
2562 | } |
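|
| /* Editorial illustration, not part of the original source: with n == 2
| this accepts
| (parallel [(set (reg:CC 17) (compare:CC (reg:SI 100) (const_int 0)))
| (set (reg:SI 101) (reg:SI 100))
| (clobber (scratch:SI))])
| but rejects the pattern if a trailing element is not a CLOBBER or if
| some CLOBBER is of const0_rtx. */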
2563 | |
2564 | /* Return whether INSN, a PARALLEL of N register SETs (and maybe some |
2565 | CLOBBERs), can be split into individual SETs in that order, without |
2566 | changing semantics. */ |
2567 | static bool |
2568 | can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n) |
2569 | { |
2570 | if (!insn_nothrow_p (insn)) |
2571 | return false; |
2572 | |
2573 | rtx pat = PATTERN (insn); |
2574 | |
2575 | int i, j; |
2576 | for (i = 0; i < n; i++) |
2577 | { |
2578 | if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2579 | return false;
2580 |
2581 | rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2582 |
2583 | for (j = i + 1; j < n; j++)
2584 | if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2585 | return false; |
2586 | } |
2587 | |
2588 | return true; |
2589 | } |
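|
| /* Editorial illustration, not part of the original source: splitting is
| refused for
| (parallel [(set (reg:SI 100) (reg:SI 101))
| (set (reg:SI 102) (reg:SI 100))])
| because inside a PARALLEL all sources are read before any destination
| is written, so emitting the SETs in sequence would let the second one
| observe the new value of reg 100. */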
2590 | |
2591 | /* Return whether X is just a single_set, with the source |
2592 | a general_operand. */ |
2593 | static bool |
2594 | is_just_move (rtx_insn *x) |
2595 | { |
2596 | rtx set = single_set (x); |
2597 | if (!set) |
2598 | return false; |
2599 | |
2600 | return general_operand (SET_SRC (set), VOIDmode);
2601 | } |
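|
| /* Editorial illustration, not part of the original source:
| (set (reg:SI 100) (mem:SI (reg:SI 101))) is "just a move", while
| (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4))) is not,
| because a PLUS is not a general_operand. */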
2602 | |
2603 | /* Callback function to count autoincs. */ |
2604 | |
2605 | static int |
2606 | count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg) |
2607 | { |
2608 | (*((int *) arg))++; |
2609 | |
2610 | return 0; |
2611 | } |
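|
| /* Editorial usage note, not part of the original source: try_combine
| below invokes this as
| for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
| so the auto-increment count of the combined pattern can be compared
| with the count in the original insns. */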
2612 | |
2613 | /* Try to combine the insns I0, I1 and I2 into I3. |
2614 | Here I0, I1 and I2 appear earlier than I3. |
2615 | I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into |
2616 | I3. |
2617 | |
2618 | If we are combining more than two insns and the resulting insn is not |
2619 | recognized, try splitting it into two insns. If that happens, I2 and I3 |
2620 | are retained and I1/I0 are pseudo-deleted by turning them into a NOTE. |
2621 | Otherwise, I0, I1 and I2 are pseudo-deleted. |
2622 | |
2623 | Return 0 if the combination does not work. Then nothing is changed. |
2624 | If we did the combination, return the insn at which combine should |
2625 | resume scanning. |
2626 | |
2627 | Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a |
2628 | new direct jump instruction. |
2629 | |
2630 | LAST_COMBINED_INSN is either I3, or some insn after I3 that was
2631 | passed as I3 to an earlier try_combine within the same basic
2632 | block. */ |
2633 | |
2634 | static rtx_insn * |
2635 | try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0, |
2636 | int *new_direct_jump_p, rtx_insn *last_combined_insn) |
2637 | { |
2638 | /* New patterns for I3 and I2, respectively. */ |
2639 | rtx newpat, newi2pat = 0; |
2640 | rtvec newpat_vec_with_clobbers = 0; |
2641 | int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0; |
2642 | /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not |
2643 | dead. */ |
2644 | int added_sets_0, added_sets_1, added_sets_2; |
2645 | /* Total number of SETs to put into I3. */ |
2646 | int total_sets; |
2647 | /* Nonzero if I2's or I1's body now appears in I3. */ |
2648 | int i2_is_used = 0, i1_is_used = 0; |
2649 | /* INSN_CODEs for new I3, new I2, and user of condition code. */ |
2650 | int insn_code_number, i2_code_number = 0, other_code_number = 0; |
2651 | /* Contains I3 if the destination of I3 is used in its source, which means |
2652 | that the old life of I3 is being killed. If that usage is placed into |
2653 | I2 and not in I3, a REG_DEAD note must be made. */ |
2654 | rtx i3dest_killed = 0; |
2655 | /* SET_DEST and SET_SRC of I2, I1 and I0. */ |
2656 | rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0; |
2657 | /* Copy of SET_SRC of I1 and I0, if needed. */ |
2658 | rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0; |
2659 | /* Set if I2DEST was reused as a scratch register. */ |
2660 | bool i2scratch = false; |
2661 | /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */ |
2662 | rtx i0pat = 0, i1pat = 0, i2pat = 0; |
2663 | /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2664 | int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0; |
2665 | int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0; |
2666 | int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0; |
2667 | int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0; |
2668 | /* Notes that must be added to REG_NOTES in I3 and I2. */ |
2669 | rtx new_i3_notes, new_i2_notes; |
2670 | /* Notes that we substituted I3 into I2 instead of the normal case. */ |
2671 | int i3_subst_into_i2 = 0; |
2672 | /* Notes that I1, I2 or I3 is a MULT operation. */ |
2673 | int have_mult = 0; |
2674 | int swap_i2i3 = 0; |
2675 | int split_i2i3 = 0; |
2676 | int changed_i3_dest = 0; |
2677 | bool i2_was_move = false, i3_was_move = false; |
2678 | int n_auto_inc = 0; |
2679 | |
2680 | int maxreg; |
2681 | rtx_insn *temp_insn; |
2682 | rtx temp_expr; |
2683 | struct insn_link *link; |
2684 | rtx other_pat = 0; |
2685 | rtx new_other_notes; |
2686 | int i; |
2687 | scalar_int_mode dest_mode, temp_mode; |
2688 | |
2689 | /* Immediately return if any of I0,I1,I2 are the same insn (I3 can |
2690 | never be). */ |
2691 | if (i1 == i2 || i0 == i2 || (i0 && i0 == i1)) |
2692 | return 0; |
2693 | |
2694 | /* Only try four-insn combinations when there's high likelihood of |
2695 | success. Look for simple insns, such as loads of constants or |
2696 | binary operations involving a constant. */ |
2697 | if (i0) |
2698 | { |
2699 | int i; |
2700 | int ngood = 0; |
2701 | int nshift = 0; |
2702 | rtx set0, set3; |
2703 | |
2704 | if (!flag_expensive_optimizations)
2705 | return 0; |
2706 | |
2707 | for (i = 0; i < 4; i++) |
2708 | { |
2709 | rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3; |
2710 | rtx set = single_set (insn); |
2711 | rtx src; |
2712 | if (!set) |
2713 | continue; |
2714 | src = SET_SRC (set);
2715 | if (CONSTANT_P (src))
2716 | {
2717 | ngood += 2;
2718 | break;
2719 | }
2720 | else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2721 | ngood++;
2722 | else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2723 | || GET_CODE (src) == LSHIFTRT)
2724 | nshift++; |
2725 | } |
2726 | |
2727 | /* If I0 loads a memory and I3 sets the same memory, then I1 and I2 |
2728 | are likely manipulating its value. Ideally we'll be able to combine |
2729 | all four insns into a bitfield insertion of some kind. |
2730 | |
2731 | Note the source in I0 might be inside a sign/zero extension and the |
2732 | memory modes in I0 and I3 might be different. So extract the address |
2733 | from the destination of I3 and search for it in the source of I0. |
2734 | |
2735 | In the event that there's a match but the source/dest do not actually |
2736 | refer to the same memory, the worst that happens is we try some |
2737 | combinations that we wouldn't have otherwise. */ |
2738 | if ((set0 = single_set (i0)) |
2739 | /* Ensure the source of SET0 is a MEM, possibly buried inside |
2740 | an extension. */ |
2741 | && (GET_CODE (SET_SRC (set0)) == MEM
2742 | || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2743 | || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2744 | && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2745 | && (set3 = single_set (i3))
2746 | /* Ensure the destination of SET3 is a MEM. */
2747 | && GET_CODE (SET_DEST (set3)) == MEM
2748 | /* Would it be better to extract the base address for the MEM
2749 | in SET3 and look for that? I don't have cases where it matters
2750 | but I could envision such cases. */
2751 | && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2752 | ngood += 2; |
2753 | |
2754 | if (ngood < 2 && nshift < 2) |
2755 | return 0; |
2756 | } |
2757 | |
2758 | /* Exit early if one of the insns involved can't be used for |
2759 | combinations. */ |
2760 | if (CALL_P (i2)
2761 | || (i1 && CALL_P (i1))
2762 | || (i0 && CALL_P (i0))
2763 | || cant_combine_insn_p (i3) |
2764 | || cant_combine_insn_p (i2) |
2765 | || (i1 && cant_combine_insn_p (i1)) |
2766 | || (i0 && cant_combine_insn_p (i0)) |
2767 | || likely_spilled_retval_p (i3)) |
2768 | return 0; |
2769 | |
2770 | combine_attempts++; |
2771 | undobuf.other_insn = 0; |
2772 | |
2773 | /* Reset the hard register usage information. */ |
2774 | CLEAR_HARD_REG_SET (newpat_used_regs); |
2775 | |
2776 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2777 | { |
2778 | if (i0) |
2779 | fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n", |
2780 | INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3)); |
2781 | else if (i1) |
2782 | fprintf (dump_file, "\nTrying %d, %d -> %d:\n", |
2783 | INSN_UID (i1), INSN_UID (i2), INSN_UID (i3)); |
2784 | else |
2785 | fprintf (dump_file, "\nTrying %d -> %d:\n", |
2786 | INSN_UID (i2), INSN_UID (i3)); |
2787 | |
2788 | if (i0) |
2789 | dump_insn_slim (dump_file, i0); |
2790 | if (i1) |
2791 | dump_insn_slim (dump_file, i1); |
2792 | dump_insn_slim (dump_file, i2); |
2793 | dump_insn_slim (dump_file, i3); |
2794 | } |
2795 | |
2796 | /* If multiple insns feed into one of I2 or I3, they can be in any |
2797 | order. To simplify the code below, reorder them in sequence. */ |
2798 | if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2799 | std::swap (i0, i2);
2800 | if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2801 | std::swap (i0, i1);
2802 | if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2803 | std::swap (i1, i2);
2804 | |
2805 | added_links_insn = 0; |
2806 | added_notes_insn = 0; |
2807 | |
2808 | /* First check for one important special case that the code below will |
2809 | not handle. Namely, the case where I1 is zero, I2 is a PARALLEL |
2810 | and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case, |
2811 | we may be able to replace that destination with the destination of I3. |
2812 | This occurs in the common code where we compute both a quotient and |
2813 | remainder into a structure, in which case we want to do the computation |
2814 | directly into the structure to avoid register-register copies. |
2815 | |
2816 | Note that this case handles both multiple sets in I2 and also cases |
2817 | where I2 has a number of CLOBBERs inside the PARALLEL. |
2818 | |
2819 | We make very conservative checks below and only try to handle the |
2820 | most common cases of this. For example, we only handle the case |
2821 | where I2 and I3 are adjacent to avoid making difficult register |
2822 | usage tests. */ |
2823 | |
2824 | if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2825 | && REG_P (SET_SRC (PATTERN (i3)))
2826 | && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2827 | && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2828 | && GET_CODE (PATTERN (i2)) == PARALLEL
2829 | && ! side_effects_p (SET_DEST (PATTERN (i3)))
2830 | /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2831 | below would need to check what is inside (and reg_overlap_mentioned_p
2832 | doesn't support those codes anyway). Don't allow those destinations;
2833 | the resulting insn isn't likely to be recognized anyway. */
2834 | && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2835 | && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2836 | && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2837 | SET_DEST (PATTERN (i3)))
2838 | && next_active_insn (i2) == i3)
2839 | { |
2840 | rtx p2 = PATTERN (i2); |
2841 | |
2842 | /* Make sure that the destination of I3, |
2843 | which we are going to substitute into one output of I2, |
2844 | is not used within another output of I2. We must avoid making this: |
2845 | (parallel [(set (mem (reg 69)) ...) |
2846 | (set (reg 69) ...)]) |
2847 | which is not well-defined as to order of actions. |
2848 | (Besides, reload can't handle output reloads for this.) |
2849 | |
2850 | The problem can also happen if the dest of I3 is a memory ref, |
2851 | if another dest in I2 is an indirect memory ref. |
2852 | |
2853 | Neither can this PARALLEL be an asm. We do not allow combining |
2854 | that usually (see can_combine_p), so do not here either. */ |
2855 | bool ok = true; |
2856 | for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2857 | {
2858 | if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2859 | || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2860 | && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2861 | SET_DEST (XVECEXP (p2, 0, i))))
2862 | ok = false;
2863 | else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2864 | && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2865 | ok = false;
2866 | }
2867 |
2868 | if (ok)
2869 | for (i = 0; i < XVECLEN (p2, 0); i++)
2870 | if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2871 | && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2872 | { |
2873 | combine_merges++; |
2874 | |
2875 | subst_insn = i3; |
2876 | subst_low_luid = DF_INSN_LUID (i2);
2877 |
2878 | added_sets_2 = added_sets_1 = added_sets_0 = 0;
2879 | i2src = SET_SRC (XVECEXP (p2, 0, i));
2880 | i2dest = SET_DEST (XVECEXP (p2, 0, i));
2881 | i2dest_killed = dead_or_set_p (i2, i2dest);
2882 |
2883 | /* Replace the dest in I2 with our dest and make the resulting
2884 | insn the new pattern for I3. Then skip to where we validate
2885 | the pattern. Everything was set up above. */
2886 | SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2887 | newpat = p2; |
2888 | i3_subst_into_i2 = 1; |
2889 | goto validate_replacement; |
2890 | } |
2891 | } |
2892 | |
2893 | /* If I2 is setting a pseudo to a constant and I3 is setting some |
2894 | sub-part of it to another constant, merge them by making a new |
2895 | constant. */ |
2896 | if (i1 == 0 |
2897 | && (temp_expr = single_set (i2)) != 0 |
2898 | && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2899 | && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2900 | && GET_CODE (PATTERN (i3)) == SET
2901 | && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2902 | && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2903 | { |
2904 | rtx dest = SET_DEST (PATTERN (i3));
2905 | rtx temp_dest = SET_DEST (temp_expr);
2906 | int offset = -1; |
2907 | int width = 0; |
2908 | |
2909 | if (GET_CODE (dest) == ZERO_EXTRACT)
2910 | {
2911 | if (CONST_INT_P (XEXP (dest, 1))
2912 | && CONST_INT_P (XEXP (dest, 2))
2913 | && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2914 | &dest_mode))
2915 | {
2916 | width = INTVAL (XEXP (dest, 1));
2917 | offset = INTVAL (XEXP (dest, 2));
2918 | dest = XEXP (dest, 0);
2919 | if (BITS_BIG_ENDIAN)
2920 | offset = GET_MODE_PRECISION (dest_mode) - width - offset; |
2921 | } |
2922 | } |
2923 | else |
2924 | { |
2925 | if (GET_CODE (dest) == STRICT_LOW_PART)
2926 | dest = XEXP (dest, 0);
2927 | if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2928 | { |
2929 | width = GET_MODE_PRECISION (dest_mode); |
2930 | offset = 0; |
2931 | } |
2932 | } |
2933 | |
2934 | if (offset >= 0) |
2935 | { |
2936 | /* If this is the low part, we're done. */ |
2937 | if (subreg_lowpart_p (dest)) |
2938 | ; |
2939 | /* Handle the case where inner is twice the size of outer. */ |
2940 | else if (GET_MODE_PRECISION (temp_mode) |
2941 | == 2 * GET_MODE_PRECISION (dest_mode)) |
2942 | offset += GET_MODE_PRECISION (dest_mode); |
2943 | /* Otherwise give up for now. */ |
2944 | else |
2945 | offset = -1; |
2946 | } |
2947 | |
2948 | if (offset >= 0) |
2949 | { |
2950 | rtx inner = SET_SRC (PATTERN (i3));
2951 | rtx outer = SET_SRC (temp_expr);
2952 | |
2953 | wide_int o = wi::insert (rtx_mode_t (outer, temp_mode), |
2954 | rtx_mode_t (inner, dest_mode), |
2955 | offset, width); |
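|
| /* Editorial worked example, not part of the original source: if I2 set
| (reg:SI 100) to 0x12345678 and I3 sets (subreg:HI (reg:SI 100) 0) to
| 0xABCD on a little-endian target, then offset == 0 and width == 16,
| and wi::insert produces 0x1234ABCD as the merged constant. */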
2956 | |
2957 | combine_merges++; |
2958 | subst_insn = i3; |
2959 | subst_low_luid = DF_INSN_LUID (i2);
2960 | added_sets_2 = added_sets_1 = added_sets_0 = 0; |
2961 | i2dest = temp_dest; |
2962 | i2dest_killed = dead_or_set_p (i2, i2dest); |
2963 | |
2964 | /* Replace the source in I2 with the new constant and make the |
2965 | resulting insn the new pattern for I3. Then skip to where we |
2966 | validate the pattern. Everything was set up above. */ |
2967 | SUBST (SET_SRC (temp_expr),
2968 | immed_wide_int_const (o, temp_mode));
2969 | |
2970 | newpat = PATTERN (i2); |
2971 | |
2972 | /* The dest of I3 has been replaced with the dest of I2. */ |
2973 | changed_i3_dest = 1; |
2974 | goto validate_replacement; |
2975 | } |
2976 | } |
2977 | |
2978 | /* If we have no I1 and I2 looks like: |
2979 | (parallel [(set (reg:CC X) (compare:CC OP (const_int 0))) |
2980 | (set Y OP)]) |
2981 | make up a dummy I1 that is |
2982 | (set Y OP) |
2983 | and change I2 to be |
2984 | (set (reg:CC X) (compare:CC Y (const_int 0))) |
2985 | |
2986 | (We can ignore any trailing CLOBBERs.) |
2987 | |
2988 | This undoes a previous combination and allows us to match a branch-and- |
2989 | decrement insn. */ |
2990 | |
2991 | if (!HAVE_cc0 && i1 == 0
2992 | && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2993 | && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2994 | == MODE_CC)
2995 | && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2996 | && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2997 | && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2998 | SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2999 | && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3000 | && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3001 | { |
3002 | /* We make I1 with the same INSN_UID as I2. This gives it |
3003 | the same DF_INSN_LUID for value tracking. Our fake I1 will |
3004 | never appear in the insn stream so giving it the same INSN_UID |
3005 | as I2 will not cause a problem. */ |
3006 | |
3007 | i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3008 | XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
3009 | -1, NULL_RTX);
3010 | INSN_UID (i1) = INSN_UID (i2);
3011 |
3012 | SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
3013 | SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
3014 | SET_DEST (PATTERN (i1)));
3015 | unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
3016 | SUBST_LINK (LOG_LINKS (i2),
3017 | alloc_insn_link (i1, regno, LOG_LINKS (i2)));
3018 | } |
3019 | |
3020 | /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs), |
3021 | make those two SETs separate I1 and I2 insns, and make an I0 that is |
3022 | the original I1. */ |
3023 | if (!HAVE_cc0 && i0 == 0
3024 | && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3025 | && can_split_parallel_of_n_reg_sets (i2, 2)
3026 | && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3027 | && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
3028 | && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3029 | && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3030 | { |
3031 | /* If there is no I1, there is no I0 either. */ |
3032 | i0 = i1; |
3033 | |
3034 | /* We make I1 with the same INSN_UID as I2. This gives it |
3035 | the same DF_INSN_LUID for value tracking. Our fake I1 will |
3036 | never appear in the insn stream so giving it the same INSN_UID |
3037 | as I2 will not cause a problem. */ |
3038 | |
3039 | i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3040 | XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
3041 | -1, NULL_RTX);
3042 | INSN_UID (i1) = INSN_UID (i2);
3043 |
3044 | SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3045 | } |
3046 | |
3047 | /* Verify that I2 and maybe I1 and I0 can be combined into I3. */ |
3048 | if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
3049 | { |
3050 | if (dump_file && (dump_flags & TDF_DETAILS)) |
3051 | fprintf (dump_file, "Can't combine i2 into i3\n"); |
3052 | undo_all (); |
3053 | return 0; |
3054 | } |
3055 | if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
3056 | { |
3057 | if (dump_file && (dump_flags & TDF_DETAILS)) |
3058 | fprintf (dump_file, "Can't combine i1 into i3\n"); |
3059 | undo_all (); |
3060 | return 0; |
3061 | } |
3062 | if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
3063 | { |
3064 | if (dump_file && (dump_flags & TDF_DETAILS)) |
3065 | fprintf (dump_file, "Can't combine i0 into i3\n"); |
3066 | undo_all (); |
3067 | return 0; |
3068 | } |
3069 | |
3070 | /* Record whether i2 and i3 are trivial moves. */ |
3071 | i2_was_move = is_just_move (i2); |
3072 | i3_was_move = is_just_move (i3); |
3073 | |
3074 | /* Record whether I2DEST is used in I2SRC and similarly for the other |
3075 | cases. Knowing this will help in register status updating below. */ |
3076 | i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src); |
3077 | i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src); |
3078 | i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src); |
3079 | i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src); |
3080 | i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src); |
3081 | i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src); |
3082 | i2dest_killed = dead_or_set_p (i2, i2dest); |
3083 | i1dest_killed = i1 && dead_or_set_p (i1, i1dest); |
3084 | i0dest_killed = i0 && dead_or_set_p (i0, i0dest); |
3085 | |
3086 | /* For the earlier insns, determine which of the subsequent ones they |
3087 | feed. */ |
3088 | i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2); |
3089 | i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1); |
3090 | i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2) |
3091 | : (!reg_overlap_mentioned_p (i1dest, i0dest) |
3092 | && reg_overlap_mentioned_p (i0dest, i2src)))); |
3093 | |
3094 | /* Ensure that I3's pattern can be the destination of combines. */ |
3095 | if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest, |
3096 | i1 && i2dest_in_i1src && !i1_feeds_i2_n, |
3097 | i0 && ((i2dest_in_i0src && !i0_feeds_i2_n) |
3098 | || (i1dest_in_i0src && !i0_feeds_i1_n)), |
3099 | &i3dest_killed)) |
3100 | { |
3101 | undo_all (); |
3102 | return 0; |
3103 | } |
3104 | |
3105 | /* See if any of the insns is a MULT operation. Unless one is, we will |
3106 | reject a combination that is, since it must be slower. Be conservative |
3107 | here. */ |
3108 | if (GET_CODE (i2src) == MULT
3109 | || (i1 != 0 && GET_CODE (i1src) == MULT)
3110 | || (i0 != 0 && GET_CODE (i0src) == MULT)
3111 | || (GET_CODE (PATTERN (i3)) == SET
3112 | && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3113 | have_mult = 1; |
3114 | |
3115 | /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd. |
3116 | We used to do this EXCEPT in one case: I3 has a post-inc in an |
3117 | output operand. However, that exception can give rise to insns like |
3118 | mov r3,(r3)+ |
3119 | which is a famous insn on the PDP-11 where the value of r3 used as the |
3120 | source was model-dependent. Avoid this sort of thing. */ |
3121 | |
3122 | #if 0 |
3123 | if (!(GET_CODE (PATTERN (i3)) == SET
3124 | && REG_P (SET_SRC (PATTERN (i3)))
3125 | && MEM_P (SET_DEST (PATTERN (i3)))
3126 | && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3127 | || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3128 | /* It's not the exception. */ |
3129 | #endif |
3130 | if (AUTO_INC_DEC)
3131 | { |
3132 | rtx link; |
3133 | for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3134 | if (REG_NOTE_KIND (link) == REG_INC
3135 | && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3136 | || (i1 != 0
3137 | && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3138 | { |
3139 | undo_all (); |
3140 | return 0; |
3141 | } |
3142 | } |
3143 | |
3144 | /* See if the SETs in I1 or I2 need to be kept around in the merged |
3145 | instruction: whenever the value set there is still needed past I3. |
3146 | For the SET in I2, this is easy: we see if I2DEST dies or is set in I3. |
3147 | |
3148 | For the SET in I1, we have two cases: if I1 and I2 independently feed |
3149 | into I3, the set in I1 needs to be kept around unless I1DEST dies |
3150 | or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set |
3151 | in I1 needs to be kept around unless I1DEST dies or is set in either |
3152 | I2 or I3. The same considerations apply to I0. */ |
3153 | |
3154 | added_sets_2 = !dead_or_set_p (i3, i2dest); |
3155 | |
3156 | if (i1) |
3157 | added_sets_1 = !(dead_or_set_p (i3, i1dest) |
3158 | || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest))); |
3159 | else |
3160 | added_sets_1 = 0; |
3161 | |
3162 | if (i0) |
3163 | added_sets_0 = !(dead_or_set_p (i3, i0dest) |
3164 | || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)) |
3165 | || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n)) |
3166 | && dead_or_set_p (i2, i0dest))); |
3167 | else |
3168 | added_sets_0 = 0; |
3169 | |
3170 | /* We are about to copy insns for the case where they need to be kept |
3171 | around. Check that they can be copied in the merged instruction. */ |
3172 | |
3173 | if (targetm.cannot_copy_insn_p |
3174 | && ((added_sets_2 && targetm.cannot_copy_insn_p (i2)) |
3175 | || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1)) |
3176 | || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0)))) |
3177 | { |
3178 | undo_all (); |
3179 | return 0; |
3180 | } |
3181 | |
3182 | /* Count how many auto_inc expressions there were in the original insns; |
3183 | we need to have the same number in the resulting patterns. */ |
3184 | |
3185 | if (i0) |
3186 | for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc); |
3187 | if (i1) |
3188 | for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc); |
3189 | for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc); |
3190 | for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc); |
3191 | |
3192 | /* If the set in I2 needs to be kept around, we must make a copy of |
3193 | PATTERN (I2), so that when we substitute I1SRC for I1DEST in |
3194 | PATTERN (I2), we are only substituting for the original I1DEST, not into |
3195 | an already-substituted copy. This also prevents making self-referential |
3196 | rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to |
3197 | I2DEST. */ |
3198 | |
3199 | if (added_sets_2) |
3200 | { |
3201 | if (GET_CODE (PATTERN (i2)) == PARALLEL)
3202 | i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3203 | else |
3204 | i2pat = copy_rtx (PATTERN (i2)); |
3205 | } |
3206 | |
3207 | if (added_sets_1) |
3208 | { |
3209 | if (GET_CODE (PATTERN (i1)) == PARALLEL)
3210 | i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3211 | else |
3212 | i1pat = copy_rtx (PATTERN (i1)); |
3213 | } |
3214 | |
3215 | if (added_sets_0) |
3216 | { |
3217 | if (GET_CODE (PATTERN (i0)) == PARALLEL)
3218 | i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3219 | else |
3220 | i0pat = copy_rtx (PATTERN (i0)); |
3221 | } |
3222 | |
3223 | combine_merges++; |
3224 | |
3225 | /* Substitute in the latest insn for the regs set by the earlier ones. */ |
3226 | |
3227 | maxreg = max_reg_num (); |
3228 | |
3229 | subst_insn = i3; |
3230 | |
3231 | /* Many machines that don't use CC0 have insns that can both perform an |
3232 | arithmetic operation and set the condition code. These operations will |
3233 | be represented as a PARALLEL with the first element of the vector |
3234 | being a COMPARE of an arithmetic operation with the constant zero. |
3235 | The second element of the vector will set some pseudo to the result |
3236 | of the same arithmetic operation. If we simplify the COMPARE, we won't |
3237 | match such a pattern and so will generate an extra insn. Here we test |
3238 | for this case, where both the comparison and the operation result are |
3239 | needed, and make the PARALLEL by just replacing I2DEST in I3SRC with |
3240 | I2SRC. Later we will make the PARALLEL that contains I2. */ |
3241 | |
3242 | if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3243 | && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3244 | && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3245 | && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3246 | { |
3247 | rtx newpat_dest; |
3248 | rtx *cc_use_loc = NULL;
3249 | rtx_insn *cc_use_insn = NULL;
3250 | rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3251 | machine_mode compare_mode, orig_compare_mode; |
3252 | enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN; |
3253 | scalar_int_mode mode; |
3254 | |
3255 | newpat = PATTERN (i3); |
3256 | newpat_dest = SET_DEST (newpat);
3257 | compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3258 | |
3259 | if (undobuf.other_insn == 0 |
3260 | && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3261 | &cc_use_insn))) |
3262 | { |
3263 | compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3264 | if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3265 | compare_code = simplify_compare_const (compare_code, mode, |
3266 | op0, &op1); |
3267 | target_canonicalize_comparison (&compare_code, &op0, &op1, 1); |
3268 | } |
3269 | |
3270 | /* Do the rest only if op1 is const0_rtx, which may be the |
3271 | result of simplification. */ |
3272 | if (op1 == const0_rtx)
3273 | { |
3274 | /* If a single use of the CC is found, prepare to modify it |
3275 | when SELECT_CC_MODE returns a new CC-class mode, or when |
3276 | the above simplify_compare_const() returned a new comparison |
3277 | operator. undobuf.other_insn is assigned the CC use insn |
3278 | when modifying it. */ |
3279 | if (cc_use_loc) |
3280 | { |
3281 | #ifdef SELECT_CC_MODE |
3282 | machine_mode new_mode |
3283 | = SELECT_CC_MODE (compare_code, op0, op1);
3284 | if (new_mode != orig_compare_mode
3285 | && can_change_dest_mode (SET_DEST (newpat),
3286 | added_sets_2, new_mode))
3287 | {
3288 | unsigned int regno = REGNO (newpat_dest);
3289 | compare_mode = new_mode;
3290 | if (regno < FIRST_PSEUDO_REGISTER)
3291 | newpat_dest = gen_rtx_REG (compare_mode, regno);
3292 | else
3293 | {
3294 | SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3295 | newpat_dest = regno_reg_rtx[regno]; |
3296 | } |
3297 | } |
3298 | #endif |
3299 | /* Cases for modifying the CC-using comparison. */ |
3300 | if (compare_code != orig_compare_code |
3301 | /* ??? Do we need to verify the zero rtx? */ |
3302 | && XEXP (*cc_use_loc, 1) == const0_rtx)
3303 | { |
3304 | /* Replace cc_use_loc with entire new RTX. */ |
3305 | SUBST (*cc_use_loc,
3306 | gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc),
3307 | newpat_dest, const0_rtx));
3308 | undobuf.other_insn = cc_use_insn; |
3309 | } |
3310 | else if (compare_mode != orig_compare_mode) |
3311 | { |
3312 | /* Just replace the CC reg with a new mode. */ |
3313 | SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3314 | undobuf.other_insn = cc_use_insn; |
3315 | } |
3316 | } |
3317 | |
3318 | /* Now we modify the current newpat: |
3319 | First, SET_DEST(newpat) is updated if the CC mode has been |
3320 | altered. For targets without SELECT_CC_MODE, this should be |
3321 | optimized away. */ |
3322 | if (compare_mode != orig_compare_mode) |
3323 | SUBST (SET_DEST (newpat), newpat_dest);
3324 | /* This is always done to propagate i2src into newpat. */ |
3325 | SUBST (SET_SRC (newpat),
3326 | gen_rtx_COMPARE (compare_mode, op0, op1));
3327 | /* Create new version of i2pat if needed; the below PARALLEL |
3328 | creation needs this to work correctly. */ |
3329 | if (! rtx_equal_p (i2src, op0)) |
3330 | i2pat = gen_rtx_SET (i2dest, op0);
3331 | i2_is_used = 1; |
3332 | } |
3333 | } |
3334 | |
3335 | if (i2_is_used == 0) |
3336 | { |
3337 | /* It is possible that the source of I2 or I1 may be performing |
3338 | an unneeded operation, such as a ZERO_EXTEND of something |
3339 | that is known to have the high part zero. Handle that case |
3340 | by letting subst look at the inner insns. |
3341 | |
3342 | Another way to do this would be to have a function that tries |
3343 | to simplify a single insn instead of merging two or more |
3344 | insns. We don't do this because of the potential of infinite |
3345 | loops and because of the potential extra memory required. |
3346 | However, doing it the way we are is a bit of a kludge and |
3347 | doesn't catch all cases. |
3348 | |
3349 | But only do this if -fexpensive-optimizations since it slows |
3350 | things down and doesn't usually win. |
3351 | |
3352 | This is not done in the COMPARE case above because the |
3353 | unmodified I2PAT is used in the PARALLEL and so a pattern |
3354 | with a modified I2SRC would not match. */ |
3355 | |
3356 | if (flag_expensive_optimizations)
3357 | { |
3358 | /* Pass pc_rtx so no substitutions are done, just |
3359 | simplifications. */ |
3360 | if (i1) |
3361 | { |
3362 | subst_low_luid = DF_INSN_LUID (i1);
3363 | i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3364 | }
3365 |
3366 | subst_low_luid = DF_INSN_LUID (i2);
3367 | i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3368 | }
3369 |
3370 | n_occurrences = 0; /* `subst' counts here */
3371 | subst_low_luid = DF_INSN_LUID (i2);
3372 | |
3373 | /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique |
3374 | copy of I2SRC each time we substitute it, in order to avoid creating |
3375 | self-referential RTL when we will be substituting I1SRC for I1DEST |
3376 | later. Likewise if I0 feeds into I2, either directly or indirectly |
3377 | through I1, and I0DEST is in I0SRC. */ |
3378 | newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0, |
3379 | (i1_feeds_i2_n && i1dest_in_i1src) |
3380 | || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n)) |
3381 | && i0dest_in_i0src)); |
3382 | substed_i2 = 1; |
3383 | |
3384 | /* Record whether I2's body now appears within I3's body. */ |
3385 | i2_is_used = n_occurrences; |
3386 | } |
3387 | |
3388 | /* If we already got a failure, don't try to do more. Otherwise, try to |
3389 | substitute I1 if we have it. */ |
3390 | |
3391 | if (i1 && GET_CODE (newpat) != CLOBBER)
3392 | { |
3393 | /* Before we can do this substitution, we must redo the test done |
3394 | above (see detailed comments there) that ensures I1DEST isn't |
3395 | mentioned in any SETs in NEWPAT that are field assignments. */ |
3396 | if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3397 | 0, 0, 0)) |
3398 | { |
3399 | undo_all (); |
3400 | return 0; |
3401 | } |
3402 | |
3403 | n_occurrences = 0; |
3404 | subst_low_luid = DF_INSN_LUID (i1);
3405 | |
3406 | /* If the following substitution will modify I1SRC, make a copy of it |
3407 | for the case where it is substituted for I1DEST in I2PAT later. */ |
3408 | if (added_sets_2 && i1_feeds_i2_n) |
3409 | i1src_copy = copy_rtx (i1src); |
3410 | |
3411 | /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique |
3412 | copy of I1SRC each time we substitute it, in order to avoid creating |
3413 | self-referential RTL when we will be substituting I0SRC for I0DEST |
3414 | later. */ |
3415 | newpat = subst (newpat, i1dest, i1src, 0, 0, |
3416 | i0_feeds_i1_n && i0dest_in_i0src); |
3417 | substed_i1 = 1; |
3418 | |
3419 | /* Record whether I1's body now appears within I3's body. */ |
3420 | i1_is_used = n_occurrences; |
3421 | } |
3422 | |
3423 | /* Likewise for I0 if we have it. */ |
3424 | |
3425 | if (i0 && GET_CODE (newpat) != CLOBBER)
3426 | { |
3427 | if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3428 | 0, 0, 0)) |
3429 | { |
3430 | undo_all (); |
3431 | return 0; |
3432 | } |
3433 | |
3434 | /* If the following substitution will modify I0SRC, make a copy of it |
3435 | for the case where it is substituted for I0DEST in I1PAT later. */ |
3436 | if (added_sets_1 && i0_feeds_i1_n) |
3437 | i0src_copy = copy_rtx (i0src); |
3438 | /* And a copy for I0DEST in I2PAT substitution. */ |
3439 | if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n) |
3440 | || (i0_feeds_i2_n))) |
3441 | i0src_copy2 = copy_rtx (i0src); |
3442 | |
3443 | n_occurrences = 0; |
3444 | subst_low_luid = DF_INSN_LUID (i0);
3445 | newpat = subst (newpat, i0dest, i0src, 0, 0, 0); |
3446 | substed_i0 = 1; |
3447 | } |
3448 | |
3449 | if (n_auto_inc) |
3450 | { |
3451 | int new_n_auto_inc = 0; |
3452 | for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc); |
3453 | |
3454 | if (n_auto_inc != new_n_auto_inc) |
3455 | { |
3456 | if (dump_file && (dump_flags & TDF_DETAILS)) |
3457 | fprintf (dump_file, "Number of auto_inc expressions changed\n"); |
3458 | undo_all (); |
3459 | return 0; |
3460 | } |
3461 | } |
3462 | |
3463 | /* Fail if an autoincrement side-effect has been duplicated. Be careful |
3464 | to count all the ways that I2SRC and I1SRC can be used. */ |
3465 | if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3466 | && i2_is_used + added_sets_2 > 1)
3467 | || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3468 | && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3469 | > 1))
3470 | || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3471 | && (n_occurrences + added_sets_0 |
3472 | + (added_sets_1 && i0_feeds_i1_n) |
3473 | + (added_sets_2 && i0_feeds_i2_n) |
3474 | > 1)) |
3475 | /* Fail if we tried to make a new register. */ |
3476 | || max_reg_num () != maxreg |
3477 | /* Fail if we couldn't do something and have a CLOBBER. */ |
3478 | || GET_CODE (newpat) == CLOBBER
3479 | /* Fail if this new pattern is a MULT and we didn't have one before |
3480 | at the outer level. */ |
3481 | || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3482 | && ! have_mult)) |
3483 | { |
3484 | undo_all (); |
3485 | return 0; |
3486 | } |
3487 | |
3488 | /* If the actions of the earlier insns must be kept |
3489 | in addition to substituting them into the latest one, |
3490 | we must make a new PARALLEL for the latest insn |
3491 | to hold the additional SETs. */
3492 | |
3493 | if (added_sets_0 || added_sets_1 || added_sets_2) |
3494 | { |
3495 | int extra_sets = added_sets_0 + added_sets_1 + added_sets_2; |
3496 | combine_extras++; |
3497 | |
3498 | if (GET_CODE (newpat) == PARALLEL)
3499 | {
3500 | rtvec old = XVEC (newpat, 0);
3501 | total_sets = XVECLEN (newpat, 0) + extra_sets;
3502 | newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3503 | memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3504 | sizeof (old->elem[0]) * old->num_elem);
3505 | }
3506 | else
3507 | {
3508 | rtx old = newpat;
3509 | total_sets = 1 + extra_sets;
3510 | newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3511 | XVECEXP (newpat, 0, 0) = old;
3512 | } |
3513 | |
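     | /* In effect, TOTAL_SETS now indexes one past the last slot of the
     |    new vector; each retained pattern below is stored at
     |    --TOTAL_SETS, filling the vector from the back while the
     |    original elements stay at the front. */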
3514 | if (added_sets_0) |
3515 | XVECEXP (newpat, 0, --total_sets) = i0pat;
3516 | |
3517 | if (added_sets_1) |
3518 | { |
3519 | rtx t = i1pat; |
3520 | if (i0_feeds_i1_n) |
3521 | t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0); |
3522 | |
3523 | XVECEXP (newpat, 0, --total_sets) = t;
3524 | } |
3525 | if (added_sets_2) |
3526 | { |
3527 | rtx t = i2pat; |
3528 | if (i1_feeds_i2_n) |
3529 | t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0, |
3530 | i0_feeds_i1_n && i0dest_in_i0src); |
3531 | if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n) |
3532 | t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0); |
3533 | |
3534 | XVECEXP (newpat, 0, --total_sets) = t;
3535 | } |
3536 | } |
3537 | |
3538 | validate_replacement: |
3539 | |
3540 | /* Note which hard regs this insn has as inputs. */ |
3541 | mark_used_regs_combine (newpat); |
3542 | |
3543 | /* If recog_for_combine fails, it strips existing clobbers. If we'll |
3544 | consider splitting this pattern, we might need these clobbers. */ |
3545 | if (i1 && GET_CODE (newpat) == PARALLEL
3546 | && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3547 | {
3548 | int len = XVECLEN (newpat, 0);
3549 | 
3550 | newpat_vec_with_clobbers = rtvec_alloc (len);
3551 | for (i = 0; i < len; i++)
3552 | RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3553 | } |
3554 | |
3555 | /* We have recognized nothing yet. */ |
3556 | insn_code_number = -1; |
3557 | |
3558 | /* See if this is a PARALLEL of two SETs where one SET's destination is |
3559 | a register that is unused and this isn't marked as an instruction that |
3560 | might trap in an EH region. In that case, we just need the other SET. |
3561 | We prefer this over the PARALLEL. |
3562 | |
3563 | This can occur when simplifying a divmod insn. We *must* test for this |
3564 | case here because the code below that splits two independent SETs doesn't |
3565 | handle this case correctly when it updates the register status. |
3566 | |
3567 | It's pointless doing this if we originally had two sets, one from |
3568 | i3, and one from i2. Combining then splitting the parallel results |
3569 | in the original i2 again plus an invalid insn (which we delete). |
3570 | The net effect is only to move instructions around, which makes |
3571 | debug info less accurate. |
3572 | |
3573 | If the remaining SET came from I2 its destination should not be used |
3574 | between I2 and I3. See PR82024. */ |
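     | /* For instance, a DIV/MOD pair from a divmod pattern where only
     |    the quotient is live leaves the remainder's SET_DEST carrying a
     |    REG_UNUSED note on I3; the surviving SET alone is what we then
     |    try to recognize. */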
3575 | |
3576 | if (!(added_sets_2 && i1 == 0) |
3577 | && is_parallel_of_n_reg_sets (newpat, 2) |
3578 | && asm_noperands (newpat) < 0) |
3579 | { |
3580 | rtx set0 = XVECEXP (newpat, 0, 0);
3581 | rtx set1 = XVECEXP (newpat, 0, 1);
3582 | rtx oldpat = newpat; |
3583 | |
3584 | if (((REG_P (SET_DEST (set1))
3585 | && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3586 | || (GET_CODE (SET_DEST (set1)) == SUBREG
3587 | && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3588 | && insn_nothrow_p (i3)
3589 | && !side_effects_p (SET_SRC (set1)))
3590 | { |
3591 | newpat = set0; |
3592 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
3593 | } |
3594 | |
3595 | else if (((REG_P (SET_DEST (set0))
3596 | && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3597 | || (GET_CODE (SET_DEST (set0)) == SUBREG
3598 | && find_reg_note (i3, REG_UNUSED,
3599 | SUBREG_REG (SET_DEST (set0)))))
3600 | && insn_nothrow_p (i3)
3601 | && !side_effects_p (SET_SRC (set0)))
3602 | {
3603 | rtx dest = SET_DEST (set1);
3604 | if (GET_CODE (dest) == SUBREG)
3605 | dest = SUBREG_REG (dest);
3606 | if (!reg_used_between_p (dest, i2, i3)) |
3607 | { |
3608 | newpat = set1; |
3609 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
3610 | |
3611 | if (insn_code_number >= 0) |
3612 | changed_i3_dest = 1; |
3613 | } |
3614 | } |
3615 | |
3616 | if (insn_code_number < 0) |
3617 | newpat = oldpat; |
3618 | } |
3619 | |
3620 | /* Is the result of combination a valid instruction? */ |
3621 | if (insn_code_number < 0) |
3622 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
3623 | |
3624 | /* If we were combining three insns and the result is a simple SET |
3625 | with no ASM_OPERANDS that wasn't recognized, try to split it into two |
3626 | insns. There are two ways to do this. It can be split using a |
3627 | machine-specific method (like when you have an addition of a large |
3628 | constant) or by combine in the function find_split_point. */ |
3629 | |
3630 | if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3631 | && asm_noperands (newpat) < 0) |
3632 | { |
3633 | rtx parallel, *split; |
3634 | rtx_insn *m_split_insn; |
3635 | |
3636 | /* See if the MD file can split NEWPAT. If it can't, see if letting it |
3637 | use I2DEST as a scratch register will help. In the latter case, |
3638 | convert I2DEST to the mode of the source of NEWPAT if we can. */ |
3639 | |
3640 | m_split_insn = combine_split_insns (newpat, i3); |
3641 | |
3642 | /* We can only use I2DEST as a scratch reg if it doesn't overlap any |
3643 | inputs of NEWPAT. */ |
3644 | |
3645 | /* ??? If I2DEST is not safe, and I1DEST exists, then it would be |
3646 | possible to try that as a scratch reg. This would require adding |
3647 | more code to make it work though. */ |
3648 | |
3649 | if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat)) |
3650 | { |
3651 | machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3652 | |
3653 | /* ??? Reusing i2dest without resetting the reg_stat entry for it |
3654 | (temporarily, until we are committed to this instruction |
3655 | combination) does not work: for example, any call to nonzero_bits |
3656 | on the register (from a splitter in the MD file, for example) |
3657 | will get the old information, which is invalid. |
3658 | |
3659 | Since nowadays we can create registers during combine just fine, |
3660 | we should just create a new one here, not reuse i2dest. */ |
3661 | |
3662 | /* First try to split using the original register as a |
3663 | scratch register. */ |
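     | /* The idea: wrapping NEWPAT as (parallel [NEWPAT (clobber I2DEST)])
     |    offers the backend's splitter I2DEST as a scratch register. */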
3664 | parallel = gen_rtx_PARALLEL (VOIDmode,
3665 | gen_rtvec (2, newpat,
3666 | gen_rtx_CLOBBER (VOIDmode,
3667 | i2dest)));
3668 | m_split_insn = combine_split_insns (parallel, i3); |
3669 | |
3670 | /* If that didn't work, try changing the mode of I2DEST if |
3671 | we can. */ |
3672 | if (m_split_insn == 0
3673 | && new_mode != GET_MODE (i2dest)
3674 | && new_mode != VOIDmode
3675 | && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3676 | {
3677 | machine_mode old_mode = GET_MODE (i2dest);
3678 | rtx ni2dest;
3679 | 
3680 | if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3681 | ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3682 | else
3683 | {
3684 | SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3685 | ni2dest = regno_reg_rtx[REGNO (i2dest)];
3686 | } |
3687 | |
3688 | parallel = (gen_rtx_PARALLEL
3689 | (VOIDmode,
3690 | gen_rtvec (2, newpat,
3691 | gen_rtx_CLOBBER (VOIDmode,
3692 | ni2dest))));
3693 | m_split_insn = combine_split_insns (parallel, i3);
3694 | 
3695 | if (m_split_insn == 0
3696 | && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3697 | { |
3698 | struct undo *buf; |
3699 | |
3700 | adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3701 | buf = undobuf.undos; |
3702 | undobuf.undos = buf->next; |
3703 | buf->next = undobuf.frees; |
3704 | undobuf.frees = buf; |
3705 | } |
3706 | } |
3707 | |
3708 | i2scratch = m_split_insn != 0; |
3709 | } |
3710 | |
3711 | /* If recog_for_combine has discarded clobbers, try to use them |
3712 | again for the split. */ |
3713 | if (m_split_insn == 0 && newpat_vec_with_clobbers) |
3714 | { |
3715 | parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3716 | m_split_insn = combine_split_insns (parallel, i3); |
3717 | } |
3718 | |
3719 | if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3720 | { |
3721 | rtx m_split_pat = PATTERN (m_split_insn); |
3722 | insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes); |
3723 | if (insn_code_number >= 0) |
3724 | newpat = m_split_pat; |
3725 | } |
3726 | else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3727 | && (next_nonnote_nondebug_insn (i2) == i3 |
3728 | || !modified_between_p (PATTERN (m_split_insn), i2, i3))) |
3729 | { |
3730 | rtx i2set, i3set; |
3731 | rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn)); |
3732 | newi2pat = PATTERN (m_split_insn); |
3733 | |
3734 | i3set = single_set (NEXT_INSN (m_split_insn)); |
3735 | i2set = single_set (m_split_insn); |
3736 | |
3737 | i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); |
3738 | |
3739 | /* If I2 or I3 has multiple SETs, we won't know how to track |
3740 | register status, so don't use these insns. If I2's destination |
3741 | is used between I2 and I3, we also can't use these insns. */ |
3742 | |
3743 | if (i2_code_number >= 0 && i2set && i3set |
3744 | && (next_nonnote_nondebug_insn (i2) == i3 |
3745 | || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3746 | insn_code_number = recog_for_combine (&newi3pat, i3, |
3747 | &new_i3_notes); |
3748 | if (insn_code_number >= 0) |
3749 | newpat = newi3pat; |
3750 | |
3751 | /* It is possible that both insns now set the destination of I3. |
3752 | If so, we must show an extra use of it. */ |
3753 | |
3754 | if (insn_code_number >= 0) |
3755 | { |
3756 | rtx new_i3_dest = SET_DEST (i3set);
3757 | rtx new_i2_dest = SET_DEST (i2set);
3758 | 
3759 | while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3760 | || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3761 | || GET_CODE (new_i3_dest) == SUBREG)
3762 | new_i3_dest = XEXP (new_i3_dest, 0);
3763 | 
3764 | while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3765 | || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3766 | || GET_CODE (new_i2_dest) == SUBREG)
3767 | new_i2_dest = XEXP (new_i2_dest, 0);
3768 | 
3769 | if (REG_P (new_i3_dest)
3770 | && REG_P (new_i2_dest)
3771 | && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3772 | && REGNO (new_i2_dest) < reg_n_sets_max)
3773 | INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3774 | } |
3775 | } |
3776 | |
3777 | /* If we can split it and use I2DEST, go ahead and see if that |
3778 | helps things be recognized. Verify that none of the registers |
3779 | are set between I2 and I3. */ |
3780 | if (insn_code_number < 0 |
3781 | && (split = find_split_point (&newpat, i3, false)) != 0 |
3782 | && (!HAVE_cc0 || REG_P (i2dest))
3783 | /* We need I2DEST in the proper mode. If it is a hard register |
3784 | or the only use of a pseudo, we can change its mode. |
3785 | Make sure we don't change a hard register to have a mode that |
3786 | isn't valid for it, or change the number of registers. */ |
3787 | && (GET_MODE (*split) == GET_MODE (i2dest)
3788 | || GET_MODE (*split) == VOIDmode
3789 | || can_change_dest_mode (i2dest, added_sets_2,
3790 | GET_MODE (*split)))
3791 | && (next_nonnote_nondebug_insn (i2) == i3 |
3792 | || !modified_between_p (*split, i2, i3)) |
3793 | /* We can't overwrite I2DEST if its value is still used by |
3794 | NEWPAT. */ |
3795 | && ! reg_referenced_p (i2dest, newpat)) |
3796 | { |
3797 | rtx newdest = i2dest; |
3798 | enum rtx_code split_code = GET_CODE (*split);
3799 | machine_mode split_mode = GET_MODE (*split);
3800 | bool subst_done = false;
3801 | newi2pat = NULL_RTX;
3802 | |
3803 | i2scratch = true; |
3804 | |
3805 | /* *SPLIT may be part of I2SRC, so make sure we have the |
3806 | original expression around for later debug processing. |
3807 | We should not need I2SRC any more in other cases. */ |
3808 | if (MAY_HAVE_DEBUG_BIND_INSNS)
3809 | i2src = copy_rtx (i2src);
3810 | else
3811 | i2src = NULL;
3812 | |
3813 | /* Get NEWDEST as a register in the proper mode. We have already |
3814 | validated that we can do this. */ |
3815 | if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3816 | {
3817 | if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3818 | newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3819 | else
3820 | {
3821 | SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3822 | newdest = regno_reg_rtx[REGNO (i2dest)];
3823 | } |
3824 | } |
3825 | |
3826 | /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to |
3827 | an ASHIFT. This can occur if it was inside a PLUS and hence |
3828 | appeared to be a memory address. This is a kludge. */ |
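     | /* For instance, (mult FOO (const_int 8)) becomes
     |    (ashift FOO (const_int 3)), since 8 == 1 << 3; exact_log2
     |    returns the shift count only for powers of two, and -1
     |    otherwise, which is what the >= 0 test below relies on. */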
3829 | if (split_code == MULT |
3830 | && CONST_INT_P (XEXP (*split, 1))
3831 | && INTVAL (XEXP (*split, 1)) > 0
3832 | && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3833 | {
3834 | rtx i_rtx = gen_int_shift_amount (split_mode, i);
3835 | SUBST (*split, gen_rtx_ASHIFT (split_mode,
3836 | XEXP (*split, 0), i_rtx));
3837 | /* Update split_code because we may not have a multiply
3838 | anymore. */
3839 | split_code = GET_CODE (*split);
3840 | } |
3841 | |
3842 | /* Similarly for (plus (mult FOO (const_int pow2))). */ |
3843 | if (split_code == PLUS |
3844 | && GET_CODE (XEXP (*split, 0)) == MULT
3845 | && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3846 | && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3847 | && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3848 | {
3849 | rtx nsplit = XEXP (*split, 0);
3850 | rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3851 | SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3852 | XEXP (nsplit, 0),
3853 | i_rtx));
3854 | /* Update split_code because we may not have a multiply |
3855 | anymore. */ |
3856 | split_code = GET_CODE (*split);
3857 | } |
3858 | |
3859 | #ifdef INSN_SCHEDULING |
3860 | /* If *SPLIT is a paradoxical SUBREG, when we split it, it should |
3861 | be written as a ZERO_EXTEND. */ |
3862 | if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3863 | { |
3864 | /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's |
3865 | what it really is. */ |
3866 | if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3867 | == SIGN_EXTEND)
3868 | SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3869 | SUBREG_REG (*split)));
3870 | else
3871 | SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3872 | SUBREG_REG (*split)));
3873 | } |
3874 | #endif |
3875 | |
3876 | /* Attempt to split binary operators using arithmetic identities. */ |
3877 | if (BINARY_P (SET_SRC (newpat))
3878 | && split_mode == GET_MODE (SET_SRC (newpat))
3879 | && ! side_effects_p (SET_SRC (newpat)))
3880 | {
3881 | rtx setsrc = SET_SRC (newpat);
3882 | machine_mode mode = GET_MODE (setsrc);
3883 | enum rtx_code code = GET_CODE (setsrc);
3884 | rtx src_op0 = XEXP (setsrc, 0);
3885 | rtx src_op1 = XEXP (setsrc, 1);
3886 | |
3887 | /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */ |
3888 | if (rtx_equal_p (src_op0, src_op1)) |
3889 | { |
3890 | newi2pat = gen_rtx_SET (newdest, src_op0);
3891 | SUBST (XEXP (setsrc, 0), newdest);
3892 | SUBST (XEXP (setsrc, 1), newdest);
3893 | subst_done = true; |
3894 | } |
3895 | /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */ |
3896 | else if ((code == PLUS || code == MULT) |
3897 | && GET_CODE (src_op0) == code
3898 | && GET_CODE (XEXP (src_op0, 0)) == code
3899 | && (INTEGRAL_MODE_P (mode)
3900 | || (FLOAT_MODE_P (mode)
3901 | && flag_unsafe_math_optimizations)))
3902 | {
3903 | rtx p = XEXP (XEXP (src_op0, 0), 0);
3904 | rtx q = XEXP (XEXP (src_op0, 0), 1);
3905 | rtx r = XEXP (src_op0, 1);
3906 | rtx s = src_op1; |
3907 | |
3908 | /* Split both "((X op Y) op X) op Y" and |
3909 | "((X op Y) op Y) op X" as "T op T" where T is |
3910 | "X op Y". */ |
3911 | if ((rtx_equal_p (p,r) && rtx_equal_p (q,s)) |
3912 | || (rtx_equal_p (p,s) && rtx_equal_p (q,r))) |
3913 | { |
3914 | newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3915 | SUBST (XEXP (setsrc, 0), newdest);
3916 | SUBST (XEXP (setsrc, 1), newdest);
3917 | subst_done = true; |
3918 | } |
3919 | /* Split "((X op X) op Y) op Y" as "T op T" where
3920 | T is "X op Y". */
3921 | else if (rtx_equal_p (p,q) && rtx_equal_p (r,s)) |
3922 | { |
3923 | rtx tmp = simplify_gen_binary (code, mode, p, r); |
3924 | newi2pat = gen_rtx_SET (newdest, tmp);
3925 | SUBST (XEXP (setsrc, 0), newdest);
3926 | SUBST (XEXP (setsrc, 1), newdest);
3927 | subst_done = true; |
3928 | } |
3929 | } |
3930 | } |
3931 | |
3932 | if (!subst_done) |
3933 | { |
3934 | newi2pat = gen_rtx_SET (newdest, *split);
3935 | SUBST (*split, newdest);
3936 | } |
3937 | |
3938 | i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); |
3939 | |
3940 | /* recog_for_combine might have added CLOBBERs to newi2pat. |
3941 | Make sure NEWPAT does not depend on the clobbered regs. */ |
3942 | if (GET_CODE (newi2pat) == PARALLEL)
3943 | for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3944 | if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3945 | {
3946 | rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3947 | if (reg_overlap_mentioned_p (reg, newpat)) |
3948 | { |
3949 | undo_all (); |
3950 | return 0; |
3951 | } |
3952 | } |
3953 | |
3954 | /* If the split point was a MULT and we didn't have one before, |
3955 | don't use one now. */ |
3956 | if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult)) |
3957 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
3958 | } |
3959 | } |
3960 | |
3961 | /* Check for a case where we loaded from memory in a narrow mode and |
3962 | then sign extended it, but we need both registers. In that case, |
3963 | we have a PARALLEL with both loads from the same memory location. |
3964 | We can split this into a load from memory followed by a register-register |
3965 | copy. This saves at least one insn, more if register allocation can |
3966 | eliminate the copy. |
3967 | |
3968 | We cannot do this if the destination of the first assignment is a |
3969 | condition code register or cc0. We eliminate this case by making sure |
3970 | the SET_DEST and SET_SRC have the same mode. |
3971 | |
3972 | We cannot do this if the destination of the second assignment is |
3973 | a register that we have already assumed is zero-extended. Similarly |
3974 | for a SUBREG of such a register. */ |
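     | /* E.g. (parallel [(set A (sign_extend (mem M))) (set B (mem M))])
     |    becomes an extending load into A as the new I2, while the
     |    remaining SET is rewritten to copy the low part of A into B. */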
3975 | |
3976 | else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0 |
3977 | && GET_CODE (newpat) == PARALLEL
3978 | && XVECLEN (newpat, 0) == 2
3979 | && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3980 | && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3981 | && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3982 | == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3983 | && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3984 | && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3985 | XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3986 | && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
3987 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3988 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3989 | && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3990 | (REG_P (temp_expr)
3991 | && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3992 | && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3993 | BITS_PER_WORD)
3994 | && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3995 | HOST_BITS_PER_INT)
3996 | && (reg_stat[REGNO (temp_expr)].nonzero_bits
3997 | != GET_MODE_MASK (word_mode))))
3998 | && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3999 | && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
4000 | (REG_P (temp_expr)
4001 | && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4002 | && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4003 | BITS_PER_WORD)
4004 | && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4005 | HOST_BITS_PER_INT)
4006 | && (reg_stat[REGNO (temp_expr)].nonzero_bits
4007 | != GET_MODE_MASK (word_mode)))))
4008 | && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4009 | SET_SRC (XVECEXP (newpat, 0, 1)))
4010 | && ! find_reg_note (i3, REG_UNUSED,
4011 | SET_DEST (XVECEXP (newpat, 0, 0))))
4012 | { |
4013 | rtx ni2dest; |
4014 | |
4015 | newi2pat = XVECEXP (newpat, 0, 0);
4016 | ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
4017 | newpat = XVECEXP (newpat, 0, 1);
4018 | SUBST (SET_SRC (newpat),
4019 | gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
4020 | i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); |
4021 | |
4022 | if (i2_code_number >= 0) |
4023 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
4024 | |
4025 | if (insn_code_number >= 0) |
4026 | swap_i2i3 = 1; |
4027 | } |
4028 | |
4029 | /* Similarly, check for a case where we have a PARALLEL of two independent |
4030 | SETs but we started with three insns. In this case, we can do the sets |
4031 | as two separate insns. This case occurs when some SET allows two |
4032 | other insns to combine, but the destination of that SET is still live. |
4033 | |
4034 | Also do this if we started with two insns and (at least) one of the |
4035 | resulting sets is a noop; this noop will be deleted later. |
4036 | |
4037 | Also do this if we started with two insns neither of which was a simple |
4038 | move. */ |
4039 | |
4040 | else if (insn_code_number < 0 && asm_noperands (newpat) < 0 |
4041 | && GET_CODE (newpat) == PARALLEL
4042 | && XVECLEN (newpat, 0) == 2
4043 | && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4044 | && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4045 | && (i1
4046 | || set_noop_p (XVECEXP (newpat, 0, 0))
4047 | || set_noop_p (XVECEXP (newpat, 0, 1))
4048 | || (!i2_was_move && !i3_was_move))
4049 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
4050 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
4051 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4052 | && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4053 | && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4054 | XVECEXP (newpat, 0, 0))
4055 | && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4056 | XVECEXP (newpat, 0, 1))
4057 | && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4058 | && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4059 | { |
4060 | rtx set0 = XVECEXP (newpat, 0, 0);
4061 | rtx set1 = XVECEXP (newpat, 0, 1);
4062 | |
4063 | /* Normally, it doesn't matter which of the two is done first, |
4064 | but the one that references cc0 can't be the second, and |
4065 | one which uses any regs/memory set in between i2 and i3 can't |
4066 | be first. The PARALLEL might also have been pre-existing in i3, |
4067 | so we need to make sure that we won't wrongly hoist a SET to i2 |
4068 | that would conflict with a death note present in there, or would |
4069 | have its dest modified between i2 and i3. */ |
4070 | if (!modified_between_p (SET_SRC (set1), i2, i3)
4071 | && !(REG_P (SET_DEST (set1))
4072 | && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4073 | && !(GET_CODE (SET_DEST (set1)) == SUBREG
4074 | && find_reg_note (i2, REG_DEAD,
4075 | SUBREG_REG (SET_DEST (set1))))
4076 | && !modified_between_p (SET_DEST (set1), i2, i3)
4077 | && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4078 | /* If I3 is a jump, ensure that set0 is a jump so that
4079 | we do not create invalid RTL. */
4080 | && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4081 | ) |
4082 | { |
4083 | newi2pat = set1; |
4084 | newpat = set0; |
4085 | } |
4086 | else if (!modified_between_p (SET_SRC (set0), i2, i3)
4087 | && !(REG_P (SET_DEST (set0))
4088 | && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4089 | && !(GET_CODE (SET_DEST (set0)) == SUBREG
4090 | && find_reg_note (i2, REG_DEAD,
4091 | SUBREG_REG (SET_DEST (set0))))
4092 | && !modified_between_p (SET_DEST (set0), i2, i3)
4093 | && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4094 | /* If I3 is a jump, ensure that set1 is a jump so that
4095 | we do not create invalid RTL. */
4096 | && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4097 | ) |
4098 | { |
4099 | newi2pat = set0; |
4100 | newpat = set1; |
4101 | } |
4102 | else |
4103 | { |
4104 | undo_all (); |
4105 | return 0; |
4106 | } |
4107 | |
4108 | i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes); |
4109 | |
4110 | if (i2_code_number >= 0) |
4111 | { |
4112 | /* recog_for_combine might have added CLOBBERs to newi2pat. |
4113 | Make sure NEWPAT does not depend on the clobbered regs. */ |
4114 | if (GET_CODE (newi2pat) == PARALLEL)
4115 | {
4116 | for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4117 | if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4118 | {
4119 | rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4120 | if (reg_overlap_mentioned_p (reg, newpat)) |
4121 | { |
4122 | undo_all (); |
4123 | return 0; |
4124 | } |
4125 | } |
4126 | } |
4127 | |
4128 | insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes); |
4129 | |
4130 | if (insn_code_number >= 0) |
4131 | split_i2i3 = 1; |
4132 | } |
4133 | } |
4134 | |
4135 | /* If it still isn't recognized, fail and change things back the way they |
4136 | were. */ |
4137 | if ((insn_code_number < 0 |
4138 | /* Is the result a reasonable ASM_OPERANDS? */ |
4139 | && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2))) |
4140 | { |
4141 | undo_all (); |
4142 | return 0; |
4143 | } |
4144 | |
4145 | /* If we had to change another insn, make sure it is valid also. */ |
4146 | if (undobuf.other_insn) |
4147 | { |
4148 | CLEAR_HARD_REG_SET (newpat_used_regs); |
4149 | |
4150 | other_pat = PATTERN (undobuf.other_insn); |
4151 | other_code_number = recog_for_combine (&other_pat, undobuf.other_insn, |
4152 | &new_other_notes); |
4153 | |
4154 | if (other_code_number < 0 && ! check_asm_operands (other_pat)) |
4155 | { |
4156 | undo_all (); |
4157 | return 0; |
4158 | } |
4159 | } |
4160 | |
4161 | /* If I2 is the CC0 setter and I3 is the CC0 user then check whether |
4162 | they are adjacent to each other or not. */ |
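     | /* In effect: if some non-jump insn other than I2 still sits
     |    immediately before I3 while the new I2 pattern sets cc0, the
     |    setter and its user would no longer be adjacent, so the
     |    combination below is undone. */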
4163 | if (HAVE_cc0)
4164 | { |
4165 | rtx_insn *p = prev_nonnote_insn (i3); |
4166 | if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4167 | && sets_cc0_p (newi2pat)) |
4168 | { |
4169 | undo_all (); |
4170 | return 0; |
4171 | } |
4172 | } |
4173 | |
4174 | /* Only allow this combination if insn_cost reports that the |
4175 | replacement instructions are cheaper than the originals. */ |
4176 | if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat)) |
4177 | { |
4178 | undo_all (); |
4179 | return 0; |
4180 | } |
4181 | |
4182 | if (MAY_HAVE_DEBUG_BIND_INSNS)
4183 | { |
4184 | struct undo *undo; |
4185 | |
4186 | for (undo = undobuf.undos; undo; undo = undo->next) |
4187 | if (undo->kind == UNDO_MODE) |
4188 | { |
4189 | rtx reg = *undo->where.r; |
4190 | machine_mode new_mode = GET_MODE (reg);
4191 | machine_mode old_mode = undo->old_contents.m; |
4192 | |
4193 | /* Temporarily revert mode back. */ |
4194 | adjust_reg_mode (reg, old_mode); |
4195 | |
4196 | if (reg == i2dest && i2scratch) |
4197 | { |
4198 | /* If we used i2dest as a scratch register with a |
4199 | different mode, substitute it for the original |
4200 | i2src while its original mode is temporarily |
4201 | restored, and then clear i2scratch so that we don't |
4202 | do it again later. */ |
4203 | propagate_for_debug (i2, last_combined_insn, reg, i2src, |
4204 | this_basic_block); |
4205 | i2scratch = false; |
4206 | /* Put back the new mode. */ |
4207 | adjust_reg_mode (reg, new_mode); |
4208 | } |
4209 | else |
4210 | { |
4211 | rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4212 | rtx_insn *first, *last; |
4213 | |
4214 | if (reg == i2dest) |
4215 | { |
4216 | first = i2; |
4217 | last = last_combined_insn; |
4218 | } |
4219 | else |
4220 | { |
4221 | first = i3; |
4222 | last = undobuf.other_insn; |
4223 | gcc_assert (last);
4224 | if (DF_INSN_LUID (last)
4225 | < DF_INSN_LUID (last_combined_insn))
4226 | last = last_combined_insn; |
4227 | } |
4228 | |
4229 | /* We're dealing with a reg that changed mode but not |
4230 | meaning, so we want to turn it into a subreg for |
4231 | the new mode. However, because of REG sharing and |
4232 | because its mode had already changed, we have to do |
4233 | it in two steps. First, replace any debug uses of |
4234 | reg, with its original mode temporarily restored, |
4235 | with this copy we have created; then, replace the |
4236 | copy with the SUBREG of the original shared reg, |
4237 | once again changed to the new mode. */ |
4238 | propagate_for_debug (first, last, reg, tempreg, |
4239 | this_basic_block); |
4240 | adjust_reg_mode (reg, new_mode); |
4241 | propagate_for_debug (first, last, tempreg, |
4242 | lowpart_subreg (old_mode, reg, new_mode), |
4243 | this_basic_block); |
4244 | } |
4245 | } |
4246 | } |
4247 | |
4248 | /* If we will be able to accept this, we have made a |
4249 | change to the destination of I3. This requires us to |
4250 | do a few adjustments. */ |
4251 | |
4252 | if (changed_i3_dest) |
4253 | { |
4254 | PATTERN (i3) = newpat; |
4255 | adjust_for_new_dest (i3); |
4256 | } |
4257 | |
4258 | /* We now know that we can do this combination. Merge the insns and |
4259 | update the status of registers and LOG_LINKS. */ |
4260 | |
4261 | if (undobuf.other_insn) |
4262 | { |
4263 | rtx note, next; |
4264 | |
4265 | PATTERN (undobuf.other_insn) = other_pat; |
4266 | |
4267 | /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED, |
4268 | ensure that they are still valid. Then add any non-duplicate |
4269 | notes added by recog_for_combine. */ |
4270 | for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4271 | {
4272 | next = XEXP (note, 1);
4273 | 
4274 | if ((REG_NOTE_KIND (note) == REG_DEAD
4275 | && !reg_referenced_p (XEXP (note, 0),
4276 | PATTERN (undobuf.other_insn)))
4277 | || (REG_NOTE_KIND (note) == REG_UNUSED
4278 | && !reg_set_p (XEXP (note, 0),
4279 | PATTERN (undobuf.other_insn)))
4280 | /* Simply drop the equal note, since it may no longer be valid
4281 | for other_insn. It may be possible to record that the CC
4282 | register is changed and only discard those notes, but in
4283 | practice it's an unnecessary complication that doesn't give
4284 | any meaningful improvement.
4285 | 
4286 | See PR78559. */
4287 | || REG_NOTE_KIND (note) == REG_EQUAL
4288 | || REG_NOTE_KIND (note) == REG_EQUIV)
4289 | remove_note (undobuf.other_insn, note);
4290 | }
4291 | |
4292 | distribute_notes (new_other_notes, undobuf.other_insn, |
4293 | undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4294 | NULL_RTX);
4295 | } |
4296 | |
4297 | if (swap_i2i3) |
4298 | { |
4299 | /* I3 now uses what used to be its destination and which is now |
4300 | I2's destination. This requires us to do a few adjustments. */ |
4301 | PATTERN (i3) = newpat; |
4302 | adjust_for_new_dest (i3); |
4303 | } |
4304 | |
4305 | if (swap_i2i3 || split_i2i3) |
4306 | { |
4307 | /* We might need a LOG_LINK from I3 to I2. But then we used to |
4308 | have one, so we still will. |
4309 | |
4310 | However, some later insn might be using I2's dest and have |
4311 | a LOG_LINK pointing at I3. We should change it to point at |
4312 | I2 instead. */ |
4313 | |
4314 | /* newi2pat is usually a SET here; however, recog_for_combine might |
4315 | have added some clobbers. */ |
4316 | rtx x = newi2pat; |
4317 | if (GET_CODE (x) == PARALLEL)
4318 | x = XVECEXP (newi2pat, 0, 0);
4319 | 
4320 | if (REG_P (SET_DEST (x))
4321 | || (GET_CODE (SET_DEST (x)) == SUBREG
4322 | && REG_P (SUBREG_REG (SET_DEST (x)))))
4323 | {
4324 | unsigned int regno = reg_or_subregno (SET_DEST (x));
4325 | |
4326 | bool done = false; |
4327 | for (rtx_insn *insn = NEXT_INSN (i3); |
4328 | !done |
4329 | && insn |
4330 | && NONDEBUG_INSN_P (insn)
4331 | && BLOCK_FOR_INSN (insn) == this_basic_block; |
4332 | insn = NEXT_INSN (insn)) |
4333 | { |
4334 | struct insn_link *link; |
4335 | FOR_EACH_LOG_LINK (link, insn)
4336 | if (link->insn == i3 && link->regno == regno) |
4337 | { |
4338 | link->insn = i2; |
4339 | done = true; |
4340 | break; |
4341 | } |
4342 | } |
4343 | } |
4344 | } |
4345 | |
4346 | { |
4347 | rtx i3notes, i2notes, i1notes = 0, i0notes = 0; |
4348 | struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0; |
4349 | rtx midnotes = 0; |
4350 | int from_luid; |
4351 | /* Compute which registers we expect to eliminate. newi2pat may be setting |
4352 | either i3dest or i2dest, so we must check it. */ |
4353 | rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat)) |
4354 | || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src |
4355 | || !i2dest_killed |
4356 | ? 0 : i2dest); |
4357 | /* For i1, we need to compute both local elimination and global |
4358 | elimination information with respect to newi2pat because i1dest |
4359 | may be the same as i3dest, in which case newi2pat may be setting |
4360 | i1dest. Global information is used when distributing REG_DEAD |
4361 | note for i2 and i3, in which case it does matter if newi2pat sets |
4362 | i1dest or not. |
4363 | |
4364 | Local information is used when distributing REG_DEAD note for i1, |
4365 | in which case it doesn't matter if newi2pat sets i1dest or not. |
4366 | See PR62151, if we have four insns combination: |
4367 | i0: r0 <- i0src |
4368 | i1: r1 <- i1src (using r0) |
4369 | REG_DEAD (r0) |
4370 | i2: r0 <- i2src (using r1) |
4371 | i3: r3 <- i3src (using r0) |
4372 | ix: using r0 |
4373 | From i1's point of view, r0 is eliminated regardless of whether it
4374 | is set by newi2pat. In other words, REG_DEAD info for r0 in i1
4375 | should be discarded. |
4376 | |
4377 | Note local information only affects cases in forms like "I1->I2->I3", |
4378 | "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like |
4379 | "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or |
4380 | i0dest anyway. */ |
4381 | rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src |
4382 | || !i1dest_killed |
4383 | ? 0 : i1dest); |
4384 | rtx elim_i1 = (local_elim_i1 == 0 |
4385 | || (newi2pat && reg_set_p (i1dest, newi2pat)) |
4386 | ? 0 : i1dest); |
4387 | /* Same case as i1. */ |
4388 | rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed |
4389 | ? 0 : i0dest); |
4390 | rtx elim_i0 = (local_elim_i0 == 0 |
4391 | || (newi2pat && reg_set_p (i0dest, newi2pat)) |
4392 | ? 0 : i0dest); |
4393 | |
4394 | /* Get the old REG_NOTES and LOG_LINKS from all our insns and |
4395 | clear them. */ |
4396 | i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4397 | i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4398 | if (i1)
4399 | i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4400 | if (i0)
4401 | i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4402 | |
4403 | /* Ensure that we do not have something that should not be shared but |
4404 | occurs multiple times in the new insns. Check this by first |
4405 | resetting all the `used' flags and then copying anything that is shared. */
4406 | |
4407 | reset_used_flags (i3notes); |
4408 | reset_used_flags (i2notes); |
4409 | reset_used_flags (i1notes); |
4410 | reset_used_flags (i0notes); |
4411 | reset_used_flags (newpat); |
4412 | reset_used_flags (newi2pat); |
4413 | if (undobuf.other_insn) |
4414 | reset_used_flags (PATTERN (undobuf.other_insn)); |
4415 | |
4416 | i3notes = copy_rtx_if_shared (i3notes); |
4417 | i2notes = copy_rtx_if_shared (i2notes); |
4418 | i1notes = copy_rtx_if_shared (i1notes); |
4419 | i0notes = copy_rtx_if_shared (i0notes); |
4420 | newpat = copy_rtx_if_shared (newpat); |
4421 | newi2pat = copy_rtx_if_shared (newi2pat); |
4422 | if (undobuf.other_insn) |
4423 | reset_used_flags (PATTERN (undobuf.other_insn)); |
4424 | |
4425 | INSN_CODE (i3) = insn_code_number;
4426 | PATTERN (i3) = newpat; |
4427 | |
4428 | if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4429 | {
4430 | for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4431 | link = XEXP (link, 1))
4432 | { |
4433 | if (substed_i2) |
4434 | { |
4435 | /* I2SRC must still be meaningful at this point. Some |
4436 | splitting operations can invalidate I2SRC, but those |
4437 | operations do not apply to calls. */ |
4438 | gcc_assert (i2src);
4439 | XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4440 | i2dest, i2src);
4441 | }
4442 | if (substed_i1)
4443 | XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4444 | i1dest, i1src);
4445 | if (substed_i0)
4446 | XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4447 | i0dest, i0src);
4448 | } |
4449 | } |
4450 | |
4451 | if (undobuf.other_insn) |
4452 | INSN_CODE (undobuf.other_insn) = other_code_number;
4453 | |
4454 | /* We had one special case above where I2 had more than one set and |
4455 | we replaced a destination of one of those sets with the destination |
4456 | of I3. In that case, we have to update LOG_LINKS of insns later |
4457 | in this basic block. Note that this (expensive) case is rare. |
4458 | |
4459 | Also, in this case, we must pretend that all REG_NOTEs for I2 |
4460 | actually came from I3, so that REG_UNUSED notes from I2 will be |
4461 | properly handled. */ |
4462 | |
4463 | if (i3_subst_into_i2) |
4464 | { |
4465 | for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4466 | if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4467 | || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4468 | && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4469 | && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4470 | && ! find_reg_note (i2, REG_UNUSED,
4471 | SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4472 | for (temp_insn = NEXT_INSN (i2);
4473 | temp_insn
4474 | && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4475 | || BB_HEAD (this_basic_block) != temp_insn);
4476 | temp_insn = NEXT_INSN (temp_insn))
4477 | if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4478 | FOR_EACH_LOG_LINK (link, temp_insn)
4479 | if (link->insn == i2)
4480 | link->insn = i3;
4481 | |
4482 | if (i3notes) |
4483 | { |
4484 | rtx link = i3notes; |
4485 | while (XEXP (link, 1))
4486 | link = XEXP (link, 1);
4487 | XEXP (link, 1) = i2notes;
4488 | } |
4489 | else |
4490 | i3notes = i2notes; |
4491 | i2notes = 0; |
4492 | } |
4493 | |
4494 | LOG_LINKS (i3) = NULL;
4495 | REG_NOTES (i3) = 0;
4496 | LOG_LINKS (i2) = NULL;
4497 | REG_NOTES (i2) = 0;
4498 | |
4499 | if (newi2pat) |
4500 | { |
4501 | if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4502 | propagate_for_debug (i2, last_combined_insn, i2dest, i2src, |
4503 | this_basic_block); |
4504 | INSN_CODE (i2) = i2_code_number;
4505 | PATTERN (i2) = newi2pat; |
4506 | } |
4507 | else |
4508 | { |
4509 | if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4510 | propagate_for_debug (i2, last_combined_insn, i2dest, i2src, |
4511 | this_basic_block); |
4512 | SET_INSN_DELETED (i2);
4513 | } |
4514 | |
4515 | if (i1) |
4516 | { |
4517 | LOG_LINKS (i1) = NULL;
4518 | REG_NOTES (i1) = 0;
4519 | if (MAY_HAVE_DEBUG_BIND_INSNS)
4520 | propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4521 | this_basic_block);
4522 | SET_INSN_DELETED (i1);
4523 | } |
4524 | |
4525 | if (i0) |
4526 | { |
4527 | LOG_LINKS (i0) = NULL;
4528 | REG_NOTES (i0) = 0;
4529 | if (MAY_HAVE_DEBUG_BIND_INSNS)
4530 | propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4531 | this_basic_block);
4532 | SET_INSN_DELETED (i0);
4533 | } |
4534 | |
4535 | /* Get death notes for everything that is now used in either I3 or |
4536 | I2 and used to die in a previous insn. If we built two new |
4537 | patterns, move from I1 to I2 then I2 to I3 so that we get the |
4538 | proper movement on registers that I2 modifies. */ |
4539 | |
4540 | if (i0) |
4541 | from_luid = DF_INSN_LUID (i0);
4542 | else if (i1)
4543 | from_luid = DF_INSN_LUID (i1);
4544 | else
4545 | from_luid = DF_INSN_LUID (i2);
4546 | if (newi2pat)
4547 | move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4548 | move_deaths (newpat, newi2pat, from_luid, i3, &midnotes); |
4549 | |
4550 | /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */ |
4551 | if (i3notes) |
4552 | distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4553 | elim_i2, elim_i1, elim_i0);
4554 | if (i2notes)
4555 | distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4556 | elim_i2, elim_i1, elim_i0);
4557 | if (i1notes)
4558 | distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4559 | elim_i2, local_elim_i1, local_elim_i0);
4560 | if (i0notes)
4561 | distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4562 | elim_i2, elim_i1, local_elim_i0);
4563 | if (midnotes)
4564 | distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4565 | elim_i2, elim_i1, elim_i0);
4566 | |
4567 | /* Distribute any notes added to I2 or I3 by recog_for_combine. We |
4568 | know these are REG_UNUSED and want them to go to the desired insn, |
4569 | so we always pass it as i3. */ |
4570 | |
4571 | if (newi2pat && new_i2_notes)
4572 | distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4573 | NULL_RTX);
4574 |
4575 | if (new_i3_notes)
4576 | distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4577 | NULL_RTX);
4578 | |
4579 | /* If I3DEST was used in I3SRC, it really died in I3. We may need to |
4580 | put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets |
4581 | I3DEST, the death must be somewhere before I2, not I3. If we passed I3 |
4582 | in that case, it might delete I2. Similarly for I2 and I1. |
4583 | Show an additional death due to the REG_DEAD note we make here. If |
4584 | we discard it in distribute_notes, we will decrement it again. */ |
4585 | |
4586 | if (i3dest_killed) |
4587 | { |
4588 | rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4589 | if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4590 | distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4591 | elim_i1, elim_i0);
4592 | else
4593 | distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4594 | elim_i2, elim_i1, elim_i0);
4595 | } |
4596 | |
4597 | if (i2dest_in_i2src) |
4598 | { |
4599 | rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4600 | if (newi2pat && reg_set_p (i2dest, newi2pat))
4601 | distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4602 | NULL_RTX, NULL_RTX);
4603 | else
4604 | distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4605 | NULL_RTX, NULL_RTX, NULL_RTX);
4606 | } |
4607 | |
4608 | if (i1dest_in_i1src) |
4609 | { |
4610 | rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4611 | if (newi2pat && reg_set_p (i1dest, newi2pat))
4612 | distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4613 | NULL_RTX, NULL_RTX);
4614 | else
4615 | distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4616 | NULL_RTX, NULL_RTX, NULL_RTX);
4617 | } |
4618 | |
4619 | if (i0dest_in_i0src) |
4620 | { |
4621 | rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4622 | if (newi2pat && reg_set_p (i0dest, newi2pat))
4623 | distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4624 | NULL_RTX, NULL_RTX);
4625 | else
4626 | distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4627 | NULL_RTX, NULL_RTX, NULL_RTX);
4628 | } |
4629 | |
4630 | distribute_links (i3links); |
4631 | distribute_links (i2links); |
4632 | distribute_links (i1links); |
4633 | distribute_links (i0links); |
4634 | |
4635 | if (REG_P (i2dest))
4636 | { |
4637 | struct insn_link *link; |
4638 | rtx_insn *i2_insn = 0; |
4639 | rtx i2_val = 0, set; |
4640 | |
4641 | /* The insn that used to set this register doesn't exist, and |
4642 | this life of the register may not exist either. See if one of |
4643 | I3's links points to an insn that sets I2DEST. If it does, |
4644 | that is now the last known value for I2DEST. If we don't update |
4645 | this and I2 set the register to a value that depended on its old |
4646 | contents, we will get confused. If this insn is used, things
4647 | will be set correctly in combine_instructions. */
4648 | FOR_EACH_LOG_LINK (link, i3)
4649 | if ((set = single_set (link->insn)) != 0
4650 | && rtx_equal_p (i2dest, SET_DEST (set)))
4651 | i2_insn = link->insn, i2_val = SET_SRC (set);
4652 | |
4653 | record_value_for_reg (i2dest, i2_insn, i2_val); |
4654 | |
4655 | /* If the reg formerly set in I2 died only once and that was in I3, |
4656 | zero its use count so it won't make `reload' do any work. */ |
4657 | if (! added_sets_2 |
4658 | && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat)) |
4659 | && ! i2dest_in_i2src |
4660 | && REGNO (i2dest) < reg_n_sets_max)
4661 | INC_REG_N_SETS (REGNO (i2dest), -1);
4662 | } |
4663 | |
4664 | if (i1 && REG_P (i1dest))
4665 | { |
4666 | struct insn_link *link; |
4667 | rtx_insn *i1_insn = 0; |
4668 | rtx i1_val = 0, set; |
4669 | |
4670 | FOR_EACH_LOG_LINK (link, i3)
4671 | if ((set = single_set (link->insn)) != 0
4672 | && rtx_equal_p (i1dest, SET_DEST (set)))
4673 | i1_insn = link->insn, i1_val = SET_SRC (set);
4674 | |
4675 | record_value_for_reg (i1dest, i1_insn, i1_val); |
4676 | |
4677 | if (! added_sets_1 |
4678 | && ! i1dest_in_i1src |
4679 | && REGNO (i1dest) < reg_n_sets_max)
4680 | INC_REG_N_SETS (REGNO (i1dest), -1);
4681 | } |
4682 | |
4683 | if (i0 && REG_P (i0dest))
4684 | { |
4685 | struct insn_link *link; |
4686 | rtx_insn *i0_insn = 0; |
4687 | rtx i0_val = 0, set; |
4688 | |
4689 | FOR_EACH_LOG_LINK (link, i3)
4690 | if ((set = single_set (link->insn)) != 0
4691 | && rtx_equal_p (i0dest, SET_DEST (set)))
4692 | i0_insn = link->insn, i0_val = SET_SRC (set);
4693 | |
4694 | record_value_for_reg (i0dest, i0_insn, i0_val); |
4695 | |
4696 | if (! added_sets_0 |
4697 | && ! i0dest_in_i0src |
4698 | && REGNO (i0dest) < reg_n_sets_max)
4699 | INC_REG_N_SETS (REGNO (i0dest), -1);
4700 | } |
4701 | |
4702 | /* Update reg_stat[].nonzero_bits et al for any changes that may have |
4703 | been made to this insn. The order is important, because newi2pat |
4704 | can affect nonzero_bits of newpat. */ |
4705 | if (newi2pat) |
4706 | note_pattern_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4707 | note_pattern_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4708 | } |
4709 | |
4710 | if (undobuf.other_insn != NULL_RTX)
4711 | { |
4712 | if (dump_file) |
4713 | { |
4714 | fprintf (dump_file, "modifying other_insn "); |
4715 | dump_insn_slim (dump_file, undobuf.other_insn); |
4716 | } |
4717 | df_insn_rescan (undobuf.other_insn); |
4718 | } |
4719 | |
4720 | if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4721 | { |
4722 | if (dump_file) |
4723 | { |
4724 | fprintf (dump_file, "modifying insn i0 "); |
4725 | dump_insn_slim (dump_file, i0); |
4726 | } |
4727 | df_insn_rescan (i0); |
4728 | } |
4729 | |
4730 | if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4731 | { |
4732 | if (dump_file) |
4733 | { |
4734 | fprintf (dump_file, "modifying insn i1 "); |
4735 | dump_insn_slim (dump_file, i1); |
4736 | } |
4737 | df_insn_rescan (i1); |
4738 | } |
4739 | |
4740 | if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4741 | { |
4742 | if (dump_file) |
4743 | { |
4744 | fprintf (dump_file, "modifying insn i2 "); |
4745 | dump_insn_slim (dump_file, i2); |
4746 | } |
4747 | df_insn_rescan (i2); |
4748 | } |
4749 | |
4750 | if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4751 | { |
4752 | if (dump_file) |
4753 | { |
4754 | fprintf (dump_file, "modifying insn i3 "); |
4755 | dump_insn_slim (dump_file, i3); |
4756 | } |
4757 | df_insn_rescan (i3); |
4758 | } |
4759 | |
4760 | /* Set new_direct_jump_p if a new return or simple jump instruction |
4761 | has been created. Adjust the CFG accordingly. */ |
4762 | if (returnjump_p (i3) || any_uncondjump_p (i3)) |
4763 | { |
4764 | *new_direct_jump_p = 1; |
4765 | mark_jump_label (PATTERN (i3), i3, 0); |
4766 | update_cfg_for_uncondjump (i3); |
4767 | } |
4768 | |
4769 | if (undobuf.other_insn != NULL_RTX
4770 | && (returnjump_p (undobuf.other_insn) |
4771 | || any_uncondjump_p (undobuf.other_insn))) |
4772 | { |
4773 | *new_direct_jump_p = 1; |
4774 | update_cfg_for_uncondjump (undobuf.other_insn); |
4775 | } |
4776 | |
4777 | if (GET_CODE (PATTERN (i3)) == TRAP_IF
4778 | && XEXP (PATTERN (i3), 0) == const1_rtx)
4779 | { |
4780 | basic_block bb = BLOCK_FOR_INSN (i3); |
4781 | gcc_assert (bb);
4782 | remove_edge (split_block (bb, i3)); |
4783 | emit_barrier_after_bb (bb); |
4784 | *new_direct_jump_p = 1; |
4785 | } |
4786 | |
4787 | if (undobuf.other_insn |
4788 | && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4789 | && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4790 | { |
4791 | basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn); |
4792 | gcc_assert (bb);
4793 | remove_edge (split_block (bb, undobuf.other_insn)); |
4794 | emit_barrier_after_bb (bb); |
4795 | *new_direct_jump_p = 1; |
4796 | } |
4797 | |
4798 | /* A noop might also need cleaning up of CFG, if it comes from the |
4799 | simplification of a jump. */ |
4800 | if (JUMP_P (i3)
4801 | && GET_CODE (newpat) == SET
4802 | && SET_SRC (newpat) == pc_rtx
4803 | && SET_DEST (newpat) == pc_rtx)
4804 | { |
4805 | *new_direct_jump_p = 1; |
4806 | update_cfg_for_uncondjump (i3); |
4807 | } |
4808 | |
4809 | if (undobuf.other_insn != NULL_RTX
4810 | && JUMP_P (undobuf.other_insn)
4811 | && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4812 | && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4813 | && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4814 | { |
4815 | *new_direct_jump_p = 1; |
4816 | update_cfg_for_uncondjump (undobuf.other_insn); |
4817 | } |
4818 | |
4819 | combine_successes++; |
4820 | undo_commit (); |
4821 | |
4822 | rtx_insn *ret = newi2pat ? i2 : i3; |
4823 | if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4824 | ret = added_links_insn;
4825 | if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4826 | ret = added_notes_insn; |
4827 | |
4828 | return ret; |
4829 | } |
4830 | |
4831 | /* Get a marker for undoing to the current state. */ |
4832 | |
4833 | static void * |
4834 | get_undo_marker (void) |
4835 | { |
4836 | return undobuf.undos; |
4837 | } |
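     | /* This pairs with undo_to_marker below: a caller saves the marker,
     |    makes tentative SUBSTs, and rolls back to the marker if the
     |    tentative combination fails.  */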
4838 | |
4839 | /* Undo the modifications up to the marker. */ |
4840 | |
4841 | static void |
4842 | undo_to_marker (void *marker) |
4843 | { |
4844 | struct undo *undo, *next; |
4845 | |
4846 | for (undo = undobuf.undos; undo != marker; undo = next) |
4847 | { |
4848 | gcc_assert (undo);
4849 | |
4850 | next = undo->next; |
4851 | switch (undo->kind) |
4852 | { |
4853 | case UNDO_RTX: |
4854 | *undo->where.r = undo->old_contents.r; |
4855 | break; |
4856 | case UNDO_INT: |
4857 | *undo->where.i = undo->old_contents.i; |
4858 | break; |
4859 | case UNDO_MODE: |
4860 | adjust_reg_mode (*undo->where.r, undo->old_contents.m); |
4861 | break; |
4862 | case UNDO_LINKS: |
4863 | *undo->where.l = undo->old_contents.l; |
4864 | break; |
4865 | default: |
4866 | gcc_unreachable ();
4867 | } |
4868 | |
4869 | undo->next = undobuf.frees; |
4870 | undobuf.frees = undo; |
4871 | } |
4872 | |
4873 | undobuf.undos = (struct undo *) marker; |
4874 | } |
4875 | |
4876 | /* Undo all the modifications recorded in undobuf. */ |
4877 | |
4878 | static void |
4879 | undo_all (void) |
4880 | { |
4881 | undo_to_marker (0); |
4882 | } |
4883 | |
4884 | /* We've committed to accepting the changes we made. Move all |
4885 | of the undos to the free list. */ |
4886 | |
4887 | static void |
4888 | undo_commit (void) |
4889 | { |
4890 | struct undo *undo, *next; |
4891 | |
4892 | for (undo = undobuf.undos; undo; undo = next) |
4893 | { |
4894 | next = undo->next; |
4895 | undo->next = undobuf.frees; |
4896 | undobuf.frees = undo; |
4897 | } |
4898 | undobuf.undos = 0; |
4899 | } |
4900 | |
4901 | /* Find the innermost point within the rtx at LOC, possibly LOC itself, |
4902 | where we have an arithmetic expression and return that point. LOC will |
4903 | be inside INSN. |
4904 | |
4905 | try_combine will call this function to see if an insn can be split into |
4906 | two insns. */ |
4907 | |
4908 | static rtx * |
4909 | find_split_point (rtx *loc, rtx_insn *insn, bool set_src) |
4910 | { |
4911 | rtx x = *loc; |
4912 | enum rtx_code code = GET_CODE (x);
4913 | rtx *split; |
4914 | unsigned HOST_WIDE_INT len = 0;
4915 | HOST_WIDE_INT pos = 0;
4916 | int unsignedp = 0;
4917 | rtx inner = NULL_RTX;
4918 | scalar_int_mode mode, inner_mode; |
4919 | |
4920 | /* First special-case some codes. */ |
4921 | switch (code) |
4922 | { |
4923 | case SUBREG: |
4924 | #ifdef INSN_SCHEDULING |
4925 | /* If we are making a paradoxical SUBREG invalid, it becomes a split |
4926 | point. */ |
4927 | if (MEM_P (SUBREG_REG (x)))
4928 | return loc; |
4929 | #endif |
4930 | return find_split_point (&SUBREG_REG (x), insn, false);
4931 | |
4932 | case MEM: |
4933 | /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it |
4934 | using LO_SUM and HIGH. */ |
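     | /* For example, (mem (symbol_ref "x")) becomes
     |    (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x"))),
     |    and the split point returned is the HIGH part just created. */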
4935 | if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4936 | || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4937 | {
4938 | machine_mode address_mode = get_address_mode (x);
4939 |
4940 | SUBST (XEXP (x, 0),
4941 | gen_rtx_LO_SUM (address_mode,
4942 | gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4943 | XEXP (x, 0)));
4944 | return &XEXP (XEXP (x, 0), 0);
4945 | } |
4946 | |
4947 | /* If we have a PLUS whose second operand is a constant and the |
4948 | address is not valid, perhaps we can split it up using |
4949 | the machine-specific way to split large constants. We use |
4950 | the first pseudo-reg (one of the virtual regs) as a placeholder; |
4951 | it will not remain in the result. */ |
4952 | if (GET_CODE (XEXP (x, 0)) == PLUS
4953 | && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4954 | && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4955 | MEM_ADDR_SPACE (x)))
4956 | {
4957 | rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4958 | rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4959 | subst_insn);
4960 | |
4961 | /* This should have produced two insns, each of which sets our |
4962 | placeholder. If the source of the second is a valid address, |
4963 | we can put both sources together and make a split point |
4964 | in the middle. */ |
4965 | |
4966 | if (seq
4967 | && NEXT_INSN (seq) != NULL_RTX
4968 | && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4969 | && NONJUMP_INSN_P (seq)
4970 | && GET_CODE (PATTERN (seq)) == SET
4971 | && SET_DEST (PATTERN (seq)) == reg
4972 | && ! reg_mentioned_p (reg,
4973 | SET_SRC (PATTERN (seq)))
4974 | && NONJUMP_INSN_P (NEXT_INSN (seq))
4975 | && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4976 | && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4977 | && memory_address_addr_space_p
4978 | (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4979 | MEM_ADDR_SPACE (x)))
4980 | {
4981 | rtx src1 = SET_SRC (PATTERN (seq));
4982 | rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4983 | |
4984 | /* Replace the placeholder in SRC2 with SRC1. If we can |
4985 | find where in SRC2 it was placed, that can become our |
4986 | split point and we can replace this address with SRC2. |
4987 | Just try two obvious places. */ |
4988 | |
4989 | src2 = replace_rtx (src2, reg, src1); |
4990 | split = 0; |
4991 | if (XEXP (src2, 0) == src1)
4992 | split = &XEXP (src2, 0);
4993 | else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4994 | && XEXP (XEXP (src2, 0), 0) == src1)
4995 | split = &XEXP (XEXP (src2, 0), 0);
4996 |
4997 | if (split)
4998 | {
4999 | SUBST (XEXP (x, 0), src2);
5000 | return split; |
5001 | } |
5002 | } |
5003 | |
5004 | /* If that didn't work and we have a nested plus, like:
5005 | ((REG1 * CONST1) + REG2) + CONST2 and (REG1 + REG2) + CONST2
5006 | is a valid address, try to split (REG1 * CONST1). */
5007 | if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5008 | && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5009 | && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5010 | && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
5011 | && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5012 | 0), 0)))))
5013 | {
5014 | rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
5015 | XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
5016 | if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5017 | MEM_ADDR_SPACE (x)))
5018 | {
5019 | XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5020 | return &XEXP (XEXP (XEXP (x, 0), 0), 0);
5021 | }
5022 | XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5023 | }
5024 | else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5025 | && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5026 | && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5027 | && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
5028 | && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5029 | 0), 1)))))
5030 | {
5031 | rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
5032 | XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
5033 | if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5034 | MEM_ADDR_SPACE (x)))
5035 | {
5036 | XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5037 | return &XEXP (XEXP (XEXP (x, 0), 0), 1);
5038 | }
5039 | XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5040 | }
5041 |
5042 | /* If that didn't work, perhaps the first operand is complex and
5043 | needs to be computed separately, so make a split point there.
5044 | This will occur on machines that just support REG + CONST
5045 | and have a constant moved through some previous computation. */
5046 | if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
5047 | && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5048 | && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5049 | return &XEXP (XEXP (x, 0), 0);
5050 | }
5051 | |
5052 | /* If we have a PLUS whose first operand is complex, try computing it |
5053 | separately by making a split there. */ |
5054 | if (GET_CODE (XEXP (x, 0)) == PLUS
5055 | && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5056 | MEM_ADDR_SPACE (x))
5057 | && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
5058 | && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5059 | && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5060 | return &XEXP (XEXP (x, 0), 0);
5061 | break; |
5062 | |
5063 | case SET: |
5064 | /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a |
5065 | ZERO_EXTRACT, the most likely reason why this doesn't match is that |
5066 | we need to put the operand into a register. So split at that |
5067 | point. */ |
5068 | |
5069 | if (SET_DEST (x) == cc0_rtx
5070 | && GET_CODE (SET_SRC (x)) != COMPARE
5071 | && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
5072 | && !OBJECT_P (SET_SRC (x))
5073 | && ! (GET_CODE (SET_SRC (x)) == SUBREG
5074 | && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
5075 | return &SET_SRC (x);
5076 |
5077 | /* See if we can split SET_SRC as it stands. */
5078 | split = find_split_point (&SET_SRC (x), insn, true);
5079 | if (split && split != &SET_SRC (x))
5080 | return split;
5081 |
5082 | /* See if we can split SET_DEST as it stands. */
5083 | split = find_split_point (&SET_DEST (x), insn, false);
5084 | if (split && split != &SET_DEST (x))
5085 | return split;
5086 | |
5087 | /* See if this is a bitfield assignment with everything constant. If |
5088 | so, this is an IOR of an AND, so split it into that. */ |
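     | /* E.g. with a 2-bit field at bit 3 (little-endian bit numbering),
     |    (set (zero_extract D 2 3) 3) becomes (set D (ior D 0x18)), and
     |    (set (zero_extract D 2 3) 2) becomes
     |    (set D (ior (and D ~0x18) 0x10)). */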
5089 | if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
5090 | && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
5091 | &inner_mode)
5092 | && HWI_COMPUTABLE_MODE_P (inner_mode)
5093 | && CONST_INT_P (XEXP (SET_DEST (x), 1))
5094 | && CONST_INT_P (XEXP (SET_DEST (x), 2))
5095 | && CONST_INT_P (SET_SRC (x))
5096 | && ((INTVAL (XEXP (SET_DEST (x), 1))
5097 | + INTVAL (XEXP (SET_DEST (x), 2)))
5098 | <= GET_MODE_PRECISION (inner_mode))
5099 | && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5100 | {
5101 | HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5102 | unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5103 | rtx dest = XEXP (SET_DEST (x), 0);
5104 | unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << len) - 1;
5105 | unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x)) & mask;
5106 | rtx or_mask;
5107 |
5108 | if (BITS_BIG_ENDIAN)
5109 | pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5110 |
5111 | or_mask = gen_int_mode (src << pos, inner_mode);
5112 | if (src == mask)
5113 | SUBST (SET_SRC (x),
5114 | simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5115 | else
5116 | {
5117 | rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5118 | SUBST (SET_SRC (x),
5119 | simplify_gen_binary (IOR, inner_mode,
5120 | simplify_gen_binary (AND, inner_mode,
5121 | dest, negmask),
5122 | or_mask));
5123 | }
5124 |
5125 | SUBST (SET_DEST (x), dest);
5126 |
5127 | split = find_split_point (&SET_SRC (x), insn, true);
5128 | if (split && split != &SET_SRC (x))
5129 | return split;
5130 | }
5131 | |
5132 | /* Otherwise, see if this is an operation that we can split into two. |
5133 | If so, try to split that. */ |
5134 | code = GET_CODE (SET_SRC (x));
5135 | |
5136 | switch (code) |
5137 | { |
5138 | case AND: |
5139 | /* If we are AND'ing with a large constant that is only a single |
5140 | bit and the result is only being used in a context where we |
5141 | need to know if it is zero or nonzero, replace it with a bit |
5142 | extraction. This will avoid the large constant, which might |
5143 | have taken more than one insn to make. If the constant were |
5144 | not a valid argument to the AND but took only one insn to make, |
5145 | this is no worse, but if it took more than one insn, it will |
5146 | be better. */ |
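     | /* E.g. (eq (and X (const_int 0x8000)) 0), where X is a REG, is
     |    effectively rewritten to test a one-bit ZERO_EXTRACT of X at
     |    bit 15 instead of keeping the 0x8000 constant around. */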
5147 | |
5148 | if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5149 | && REG_P (XEXP (SET_SRC (x), 0))
5150 | && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5151 | && REG_P (SET_DEST (x))
5152 | && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5153 | && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5154 | && XEXP (*split, 0) == SET_DEST (x)
5155 | && XEXP (*split, 1) == const0_rtx)
5156 | {
5157 | rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5158 | XEXP (SET_SRC (x), 0),
5159 | pos, NULL_RTX, 1, 1, 0, 0);
5160 | if (extraction != 0)
5161 | {
5162 | SUBST (SET_SRC (x), extraction);
5163 | return find_split_point (loc, insn, false);
5164 | }
5165 | }
5166 | break; |
5167 | |
5168 | case NE: |
5169 | /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X |
5170 | is known to be on, this can be converted into a NEG of a shift. */ |
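     | /* E.g. if only bit 4 of X can be nonzero, (ne X 0) becomes
     |    (neg (lshiftrt X 4)), which evaluates to 0 or -1, matching
     |    STORE_FLAG_VALUE == -1. */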
5171 | if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5172 | && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5173 | && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5174 | GET_MODE (XEXP (SET_SRC (x),
5175 | 0))))) >= 1))
5176 | {
5177 | machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5178 | rtx pos_rtx = gen_int_shift_amount (mode, pos);
5179 | SUBST (SET_SRC (x),
5180 | gen_rtx_NEG (mode,
5181 | gen_rtx_LSHIFTRT (mode,
5182 | XEXP (SET_SRC (x), 0),
5183 | pos_rtx)));
5184 |
5185 | split = find_split_point (&SET_SRC (x), insn, true);
5186 | if (split && split != &SET_SRC (x))
5187 | return split;
5188 | }
5189 | break; |
5190 | |
5191 | case SIGN_EXTEND: |
5192 | inner = XEXP (SET_SRC (x), 0);
5193 | |
5194 | /* We can't optimize if either mode is a partial integer |
5195 | mode as we don't know how many bits are significant |
5196 | in those modes. */ |
5197 | if (!is_int_mode (GET_MODE (inner), &inner_mode)
5198 | || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5199 | break; |
5200 | |
5201 | pos = 0; |
5202 | len = GET_MODE_PRECISION (inner_mode); |
5203 | unsignedp = 0; |
5204 | break; |
5205 | |
5206 | case SIGN_EXTRACT: |
5207 | case ZERO_EXTRACT: |
5208 | if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5209 | &inner_mode)
5210 | && CONST_INT_P (XEXP (SET_SRC (x), 1))
5211 | && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5212 | {
5213 | inner = XEXP (SET_SRC (x), 0);
5214 | len = INTVAL (XEXP (SET_SRC (x), 1));
5215 | pos = INTVAL (XEXP (SET_SRC (x), 2));
5216 |
5217 | if (BITS_BIG_ENDIAN)
5218 | pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5219 | unsignedp = (code == ZERO_EXTRACT); |
5220 | } |
5221 | break; |
5222 | |
5223 | default: |
5224 | break; |
5225 | } |
5226 | |
5227 | if (len |
5228 | && known_subrange_p (pos, len, |
5229 | 0, GET_MODE_PRECISION (GET_MODE (inner)))
5230 | && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5231 | { |
5232 | /* For unsigned, we have a choice of a shift followed by an |
5233 | AND or two shifts. Use two shifts for field sizes where the |
5234 | constant might be too large. We assume here that we can |
5235 | always at least get 8-bit constants in an AND insn, which is |
5236 | true for every current RISC. */ |
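     | /* E.g. an unsigned 8-bit field at bit 4 of a 32-bit INNER becomes
     |    (and (lshiftrt INNER 4) 0xff); a 12-bit field at bit 4 instead
     |    becomes (lshiftrt (ashift INNER 16) 20) to avoid a large AND
     |    mask. */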
5237 | |
5238 | if (unsignedp && len <= 8)
5239 | {
5240 | unsigned HOST_WIDE_INT mask
5241 | = (HOST_WIDE_INT_1U << len) - 1;
5242 | rtx pos_rtx = gen_int_shift_amount (mode, pos);
5243 | SUBST (SET_SRC (x),
5244 | gen_rtx_AND (mode,
5245 | gen_rtx_LSHIFTRT
5246 | (mode, gen_lowpart (mode, inner), pos_rtx),
5247 | gen_int_mode (mask, mode)));
5248 |
5249 | split = find_split_point (&SET_SRC (x), insn, true);
5250 | if (split && split != &SET_SRC (x))
5251 | return split;
5252 | }
5253 | else
5254 | {
5255 | int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5256 | int right_bits = GET_MODE_PRECISION (mode) - len;
5257 | SUBST (SET_SRC (x),
5258 | gen_rtx_fmt_ee
5259 | (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5260 | gen_rtx_ASHIFT (mode,
5261 | gen_lowpart (mode, inner),
5262 | gen_int_shift_amount (mode, left_bits)),
5263 | gen_int_shift_amount (mode, right_bits)));
5264 |
5265 | split = find_split_point (&SET_SRC (x), insn, true);
5266 | if (split && split != &SET_SRC (x))
5267 | return split;
5268 | }
5269 | } |
5270 | |
5271 | /* See if this is a simple operation with a constant as the second |
5272 | operand. It might be that this constant is out of range and hence |
5273 | could be used as a split point. */ |
5274 | if (BINARY_P (SET_SRC (x))
5275 | && CONSTANT_P (XEXP (SET_SRC (x), 1))
5276 | && (OBJECT_P (XEXP (SET_SRC (x), 0))
5277 | || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5278 | && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5279 | return &XEXP (SET_SRC (x), 1);
5280 | |
5281 | /* Finally, see if this is a simple operation with its first operand |
5282 | not in a register. The operation might require this operand in a |
5283 | register, so return it as a split point. We can always do this |
5284 | because if the first operand were another operation, we would have |
5285 | already found it as a split point. */ |
5286 | if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5287 | && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5288 | return &XEXP (SET_SRC (x), 0);
5289 | |
5290 | return 0; |
5291 | |
5292 | case AND: |
5293 | case IOR: |
5294 | /* We write NOR as (and (not A) (not B)), but if we don't have a NOR, |
5295 | it is better to write this as (not (ior A B)) so we can split it. |
5296 | Similarly for IOR. */ |
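     | /* By De Morgan's law, (and (not A) (not B)) becomes
     |    (not (ior A B)), and (ior (not A) (not B)) becomes
     |    (not (and A B)). */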
5297 | if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5298 | {
5299 | SUBST (*loc,
5300 | gen_rtx_NOT (GET_MODE (x),
5301 | gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5302 | GET_MODE (x),
5303 | XEXP (XEXP (x, 0), 0),
5304 | XEXP (XEXP (x, 1), 0))));
5305 | return find_split_point (loc, insn, set_src); |
5306 | } |
5307 | |
5308 | /* Many RISC machines have a large set of logical insns. If the |
5309 | second operand is a NOT, put it first so we will try to split the |
5310 | other operand first. */ |
5311 | if (GET_CODE (XEXP (x, 1)) == NOT)
5312 | {
5313 | rtx tem = XEXP (x, 0);
5314 | SUBST (XEXP (x, 0), XEXP (x, 1));
5315 | SUBST (XEXP (x, 1), tem);
5316 | } |
5317 | break; |
5318 | |
5319 | case PLUS: |
5320 | case MINUS: |
5321 | /* Canonicalization can produce (minus A (mult B C)), where C is a |
5322 | constant. It may be better to try splitting (plus (mult B -C) A) |
5323 | instead if this isn't a multiply by a power of two. */ |
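     | /* E.g. (minus A (mult B 3)) becomes (plus (mult B -3) A), whose
     |    multiply-add shape is more likely to match an insn pattern. */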
5324 | if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5325 | && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5326 | && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5327 | {
5328 | machine_mode mode = GET_MODE (x);
5329 | unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5330 | HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5331 | SUBST (*loc, gen_rtx_PLUS (mode,
5332 | gen_rtx_MULT (mode,
5333 | XEXP (XEXP (x, 1), 0),
5334 | gen_int_mode (other_int,
5335 | mode)),
5336 | XEXP (x, 0)));
5337 | return find_split_point (loc, insn, set_src);
5338 | }
5339 | |
5340 | /* Split at a multiply-accumulate instruction. However if this is |
5341 | the SET_SRC, we likely do not have such an instruction and it's |
5342 | worthless to try this split. */ |
5343 | if (!set_src |
5344 | && (GET_CODE (XEXP (x, 0)) == MULT
5345 | || (GET_CODE (XEXP (x, 0)) == ASHIFT
5346 | && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5347 | return loc; |
5348 | |
5349 | default: |
5350 | break; |
5351 | } |
5352 | |
5353 | /* Otherwise, select our actions depending on our rtx class. */ |
5354 | switch (GET_RTX_CLASS (code))
5355 | { |
5356 | case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */ |
5357 | case RTX_TERNARY: |
5358 | split = find_split_point (&XEXP (x, 2), insn, false);
5359 | if (split) |
5360 | return split; |
5361 | /* fall through */ |
5362 | case RTX_BIN_ARITH: |
5363 | case RTX_COMM_ARITH: |
5364 | case RTX_COMPARE: |
5365 | case RTX_COMM_COMPARE: |
5366 | split = find_split_point (&XEXP (x, 1), insn, false);
5367 | if (split) |
5368 | return split; |
5369 | /* fall through */ |
5370 | case RTX_UNARY: |
5371 | /* Some machines have (and (shift ...) ...) insns. If X is not |
5372 | an AND, but XEXP (X, 0) is, use it as our split point. */ |
5373 | if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5374 | return &XEXP (x, 0);
5375 | |
5376 | split = find_split_point (&XEXP (x, 0), insn, false);
5377 | if (split) |
5378 | return split; |
5379 | return loc; |
5380 | |
5381 | default: |
5382 | /* Otherwise, we don't have a split point. */ |
5383 | return 0; |
5384 | } |
5385 | } |
5386 | |
5387 | /* Throughout X, replace FROM with TO, and return the result. |
5388 | The result is TO if X is FROM; |
5389 | otherwise the result is X, but its contents may have been modified. |
5390 | If they were modified, a record was made in undobuf so that |
5391 | undo_all will (among other things) return X to its original state. |
5392 | |
5393 | If the number of changes necessary is too much to record to undo, |
5394 | the excess changes are not made, so the result is invalid. |
5395 | The changes already made can still be undone. |
5396 | undobuf.num_undo is incremented for such changes, so by testing that |
5397 | the caller can tell whether the result is valid. |
5398 | |
5399 | `n_occurrences' is incremented each time FROM is replaced. |
5400 | |
5401 | IN_DEST is nonzero if we are processing the SET_DEST of a SET. |
5402 | |
5403 | IN_COND is nonzero if we are at the top level of a condition. |
5404 | |
5405 | UNIQUE_COPY is nonzero if each substitution must be unique. We do this |
5406 | by copying if `n_occurrences' is nonzero. */ |
5407 | |
5408 | static rtx |
5409 | subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy) |
5410 | { |
5411 | enum rtx_code code = GET_CODE (x);
5412 | machine_mode op0_mode = VOIDmode;
5413 | const char *fmt; |
5414 | int len, i; |
5415 | rtx new_rtx; |
5416 | |
5417 | /* Two expressions are equal if they are identical copies of a shared |
5418 | RTX or if they are both registers with the same register number |
5419 | and mode. */ |
5420 | |
5421 | #define COMBINE_RTX_EQUAL_P(X,Y) \
5422 | ((X) == (Y) \
5423 | || (REG_P (X) && REG_P (Y) \
5424 | && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5425 | |
5426 | /* Do not substitute into clobbers of regs -- this will never result in |
5427 | valid RTL. */ |
5428 | if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5429 | return x;
5430 |
5431 | if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5432 | { |
5433 | n_occurrences++; |
5434 | return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to); |
5435 | } |
5436 | |
5437 | /* If X and FROM are the same register but different modes, they |
5438 | will not have been seen as equal above. However, the log links code |
5439 | will make a LOG_LINKS entry for that case. If we do nothing, we |
5440 | will try to rerecognize our original insn and, when it succeeds, |
5441 | we will delete the feeding insn, which is incorrect. |
5442 | |
5443 | So force this insn not to match in this (rare) case. */ |
5444 | if (! in_dest && code == REG && REG_P (from)
5445 | && reg_overlap_mentioned_p (x, from))
5446 | return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5447 | |
5448 | /* If this is an object, we are done unless it is a MEM or LO_SUM, both |
5449 | of which may contain things that can be combined. */ |
5450 |   if (code != MEM && code != LO_SUM && OBJECT_P (x)) |
5451 | return x; |
5452 | |
5453 | /* It is possible to have a subexpression appear twice in the insn. |
5454 | Suppose that FROM is a register that appears within TO. |
5455 | Then, after that subexpression has been scanned once by `subst', |
5456 | the second time it is scanned, TO may be found. If we were |
5457 | to scan TO here, we would find FROM within it and create a |
5458 |      self-referential rtl structure which is completely wrong.  */ |
5459 |   if (COMBINE_RTX_EQUAL_P (x, to)) |
5460 | return to; |
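 | |
 | /* [Editor's example, hypothetical values.]  With FROM = (reg:SI 100) |
 |    and TO = (plus:SI (reg:SI 100) (const_int 1)), a copy of TO |
 |    already planted by an earlier substitution must not be rescanned: |
 |    replacing the (reg:SI 100) inside it would make TO one of its own |
 |    operands, i.e. circular rtl.  */ |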
5461 | |
5462 | /* Parallel asm_operands need special attention because all of the |
5463 | inputs are shared across the arms. Furthermore, unsharing the |
5464 | rtl results in recognition failures. Failure to handle this case |
5465 | specially can result in circular rtl. |
5466 | |
5467 | Solve this by doing a normal pass across the first entry of the |
5468 | parallel, and only processing the SET_DESTs of the subsequent |
5469 | entries. Ug. */ |
5470 | |
5471 | if (code == PARALLEL |
5472 |       && GET_CODE (XVECEXP (x, 0, 0)) == SET |
5473 |       && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS) |
5474 | { |
5475 |       new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy); |
5476 | |
5477 | /* If this substitution failed, this whole thing fails. */ |
5478 |       if (GET_CODE (new_rtx) == CLOBBER |
5479 |           && XEXP (new_rtx, 0) == const0_rtx) |
5480 |         return new_rtx; |
5481 | |
5482 |       SUBST (XVECEXP (x, 0, 0), new_rtx); |
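 | |
 | /* [Editor's note.]  SUBST is not a plain store: do_SUBST first |
 |    records the field's old contents in undobuf, so undo_all () can |
 |    restore every change if the combined pattern is not recognized.  */ |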
5483 | |
5484 |       for (i = XVECLEN (x, 0) - 1; i >= 1; i--) |
5485 | { |
5486 |           rtx dest = SET_DEST (XVECEXP (x, 0, i)); |
5487 | |
5488 |           if (!REG_P (dest) |
5489 |               && GET_CODE (dest) != CC0 |
5490 |               && GET_CODE (dest) != PC) |
5491 | { |
5492 | new_rtx = subst (dest, from, to, 0, 0, unique_copy); |
5493 | |
5494 | /* If this substitution failed, this whole thing fails. */ |
5495 |               if (GET_CODE (new_rtx) == CLOBBER |
5496 |                   && XEXP (new_rtx, 0) == const0_rtx) |
5497 |                 return new_rtx; |
5498 | |
5499 |               SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx); |
5500 | } |
5501 | } |
5502 | } |
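 | |
 | /* [Editor's sketch of the shape handled above; operands are |
 |    hypothetical.]  A multi-output asm is roughly |
 | |
 |      (parallel [(set (reg:SI 90) (asm_operands ...)) |
 |                 (set (reg:SI 91) (asm_operands ...))]) |
 | |
 |    where the asm_operands share one input vector, so only the first |
 |    arm is substituted in full and the remaining arms have just their |
 |    SET_DESTs processed.  */ |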
5503 | else |
5504 | { |
5505 |       len = GET_RTX_LENGTH (code); |
5506 |       fmt = GET_RTX_FORMAT (code); |
5507 | |
5508 | /* We don't need to process a SET_DEST that is a register, CC0, |
5509 | or PC, so set up to skip this common case. All other cases |
5510 | where we want to suppress replacing something inside a |
5511 | SET_SRC are handled via the IN_DEST operand. */ |
5512 |       if (code == SET |
5513 |           && (REG_P (SET_DEST (x)) |
5514 |               || GET_CODE (SET_DEST (x)) == CC0 |
5515 |               || GET_CODE (SET_DEST (x)) == PC)) |
5516 |         fmt = "ie"; |
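 | |
 | /* [Editor's note.]  In rtx format strings, 'e' marks an rtx operand |
 |    and 'i' an integer.  A SET's true format is "ee"; pretending it is |
 |    "ie" makes the loop below treat operand 0 (the destination) as an |
 |    integer and skip it, while operand 1 (the source) is still |
 |    substituted.  */ |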
5517 | |
5518 | /* Trying to simplify the operands of a widening MULT is not likely |
5519 | to create RTL matching a machine insn. */ |
5520 |       if (code == MULT |
5521 |           && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND |
5522 |               || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND) |
5523 |           && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND |
5524 |               || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND) |
5525 |           && REG_P (XEXP (XEXP (x, 0), 0)) |
5526 |           && REG_P (XEXP (XEXP (x, 1), 0)) |
5527 |           && from == to) |
5528 |         return x; |
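 | |
 | /* [Editor's example.]  The rtl skipped here looks like |
 |      (mult:DI (sign_extend:DI (reg:SI 100)) |
 |               (sign_extend:DI (reg:SI 101))) |
 |    which many targets match directly as a widening multiply; |
 |    simplifying the extended operands would only spoil that match.  */ |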
5529 | |
5530 | |
5531 | /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a |
5532 | constant. */ |
5533 | if (fmt[0] == 'e') |
5534 |         op0_mode = GET_MODE (XEXP (x, 0)); |
5535 | |
5536 | for (i = 0; i < len; i++) |
5537 | { |
5538 | if (fmt[i] == 'E') |
5539 | { |
5540 | int j; |
5541 |               for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
5542 | { |
5543 |                   if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from)) |
5544 | { |
5545 | new_rtx = (unique_copy && n_occurrences |
5546 | ? copy_rtx (to) : to); |
5547 | n_occurrences++; |
5548 | } |
5549 | else |
5550 | { |
5551 |                       new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0, |
5552 | unique_copy); |
5553 | |
5554 | /* If this substitution failed, this whole thing |
5555 | fails. */ |
5556 |                       if (GET_CODE (new_rtx) == CLOBBER |
5557 |                           && XEXP (new_rtx, 0) == const0_rtx) |
5558 |                         return new_rtx; |
5559 | } |
5560 | |
5561 |                   SUBST (XVECEXP (x, i, j), new_rtx); |