Bug Summary

File: build/gcc/reorg.c
Warning: line 1169, column 9
Branch condition evaluates to a garbage value
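
For reference, "Branch condition evaluates to a garbage value" is the diagnostic the analyzer's core checkers emit when some execution path reaches a branch whose condition reads an uninitialized value. The following is only a minimal, hypothetical sketch of that warning class (the function and variable names are invented; it is not code from reorg.c). The annotated listing below shows the concrete path the analyzer builds through reorg.c up to line 1169.

/* Hypothetical illustration of the warning class, not taken from reorg.c.  */
static int
branches_on_garbage (int flag)
{
  int decision;      /* left uninitialized on the flag == 0 path */

  if (flag)
    decision = 1;

  if (decision)      /* branch condition may evaluate to a garbage value */
    return 1;
  return 0;
}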

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name reorg.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model static -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -fno-split-dwarf-inlining -debugger-tuning=gdb -resource-dir /usr/lib64/clang/11.0.0 -D IN_GCC -D HAVE_CONFIG_H -I . -I . -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/. -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../include -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcpp/include -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcody -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber/bid -I ../libdecnumber -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libbacktrace -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/10/../../../../include/c++/10 -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/10/../../../../include/c++/10/x86_64-suse-linux -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/10/../../../../include/c++/10/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib64/clang/11.0.0/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-narrowing -Wwrite-strings -Wno-error=format-diag -Wno-long-long -Wno-variadic-macros -Wno-overlength-strings -fdeprecated-macro -fdebug-compilation-dir /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=plist-html -analyzer-config silence-checkers=core.NullDereference -faddrsig -o /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/clang-static-analyzer/2021-01-16-135054-17580-1/report-iEIjXZ.plist -x c++ /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c

/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c

1/* Perform instruction reorganizations for delay slot filling.
2 Copyright (C) 1992-2021 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu).
4 Hacked by Michael Tiemann (tiemann@cygnus.com).
5
6This file is part of GCC.
7
8GCC is free software; you can redistribute it and/or modify it under
9the terms of the GNU General Public License as published by the Free
10Software Foundation; either version 3, or (at your option) any later
11version.
12
13GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14WARRANTY; without even the implied warranty of MERCHANTABILITY or
15FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16for more details.
17
18You should have received a copy of the GNU General Public License
19along with GCC; see the file COPYING3. If not see
20<http://www.gnu.org/licenses/>. */
21
22/* Instruction reorganization pass.
23
24 This pass runs after register allocation and final jump
25 optimization. It should be the last pass to run before peephole.
26 It serves primarily to fill delay slots of insns, typically branch
27 and call insns. Other insns typically involve more complicated
28 interactions of data dependencies and resource constraints, and
29 are better handled by scheduling before register allocation (by the
30 function `schedule_insns').
31
32 The Branch Penalty is the number of extra cycles that are needed to
33 execute a branch insn. On an ideal machine, branches take a single
34 cycle, and the Branch Penalty is 0. Several RISC machines approach
35 branch delays differently:
36
37 The MIPS has a single branch delay slot. Most insns
38 (except other branches) can be used to fill this slot. When the
39 slot is filled, two insns execute in two cycles, reducing the
40 branch penalty to zero.
41
42 The SPARC always has a branch delay slot, but its effects can be
43 annulled when the branch is not taken. This means that failing to
44 find other sources of insns, we can hoist an insn from the branch
45 target that would only be safe to execute knowing that the branch
46 is taken.
47
48 The HP-PA always has a branch delay slot. For unconditional branches
49 its effects can be annulled when the branch is taken. The effects
50 of the delay slot in a conditional branch can be nullified for forward
51 taken branches, or for untaken backward branches. This means
52 we can hoist insns from the fall-through path for forward branches or
53 steal insns from the target of backward branches.
54
55 The TMS320C3x and C4x have three branch delay slots. When the three
56 slots are filled, the branch penalty is zero. Most insns can fill the
57 delay slots except jump insns.
58
59 Three techniques for filling delay slots have been implemented so far:
60
61 (1) `fill_simple_delay_slots' is the simplest, most efficient way
62 to fill delay slots. This pass first looks for insns which come
63 from before the branch and which are safe to execute after the
64 branch. Then it searches after the insn requiring delay slots or,
65 in the case of a branch, for insns that are after the point at
66 which the branch merges into the fallthrough code, if such a point
67 exists. When such insns are found, the branch penalty decreases
68 and no code expansion takes place.
69
70 (2) `fill_eager_delay_slots' is more complicated: it is used for
71 scheduling conditional jumps, or for scheduling jumps which cannot
72 be filled using (1). A machine need not have annulled jumps to use
73 this strategy, but it helps (by keeping more options open).
74 `fill_eager_delay_slots' tries to guess the direction the branch
75 will go; if it guesses right 100% of the time, it can reduce the
76 branch penalty as much as `fill_simple_delay_slots' does. If it
77 guesses wrong 100% of the time, it might as well schedule nops. When
78 `fill_eager_delay_slots' takes insns from the fall-through path of
79 the jump, usually there is no code expansion; when it takes insns
80 from the branch target, there is code expansion if it is not the
81 only way to reach that target.
82
83 (3) `relax_delay_slots' uses a set of rules to simplify code that
84 has been reorganized by (1) and (2). It finds cases where
85 conditional test can be eliminated, jumps can be threaded, extra
86 insns can be eliminated, etc. It is the job of (1) and (2) to do a
87 good job of scheduling locally; `relax_delay_slots' takes care of
88 making the various individual schedules work well together. It is
89 especially tuned to handle the control flow interactions of branch
90 insns. It does nothing for insns with delay slots that do not
91 branch.
92
93 On machines that use CC0, we are very conservative. We will not make
94 a copy of an insn involving CC0 since we want to maintain a 1-1
95 correspondence between the insn that sets and uses CC0. The insns are
96 allowed to be separated by placing an insn that sets CC0 (but not an insn
97 that uses CC0; we could do this, but it doesn't seem worthwhile) in a
98 delay slot. In that case, we point each insn at the other with REG_CC_USER
99 and REG_CC_SETTER notes. Note that these restrictions affect very few
100 machines because most RISC machines with delay slots will not use CC0
101 (the RT is the only known exception at this point). */
102
103#include "config.h"
104#include "system.h"
105#include "coretypes.h"
106#include "backend.h"
107#include "target.h"
108#include "rtl.h"
109#include "tree.h"
110#include "predict.h"
111#include "memmodel.h"
112#include "tm_p.h"
113#include "expmed.h"
114#include "insn-config.h"
115#include "emit-rtl.h"
116#include "recog.h"
117#include "insn-attr.h"
118#include "resource.h"
119#include "tree-pass.h"
120
121
122/* First, some functions that were used before GCC got a control flow graph.
123 These functions are now only used here in reorg.c, and have therefore
124 been moved here to avoid inadvertent misuse elsewhere in the compiler. */
125
126/* Return the last label to mark the same position as LABEL. Return LABEL
127 itself if it is null or any return rtx. */
128
129static rtx
130skip_consecutive_labels (rtx label_or_return)
131{
132 rtx_insn *insn;
133
134 if (label_or_return && ANY_RETURN_P (label_or_return)(((enum rtx_code) (label_or_return)->code) == RETURN || ((enum rtx_code) (label_or_return)->code) == SIMPLE_RETURN))
135 return label_or_return;
136
137 rtx_insn *label = as_a <rtx_insn *> (label_or_return);
138
139 /* __builtin_unreachable can create a CODE_LABEL followed by a BARRIER.
140
141 Since reaching the CODE_LABEL is undefined behavior, we can return
142 any code label and we're OK at runtime.
143
144 However, if we return a CODE_LABEL which leads to a shrinked wrapped
145 epilogue, but the path does not have a prologue, then we will trip
146 a sanity check in the dwarf2 cfi code which wants to verify that
147 the CFIs are all the same on the traces leading to the epilogue.
148
149 So we explicitly disallow looking through BARRIERS here. */
150 for (insn = label;
151 insn != 0 && !INSN_P (insn)(((((enum rtx_code) (insn)->code) == INSN) || (((enum rtx_code) (insn)->code) == JUMP_INSN) || (((enum rtx_code) (insn)->code) == CALL_INSN)) || (((enum rtx_code) (insn)->code) == DEBUG_INSN)) && !BARRIER_P (insn)(((enum rtx_code) (insn)->code) == BARRIER);
152 insn = NEXT_INSN (insn))
153 if (LABEL_P (insn)(((enum rtx_code) (insn)->code) == CODE_LABEL))
154 label = insn;
155
156 return label;
157}
158
159/* INSN uses CC0 and is being moved into a delay slot. Set up REG_CC_SETTER
160 and REG_CC_USER notes so we can find it. */
161
162static void
163link_cc0_insns (rtx_insn *insn)
164{
165 rtx user = next_nonnote_insn (insn);
166
167 if (NONJUMP_INSN_P (user)(((enum rtx_code) (user)->code) == INSN) && GET_CODE (PATTERN (user))((enum rtx_code) (PATTERN (user))->code) == SEQUENCE)
168 user = XVECEXP (PATTERN (user), 0, 0)(((((PATTERN (user))->u.fld[0]).rt_rtvec))->elem[0]);
169
170 add_reg_note (user, REG_CC_SETTER, insn);
171 add_reg_note (insn, REG_CC_USER, user);
172}
173
174/* Insns which have delay slots that have not yet been filled. */
175
176static struct obstack unfilled_slots_obstack;
177static rtx *unfilled_firstobj;
178
179/* Define macros to refer to the first and last slot containing unfilled
180 insns. These are used because the list may move and its address
181 should be recomputed at each use. */
182
183#define unfilled_slots_base((rtx_insn **) ((void *) (&unfilled_slots_obstack)->object_base)) \
184 ((rtx_insn **) obstack_base (&unfilled_slots_obstack)((void *) (&unfilled_slots_obstack)->object_base))
185
186#define unfilled_slots_next((rtx_insn **) ((void *) (&unfilled_slots_obstack)->next_free)) \
187 ((rtx_insn **) obstack_next_free (&unfilled_slots_obstack)((void *) (&unfilled_slots_obstack)->next_free))
188
189/* Points to the label before the end of the function, or before a
190 return insn. */
191static rtx_code_label *function_return_label;
192/* Likewise for a simple_return. */
193static rtx_code_label *function_simple_return_label;
194
195/* Mapping between INSN_UID's and position in the code since INSN_UID's do
196 not always monotonically increase. */
197static int *uid_to_ruid;
198
199/* Highest valid index in `uid_to_ruid'. */
200static int max_uid;
201
202static int stop_search_p (rtx_insn *, int);
203static int resource_conflicts_p (struct resources *, struct resources *);
204static int insn_references_resource_p (rtx, struct resources *, bool);
205static int insn_sets_resource_p (rtx, struct resources *, bool);
206static rtx_code_label *find_end_label (rtx);
207static rtx_insn *emit_delay_sequence (rtx_insn *, const vec<rtx_insn *> &,
208 int);
209static void add_to_delay_list (rtx_insn *, vec<rtx_insn *> *);
210static rtx_insn *delete_from_delay_slot (rtx_insn *);
211static void delete_scheduled_jump (rtx_insn *);
212static void note_delay_statistics (int, int);
213static int get_jump_flags (const rtx_insn *, rtx);
214static int mostly_true_jump (rtx);
215static rtx get_branch_condition (const rtx_insn *, rtx);
216static int condition_dominates_p (rtx, const rtx_insn *);
217static int redirect_with_delay_slots_safe_p (rtx_insn *, rtx, rtx);
218static int redirect_with_delay_list_safe_p (rtx_insn *, rtx,
219 const vec<rtx_insn *> &);
220static int check_annul_list_true_false (int, const vec<rtx_insn *> &);
221static void steal_delay_list_from_target (rtx_insn *, rtx, rtx_sequence *,
222 vec<rtx_insn *> *,
223 struct resources *,
224 struct resources *,
225 struct resources *,
226 int, int *, int *,
227 rtx *);
228static void steal_delay_list_from_fallthrough (rtx_insn *, rtx, rtx_sequence *,
229 vec<rtx_insn *> *,
230 struct resources *,
231 struct resources *,
232 struct resources *,
233 int, int *, int *);
234static void try_merge_delay_insns (rtx_insn *, rtx_insn *);
235static rtx_insn *redundant_insn (rtx, rtx_insn *, const vec<rtx_insn *> &);
236static int own_thread_p (rtx, rtx, int);
237static void update_block (rtx_insn *, rtx_insn *);
238static int reorg_redirect_jump (rtx_jump_insn *, rtx);
239static void update_reg_dead_notes (rtx_insn *, rtx_insn *);
240static void fix_reg_dead_note (rtx_insn *, rtx);
241static void update_reg_unused_notes (rtx_insn *, rtx);
242static void fill_simple_delay_slots (int);
243static void fill_slots_from_thread (rtx_jump_insn *, rtx, rtx, rtx,
244 int, int, int, int,
245 int *, vec<rtx_insn *> *);
246static void fill_eager_delay_slots (void);
247static void relax_delay_slots (rtx_insn *);
248static void make_return_insns (rtx_insn *);
249
250/* A wrapper around next_active_insn which takes care to return ret_rtx
251 unchanged. */
252
253static rtx
254first_active_target_insn (rtx insn)
255{
256 if (ANY_RETURN_P (insn)(((enum rtx_code) (insn)->code) == RETURN || ((enum rtx_code) (insn)->code) == SIMPLE_RETURN))
257 return insn;
258 return next_active_insn (as_a <rtx_insn *> (insn));
259}
260
261/* Return true iff INSN is a simplejump, or any kind of return insn. */
262
263static bool
264simplejump_or_return_p (rtx insn)
265{
266 return (JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN)
267 && (simplejump_p (as_a <rtx_insn *> (insn))
268 || ANY_RETURN_P (PATTERN (insn))(((enum rtx_code) (PATTERN (insn))->code) == RETURN || ((enum rtx_code) (PATTERN (insn))->code) == SIMPLE_RETURN)));
269}
270
271/* Return TRUE if this insn should stop the search for insn to fill delay
272 slots. LABELS_P indicates that labels should terminate the search.
273 In all cases, jumps terminate the search. */
274
275static int
276stop_search_p (rtx_insn *insn, int labels_p)
277{
278 if (insn == 0)
279 return 1;
280
281 /* If the insn can throw an exception that is caught within the function,
282 it may effectively perform a jump from the viewpoint of the function.
283 Therefore act like for a jump. */
284 if (can_throw_internal (insn))
285 return 1;
286
287 switch (GET_CODE (insn)((enum rtx_code) (insn)->code))
288 {
289 case NOTE:
290 case CALL_INSN:
291 case DEBUG_INSN:
292 return 0;
293
294 case CODE_LABEL:
295 return labels_p;
296
297 case JUMP_INSN:
298 case BARRIER:
299 return 1;
300
301 case INSN:
302 /* OK unless it contains a delay slot or is an `asm' insn of some type.
303 We don't know anything about these. */
304 return (GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) == SEQUENCE
305 || GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) == ASM_INPUT
306 || asm_noperands (PATTERN (insn)) >= 0);
307
308 default:
309 gcc_unreachable ()(fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c", 309, __FUNCTION__));
310 }
311}
312
313/* Return TRUE if any resources are marked in both RES1 and RES2 or if either
314 resource set contains a volatile memory reference. Otherwise, return FALSE. */
315
316static int
317resource_conflicts_p (struct resources *res1, struct resources *res2)
318{
319 if ((res1->cc && res2->cc) || (res1->memory && res2->memory)
50
Assuming field 'cc' is 0
51
Assuming field 'memory' is 0
53
Taking false branch
70
Assuming field 'cc' is 0
71
Assuming field 'memory' is 0
73
Taking false branch
88
Assuming field 'cc' is 0
89
Assuming field 'memory' is 0
91
Taking false branch
320 || res1->volatil || res2->volatil)
52.1
Field 'volatil' is 0
72.1
Field 'volatil' is 0
90.1
Field 'volatil' is 0
52
Assuming field 'volatil' is 0
72
Assuming field 'volatil' is 0
90
Assuming field 'volatil' is 0
321 return 1;
322
323 return hard_reg_set_intersect_p (res1->regs, res2->regs);
54
Calling 'hard_reg_set_intersect_p'
59
Returning from 'hard_reg_set_intersect_p'
60
Returning without writing to 'res2->volatil', which participates in a condition later
61
Returning zero, which participates in a condition later
74
Calling 'hard_reg_set_intersect_p'
79
Returning from 'hard_reg_set_intersect_p'
80
Returning zero, which participates in a condition later
92
Calling 'hard_reg_set_intersect_p'
97
Returning from 'hard_reg_set_intersect_p'
98
Returning zero, which participates in a condition later
324}
325
326/* Return TRUE if any resource marked in RES, a `struct resources', is
327 referenced by INSN. If INCLUDE_DELAYED_EFFECTS is set, return if the called
328 routine is using those resources.
329
330 We compute this by computing all the resources referenced by INSN and
331 seeing if this conflicts with RES. It might be faster to directly check
332 ourselves, and this is the way it used to work, but it means duplicating
333 a large block of complex code. */
334
335static int
336insn_references_resource_p (rtx insn, struct resources *res,
337 bool include_delayed_effects)
338{
339 struct resources insn_res;
340
341 CLEAR_RESOURCE (&insn_res)do { (&insn_res)->memory = (&insn_res)->volatil = (&insn_res)->cc = 0; CLEAR_HARD_REG_SET ((&insn_res)->regs); } while (0);
48
Loop condition is false. Exiting loop
342 mark_referenced_resources (insn, &insn_res, include_delayed_effects);
343 return resource_conflicts_p (&insn_res, res);
49
Calling 'resource_conflicts_p'
62
Returning from 'resource_conflicts_p'
63
Returning without writing to 'res->volatil', which participates in a condition later
64
Returning zero, which participates in a condition later
344}
345
346/* Return TRUE if INSN modifies resources that are marked in RES.
347 INCLUDE_DELAYED_EFFECTS is set if the actions of that routine should be
348 included. CC0 is only modified if it is explicitly set; see comments
349 in front of mark_set_resources for details. */
350
351static int
352insn_sets_resource_p (rtx insn, struct resources *res,
353 bool include_delayed_effects)
354{
355 struct resources insn_sets;
356
357 CLEAR_RESOURCE (&insn_sets)do { (&insn_sets)->memory = (&insn_sets)->volatil = (&insn_sets)->cc = 0; CLEAR_HARD_REG_SET ((&insn_sets)->regs); } while (0);
67
Loop condition is false. Exiting loop
85
Loop condition is false. Exiting loop
358 mark_set_resources (insn, &insn_sets, 0,
359 (include_delayed_effects
67.1
'include_delayed_effects' is false
85.1
'include_delayed_effects' is false
68
'?' condition is false
86
'?' condition is false
360 ? MARK_SRC_DEST_CALL
361 : MARK_SRC_DEST));
362 return resource_conflicts_p (&insn_sets, res);
69
Calling 'resource_conflicts_p'
81
Returning from 'resource_conflicts_p'
82
Returning zero, which participates in a condition later
87
Calling 'resource_conflicts_p'
99
Returning from 'resource_conflicts_p'
100
Returning zero, which participates in a condition later
363}
364
365/* Find a label at the end of the function or before a RETURN. If there 366 is none, try to make one. If that fails, returns 0. 367 368 The property of such a label is that it is placed just before the 369 epilogue or a bare RETURN insn, so that another bare RETURN can be 370 turned into a jump to the label unconditionally. In particular, the 371 label cannot be placed before a RETURN insn with a filled delay slot. 372 373 ??? There may be a problem with the current implementation. Suppose 374 we start with a bare RETURN insn and call find_end_label. It may set 375 function_return_label just before the RETURN. Suppose the machinery 376 is able to fill the delay slot of the RETURN insn afterwards. Then 377 function_return_label is no longer valid according to the property 378 described above and find_end_label will still return it unmodified. 379 Note that this is probably mitigated by the following observation: 380 once function_return_label is made, it is very likely the target of 381 a jump, so filling the delay slot of the RETURN will be much more 382 difficult. 383 KIND is either simple_return_rtx or ret_rtx, indicating which type of 384 return we're looking for. */ 385 386static rtx_code_label * 387find_end_label (rtx kind) 388{ 389 rtx_insn *insn; 390 rtx_code_label **plabel; 391 392 if (kind == ret_rtx) 393 plabel = &function_return_label; 394 else 395 { 396 gcc_assert (kind == simple_return_rtx)((void)(!(kind == simple_return_rtx) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 396, __FUNCTION__), 0 : 0))
; 397 plabel = &function_simple_return_label; 398 } 399 400 /* If we found one previously, return it. */ 401 if (*plabel) 402 return *plabel; 403 404 /* Otherwise, see if there is a label at the end of the function. If there 405 is, it must be that RETURN insns aren't needed, so that is our return 406 label and we don't have to do anything else. */ 407 408 insn = get_last_insn (); 409 while (NOTE_P (insn)(((enum rtx_code) (insn)->code) == NOTE) 410 || (NONJUMP_INSN_P (insn)(((enum rtx_code) (insn)->code) == INSN) 411 && (GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) == USE 412 || GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) == CLOBBER))) 413 insn = PREV_INSN (insn); 414 415 /* When a target threads its epilogue we might already have a 416 suitable return insn. If so put a label before it for the 417 function_return_label. */ 418 if (BARRIER_P (insn)(((enum rtx_code) (insn)->code) == BARRIER) 419 && JUMP_P (PREV_INSN (insn))(((enum rtx_code) (PREV_INSN (insn))->code) == JUMP_INSN) 420 && PATTERN (PREV_INSN (insn)) == kind) 421 { 422 rtx_insn *temp = PREV_INSN (PREV_INSN (insn)); 423 rtx_code_label *label = gen_label_rtx (); 424 LABEL_NUSES (label)(((label)->u.fld[4]).rt_int) = 0; 425 426 /* Put the label before any USE insns that may precede the RETURN 427 insn. */ 428 while (GET_CODE (temp)((enum rtx_code) (temp)->code) == USE) 429 temp = PREV_INSN (temp); 430 431 emit_label_after (label, temp); 432 *plabel = label; 433 } 434 435 else if (LABEL_P (insn)(((enum rtx_code) (insn)->code) == CODE_LABEL)) 436 *plabel = as_a <rtx_code_label *> (insn); 437 else 438 { 439 rtx_code_label *label = gen_label_rtx (); 440 LABEL_NUSES (label)(((label)->u.fld[4]).rt_int) = 0; 441 /* If the basic block reorder pass moves the return insn to 442 some other place try to locate it again and put our 443 function_return_label there. */ 444 while (insn && ! (JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN) && (PATTERN (insn) == kind))) 445 insn = PREV_INSN (insn); 446 if (insn) 447 { 448 insn = PREV_INSN (insn); 449 450 /* Put the label before any USE insns that may precede the 451 RETURN insn. */ 452 while (GET_CODE (insn)((enum rtx_code) (insn)->code) == USE) 453 insn = PREV_INSN (insn); 454 455 emit_label_after (label, insn); 456 } 457 else 458 { 459 if (targetm.have_epilogue () && ! targetm.have_return ()) 460 /* The RETURN insn has its delay slot filled so we cannot 461 emit the label just before it. Since we already have 462 an epilogue and cannot emit a new RETURN, we cannot 463 emit the label at all. */ 464 return NULLnullptr; 465 466 /* Otherwise, make a new label and emit a RETURN and BARRIER, 467 if needed. */ 468 emit_label (label); 469 if (targetm.have_return ()) 470 { 471 /* The return we make may have delay slots too. */ 472 rtx_insn *pat = targetm.gen_return (); 473 rtx_insn *insn = emit_jump_insn (pat); 474 set_return_jump_label (insn); 475 emit_barrier (); 476 if (num_delay_slots (insn) > 0) 477 obstack_ptr_grow (&unfilled_slots_obstack, insn)__extension__ ({ struct obstack *__o = (&unfilled_slots_obstack
); if (__extension__ ({ struct obstack const *__o1 = (__o); (
size_t) (__o1->chunk_limit - __o1->next_free); }) < sizeof
(void *)) _obstack_newchunk (__o, sizeof (void *)); __extension__
({ struct obstack *__o1 = (__o); void *__p1 = __o1->next_free
; *(const void **) __p1 = (insn); __o1->next_free += sizeof
(const void *); (void) 0; }); })
; 478 } 479 } 480 *plabel = label; 481 } 482 483 /* Show one additional use for this label so it won't go away until 484 we are done. */ 485 ++LABEL_NUSES (*plabel)(((*plabel)->u.fld[4]).rt_int); 486 487 return *plabel; 488} 489
490/* Put INSN and LIST together in a SEQUENCE rtx of LENGTH, and replace 491 the pattern of INSN with the SEQUENCE. 492 493 Returns the insn containing the SEQUENCE that replaces INSN. */ 494 495static rtx_insn * 496emit_delay_sequence (rtx_insn *insn, const vec<rtx_insn *> &list, int length) 497{ 498 /* Allocate the rtvec to hold the insns and the SEQUENCE. */ 499 rtvec seqv = rtvec_alloc (length + 1); 500 rtx seq = gen_rtx_SEQUENCE (VOIDmode, seqv)gen_rtx_fmt_E_stat ((SEQUENCE), ((((void) 0, E_VOIDmode))), (
(seqv)) )
; 501 rtx_insn *seq_insn = make_insn_raw (seq); 502 503 /* If DELAY_INSN has a location, use it for SEQ_INSN. If DELAY_INSN does 504 not have a location, but one of the delayed insns does, we pick up a 505 location from there later. */ 506 INSN_LOCATION (seq_insn) = INSN_LOCATION (insn); 507 508 /* Unlink INSN from the insn chain, so that we can put it into 509 the SEQUENCE. Remember where we want to emit SEQUENCE in AFTER. */ 510 rtx_insn *after = PREV_INSN (insn); 511 remove_insn (insn); 512 SET_NEXT_INSN (insn) = SET_PREV_INSN (insn) = NULLnullptr; 513 514 /* Build our SEQUENCE and rebuild the insn chain. */ 515 start_sequence (); 516 XVECEXP (seq, 0, 0)(((((seq)->u.fld[0]).rt_rtvec))->elem[0]) = emit_insn (insn); 517 518 unsigned int delay_insns = list.length (); 519 gcc_assert (delay_insns == (unsigned int) length)((void)(!(delay_insns == (unsigned int) length) ? fancy_abort
("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 519, __FUNCTION__), 0 : 0))
; 520 for (unsigned int i = 0; i < delay_insns; i++) 521 { 522 rtx_insn *tem = list[i]; 523 rtx note, next; 524 525 /* Show that this copy of the insn isn't deleted. */ 526 tem->set_undeleted (); 527 528 /* Unlink insn from its original place, and re-emit it into 529 the sequence. */ 530 SET_NEXT_INSN (tem) = SET_PREV_INSN (tem) = NULLnullptr; 531 XVECEXP (seq, 0, i + 1)(((((seq)->u.fld[0]).rt_rtvec))->elem[i + 1]) = emit_insn (tem); 532 533 /* SPARC assembler, for instance, emit warning when debug info is output 534 into the delay slot. */ 535 if (INSN_LOCATION (tem) && !INSN_LOCATION (seq_insn)) 536 INSN_LOCATION (seq_insn) = INSN_LOCATION (tem); 537 INSN_LOCATION (tem) = 0; 538 539 for (note = REG_NOTES (tem)(((tem)->u.fld[6]).rt_rtx); note; note = next) 540 { 541 next = XEXP (note, 1)(((note)->u.fld[1]).rt_rtx); 542 switch (REG_NOTE_KIND (note)((enum reg_note) ((machine_mode) (note)->mode))) 543 { 544 case REG_DEAD: 545 /* Remove any REG_DEAD notes because we can't rely on them now 546 that the insn has been moved. */ 547 remove_note (tem, note); 548 break; 549 550 case REG_LABEL_OPERAND: 551 case REG_LABEL_TARGET: 552 /* Keep the label reference count up to date. */ 553 if (LABEL_P (XEXP (note, 0))(((enum rtx_code) ((((note)->u.fld[0]).rt_rtx))->code) ==
CODE_LABEL)
) 554 LABEL_NUSES (XEXP (note, 0))((((((note)->u.fld[0]).rt_rtx))->u.fld[4]).rt_int) ++; 555 break; 556 557 default: 558 break; 559 } 560 } 561 } 562 end_sequence (); 563 564 /* Splice our SEQUENCE into the insn stream where INSN used to be. */ 565 add_insn_after (seq_insn, after, NULLnullptr); 566 567 return seq_insn; 568} 569 570/* Add INSN to DELAY_LIST and return the head of the new list. The list must 571 be in the order in which the insns are to be executed. */ 572 573static void 574add_to_delay_list (rtx_insn *insn, vec<rtx_insn *> *delay_list) 575{ 576 /* If INSN has its block number recorded, clear it since we may 577 be moving the insn to a new block. */ 578 clear_hashed_info_for_insn (insn); 579 580 delay_list->safe_push (insn); 581} 582
583/* Delete INSN from the delay slot of the insn that it is in, which may 584 produce an insn with no delay slots. Return the new insn. */ 585 586static rtx_insn * 587delete_from_delay_slot (rtx_insn *insn) 588{ 589 rtx_insn *trial, *seq_insn, *prev; 590 rtx_sequence *seq; 591 int i; 592 int had_barrier = 0; 593 594 /* We first must find the insn containing the SEQUENCE with INSN in its 595 delay slot. Do this by finding an insn, TRIAL, where 596 PREV_INSN (NEXT_INSN (TRIAL)) != TRIAL. */ 597 598 for (trial = insn; 599 PREV_INSN (NEXT_INSN (trial)) == trial; 600 trial = NEXT_INSN (trial)) 601 ; 602 603 seq_insn = PREV_INSN (NEXT_INSN (trial)); 604 seq = as_a <rtx_sequence *> (PATTERN (seq_insn)); 605 606 if (NEXT_INSN (seq_insn) && BARRIER_P (NEXT_INSN (seq_insn))(((enum rtx_code) (NEXT_INSN (seq_insn))->code) == BARRIER
)
) 607 had_barrier = 1; 608 609 /* Create a delay list consisting of all the insns other than the one 610 we are deleting (unless we were the only one). */ 611 auto_vec<rtx_insn *, 5> delay_list; 612 if (seq->len () > 2) 613 for (i = 1; i < seq->len (); i++) 614 if (seq->insn (i) != insn) 615 add_to_delay_list (seq->insn (i), &delay_list); 616 617 /* Delete the old SEQUENCE, re-emit the insn that used to have the delay 618 list, and rebuild the delay list if non-empty. */ 619 prev = PREV_INSN (seq_insn); 620 trial = seq->insn (0); 621 delete_related_insns (seq_insn); 622 add_insn_after (trial, prev, NULLnullptr); 623 624 /* If there was a barrier after the old SEQUENCE, remit it. */ 625 if (had_barrier) 626 emit_barrier_after (trial); 627 628 /* If there are any delay insns, remit them. Otherwise clear the 629 annul flag. */ 630 if (!delay_list.is_empty ()) 631 trial = emit_delay_sequence (trial, delay_list, XVECLEN (seq, 0)(((((seq)->u.fld[0]).rt_rtvec))->num_elem) - 2); 632 else if (JUMP_P (trial)(((enum rtx_code) (trial)->code) == JUMP_INSN)) 633 INSN_ANNULLED_BRANCH_P (trial)(__extension__ ({ __typeof ((trial)) const _rtx = ((trial)); if
(((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 633, __FUNCTION__); _rtx; })->unchanging)
= 0; 634 635 INSN_FROM_TARGET_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if
(((enum rtx_code) (_rtx)->code) != INSN && ((enum
rtx_code) (_rtx)->code) != JUMP_INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 635, __FUNCTION__); _rtx; })->in_struct)
= 0; 636 637 /* Show we need to fill this insn again. */ 638 obstack_ptr_grow (&unfilled_slots_obstack, trial)__extension__ ({ struct obstack *__o = (&unfilled_slots_obstack
); if (__extension__ ({ struct obstack const *__o1 = (__o); (
size_t) (__o1->chunk_limit - __o1->next_free); }) < sizeof
(void *)) _obstack_newchunk (__o, sizeof (void *)); __extension__
({ struct obstack *__o1 = (__o); void *__p1 = __o1->next_free
; *(const void **) __p1 = (trial); __o1->next_free += sizeof
(const void *); (void) 0; }); })
; 639 640 return trial; 641} 642
643/* Delete INSN, a JUMP_INSN. If it is a conditional jump, we must track down 644 the insn that sets CC0 for it and delete it too. */ 645 646static void 647delete_scheduled_jump (rtx_insn *insn) 648{ 649 /* Delete the insn that sets cc0 for us. On machines without cc0, we could 650 delete the insn that sets the condition code, but it is hard to find it. 651 Since this case is rare anyway, don't bother trying; there would likely 652 be other insns that became dead anyway, which we wouldn't know to 653 delete. */ 654 655 if (HAVE_cc00 && reg_mentioned_p (cc0_rtx, insn)) 656 { 657 rtx note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX(rtx) 0); 658 659 /* If a reg-note was found, it points to an insn to set CC0. This 660 insn is in the delay list of some other insn. So delete it from 661 the delay list it was in. */ 662 if (note) 663 { 664 if (! FIND_REG_INC_NOTE (XEXP (note, 0), NULL_RTX)0 665 && sets_cc0_p (PATTERN (XEXP (note, 0)(((note)->u.fld[0]).rt_rtx))) == 1) 666 delete_from_delay_slot (as_a <rtx_insn *> (XEXP (note, 0)(((note)->u.fld[0]).rt_rtx))); 667 } 668 else 669 { 670 /* The insn setting CC0 is our previous insn, but it may be in 671 a delay slot. It will be the last insn in the delay slot, if 672 it is. */ 673 rtx_insn *trial = previous_insn (insn); 674 if (NOTE_P (trial)(((enum rtx_code) (trial)->code) == NOTE)) 675 trial = prev_nonnote_insn (trial); 676 if (sets_cc0_p (PATTERN (trial)) != 1 677 || FIND_REG_INC_NOTE (trial, NULL_RTX)0) 678 return; 679 if (PREV_INSN (NEXT_INSN (trial)) == trial) 680 delete_related_insns (trial); 681 else 682 delete_from_delay_slot (trial); 683 } 684 } 685 686 delete_related_insns (insn); 687} 688
689/* Counters for delay-slot filling. */ 690 691#define NUM_REORG_FUNCTIONS2 2 692#define MAX_DELAY_HISTOGRAM3 3 693#define MAX_REORG_PASSES2 2 694 695static int num_insns_needing_delays[NUM_REORG_FUNCTIONS2][MAX_REORG_PASSES2]; 696 697static int num_filled_delays[NUM_REORG_FUNCTIONS2][MAX_DELAY_HISTOGRAM3+1][MAX_REORG_PASSES2]; 698 699static int reorg_pass_number; 700 701static void 702note_delay_statistics (int slots_filled, int index) 703{ 704 num_insns_needing_delays[index][reorg_pass_number]++; 705 if (slots_filled > MAX_DELAY_HISTOGRAM3) 706 slots_filled = MAX_DELAY_HISTOGRAM3; 707 num_filled_delays[index][slots_filled][reorg_pass_number]++; 708} 709
710/* Optimize the following cases: 711 712 1. When a conditional branch skips over only one instruction, 713 use an annulling branch and put that insn in the delay slot. 714 Use either a branch that annuls when the condition if true or 715 invert the test with a branch that annuls when the condition is 716 false. This saves insns, since otherwise we must copy an insn 717 from the L1 target. 718 719 (orig) (skip) (otherwise) 720 Bcc.n L1 Bcc',a L1 Bcc,a L1' 721 insn insn insn2 722 L1: L1: L1: 723 insn2 insn2 insn2 724 insn3 insn3 L1': 725 insn3 726 727 2. When a conditional branch skips over only one instruction, 728 and after that, it unconditionally branches somewhere else, 729 perform the similar optimization. This saves executing the 730 second branch in the case where the inverted condition is true. 731 732 Bcc.n L1 Bcc',a L2 733 insn insn 734 L1: L1: 735 Bra L2 Bra L2 736 737 INSN is a JUMP_INSN. 738 739 This should be expanded to skip over N insns, where N is the number 740 of delay slots required. */ 741 742static void 743optimize_skip (rtx_jump_insn *insn, vec<rtx_insn *> *delay_list) 744{ 745 rtx_insn *trial = next_nonnote_insn (insn); 746 rtx_insn *next_trial = next_active_insn (trial); 747 int flags; 748 749 flags = get_jump_flags (insn, JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx)); 750 751 if (trial == 0 752 || !NONJUMP_INSN_P (trial)(((enum rtx_code) (trial)->code) == INSN) 753 || GET_CODE (PATTERN (trial))((enum rtx_code) (PATTERN (trial))->code) == SEQUENCE 754 || recog_memoized (trial) < 0 755 || (! eligible_for_annul_false (insn, 0, trial, flags) 756 && ! eligible_for_annul_true (insn, 0, trial, flags)) 757 || RTX_FRAME_RELATED_P (trial)(__extension__ ({ __typeof ((trial)) const _rtx = ((trial)); if
(((enum rtx_code) (_rtx)->code) != DEBUG_INSN && (
(enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN && ((enum rtx_code) (
_rtx)->code) != JUMP_INSN && ((enum rtx_code) (_rtx
)->code) != BARRIER && ((enum rtx_code) (_rtx)->
code) != SET) rtl_check_failed_flag ("RTX_FRAME_RELATED_P",_rtx
, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 757, __FUNCTION__); _rtx; })->frame_related)
758 || can_throw_internal (trial)) 759 return; 760 761 /* There are two cases where we are just executing one insn (we assume 762 here that a branch requires only one insn; this should be generalized 763 at some point): Where the branch goes around a single insn or where 764 we have one insn followed by a branch to the same label we branch to. 765 In both of these cases, inverting the jump and annulling the delay 766 slot give the same effect in fewer insns. */ 767 if (next_trial == next_active_insn (JUMP_LABEL_AS_INSN (insn)) 768 || (next_trial != 0 769 && simplejump_or_return_p (next_trial) 770 && JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx) == JUMP_LABEL (next_trial)(((next_trial)->u.fld[7]).rt_rtx))) 771 { 772 if (eligible_for_annul_false (insn, 0, trial, flags)) 773 { 774 if (invert_jump (insn, JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx), 1)) 775 INSN_FROM_TARGET_P (trial)(__extension__ ({ __typeof ((trial)) const _rtx = ((trial)); if
(((enum rtx_code) (_rtx)->code) != INSN && ((enum
rtx_code) (_rtx)->code) != JUMP_INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 775, __FUNCTION__); _rtx; })->in_struct)
= 1; 776 else if (! eligible_for_annul_true (insn, 0, trial, flags)) 777 return; 778 } 779 780 add_to_delay_list (trial, delay_list); 781 next_trial = next_active_insn (trial); 782 update_block (trial, trial); 783 delete_related_insns (trial); 784 785 /* Also, if we are targeting an unconditional 786 branch, thread our jump to the target of that branch. Don't 787 change this into a RETURN here, because it may not accept what 788 we have in the delay slot. We'll fix this up later. */ 789 if (next_trial && simplejump_or_return_p (next_trial)) 790 { 791 rtx target_label = JUMP_LABEL (next_trial)(((next_trial)->u.fld[7]).rt_rtx); 792 if (ANY_RETURN_P (target_label)(((enum rtx_code) (target_label)->code) == RETURN || ((enum
rtx_code) (target_label)->code) == SIMPLE_RETURN)
) 793 target_label = find_end_label (target_label); 794 795 if (target_label) 796 { 797 /* Recompute the flags based on TARGET_LABEL since threading 798 the jump to TARGET_LABEL may change the direction of the 799 jump (which may change the circumstances in which the 800 delay slot is nullified). */ 801 flags = get_jump_flags (insn, target_label); 802 if (eligible_for_annul_true (insn, 0, trial, flags)) 803 reorg_redirect_jump (insn, target_label); 804 } 805 } 806 807 INSN_ANNULLED_BRANCH_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if
(((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 807, __FUNCTION__); _rtx; })->unchanging)
= 1; 808 } 809} 810
811/* Encode and return branch direction and prediction information for 812 INSN assuming it will jump to LABEL. 813 814 Non conditional branches return no direction information and 815 are predicted as very likely taken. */ 816 817static int 818get_jump_flags (const rtx_insn *insn, rtx label) 819{ 820 int flags; 821 822 /* get_jump_flags can be passed any insn with delay slots, these may 823 be INSNs, CALL_INSNs, or JUMP_INSNs. Only JUMP_INSNs have branch 824 direction information, and only if they are conditional jumps. 825 826 If LABEL is a return, then there is no way to determine the branch 827 direction. */ 828 if (JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN) 829 && (condjump_p (insn) || condjump_in_parallel_p (insn)) 830 && !ANY_RETURN_P (label)(((enum rtx_code) (label)->code) == RETURN || ((enum rtx_code
) (label)->code) == SIMPLE_RETURN)
831 && INSN_UID (insn) <= max_uid 832 && INSN_UID (label) <= max_uid) 833 flags 834 = (uid_to_ruid[INSN_UID (label)] > uid_to_ruid[INSN_UID (insn)]) 835 ? ATTR_FLAG_forward0x1 : ATTR_FLAG_backward0x2; 836 /* No valid direction information. */ 837 else 838 flags = 0; 839 840 return flags; 841} 842 843/* Return truth value of the statement that this branch 844 is mostly taken. If we think that the branch is extremely likely 845 to be taken, we return 2. If the branch is slightly more likely to be 846 taken, return 1. If the branch is slightly less likely to be taken, 847 return 0 and if the branch is highly unlikely to be taken, return -1. */ 848 849static int 850mostly_true_jump (rtx jump_insn) 851{ 852 /* If branch probabilities are available, then use that number since it 853 always gives a correct answer. */ 854 rtx note = find_reg_note (jump_insn, REG_BR_PROB, 0); 855 if (note) 856 { 857 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0)(((note)->u.fld[0]).rt_int)) 858 .to_reg_br_prob_base (); 859 860 if (prob >= REG_BR_PROB_BASE10000 * 9 / 10) 861 return 2; 862 else if (prob >= REG_BR_PROB_BASE10000 / 2) 863 return 1; 864 else if (prob >= REG_BR_PROB_BASE10000 / 10) 865 return 0; 866 else 867 return -1; 868 } 869 870 /* If there is no note, assume branches are not taken. 871 This should be rare. */ 872 return 0; 873} 874 875/* Return the condition under which INSN will branch to TARGET. If TARGET 876 is zero, return the condition under which INSN will return. If INSN is 877 an unconditional branch, return const_true_rtx. If INSN isn't a simple 878 type of jump, or it doesn't go to TARGET, return 0. */ 879 880static rtx 881get_branch_condition (const rtx_insn *insn, rtx target) 882{ 883 rtx pat = PATTERN (insn); 884 rtx src; 885 886 if (condjump_in_parallel_p (insn)) 887 pat = XVECEXP (pat, 0, 0)(((((pat)->u.fld[0]).rt_rtvec))->elem[0]); 888 889 if (ANY_RETURN_P (pat)(((enum rtx_code) (pat)->code) == RETURN || ((enum rtx_code
) (pat)->code) == SIMPLE_RETURN)
&& pat == target) 890 return const_true_rtx; 891 892 if (GET_CODE (pat)((enum rtx_code) (pat)->code) != SET || SET_DEST (pat)(((pat)->u.fld[0]).rt_rtx) != pc_rtx) 893 return 0; 894 895 src = SET_SRC (pat)(((pat)->u.fld[1]).rt_rtx); 896 if (GET_CODE (src)((enum rtx_code) (src)->code) == LABEL_REF && label_ref_label (src) == target) 897 return const_true_rtx; 898 899 else if (GET_CODE (src)((enum rtx_code) (src)->code) == IF_THEN_ELSE 900 && XEXP (src, 2)(((src)->u.fld[2]).rt_rtx) == pc_rtx 901 && ((GET_CODE (XEXP (src, 1))((enum rtx_code) ((((src)->u.fld[1]).rt_rtx))->code) == LABEL_REF 902 && label_ref_label (XEXP (src, 1)(((src)->u.fld[1]).rt_rtx)) == target) 903 || (ANY_RETURN_P (XEXP (src, 1))(((enum rtx_code) ((((src)->u.fld[1]).rt_rtx))->code) ==
RETURN || ((enum rtx_code) ((((src)->u.fld[1]).rt_rtx))->
code) == SIMPLE_RETURN)
&& XEXP (src, 1)(((src)->u.fld[1]).rt_rtx) == target))) 904 return XEXP (src, 0)(((src)->u.fld[0]).rt_rtx); 905 906 else if (GET_CODE (src)((enum rtx_code) (src)->code) == IF_THEN_ELSE 907 && XEXP (src, 1)(((src)->u.fld[1]).rt_rtx) == pc_rtx 908 && ((GET_CODE (XEXP (src, 2))((enum rtx_code) ((((src)->u.fld[2]).rt_rtx))->code) == LABEL_REF 909 && label_ref_label (XEXP (src, 2)(((src)->u.fld[2]).rt_rtx)) == target) 910 || (ANY_RETURN_P (XEXP (src, 2))(((enum rtx_code) ((((src)->u.fld[2]).rt_rtx))->code) ==
RETURN || ((enum rtx_code) ((((src)->u.fld[2]).rt_rtx))->
code) == SIMPLE_RETURN)
&& XEXP (src, 2)(((src)->u.fld[2]).rt_rtx) == target))) 911 { 912 enum rtx_code rev; 913 rev = reversed_comparison_code (XEXP (src, 0)(((src)->u.fld[0]).rt_rtx), insn); 914 if (rev != UNKNOWN) 915 return gen_rtx_fmt_ee (rev, GET_MODE (XEXP (src, 0)),gen_rtx_fmt_ee_stat ((rev), (((machine_mode) ((((src)->u.fld
[0]).rt_rtx))->mode)), (((((((src)->u.fld[0]).rt_rtx))->
u.fld[0]).rt_rtx)), (((((((src)->u.fld[0]).rt_rtx))->u.
fld[1]).rt_rtx)) )
916 XEXP (XEXP (src, 0), 0),gen_rtx_fmt_ee_stat ((rev), (((machine_mode) ((((src)->u.fld
[0]).rt_rtx))->mode)), (((((((src)->u.fld[0]).rt_rtx))->
u.fld[0]).rt_rtx)), (((((((src)->u.fld[0]).rt_rtx))->u.
fld[1]).rt_rtx)) )
917 XEXP (XEXP (src, 0), 1))gen_rtx_fmt_ee_stat ((rev), (((machine_mode) ((((src)->u.fld
[0]).rt_rtx))->mode)), (((((((src)->u.fld[0]).rt_rtx))->
u.fld[0]).rt_rtx)), (((((((src)->u.fld[0]).rt_rtx))->u.
fld[1]).rt_rtx)) )
; 918 } 919 920 return 0; 921} 922 923/* Return nonzero if CONDITION is more strict than the condition of 924 INSN, i.e., if INSN will always branch if CONDITION is true. */ 925 926static int 927condition_dominates_p (rtx condition, const rtx_insn *insn) 928{ 929 rtx other_condition = get_branch_condition (insn, JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx)); 930 enum rtx_code code = GET_CODE (condition)((enum rtx_code) (condition)->code); 931 enum rtx_code other_code; 932 933 if (rtx_equal_p (condition, other_condition)
28
Assuming the condition is true
934 || other_condition == const_true_rtx)
935 return 1;
29
Returning the value 1, which participates in a condition later
936 937 else if (condition == const_true_rtx || other_condition == 0) 938 return 0; 939 940 other_code = GET_CODE (other_condition)((enum rtx_code) (other_condition)->code); 941 if (GET_RTX_LENGTH (code)(rtx_length[(int) (code)]) != 2 || GET_RTX_LENGTH (other_code)(rtx_length[(int) (other_code)]) != 2 942 || ! rtx_equal_p (XEXP (condition, 0)(((condition)->u.fld[0]).rt_rtx), XEXP (other_condition, 0)(((other_condition)->u.fld[0]).rt_rtx)) 943 || ! rtx_equal_p (XEXP (condition, 1)(((condition)->u.fld[1]).rt_rtx), XEXP (other_condition, 1)(((other_condition)->u.fld[1]).rt_rtx))) 944 return 0; 945 946 return comparison_dominates_p (code, other_code); 947} 948 949/* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate 950 any insns already in the delay slot of JUMP. */ 951 952static int 953redirect_with_delay_slots_safe_p (rtx_insn *jump, rtx newlabel, rtx seq) 954{ 955 int flags, i; 956 rtx_sequence *pat = as_a <rtx_sequence *> (PATTERN (seq)); 957 958 /* Make sure all the delay slots of this jump would still 959 be valid after threading the jump. If they are still 960 valid, then return nonzero. */ 961 962 flags = get_jump_flags (jump, newlabel); 963 for (i = 1; i < pat->len (); i++) 964 if (! ( 965#if ANNUL_IFFALSE_SLOTS0 966 (INSN_ANNULLED_BRANCH_P (jump)(__extension__ ({ __typeof ((jump)) const _rtx = ((jump)); if
(((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 966, __FUNCTION__); _rtx; })->unchanging)
967 && INSN_FROM_TARGET_P (pat->insn (i))(__extension__ ({ __typeof ((pat->insn (i))) const _rtx = (
(pat->insn (i))); if (((enum rtx_code) (_rtx)->code) !=
INSN && ((enum rtx_code) (_rtx)->code) != JUMP_INSN
&& ((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 967, __FUNCTION__); _rtx; })->in_struct)
) 968 ? eligible_for_annul_false (jump, i - 1, pat->insn (i), flags) : 969#endif 970#if ANNUL_IFTRUE_SLOTS0 971 (INSN_ANNULLED_BRANCH_P (jump)(__extension__ ({ __typeof ((jump)) const _rtx = ((jump)); if
(((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 971, __FUNCTION__); _rtx; })->unchanging)
972 && ! INSN_FROM_TARGET_P (XVECEXP (pat, 0, i))(__extension__ ({ __typeof (((((((pat)->u.fld[0]).rt_rtvec
))->elem[i]))) const _rtx = (((((((pat)->u.fld[0]).rt_rtvec
))->elem[i]))); if (((enum rtx_code) (_rtx)->code) != INSN
&& ((enum rtx_code) (_rtx)->code) != JUMP_INSN &&
((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 972, __FUNCTION__); _rtx; })->in_struct)
) 973 ? eligible_for_annul_true (jump, i - 1, pat->insn (i), flags) : 974#endif 975 eligible_for_delay (jump, i - 1, pat->insn (i), flags))) 976 break; 977 978 return (i == pat->len ()); 979} 980 981/* Return nonzero if redirecting JUMP to NEWLABEL does not invalidate 982 any insns we wish to place in the delay slot of JUMP. */ 983 984static int 985redirect_with_delay_list_safe_p (rtx_insn *jump, rtx newlabel, 986 const vec<rtx_insn *> &delay_list) 987{ 988 /* Make sure all the insns in DELAY_LIST would still be 989 valid after threading the jump. If they are still 990 valid, then return nonzero. */ 991 992 int flags = get_jump_flags (jump, newlabel); 993 unsigned int delay_insns = delay_list.length (); 994 unsigned int i = 0; 995 for (; i < delay_insns; i++) 996 if (! ( 997#if ANNUL_IFFALSE_SLOTS0 998 (INSN_ANNULLED_BRANCH_P (jump)(__extension__ ({ __typeof ((jump)) const _rtx = ((jump)); if
(((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 998, __FUNCTION__); _rtx; })->unchanging)
999 && INSN_FROM_TARGET_P (delay_list[i])(__extension__ ({ __typeof ((delay_list[i])) const _rtx = ((delay_list
[i])); if (((enum rtx_code) (_rtx)->code) != INSN &&
((enum rtx_code) (_rtx)->code) != JUMP_INSN && ((
enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 999, __FUNCTION__); _rtx; })->in_struct)
) 1000 ? eligible_for_annul_false (jump, i, delay_list[i], flags) : 1001#endif 1002#if ANNUL_IFTRUE_SLOTS0 1003 (INSN_ANNULLED_BRANCH_P (jump)(__extension__ ({ __typeof ((jump)) const _rtx = ((jump)); if
(((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1003, __FUNCTION__); _rtx; })->unchanging)
1004 && ! INSN_FROM_TARGET_P (delay_list[i])(__extension__ ({ __typeof ((delay_list[i])) const _rtx = ((delay_list
[i])); if (((enum rtx_code) (_rtx)->code) != INSN &&
((enum rtx_code) (_rtx)->code) != JUMP_INSN && ((
enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1004, __FUNCTION__); _rtx; })->in_struct)
) 1005 ? eligible_for_annul_true (jump, i, delay_list[i], flags) : 1006#endif 1007 eligible_for_delay (jump, i, delay_list[i], flags))) 1008 break; 1009 1010 return i == delay_insns; 1011} 1012 1013/* DELAY_LIST is a list of insns that have already been placed into delay 1014 slots. See if all of them have the same annulling status as ANNUL_TRUE_P. 1015 If not, return 0; otherwise return 1. */ 1016 1017static int 1018check_annul_list_true_false (int annul_true_p, 1019 const vec<rtx_insn *> &delay_list) 1020{ 1021 rtx_insn *trial; 1022 unsigned int i; 1023 FOR_EACH_VEC_ELT (delay_list, i, trial)for (i = 0; (delay_list).iterate ((i), &(trial)); ++(i)) 1024 if ((annul_true_p && INSN_FROM_TARGET_P (trial)(__extension__ ({ __typeof ((trial)) const _rtx = ((trial)); if
(((enum rtx_code) (_rtx)->code) != INSN && ((enum
rtx_code) (_rtx)->code) != JUMP_INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1024, __FUNCTION__); _rtx; })->in_struct)
) 1025 || (!annul_true_p && !INSN_FROM_TARGET_P (trial)(__extension__ ({ __typeof ((trial)) const _rtx = ((trial)); if
(((enum rtx_code) (_rtx)->code) != INSN && ((enum
rtx_code) (_rtx)->code) != JUMP_INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1025, __FUNCTION__); _rtx; })->in_struct)
)) 1026 return 0; 1027 1028 return 1; 1029} 1030
1031/* INSN branches to an insn whose pattern SEQ is a SEQUENCE. Given that 1032 the condition tested by INSN is CONDITION and the resources shown in 1033 OTHER_NEEDED are needed after INSN, see whether INSN can take all the insns 1034 from SEQ's delay list, in addition to whatever insns it may execute 1035 (in DELAY_LIST). SETS and NEEDED are denote resources already set and 1036 needed while searching for delay slot insns. Return the concatenated 1037 delay list if possible, otherwise, return 0. 1038 1039 SLOTS_TO_FILL is the total number of slots required by INSN, and 1040 PSLOTS_FILLED points to the number filled so far (also the number of 1041 insns in DELAY_LIST). It is updated with the number that have been 1042 filled from the SEQUENCE, if any. 1043 1044 PANNUL_P points to a nonzero value if we already know that we need 1045 to annul INSN. If this routine determines that annulling is needed, 1046 it may set that value nonzero. 1047 1048 PNEW_THREAD points to a location that is to receive the place at which 1049 execution should continue. */ 1050 1051static void 1052steal_delay_list_from_target (rtx_insn *insn, rtx condition, rtx_sequence *seq, 1053 vec<rtx_insn *> *delay_list, 1054 struct resources *sets, 1055 struct resources *needed, 1056 struct resources *other_needed, 1057 int slots_to_fill, int *pslots_filled, 1058 int *pannul_p, rtx *pnew_thread) 1059{ 1060 int slots_remaining = slots_to_fill - *pslots_filled; 1061 int total_slots_filled = *pslots_filled; 1062 auto_vec<rtx_insn *, 5> new_delay_list; 1063 int must_annul = *pannul_p; 1064 int used_annul = 0; 1065 int i; 1066 struct resources cc_set; 1067 rtx_insn **redundant; 1068 1069 /* We can't do anything if there are more delay slots in SEQ than we 1070 can handle, or if we don't know that it will be a taken branch. 1071 We know that it will be a taken branch if it is either an unconditional 1072 branch or a conditional branch with a stricter branch condition. 1073 1074 Also, exit if the branch has more than one set, since then it is computing 1075 other results that can't be ignored, e.g. the HPPA mov&branch instruction. 1076 ??? It may be possible to move other sets into INSN in addition to 1077 moving the instructions in the delay slots. 1078 1079 We cannot steal the delay list if one of the instructions in the 1080 current delay_list modifies the condition codes and the jump in the 1081 sequence is a conditional jump. We cannot do this because we cannot 1082 change the direction of the jump because the condition codes 1083 will effect the direction of the jump in the sequence. */ 1084 1085 CLEAR_RESOURCE (&cc_set)do { (&cc_set)->memory = (&cc_set)->volatil = (
&cc_set)->cc = 0; CLEAR_HARD_REG_SET ((&cc_set)->
regs); } while (0)
;
24
Loop condition is false. Exiting loop
1086
1087 rtx_insn *trial;
1088 FOR_EACH_VEC_ELT (*delay_list, i, trial)for (i = 0; (*delay_list).iterate ((i), &(trial)); ++(i))
25
Loop condition is false. Execution continues on line 1095
1089 {
1090 mark_set_resources (trial, &cc_set, 0, MARK_SRC_DEST_CALL);
1091 if (insn_references_resource_p (seq->insn (0), &cc_set, false))
1092 return;
1093 }
1094
1095 if (XVECLEN (seq, 0)(((((seq)->u.fld[0]).rt_rtvec))->num_elem) - 1 > slots_remaining
26
Assuming the condition is false
42
Taking false branch
1096 || ! condition_dominates_p (condition, seq->insn (0))
27: Calling 'condition_dominates_p'
30: Returning from 'condition_dominates_p'
1097 || ! single_set (seq->insn (0)))
31: Calling 'single_set'
40: Returning from 'single_set'
41: Assuming the condition is false
1098 return; 1099 1100 /* On some targets, branches with delay slots can have a limited 1101 displacement. Give the back end a chance to tell us we can't do 1102 this. */ 1103 if (! targetm.can_follow_jump (insn, seq->insn (0)))
43: Assuming the condition is false
44: Taking false branch
1104 return; 1105 1106 redundant = XALLOCAVEC (rtx_insn *, XVECLEN (seq, 0))((rtx_insn * *) __builtin_alloca(sizeof (rtx_insn *) * ((((((
seq)->u.fld[0]).rt_rtvec))->num_elem))))
; 1107 for (i = 1; i < seq->len (); i++)
45: Assuming the condition is true
46: Loop condition is true. Entering loop body
1108 { 1109 rtx_insn *trial = seq->insn (i); 1110 int flags; 1111 1112 if (insn_references_resource_p (trial, sets, false)
47: Calling 'insn_references_resource_p'
65: Returning from 'insn_references_resource_p'
1113 || insn_sets_resource_p (trial, needed, false)
66: Calling 'insn_sets_resource_p'
83: Returning from 'insn_sets_resource_p'
1114 || insn_sets_resource_p (trial, sets, false)
84: Calling 'insn_sets_resource_p'
101: Returning from 'insn_sets_resource_p'
1115 /* If TRIAL sets CC0, we can't copy it, so we can't steal this 1116 delay list. */ 1117 || (HAVE_cc00 && find_reg_note (trial, REG_CC_USER, NULL_RTX(rtx) 0)) 1118 /* If TRIAL is from the fallthrough code of an annulled branch insn 1119 in SEQ, we cannot use it. */ 1120 || (INSN_ANNULLED_BRANCH_P (seq->insn (0))(__extension__ ({ __typeof ((seq->insn (0))) const _rtx = (
(seq->insn (0))); if (((enum rtx_code) (_rtx)->code) !=
JUMP_INSN) rtl_check_failed_flag ("INSN_ANNULLED_BRANCH_P", _rtx
, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1120, __FUNCTION__); _rtx; })->unchanging)
102: Assuming field 'code' is equal to JUMP_INSN
103: Taking false branch
104: Assuming field 'unchanging' is 0
1121 && ! INSN_FROM_TARGET_P (trial)(__extension__ ({ __typeof ((trial)) const _rtx = ((trial)); if
(((enum rtx_code) (_rtx)->code) != INSN && ((enum
rtx_code) (_rtx)->code) != JUMP_INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1121, __FUNCTION__); _rtx; })->in_struct)
)) 1122 return; 1123 1124 /* If this insn was already done (usually in a previous delay slot), 1125 pretend we put it in our delay slot. */ 1126 redundant[i] = redundant_insn (trial, insn, new_delay_list); 1127 if (redundant[i])
105: Assuming the condition is false
106: Taking false branch
1128 continue; 1129 1130 /* We will end up re-vectoring this branch, so compute flags 1131 based on jumping to the new label. */ 1132 flags = get_jump_flags (insn, JUMP_LABEL (seq->insn (0))(((seq->insn (0))->u.fld[7]).rt_rtx)); 1133 1134 if (! must_annul
106.1: 'must_annul' is 0
108: Assuming the condition is true
109: Taking true branch
1135 && ((condition == const_true_rtx
107: Assuming 'condition' is equal to 'const_true_rtx'
1136 || (! insn_sets_resource_p (trial, other_needed, false) 1137 && ! may_trap_or_fault_p (PATTERN (trial))))) 1138 ? eligible_for_delay (insn, total_slots_filled, trial, flags) 1139 : (must_annul || (delay_list->is_empty () && new_delay_list.is_empty ())) 1140 && (must_annul = 1, 1141 check_annul_list_true_false (0, *delay_list) 1142 && check_annul_list_true_false (0, new_delay_list) 1143 && eligible_for_annul_false (insn, total_slots_filled, 1144 trial, flags))) 1145 { 1146 if (must_annul
109.1: 'must_annul' is 0
)
110: Taking false branch
1147 { 1148 /* Frame related instructions cannot go into annulled delay 1149 slots, it messes up the dwarf info. */ 1150 if (RTX_FRAME_RELATED_P (trial)(__extension__ ({ __typeof ((trial)) const _rtx = ((trial)); if
(((enum rtx_code) (_rtx)->code) != DEBUG_INSN && (
(enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN && ((enum rtx_code) (
_rtx)->code) != JUMP_INSN && ((enum rtx_code) (_rtx
)->code) != BARRIER && ((enum rtx_code) (_rtx)->
code) != SET) rtl_check_failed_flag ("RTX_FRAME_RELATED_P",_rtx
, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1150, __FUNCTION__); _rtx; })->frame_related)
) 1151 return; 1152 used_annul = 1; 1153 } 1154 rtx_insn *temp = copy_delay_slot_insn (trial); 1155 INSN_FROM_TARGET_P (temp)(__extension__ ({ __typeof ((temp)) const _rtx = ((temp)); if
(((enum rtx_code) (_rtx)->code) != INSN && ((enum
rtx_code) (_rtx)->code) != JUMP_INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1155, __FUNCTION__); _rtx; })->in_struct)
= 1;
111: Assuming field 'code' is equal to INSN
1156 add_to_delay_list (temp, &new_delay_list); 1157 total_slots_filled++; 1158 1159 if (--slots_remaining == 0)
112: Assuming the condition is true
113: Taking true branch
1160 break;
114: Execution continues on line 1168
1161 } 1162 else 1163 return; 1164 } 1165 1166 /* Record the effect of the instructions that were redundant and which 1167 we therefore decided not to copy. */ 1168 for (i = 1; i < seq->len (); i++)
115: Loop condition is true. Entering loop body
117: The value 2 is assigned to 'i'
118: Assuming the condition is true
119: Loop condition is true. Entering loop body
1169 if (redundant[i])
116: Taking false branch
120: Branch condition evaluates to a garbage value
1170 { 1171 fix_reg_dead_note (redundant[i], insn); 1172 update_block (seq->insn (i), insn); 1173 } 1174 1175 /* Show the place to which we will be branching. */ 1176 *pnew_thread = first_active_target_insn (JUMP_LABEL (seq->insn (0))(((seq->insn (0))->u.fld[7]).rt_rtx)); 1177 1178 /* Add any new insns to the delay list and update the count of the 1179 number of slots filled. */ 1180 *pslots_filled = total_slots_filled; 1181 if (used_annul) 1182 *pannul_p = 1; 1183 1184 rtx_insn *temp; 1185 FOR_EACH_VEC_ELT (new_delay_list, i, temp)for (i = 0; (new_delay_list).iterate ((i), &(temp)); ++(i
))
1186 add_to_delay_list (temp, delay_list); 1187} 1188
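
The garbage-value report above is easier to see outside the RTL machinery: `redundant' comes from XALLOCAVEC, i.e. uninitialized alloca storage; the first loop over the SEQUENCE (steps 45-114) can leave early through `if (--slots_remaining == 0) break;' after writing only redundant[1], yet the cleanup loop at line 1168 (steps 115-120) reads redundant[i] for the whole range. A minimal standalone sketch of that pattern, with made-up names and nothing GCC-specific:

    /* An alloca'd array is only partially written before the first loop
       breaks out early, but a second loop still reads every element.  */
    #include <alloca.h>
    #include <stdio.h>

    int
    main (void)
    {
      int len = 4;
      /* Uninitialized storage, like XALLOCAVEC (rtx_insn *, XVECLEN (seq, 0)).  */
      int *written = (int *) alloca (len * sizeof (int));
      int slots_remaining = 1;

      for (int i = 1; i < len; i++)
        {
          written[i] = 0;                /* plays the role of redundant[i] = ...  */
          if (--slots_remaining == 0)
            break;                       /* leaves written[2], written[3] untouched */
        }

      for (int i = 1; i < len; i++)
        if (written[i])                  /* reads the untouched elements */
          printf ("%d\n", i);
      return 0;
    }

Nothing between the two loops in steal_delay_list_from_target guarantees that the entries past the index where the first loop stopped were ever written, which is exactly what the checker is pointing at.
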
1189/* Similar to steal_delay_list_from_target except that SEQ is on the 1190 fallthrough path of INSN. Here we only do something if the delay insn 1191 of SEQ is an unconditional branch. In that case we steal its delay slot 1192 for INSN since unconditional branches are much easier to fill. */ 1193 1194static void 1195steal_delay_list_from_fallthrough (rtx_insn *insn, rtx condition, 1196 rtx_sequence *seq, 1197 vec<rtx_insn *> *delay_list, 1198 struct resources *sets, 1199 struct resources *needed, 1200 struct resources *other_needed, 1201 int slots_to_fill, int *pslots_filled, 1202 int *pannul_p) 1203{ 1204 int i; 1205 int flags; 1206 int must_annul = *pannul_p; 1207 int used_annul = 0; 1208 1209 flags = get_jump_flags (insn, JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx)); 1210 1211 /* We can't do anything if SEQ's delay insn isn't an 1212 unconditional branch. */ 1213 1214 if (! simplejump_or_return_p (seq->insn (0))) 1215 return; 1216 1217 for (i = 1; i < seq->len (); i++) 1218 { 1219 rtx_insn *trial = seq->insn (i); 1220 rtx_insn *prior_insn; 1221 1222 /* If TRIAL sets CC0, stealing it will move it too far from the use 1223 of CC0. */ 1224 if (insn_references_resource_p (trial, sets, false) 1225 || insn_sets_resource_p (trial, needed, false) 1226 || insn_sets_resource_p (trial, sets, false) 1227 || (HAVE_cc00 && sets_cc0_p (PATTERN (trial)))) 1228 1229 break; 1230 1231 /* If this insn was already done, we don't need it. */ 1232 if ((prior_insn = redundant_insn (trial, insn, *delay_list))) 1233 { 1234 fix_reg_dead_note (prior_insn, insn); 1235 update_block (trial, insn); 1236 delete_from_delay_slot (trial); 1237 continue; 1238 } 1239 1240 if (! must_annul 1241 && ((condition == const_true_rtx 1242 || (! insn_sets_resource_p (trial, other_needed, false) 1243 && ! may_trap_or_fault_p (PATTERN (trial))))) 1244 ? eligible_for_delay (insn, *pslots_filled, trial, flags) 1245 : (must_annul || delay_list->is_empty ()) && (must_annul = 1, 1246 check_annul_list_true_false (1, *delay_list) 1247 && eligible_for_annul_true (insn, *pslots_filled, trial, flags))) 1248 { 1249 if (must_annul) 1250 used_annul = 1; 1251 delete_from_delay_slot (trial); 1252 add_to_delay_list (trial, delay_list); 1253 1254 if (++(*pslots_filled) == slots_to_fill) 1255 break; 1256 } 1257 else 1258 break; 1259 } 1260 1261 if (used_annul) 1262 *pannul_p = 1; 1263} 1264
1265/* Try merging insns starting at THREAD which match exactly the insns in 1266 INSN's delay list. 1267 1268 If all insns were matched and the insn was previously annulling, the 1269 annul bit will be cleared. 1270 1271 For each insn that is merged, if the branch is or will be non-annulling, 1272 we delete the merged insn. */ 1273 1274static void 1275try_merge_delay_insns (rtx_insn *insn, rtx_insn *thread) 1276{ 1277 rtx_insn *trial, *next_trial; 1278 rtx_insn *delay_insn = as_a <rtx_insn *> (XVECEXP (PATTERN (insn), 0, 0)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->elem[0])); 1279 int annul_p = JUMP_P (delay_insn)(((enum rtx_code) (delay_insn)->code) == JUMP_INSN) && INSN_ANNULLED_BRANCH_P (delay_insn)(__extension__ ({ __typeof ((delay_insn)) const _rtx = ((delay_insn
)); if (((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1279, __FUNCTION__); _rtx; })->unchanging)
; 1280 int slot_number = 1; 1281 int num_slots = XVECLEN (PATTERN (insn), 0)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->num_elem); 1282 rtx next_to_match = XVECEXP (PATTERN (insn), 0, slot_number)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->elem[slot_number
])
; 1283 struct resources set, needed, modified; 1284 auto_vec<std::pair<rtx_insn *, bool>, 10> merged_insns; 1285 int flags; 1286 1287 flags = get_jump_flags (delay_insn, JUMP_LABEL (delay_insn)(((delay_insn)->u.fld[7]).rt_rtx)); 1288 1289 CLEAR_RESOURCE (&needed)do { (&needed)->memory = (&needed)->volatil = (
&needed)->cc = 0; CLEAR_HARD_REG_SET ((&needed)->
regs); } while (0)
; 1290 CLEAR_RESOURCE (&set)do { (&set)->memory = (&set)->volatil = (&set
)->cc = 0; CLEAR_HARD_REG_SET ((&set)->regs); } while
(0)
; 1291 1292 /* If this is not an annulling branch, take into account anything needed in 1293 INSN's delay slot. This prevents two increments from being incorrectly 1294 folded into one. If we are annulling, this would be the correct 1295 thing to do. (The alternative, looking at things set in NEXT_TO_MATCH 1296 will essentially disable this optimization. This method is somewhat of 1297 a kludge, but I don't see a better way.) */ 1298 if (! annul_p) 1299 for (int i = 1; i < num_slots; i++) 1300 if (XVECEXP (PATTERN (insn), 0, i)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->elem[i])) 1301 mark_referenced_resources (XVECEXP (PATTERN (insn), 0, i)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->elem[i]), &needed, 1302 true); 1303 1304 for (trial = thread; !stop_search_p (trial, 1); trial = next_trial) 1305 { 1306 rtx pat = PATTERN (trial); 1307 rtx oldtrial = trial; 1308 1309 next_trial = next_nonnote_insn (trial); 1310 1311 /* TRIAL must be a CALL_INSN or INSN. Skip USE and CLOBBER. */ 1312 if (NONJUMP_INSN_P (trial)(((enum rtx_code) (trial)->code) == INSN) 1313 && (GET_CODE (pat)((enum rtx_code) (pat)->code) == USE || GET_CODE (pat)((enum rtx_code) (pat)->code) == CLOBBER)) 1314 continue; 1315 1316 if (GET_CODE (next_to_match)((enum rtx_code) (next_to_match)->code) == GET_CODE (trial)((enum rtx_code) (trial)->code) 1317 /* We can't share an insn that sets cc0. */ 1318 && (!HAVE_cc00 || ! sets_cc0_p (pat)) 1319 && ! insn_references_resource_p (trial, &set, true) 1320 && ! insn_sets_resource_p (trial, &set, true) 1321 && ! insn_sets_resource_p (trial, &needed, true) 1322 && (trial = try_split (pat, trial, 0)) != 0 1323 /* Update next_trial, in case try_split succeeded. */ 1324 && (next_trial = next_nonnote_insn (trial)) 1325 /* Likewise THREAD. */ 1326 && (thread = oldtrial == thread ? trial : thread) 1327 && rtx_equal_p (PATTERN (next_to_match), PATTERN (trial)) 1328 /* Have to test this condition if annul condition is different 1329 from (and less restrictive than) non-annulling one. */ 1330 && eligible_for_delay (delay_insn, slot_number - 1, trial, flags)) 1331 { 1332 1333 if (! annul_p) 1334 { 1335 update_block (trial, thread); 1336 if (trial == thread) 1337 thread = next_active_insn (thread); 1338 1339 delete_related_insns (trial); 1340 INSN_FROM_TARGET_P (next_to_match)(__extension__ ({ __typeof ((next_to_match)) const _rtx = ((next_to_match
)); if (((enum rtx_code) (_rtx)->code) != INSN && (
(enum rtx_code) (_rtx)->code) != JUMP_INSN && ((enum
rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1340, __FUNCTION__); _rtx; })->in_struct)
= 0; 1341 } 1342 else 1343 merged_insns.safe_push (std::pair<rtx_insn *, bool> (trial, false)); 1344 1345 if (++slot_number == num_slots) 1346 break; 1347 1348 next_to_match = XVECEXP (PATTERN (insn), 0, slot_number)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->elem[slot_number
])
; 1349 } 1350 1351 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL); 1352 mark_referenced_resources (trial, &needed, true); 1353 } 1354 1355 /* See if we stopped on a filled insn. If we did, try to see if its 1356 delay slots match. */ 1357 if (slot_number != num_slots 1358 && trial && NONJUMP_INSN_P (trial)(((enum rtx_code) (trial)->code) == INSN) 1359 && GET_CODE (PATTERN (trial))((enum rtx_code) (PATTERN (trial))->code) == SEQUENCE 1360 && !(JUMP_P (XVECEXP (PATTERN (trial), 0, 0))(((enum rtx_code) ((((((PATTERN (trial))->u.fld[0]).rt_rtvec
))->elem[0]))->code) == JUMP_INSN)
1361 && INSN_ANNULLED_BRANCH_P (XVECEXP (PATTERN (trial), 0, 0))(__extension__ ({ __typeof (((((((PATTERN (trial))->u.fld[
0]).rt_rtvec))->elem[0]))) const _rtx = (((((((PATTERN (trial
))->u.fld[0]).rt_rtvec))->elem[0]))); if (((enum rtx_code
) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag ("INSN_ANNULLED_BRANCH_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1361, __FUNCTION__); _rtx; })->unchanging)
)) 1362 { 1363 rtx_sequence *pat = as_a <rtx_sequence *> (PATTERN (trial)); 1364 rtx filled_insn = XVECEXP (pat, 0, 0)(((((pat)->u.fld[0]).rt_rtvec))->elem[0]); 1365 1366 /* Account for resources set/needed by the filled insn. */ 1367 mark_set_resources (filled_insn, &set, 0, MARK_SRC_DEST_CALL); 1368 mark_referenced_resources (filled_insn, &needed, true); 1369 1370 for (int i = 1; i < pat->len (); i++) 1371 { 1372 rtx_insn *dtrial = pat->insn (i); 1373 1374 CLEAR_RESOURCE (&modified)do { (&modified)->memory = (&modified)->volatil
= (&modified)->cc = 0; CLEAR_HARD_REG_SET ((&modified
)->regs); } while (0)
; 1375 /* Account for resources set by the insn following NEXT_TO_MATCH 1376 inside INSN's delay list. */ 1377 for (int j = 1; slot_number + j < num_slots; j++) 1378 mark_set_resources (XVECEXP (PATTERN (insn), 0, slot_number + j)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->elem[slot_number
+ j])
, 1379 &modified, 0, MARK_SRC_DEST_CALL); 1380 /* Account for resources set by the insn before DTRIAL and inside 1381 TRIAL's delay list. */ 1382 for (int j = 1; j < i; j++) 1383 mark_set_resources (XVECEXP (pat, 0, j)(((((pat)->u.fld[0]).rt_rtvec))->elem[j]), 1384 &modified, 0, MARK_SRC_DEST_CALL); 1385 if (! insn_references_resource_p (dtrial, &set, true) 1386 && ! insn_sets_resource_p (dtrial, &set, true) 1387 && ! insn_sets_resource_p (dtrial, &needed, true) 1388 && (!HAVE_cc00 || ! sets_cc0_p (PATTERN (dtrial))) 1389 && rtx_equal_p (PATTERN (next_to_match), PATTERN (dtrial)) 1390 /* Check that DTRIAL and NEXT_TO_MATCH does not reference a 1391 resource modified between them (only dtrial is checked because 1392 next_to_match and dtrial shall to be equal in order to hit 1393 this line) */ 1394 && ! insn_references_resource_p (dtrial, &modified, true) 1395 && eligible_for_delay (delay_insn, slot_number - 1, dtrial, flags)) 1396 { 1397 if (! annul_p) 1398 { 1399 rtx_insn *new_rtx; 1400 1401 update_block (dtrial, thread); 1402 new_rtx = delete_from_delay_slot (dtrial); 1403 if (thread->deleted ()) 1404 thread = new_rtx; 1405 INSN_FROM_TARGET_P (next_to_match)(__extension__ ({ __typeof ((next_to_match)) const _rtx = ((next_to_match
)); if (((enum rtx_code) (_rtx)->code) != INSN && (
(enum rtx_code) (_rtx)->code) != JUMP_INSN && ((enum
rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1405, __FUNCTION__); _rtx; })->in_struct)
= 0; 1406 } 1407 else 1408 merged_insns.safe_push (std::pair<rtx_insn *, bool> (dtrial, 1409 true)); 1410 1411 if (++slot_number == num_slots) 1412 break; 1413 1414 next_to_match = XVECEXP (PATTERN (insn), 0, slot_number)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->elem[slot_number
])
; 1415 } 1416 else 1417 { 1418 /* Keep track of the set/referenced resources for the delay 1419 slots of any trial insns we encounter. */ 1420 mark_set_resources (dtrial, &set, 0, MARK_SRC_DEST_CALL); 1421 mark_referenced_resources (dtrial, &needed, true); 1422 } 1423 } 1424 } 1425 1426 /* If all insns in the delay slot have been matched and we were previously 1427 annulling the branch, we need not any more. In that case delete all the 1428 merged insns. Also clear the INSN_FROM_TARGET_P bit of each insn in 1429 the delay list so that we know that it isn't only being used at the 1430 target. */ 1431 if (slot_number == num_slots && annul_p) 1432 { 1433 unsigned int len = merged_insns.length (); 1434 for (unsigned int i = len - 1; i < len; i--) 1435 if (merged_insns[i].second) 1436 { 1437 update_block (merged_insns[i].first, thread); 1438 rtx_insn *new_rtx = delete_from_delay_slot (merged_insns[i].first); 1439 if (thread->deleted ()) 1440 thread = new_rtx; 1441 } 1442 else 1443 { 1444 update_block (merged_insns[i].first, thread); 1445 delete_related_insns (merged_insns[i].first); 1446 } 1447 1448 INSN_ANNULLED_BRANCH_P (delay_insn)(__extension__ ({ __typeof ((delay_insn)) const _rtx = ((delay_insn
)); if (((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1448, __FUNCTION__); _rtx; })->unchanging)
= 0; 1449 1450 for (int i = 0; i < XVECLEN (PATTERN (insn), 0)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->num_elem); i++) 1451 INSN_FROM_TARGET_P (XVECEXP (PATTERN (insn), 0, i))(__extension__ ({ __typeof (((((((PATTERN (insn))->u.fld[0
]).rt_rtvec))->elem[i]))) const _rtx = (((((((PATTERN (insn
))->u.fld[0]).rt_rtvec))->elem[i]))); if (((enum rtx_code
) (_rtx)->code) != INSN && ((enum rtx_code) (_rtx)
->code) != JUMP_INSN && ((enum rtx_code) (_rtx)->
code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1451, __FUNCTION__); _rtx; })->in_struct)
= 0; 1452 } 1453} 1454
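
The "two increments ... incorrectly folded into one" remark above is the subtle part of try_merge_delay_insns: two insns can compare rtx_equal_p and still both be needed. A toy model (made-up types, not GCC code) of how marking the delay slot's referenced resources in NEEDED blocks the bogus merge:

    /* Suppose "r2 = r2 + 1" sits in the non-annulling delay slot and the
       branch target begins with the same increment; pattern equality alone
       would delete the second copy and lose an update of r2.  */
    #include <stdio.h>
    #include <string.h>

    struct toy_insn { unsigned reads, writes; const char *pattern; };

    static int
    mergeable_p (const struct toy_insn *slot, const struct toy_insn *candidate)
    {
      unsigned needed = slot->reads;             /* mark_referenced_resources */
      if (candidate->writes & needed)            /* insn_sets_resource_p */
        return 0;
      return strcmp (candidate->pattern, slot->pattern) == 0;  /* stands in for rtx_equal_p */
    }

    int
    main (void)
    {
      struct toy_insn incr = { 1u << 2, 1u << 2, "r2=r2+1" };     /* reads and writes r2 */
      printf ("merge allowed: %d\n", mergeable_p (&incr, &incr)); /* prints 0 */
      return 0;
    }
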
1455/* See if INSN is redundant with an insn in front of TARGET. Often this 1456 is called when INSN is a candidate for a delay slot of TARGET. 1457 DELAY_LIST are insns that will be placed in delay slots of TARGET in front 1458 of INSN. Often INSN will be redundant with an insn in a delay slot of 1459 some previous insn. This happens when we have a series of branches to the 1460 same label; in that case the first insn at the target might want to go 1461 into each of the delay slots. 1462 1463 If we are not careful, this routine can take up a significant fraction 1464 of the total compilation time (4%), but only wins rarely. Hence we 1465 speed this routine up by making two passes. The first pass goes back 1466 until it hits a label and sees if it finds an insn with an identical 1467 pattern. Only in this (relatively rare) event does it check for 1468 data conflicts. 1469 1470 We do not split insns we encounter. This could cause us not to find a 1471 redundant insn, but the cost of splitting seems greater than the possible 1472 gain in rare cases. */ 1473 1474static rtx_insn * 1475redundant_insn (rtx insn, rtx_insn *target, const vec<rtx_insn *> &delay_list) 1476{ 1477 rtx target_main = target; 1478 rtx ipat = PATTERN (insn); 1479 rtx_insn *trial; 1480 rtx pat; 1481 struct resources needed, set; 1482 int i; 1483 unsigned insns_to_search; 1484 1485 /* If INSN has any REG_UNUSED notes, it can't match anything since we 1486 are allowed to not actually assign to such a register. */ 1487 if (find_reg_note (insn, REG_UNUSED, NULL_RTX(rtx) 0) != 0) 1488 return 0; 1489 1490 /* Scan backwards looking for a match. */ 1491 for (trial = PREV_INSN (target), 1492 insns_to_search = param_max_delay_slot_insn_searchglobal_options.x_param_max_delay_slot_insn_search; 1493 trial && insns_to_search > 0; 1494 trial = PREV_INSN (trial)) 1495 { 1496 /* (use (insn))s can come immediately after a barrier if the 1497 label that used to precede them has been deleted as dead. 1498 See delete_related_insns. */ 1499 if (LABEL_P (trial)(((enum rtx_code) (trial)->code) == CODE_LABEL) || BARRIER_P (trial)(((enum rtx_code) (trial)->code) == BARRIER)) 1500 return 0; 1501 1502 if (!INSN_P (trial)(((((enum rtx_code) (trial)->code) == INSN) || (((enum rtx_code
) (trial)->code) == JUMP_INSN) || (((enum rtx_code) (trial
)->code) == CALL_INSN)) || (((enum rtx_code) (trial)->code
) == DEBUG_INSN))
) 1503 continue; 1504 --insns_to_search; 1505 1506 pat = PATTERN (trial); 1507 if (GET_CODE (pat)((enum rtx_code) (pat)->code) == USE || GET_CODE (pat)((enum rtx_code) (pat)->code) == CLOBBER) 1508 continue; 1509 1510 if (GET_CODE (trial)((enum rtx_code) (trial)->code) == DEBUG_INSN) 1511 continue; 1512 1513 if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (pat)) 1514 { 1515 /* Stop for a CALL and its delay slots because it is difficult to 1516 track its resource needs correctly. */ 1517 if (CALL_P (seq->element (0))(((enum rtx_code) (seq->element (0))->code) == CALL_INSN
)
) 1518 return 0; 1519 1520 /* Stop for an INSN or JUMP_INSN with delayed effects and its delay 1521 slots because it is difficult to track its resource needs 1522 correctly. */ 1523 1524 if (INSN_SETS_ARE_DELAYED (seq->insn (0))false) 1525 return 0; 1526 1527 if (INSN_REFERENCES_ARE_DELAYED (seq->insn (0))false) 1528 return 0; 1529 1530 /* See if any of the insns in the delay slot match, updating 1531 resource requirements as we go. */ 1532 for (i = seq->len () - 1; i > 0; i--) 1533 if (GET_CODE (seq->element (i))((enum rtx_code) (seq->element (i))->code) == GET_CODE (insn)((enum rtx_code) (insn)->code) 1534 && rtx_equal_p (PATTERN (seq->element (i)), ipat) 1535 && ! find_reg_note (seq->element (i), REG_UNUSED, NULL_RTX(rtx) 0)) 1536 break; 1537 1538 /* If found a match, exit this loop early. */ 1539 if (i > 0) 1540 break; 1541 } 1542 1543 else if (GET_CODE (trial)((enum rtx_code) (trial)->code) == GET_CODE (insn)((enum rtx_code) (insn)->code) && rtx_equal_p (pat, ipat) 1544 && ! find_reg_note (trial, REG_UNUSED, NULL_RTX(rtx) 0)) 1545 break; 1546 } 1547 1548 /* If we didn't find an insn that matches, return 0. */ 1549 if (trial == 0) 1550 return 0; 1551 1552 /* See what resources this insn sets and needs. If they overlap, or 1553 if this insn references CC0, it can't be redundant. */ 1554 1555 CLEAR_RESOURCE (&needed)do { (&needed)->memory = (&needed)->volatil = (
&needed)->cc = 0; CLEAR_HARD_REG_SET ((&needed)->
regs); } while (0)
; 1556 CLEAR_RESOURCE (&set)do { (&set)->memory = (&set)->volatil = (&set
)->cc = 0; CLEAR_HARD_REG_SET ((&set)->regs); } while
(0)
; 1557 mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL); 1558 mark_referenced_resources (insn, &needed, true); 1559 1560 /* If TARGET is a SEQUENCE, get the main insn. */ 1561 if (NONJUMP_INSN_P (target)(((enum rtx_code) (target)->code) == INSN) && GET_CODE (PATTERN (target))((enum rtx_code) (PATTERN (target))->code) == SEQUENCE) 1562 target_main = XVECEXP (PATTERN (target), 0, 0)(((((PATTERN (target))->u.fld[0]).rt_rtvec))->elem[0]); 1563 1564 if (resource_conflicts_p (&needed, &set) 1565 || (HAVE_cc00 && reg_mentioned_p (cc0_rtx, ipat)) 1566 /* The insn requiring the delay may not set anything needed or set by 1567 INSN. */ 1568 || insn_sets_resource_p (target_main, &needed, true) 1569 || insn_sets_resource_p (target_main, &set, true)) 1570 return 0; 1571 1572 /* Insns we pass may not set either NEEDED or SET, so merge them for 1573 simpler tests. */ 1574 needed.memory |= set.memory; 1575 needed.regs |= set.regs; 1576 1577 /* This insn isn't redundant if it conflicts with an insn that either is 1578 or will be in a delay slot of TARGET. */ 1579 1580 unsigned int j; 1581 rtx_insn *temp; 1582 FOR_EACH_VEC_ELT (delay_list, j, temp)for (j = 0; (delay_list).iterate ((j), &(temp)); ++(j)) 1583 if (insn_sets_resource_p (temp, &needed, true)) 1584 return 0; 1585 1586 if (NONJUMP_INSN_P (target)(((enum rtx_code) (target)->code) == INSN) && GET_CODE (PATTERN (target))((enum rtx_code) (PATTERN (target))->code) == SEQUENCE) 1587 for (i = 1; i < XVECLEN (PATTERN (target), 0)(((((PATTERN (target))->u.fld[0]).rt_rtvec))->num_elem); i++) 1588 if (insn_sets_resource_p (XVECEXP (PATTERN (target), 0, i)(((((PATTERN (target))->u.fld[0]).rt_rtvec))->elem[i]), &needed, 1589 true)) 1590 return 0; 1591 1592 /* Scan backwards until we reach a label or an insn that uses something 1593 INSN sets or sets something insn uses or sets. */ 1594 1595 for (trial = PREV_INSN (target), 1596 insns_to_search = param_max_delay_slot_insn_searchglobal_options.x_param_max_delay_slot_insn_search; 1597 trial && !LABEL_P (trial)(((enum rtx_code) (trial)->code) == CODE_LABEL) && insns_to_search > 0; 1598 trial = PREV_INSN (trial)) 1599 { 1600 if (!INSN_P (trial)(((((enum rtx_code) (trial)->code) == INSN) || (((enum rtx_code
) (trial)->code) == JUMP_INSN) || (((enum rtx_code) (trial
)->code) == CALL_INSN)) || (((enum rtx_code) (trial)->code
) == DEBUG_INSN))
) 1601 continue; 1602 --insns_to_search; 1603 1604 pat = PATTERN (trial); 1605 if (GET_CODE (pat)((enum rtx_code) (pat)->code) == USE || GET_CODE (pat)((enum rtx_code) (pat)->code) == CLOBBER) 1606 continue; 1607 1608 if (GET_CODE (trial)((enum rtx_code) (trial)->code) == DEBUG_INSN) 1609 continue; 1610 1611 if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (pat)) 1612 { 1613 bool annul_p = false; 1614 rtx_insn *control = seq->insn (0); 1615 1616 /* If this is a CALL_INSN and its delay slots, it is hard to track 1617 the resource needs properly, so give up. */ 1618 if (CALL_P (control)(((enum rtx_code) (control)->code) == CALL_INSN)) 1619 return 0; 1620 1621 /* If this is an INSN or JUMP_INSN with delayed effects, it 1622 is hard to track the resource needs properly, so give up. */ 1623 1624 if (INSN_SETS_ARE_DELAYED (control)false) 1625 return 0; 1626 1627 if (INSN_REFERENCES_ARE_DELAYED (control)false) 1628 return 0; 1629 1630 if (JUMP_P (control)(((enum rtx_code) (control)->code) == JUMP_INSN)) 1631 annul_p = INSN_ANNULLED_BRANCH_P (control)(__extension__ ({ __typeof ((control)) const _rtx = ((control
)); if (((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1631, __FUNCTION__); _rtx; })->unchanging)
; 1632 1633 /* See if any of the insns in the delay slot match, updating 1634 resource requirements as we go. */ 1635 for (i = seq->len () - 1; i > 0; i--) 1636 { 1637 rtx_insn *candidate = seq->insn (i); 1638 1639 /* If an insn will be annulled if the branch is false, it isn't 1640 considered as a possible duplicate insn. */ 1641 if (rtx_equal_p (PATTERN (candidate), ipat) 1642 && ! (annul_p && INSN_FROM_TARGET_P (candidate)(__extension__ ({ __typeof ((candidate)) const _rtx = ((candidate
)); if (((enum rtx_code) (_rtx)->code) != INSN && (
(enum rtx_code) (_rtx)->code) != JUMP_INSN && ((enum
rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1642, __FUNCTION__); _rtx; })->in_struct)
)) 1643 { 1644 /* Show that this insn will be used in the sequel. */ 1645 INSN_FROM_TARGET_P (candidate)(__extension__ ({ __typeof ((candidate)) const _rtx = ((candidate
)); if (((enum rtx_code) (_rtx)->code) != INSN && (
(enum rtx_code) (_rtx)->code) != JUMP_INSN && ((enum
rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1645, __FUNCTION__); _rtx; })->in_struct)
= 0; 1646 return candidate; 1647 } 1648 1649 /* Unless this is an annulled insn from the target of a branch, 1650 we must stop if it sets anything needed or set by INSN. */ 1651 if ((!annul_p || !INSN_FROM_TARGET_P (candidate)(__extension__ ({ __typeof ((candidate)) const _rtx = ((candidate
)); if (((enum rtx_code) (_rtx)->code) != INSN && (
(enum rtx_code) (_rtx)->code) != JUMP_INSN && ((enum
rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 1651, __FUNCTION__); _rtx; })->in_struct)
) 1652 && insn_sets_resource_p (candidate, &needed, true)) 1653 return 0; 1654 } 1655 1656 /* If the insn requiring the delay slot conflicts with INSN, we 1657 must stop. */ 1658 if (insn_sets_resource_p (control, &needed, true)) 1659 return 0; 1660 } 1661 else 1662 { 1663 /* See if TRIAL is the same as INSN. */ 1664 pat = PATTERN (trial); 1665 if (rtx_equal_p (pat, ipat)) 1666 return trial; 1667 1668 /* Can't go any further if TRIAL conflicts with INSN. */ 1669 if (insn_sets_resource_p (trial, &needed, true)) 1670 return 0; 1671 } 1672 } 1673 1674 return 0; 1675} 1676
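
The two-pass shape described in the comment before redundant_insn is worth seeing in isolation. A sketch under a toy instruction model (bitmask "resources", integer "patterns"; the real code additionally stops at labels, CALL_INSNs and annulled delay slots):

    #include <stddef.h>

    struct toy_insn { int pattern; unsigned sets, uses; };

    /* Return the earlier insn that makes INSN redundant, or NULL.  */
    struct toy_insn *
    find_redundant (struct toy_insn *prev, size_t n, const struct toy_insn *insn)
    {
      size_t hit = n;

      /* Pass 1: cheap backward scan for an identical pattern only.  */
      for (size_t i = n; i-- > 0; )
        if (prev[i].pattern == insn->pattern)
          {
            hit = i;
            break;
          }
      if (hit == n)
        return NULL;

      /* Pass 2: only in the (relatively rare) hit case, pay for the conflict
         check on everything between the match and the candidate.  */
      for (size_t i = hit + 1; i < n; i++)
        if (prev[i].sets & (insn->sets | insn->uses))
          return NULL;

      return &prev[hit];
    }
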
1677/* Return 1 if THREAD can only be executed in one way. If LABEL is nonzero, 1678 it is the target of the branch insn being scanned. If ALLOW_FALLTHROUGH 1679 is nonzero, we are allowed to fall into this thread; otherwise, we are 1680 not. 1681 1682 If LABEL is used more than one or we pass a label other than LABEL before 1683 finding an active insn, we do not own this thread. */ 1684 1685static int 1686own_thread_p (rtx thread, rtx label, int allow_fallthrough) 1687{ 1688 rtx_insn *active_insn; 1689 rtx_insn *insn; 1690 1691 /* We don't own the function end. */ 1692 if (thread == 0 || ANY_RETURN_P (thread)(((enum rtx_code) (thread)->code) == RETURN || ((enum rtx_code
) (thread)->code) == SIMPLE_RETURN)
) 1693 return 0; 1694 1695 /* We have a non-NULL insn. */ 1696 rtx_insn *thread_insn = as_a <rtx_insn *> (thread); 1697 1698 /* Get the first active insn, or THREAD_INSN, if it is an active insn. */ 1699 active_insn = next_active_insn (PREV_INSN (thread_insn)); 1700 1701 for (insn = thread_insn; insn != active_insn; insn = NEXT_INSN (insn)) 1702 if (LABEL_P (insn)(((enum rtx_code) (insn)->code) == CODE_LABEL) 1703 && (insn != label || LABEL_NUSES (insn)(((insn)->u.fld[4]).rt_int) != 1)) 1704 return 0; 1705 1706 if (allow_fallthrough) 1707 return 1; 1708 1709 /* Ensure that we reach a BARRIER before any insn or label. */ 1710 for (insn = prev_nonnote_insn (thread_insn); 1711 insn == 0 || !BARRIER_P (insn)(((enum rtx_code) (insn)->code) == BARRIER); 1712 insn = prev_nonnote_insn (insn)) 1713 if (insn == 0 1714 || LABEL_P (insn)(((enum rtx_code) (insn)->code) == CODE_LABEL) 1715 || (NONJUMP_INSN_P (insn)(((enum rtx_code) (insn)->code) == INSN) 1716 && GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) != USE 1717 && GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) != CLOBBER)) 1718 return 0; 1719 1720 return 1; 1721} 1722
1723/* Called when INSN is being moved from a location near the target of a jump. 1724 We leave a marker of the form (use (INSN)) immediately in front of WHERE 1725 for mark_target_live_regs. These markers will be deleted at the end. 1726 1727 We used to try to update the live status of registers if WHERE is at 1728 the start of a basic block, but that can't work since we may remove a 1729 BARRIER in relax_delay_slots. */ 1730 1731static void 1732update_block (rtx_insn *insn, rtx_insn *where) 1733{ 1734 emit_insn_before (gen_rtx_USE (VOIDmode, insn)gen_rtx_fmt_e_stat ((USE), ((((void) 0, E_VOIDmode))), ((insn
)) )
, where); 1735 1736 /* INSN might be making a value live in a block where it didn't use to 1737 be. So recompute liveness information for this block. */ 1738 incr_ticks_for_insn (insn); 1739} 1740 1741/* Similar to REDIRECT_JUMP except that we update the BB_TICKS entry for 1742 the basic block containing the jump. */ 1743 1744static int 1745reorg_redirect_jump (rtx_jump_insn *jump, rtx nlabel) 1746{ 1747 incr_ticks_for_insn (jump); 1748 return redirect_jump (jump, nlabel, 1); 1749} 1750 1751/* Called when INSN is being moved forward into a delay slot of DELAYED_INSN. 1752 We check every instruction between INSN and DELAYED_INSN for REG_DEAD notes 1753 that reference values used in INSN. If we find one, then we move the 1754 REG_DEAD note to INSN. 1755 1756 This is needed to handle the case where a later insn (after INSN) has a 1757 REG_DEAD note for a register used by INSN, and this later insn subsequently 1758 gets moved before a CODE_LABEL because it is a redundant insn. In this 1759 case, mark_target_live_regs may be confused into thinking the register 1760 is dead because it sees a REG_DEAD note immediately before a CODE_LABEL. */ 1761 1762static void 1763update_reg_dead_notes (rtx_insn *insn, rtx_insn *delayed_insn) 1764{ 1765 rtx link, next; 1766 rtx_insn *p; 1767 1768 for (p = next_nonnote_insn (insn); p != delayed_insn; 1769 p = next_nonnote_insn (p)) 1770 for (link = REG_NOTES (p)(((p)->u.fld[6]).rt_rtx); link; link = next) 1771 { 1772 next = XEXP (link, 1)(((link)->u.fld[1]).rt_rtx); 1773 1774 if (REG_NOTE_KIND (link)((enum reg_note) ((machine_mode) (link)->mode)) != REG_DEAD 1775 || !REG_P (XEXP (link, 0))(((enum rtx_code) ((((link)->u.fld[0]).rt_rtx))->code) ==
REG)
) 1776 continue; 1777 1778 if (reg_referenced_p (XEXP (link, 0)(((link)->u.fld[0]).rt_rtx), PATTERN (insn))) 1779 { 1780 /* Move the REG_DEAD note from P to INSN. */ 1781 remove_note (p, link); 1782 XEXP (link, 1)(((link)->u.fld[1]).rt_rtx) = REG_NOTES (insn)(((insn)->u.fld[6]).rt_rtx); 1783 REG_NOTES (insn)(((insn)->u.fld[6]).rt_rtx) = link; 1784 } 1785 } 1786} 1787 1788/* Called when an insn redundant with start_insn is deleted. If there 1789 is a REG_DEAD note for the target of start_insn between start_insn 1790 and stop_insn, then the REG_DEAD note needs to be deleted since the 1791 value no longer dies there. 1792 1793 If the REG_DEAD note isn't deleted, then mark_target_live_regs may be 1794 confused into thinking the register is dead. */ 1795 1796static void 1797fix_reg_dead_note (rtx_insn *start_insn, rtx stop_insn) 1798{ 1799 rtx link, next; 1800 rtx_insn *p; 1801 1802 for (p = next_nonnote_insn (start_insn); p != stop_insn; 1803 p = next_nonnote_insn (p)) 1804 for (link = REG_NOTES (p)(((p)->u.fld[6]).rt_rtx); link; link = next) 1805 { 1806 next = XEXP (link, 1)(((link)->u.fld[1]).rt_rtx); 1807 1808 if (REG_NOTE_KIND (link)((enum reg_note) ((machine_mode) (link)->mode)) != REG_DEAD 1809 || !REG_P (XEXP (link, 0))(((enum rtx_code) ((((link)->u.fld[0]).rt_rtx))->code) ==
REG)
) 1810 continue; 1811 1812 if (reg_set_p (XEXP (link, 0)(((link)->u.fld[0]).rt_rtx), PATTERN (start_insn))) 1813 { 1814 remove_note (p, link); 1815 return; 1816 } 1817 } 1818} 1819 1820/* Delete any REG_UNUSED notes that exist on INSN but not on OTHER_INSN. 1821 1822 This handles the case of udivmodXi4 instructions which optimize their 1823 output depending on whether any REG_UNUSED notes are present. We must 1824 make sure that INSN calculates as many results as OTHER_INSN does. */ 1825 1826static void 1827update_reg_unused_notes (rtx_insn *insn, rtx other_insn) 1828{ 1829 rtx link, next; 1830 1831 for (link = REG_NOTES (insn)(((insn)->u.fld[6]).rt_rtx); link; link = next) 1832 { 1833 next = XEXP (link, 1)(((link)->u.fld[1]).rt_rtx); 1834 1835 if (REG_NOTE_KIND (link)((enum reg_note) ((machine_mode) (link)->mode)) != REG_UNUSED 1836 || !REG_P (XEXP (link, 0))(((enum rtx_code) ((((link)->u.fld[0]).rt_rtx))->code) ==
REG)
) 1837 continue; 1838 1839 if (!find_regno_note (other_insn, REG_UNUSED, REGNO (XEXP (link, 0))(rhs_regno((((link)->u.fld[0]).rt_rtx))))) 1840 remove_note (insn, link); 1841 } 1842} 1843
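
The REG_DEAD moving in update_reg_dead_notes above is, underneath the XEXP macros, an ordinary singly-linked-list splice. A stand-alone sketch with a hypothetical `note' type (real notes are rtx EXPR_LISTs, not this struct):

    #include <stddef.h>

    struct note { int kind; struct note *next; };

    /* Unlink N from the list at *FROM and push it onto the list at *TO.  */
    void
    move_note (struct note **from, struct note *n, struct note **to)
    {
      for (struct note **p = from; *p != NULL; p = &(*p)->next)
        if (*p == n)
          {
            *p = n->next;   /* remove_note (p, link) */
            break;
          }
      n->next = *to;        /* XEXP (link, 1) = REG_NOTES (insn) */
      *to = n;              /* REG_NOTES (insn) = link */
    }
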
1844static vec <rtx> sibling_labels; 1845 1846/* Return the label before INSN, or put a new label there. If SIBLING is 1847 non-zero, it is another label associated with the new label (if any), 1848 typically the former target of the jump that will be redirected to 1849 the new label. */ 1850 1851static rtx_insn * 1852get_label_before (rtx_insn *insn, rtx sibling) 1853{ 1854 rtx_insn *label; 1855 1856 /* Find an existing label at this point 1857 or make a new one if there is none. */ 1858 label = prev_nonnote_insn (insn); 1859 1860 if (label == 0 || !LABEL_P (label)(((enum rtx_code) (label)->code) == CODE_LABEL)) 1861 { 1862 rtx_insn *prev = PREV_INSN (insn); 1863 1864 label = gen_label_rtx (); 1865 emit_label_after (label, prev); 1866 LABEL_NUSES (label)(((label)->u.fld[4]).rt_int) = 0; 1867 if (sibling) 1868 { 1869 sibling_labels.safe_push (label); 1870 sibling_labels.safe_push (sibling); 1871 } 1872 } 1873 return label; 1874} 1875 1876/* Scan a function looking for insns that need a delay slot and find insns to 1877 put into the delay slot. 1878 1879 NON_JUMPS_P is nonzero if we are to only try to fill non-jump insns (such 1880 as calls). We do these first since we don't want jump insns (that are 1881 easier to fill) to get the only insns that could be used for non-jump insns. 1882 When it is zero, only try to fill JUMP_INSNs. 1883 1884 When slots are filled in this manner, the insns (including the 1885 delay_insn) are put together in a SEQUENCE rtx. In this fashion, 1886 it is possible to tell whether a delay slot has really been filled 1887 or not. `final' knows how to deal with this, by communicating 1888 through FINAL_SEQUENCE. */ 1889 1890static void 1891fill_simple_delay_slots (int non_jumps_p) 1892{ 1893 rtx_insn *insn, *trial, *next_trial; 1894 rtx pat; 1895 int i; 1896 int num_unfilled_slots = unfilled_slots_next((rtx_insn **) ((void *) (&unfilled_slots_obstack)->next_free
))
- unfilled_slots_base((rtx_insn **) ((void *) (&unfilled_slots_obstack)->object_base
))
; 1897 struct resources needed, set; 1898 int slots_to_fill, slots_filled; 1899 auto_vec<rtx_insn *, 5> delay_list; 1900 1901 for (i = 0; i < num_unfilled_slots; i++) 1902 { 1903 int flags; 1904 /* Get the next insn to fill. If it has already had any slots assigned, 1905 we can't do anything with it. Maybe we'll improve this later. */ 1906 1907 insn = unfilled_slots_base((rtx_insn **) ((void *) (&unfilled_slots_obstack)->object_base
))
[i]; 1908 if (insn == 0 1909 || insn->deleted () 1910 || (NONJUMP_INSN_P (insn)(((enum rtx_code) (insn)->code) == INSN) 1911 && GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) == SEQUENCE) 1912 || (JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN) && non_jumps_p) 1913 || (!JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN) && ! non_jumps_p)) 1914 continue; 1915 1916 /* It may have been that this insn used to need delay slots, but 1917 now doesn't; ignore in that case. This can happen, for example, 1918 on the HP PA RISC, where the number of delay slots depends on 1919 what insns are nearby. */ 1920 slots_to_fill = num_delay_slots (insn); 1921 1922 /* Some machine description have defined instructions to have 1923 delay slots only in certain circumstances which may depend on 1924 nearby insns (which change due to reorg's actions). 1925 1926 For example, the PA port normally has delay slots for unconditional 1927 jumps. 1928 1929 However, the PA port claims such jumps do not have a delay slot 1930 if they are immediate successors of certain CALL_INSNs. This 1931 allows the port to favor filling the delay slot of the call with 1932 the unconditional jump. */ 1933 if (slots_to_fill == 0) 1934 continue; 1935 1936 /* This insn needs, or can use, some delay slots. SLOTS_TO_FILL 1937 says how many. After initialization, first try optimizing 1938 1939 call _foo call _foo 1940 nop add %o7,.-L1,%o7 1941 b,a L1 1942 nop 1943 1944 If this case applies, the delay slot of the call is filled with 1945 the unconditional jump. This is done first to avoid having the 1946 delay slot of the call filled in the backward scan. Also, since 1947 the unconditional jump is likely to also have a delay slot, that 1948 insn must exist when it is subsequently scanned. 1949 1950 This is tried on each insn with delay slots as some machines 1951 have insns which perform calls, but are not represented as 1952 CALL_INSNs. */ 1953 1954 slots_filled = 0; 1955 delay_list.truncate (0); 1956 1957 if (JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN)) 1958 flags = get_jump_flags (insn, JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx)); 1959 else 1960 flags = get_jump_flags (insn, NULL_RTX(rtx) 0); 1961 1962 if ((trial = next_active_insn (insn)) 1963 && JUMP_P (trial)(((enum rtx_code) (trial)->code) == JUMP_INSN) 1964 && simplejump_p (trial) 1965 && eligible_for_delay (insn, slots_filled, trial, flags) 1966 && no_labels_between_p (insn, trial) 1967 && ! can_throw_internal (trial)) 1968 { 1969 rtx_insn **tmp; 1970 slots_filled++; 1971 add_to_delay_list (trial, &delay_list); 1972 1973 /* TRIAL may have had its delay slot filled, then unfilled. When 1974 the delay slot is unfilled, TRIAL is placed back on the unfilled 1975 slots obstack. Unfortunately, it is placed on the end of the 1976 obstack, not in its original location. Therefore, we must search 1977 from entry i + 1 to the end of the unfilled slots obstack to 1978 try and find TRIAL. */ 1979 tmp = &unfilled_slots_base((rtx_insn **) ((void *) (&unfilled_slots_obstack)->object_base
))
[i + 1]; 1980 while (*tmp != trial && tmp != unfilled_slots_next((rtx_insn **) ((void *) (&unfilled_slots_obstack)->next_free
))
) 1981 tmp++; 1982 1983 /* Remove the unconditional jump from consideration for delay slot 1984 filling and unthread it. */ 1985 if (*tmp == trial) 1986 *tmp = 0; 1987 { 1988 rtx_insn *next = NEXT_INSN (trial); 1989 rtx_insn *prev = PREV_INSN (trial); 1990 if (prev) 1991 SET_NEXT_INSN (prev) = next; 1992 if (next) 1993 SET_PREV_INSN (next) = prev; 1994 } 1995 } 1996 1997 /* Now, scan backwards from the insn to search for a potential 1998 delay-slot candidate. Stop searching when a label or jump is hit. 1999 2000 For each candidate, if it is to go into the delay slot (moved 2001 forward in execution sequence), it must not need or set any resources 2002 that were set by later insns and must not set any resources that 2003 are needed for those insns. 2004 2005 The delay slot insn itself sets resources unless it is a call 2006 (in which case the called routine, not the insn itself, is doing 2007 the setting). */ 2008 2009 if (slots_filled < slots_to_fill) 2010 { 2011 /* If the flags register is dead after the insn, then we want to be 2012 able to accept a candidate that clobbers it. For this purpose, 2013 we need to filter the flags register during life analysis, so 2014 that it doesn't create RAW and WAW dependencies, while still 2015 creating the necessary WAR dependencies. */ 2016 bool filter_flags 2017 = (slots_to_fill == 1 2018 && targetm.flags_regnum != INVALID_REGNUM(~(unsigned int) 0) 2019 && find_regno_note (insn, REG_DEAD, targetm.flags_regnum)); 2020 struct resources fset; 2021 CLEAR_RESOURCE (&needed)do { (&needed)->memory = (&needed)->volatil = (
&needed)->cc = 0; CLEAR_HARD_REG_SET ((&needed)->
regs); } while (0)
; 2022 CLEAR_RESOURCE (&set)do { (&set)->memory = (&set)->volatil = (&set
)->cc = 0; CLEAR_HARD_REG_SET ((&set)->regs); } while
(0)
; 2023 mark_set_resources (insn, &set, 0, MARK_SRC_DEST); 2024 if (filter_flags) 2025 { 2026 CLEAR_RESOURCE (&fset)do { (&fset)->memory = (&fset)->volatil = (&
fset)->cc = 0; CLEAR_HARD_REG_SET ((&fset)->regs); }
while (0)
; 2027 mark_set_resources (insn, &fset, 0, MARK_SRC_DEST); 2028 } 2029 mark_referenced_resources (insn, &needed, false); 2030 2031 for (trial = prev_nonnote_insn (insn); ! stop_search_p (trial, 1); 2032 trial = next_trial) 2033 { 2034 next_trial = prev_nonnote_insn (trial); 2035 2036 /* This must be an INSN or CALL_INSN. */ 2037 pat = PATTERN (trial); 2038 2039 /* Stand-alone USE and CLOBBER are just for flow. */ 2040 if (GET_CODE (pat)((enum rtx_code) (pat)->code) == USE || GET_CODE (pat)((enum rtx_code) (pat)->code) == CLOBBER) 2041 continue; 2042 2043 /* And DEBUG_INSNs never go into delay slots. */ 2044 if (GET_CODE (trial)((enum rtx_code) (trial)->code) == DEBUG_INSN) 2045 continue; 2046 2047 /* Check for resource conflict first, to avoid unnecessary 2048 splitting. */ 2049 if (! insn_references_resource_p (trial, &set, true) 2050 && ! insn_sets_resource_p (trial, 2051 filter_flags ? &fset : &set, 2052 true) 2053 && ! insn_sets_resource_p (trial, &needed, true) 2054 /* Can't separate set of cc0 from its use. */ 2055 && (!HAVE_cc00 || ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))) 2056 && ! can_throw_internal (trial)) 2057 { 2058 trial = try_split (pat, trial, 1); 2059 next_trial = prev_nonnote_insn (trial); 2060 if (eligible_for_delay (insn, slots_filled, trial, flags)) 2061 { 2062 /* In this case, we are searching backward, so if we 2063 find insns to put on the delay list, we want 2064 to put them at the head, rather than the 2065 tail, of the list. */ 2066 2067 update_reg_dead_notes (trial, insn); 2068 delay_list.safe_insert (0, trial); 2069 update_block (trial, trial); 2070 delete_related_insns (trial); 2071 if (slots_to_fill == ++slots_filled) 2072 break; 2073 continue; 2074 } 2075 } 2076 2077 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL); 2078 if (filter_flags) 2079 { 2080 mark_set_resources (trial, &fset, 0, MARK_SRC_DEST_CALL); 2081 /* If the flags register is set, then it doesn't create RAW 2082 dependencies any longer and it also doesn't create WAW 2083 dependencies since it's dead after the original insn. */ 2084 if (TEST_HARD_REG_BIT (fset.regs, targetm.flags_regnum)) 2085 { 2086 CLEAR_HARD_REG_BIT (needed.regs, targetm.flags_regnum); 2087 CLEAR_HARD_REG_BIT (fset.regs, targetm.flags_regnum); 2088 } 2089 } 2090 mark_referenced_resources (trial, &needed, true); 2091 } 2092 } 2093 2094 /* If all needed slots haven't been filled, we come here. */ 2095 2096 /* Try to optimize case of jumping around a single insn. */ 2097 if ((ANNUL_IFTRUE_SLOTS0 || ANNUL_IFFALSE_SLOTS0) 2098 && slots_filled != slots_to_fill 2099 && delay_list.is_empty () 2100 && JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN) 2101 && (condjump_p (insn) || condjump_in_parallel_p (insn)) 2102 && !ANY_RETURN_P (JUMP_LABEL (insn))(((enum rtx_code) ((((insn)->u.fld[7]).rt_rtx))->code) ==
RETURN || ((enum rtx_code) ((((insn)->u.fld[7]).rt_rtx))->
code) == SIMPLE_RETURN)
) 2103 { 2104 optimize_skip (as_a <rtx_jump_insn *> (insn), &delay_list); 2105 if (!delay_list.is_empty ()) 2106 slots_filled += 1; 2107 } 2108 2109 /* Try to get insns from beyond the insn needing the delay slot. 2110 These insns can neither set or reference resources set in insns being 2111 skipped, cannot set resources in the insn being skipped, and, if this 2112 is a CALL_INSN (or a CALL_INSN is passed), cannot trap (because the 2113 call might not return). 2114 2115 There used to be code which continued past the target label if 2116 we saw all uses of the target label. This code did not work, 2117 because it failed to account for some instructions which were 2118 both annulled and marked as from the target. This can happen as a 2119 result of optimize_skip. Since this code was redundant with 2120 fill_eager_delay_slots anyways, it was just deleted. */ 2121 2122 if (slots_filled != slots_to_fill 2123 /* If this instruction could throw an exception which is 2124 caught in the same function, then it's not safe to fill 2125 the delay slot with an instruction from beyond this 2126 point. For example, consider: 2127 2128 int i = 2; 2129 2130 try { 2131 f(); 2132 i = 3; 2133 } catch (...) {} 2134 2135 return i; 2136 2137 Even though `i' is a local variable, we must be sure not 2138 to put `i = 3' in the delay slot if `f' might throw an 2139 exception. 2140 2141 Presumably, we should also check to see if we could get 2142 back to this function via `setjmp'. */ 2143 && ! can_throw_internal (insn) 2144 && !JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN)) 2145 { 2146 int maybe_never = 0; 2147 rtx pat, trial_delay; 2148 2149 CLEAR_RESOURCE (&needed)do { (&needed)->memory = (&needed)->volatil = (
&needed)->cc = 0; CLEAR_HARD_REG_SET ((&needed)->
regs); } while (0)
; 2150 CLEAR_RESOURCE (&set)do { (&set)->memory = (&set)->volatil = (&set
)->cc = 0; CLEAR_HARD_REG_SET ((&set)->regs); } while
(0)
; 2151 mark_set_resources (insn, &set, 0, MARK_SRC_DEST_CALL); 2152 mark_referenced_resources (insn, &needed, true); 2153 2154 if (CALL_P (insn)(((enum rtx_code) (insn)->code) == CALL_INSN)) 2155 maybe_never = 1; 2156 2157 for (trial = next_nonnote_insn (insn); !stop_search_p (trial, 1); 2158 trial = next_trial) 2159 { 2160 next_trial = next_nonnote_insn (trial); 2161 2162 /* This must be an INSN or CALL_INSN. */ 2163 pat = PATTERN (trial); 2164 2165 /* Stand-alone USE and CLOBBER are just for flow. */ 2166 if (GET_CODE (pat)((enum rtx_code) (pat)->code) == USE || GET_CODE (pat)((enum rtx_code) (pat)->code) == CLOBBER) 2167 continue; 2168 2169 /* And DEBUG_INSNs do not go in delay slots. */ 2170 if (GET_CODE (trial)((enum rtx_code) (trial)->code) == DEBUG_INSN) 2171 continue; 2172 2173 /* If this already has filled delay slots, get the insn needing 2174 the delay slots. */ 2175 if (GET_CODE (pat)((enum rtx_code) (pat)->code) == SEQUENCE) 2176 trial_delay = XVECEXP (pat, 0, 0)(((((pat)->u.fld[0]).rt_rtvec))->elem[0]); 2177 else 2178 trial_delay = trial; 2179 2180 /* Stop our search when seeing a jump. */ 2181 if (JUMP_P (trial_delay)(((enum rtx_code) (trial_delay)->code) == JUMP_INSN)) 2182 break; 2183 2184 /* See if we have a resource problem before we try to split. */ 2185 if (GET_CODE (pat)((enum rtx_code) (pat)->code) != SEQUENCE 2186 && ! insn_references_resource_p (trial, &set, true) 2187 && ! insn_sets_resource_p (trial, &set, true) 2188 && ! insn_sets_resource_p (trial, &needed, true) 2189 && (!HAVE_cc00 && ! (reg_mentioned_p (cc0_rtx, pat) && ! sets_cc0_p (pat))) 2190 && ! (maybe_never && may_trap_or_fault_p (pat)) 2191 && (trial = try_split (pat, trial, 0)) 2192 && eligible_for_delay (insn, slots_filled, trial, flags) 2193 && ! can_throw_internal (trial)) 2194 { 2195 next_trial = next_nonnote_insn (trial); 2196 add_to_delay_list (trial, &delay_list); 2197 if (HAVE_cc00 && reg_mentioned_p (cc0_rtx, pat)) 2198 link_cc0_insns (trial); 2199 2200 delete_related_insns (trial); 2201 if (slots_to_fill == ++slots_filled) 2202 break; 2203 continue; 2204 } 2205 2206 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL); 2207 mark_referenced_resources (trial, &needed, true); 2208 2209 /* Ensure we don't put insns between the setting of cc and the 2210 comparison by moving a setting of cc into an earlier delay 2211 slot since these insns could clobber the condition code. */ 2212 set.cc = 1; 2213 2214 /* If this is a call, we might not get here. */ 2215 if (CALL_P (trial_delay)(((enum rtx_code) (trial_delay)->code) == CALL_INSN)) 2216 maybe_never = 1; 2217 } 2218 2219 /* If there are slots left to fill and our search was stopped by an 2220 unconditional branch, try the insn at the branch target. We can 2221 redirect the branch if it works. 2222 2223 Don't do this if the insn at the branch target is a branch. */ 2224 if (slots_to_fill != slots_filled 2225 && trial 2226 && jump_to_label_p (trial) 2227 && simplejump_p (trial) 2228 && (next_trial = next_active_insn (JUMP_LABEL_AS_INSN (trial))) != 0 2229 && ! (NONJUMP_INSN_P (next_trial)(((enum rtx_code) (next_trial)->code) == INSN) 2230 && GET_CODE (PATTERN (next_trial))((enum rtx_code) (PATTERN (next_trial))->code) == SEQUENCE) 2231 && !JUMP_P (next_trial)(((enum rtx_code) (next_trial)->code) == JUMP_INSN) 2232 && ! insn_references_resource_p (next_trial, &set, true) 2233 && ! insn_sets_resource_p (next_trial, &set, true) 2234 && ! insn_sets_resource_p (next_trial, &needed, true) 2235 && (!HAVE_cc00 || ! 
reg_mentioned_p (cc0_rtx, PATTERN (next_trial))) 2236 && ! (maybe_never && may_trap_or_fault_p (PATTERN (next_trial))) 2237 && (next_trial = try_split (PATTERN (next_trial), next_trial, 0)) 2238 && eligible_for_delay (insn, slots_filled, next_trial, flags) 2239 && ! can_throw_internal (trial)) 2240 { 2241 /* See comment in relax_delay_slots about necessity of using 2242 next_real_nondebug_insn here. */ 2243 rtx_insn *new_label = next_real_nondebug_insn (next_trial); 2244 2245 if (new_label != 0) 2246 new_label = get_label_before (new_label, JUMP_LABEL (trial)(((trial)->u.fld[7]).rt_rtx)); 2247 else 2248 new_label = find_end_label (simple_return_rtx); 2249 2250 if (new_label) 2251 { 2252 add_to_delay_list (copy_delay_slot_insn (next_trial), 2253 &delay_list); 2254 slots_filled++; 2255 reorg_redirect_jump (as_a <rtx_jump_insn *> (trial), 2256 new_label); 2257 } 2258 } 2259 } 2260 2261 /* If this is an unconditional jump, then try to get insns from the 2262 target of the jump. */ 2263 rtx_jump_insn *jump_insn; 2264 if ((jump_insn = dyn_cast <rtx_jump_insn *> (insn)) 2265 && simplejump_p (jump_insn) 2266 && slots_filled != slots_to_fill) 2267 fill_slots_from_thread (jump_insn, const_true_rtx, 2268 next_active_insn (JUMP_LABEL_AS_INSN (insn)), 2269 NULLnullptr, 1, 1, own_thread_p (JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx), 2270 JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx), 0), 2271 slots_to_fill, &slots_filled, &delay_list); 2272 2273 if (!delay_list.is_empty ()) 2274 unfilled_slots_base((rtx_insn **) ((void *) (&unfilled_slots_obstack)->object_base
))
[i] 2275 = emit_delay_sequence (insn, delay_list, slots_filled); 2276 2277 if (slots_to_fill == slots_filled) 2278 unfilled_slots_base((rtx_insn **) ((void *) (&unfilled_slots_obstack)->object_base
))
[i] = 0; 2279 2280 note_delay_statistics (slots_filled, 0); 2281 } 2282} 2283
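
The backward scan in fill_simple_delay_slots boils down to: walk earlier insns, keep a running picture of what the skipped insns set and reference, and only steal an insn that touches none of it. A toy version with bitmask "resources" (purely illustrative; the real struct resources also tracks memory, volatility and cc):

    #include <stdio.h>

    struct toy_insn { unsigned uses, sets; };

    int
    main (void)
    {
      /* Three insns preceding the slot owner, most recent last.  */
      struct toy_insn prev[] = { {0x1, 0x2}, {0x4, 0x8}, {0x2, 0x10} };
      unsigned set = 0x10;    /* what the slot owner sets (mark_set_resources) */
      unsigned needed = 0x2;  /* what it references (mark_referenced_resources) */
      int chosen = -1;

      for (int i = 2; i >= 0; i--)
        {
          struct toy_insn *t = &prev[i];
          if (!(t->uses & set) && !(t->sets & set) && !(t->sets & needed))
            {
              chosen = i;     /* this insn can move into the delay slot */
              break;
            }
          /* Otherwise account for the insn we are now skipping over.  */
          set |= t->sets;
          needed |= t->uses;
        }

      printf ("delay-slot candidate: %d\n", chosen);   /* prints 1 */
      return 0;
    }
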
2284/* Follow any unconditional jump at LABEL, for the purpose of redirecting JUMP; 2285 return the ultimate label reached by any such chain of jumps. 2286 Return a suitable return rtx if the chain ultimately leads to a 2287 return instruction. 2288 If LABEL is not followed by a jump, return LABEL. 2289 If the chain loops or we can't find end, return LABEL, 2290 since that tells caller to avoid changing the insn. 2291 If the returned label is obtained by following a crossing jump, 2292 set *CROSSING to true, otherwise set it to false. */ 2293 2294static rtx 2295follow_jumps (rtx label, rtx_insn *jump, bool *crossing) 2296{ 2297 rtx_insn *insn; 2298 rtx_insn *next; 2299 int depth; 2300 2301 *crossing = false; 2302 if (ANY_RETURN_P (label)(((enum rtx_code) (label)->code) == RETURN || ((enum rtx_code
) (label)->code) == SIMPLE_RETURN)
) 2303 return label; 2304 2305 rtx_insn *value = as_a <rtx_insn *> (label); 2306 2307 for (depth = 0; 2308 (depth < 10 2309 && (insn = next_active_insn (value)) != 0 2310 && JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN) 2311 && JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx) != NULL_RTX(rtx) 0 2312 && ((any_uncondjump_p (insn) && onlyjump_p (insn)) 2313 || ANY_RETURN_P (PATTERN (insn))(((enum rtx_code) (PATTERN (insn))->code) == RETURN || ((enum
rtx_code) (PATTERN (insn))->code) == SIMPLE_RETURN)
) 2314 && (next = NEXT_INSN (insn)) 2315 && BARRIER_P (next)(((enum rtx_code) (next)->code) == BARRIER)); 2316 depth++) 2317 { 2318 rtx this_label_or_return = JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx); 2319 2320 /* If we have found a cycle, make the insn jump to itself. */ 2321 if (this_label_or_return == label) 2322 return label; 2323 2324 /* Cannot follow returns and cannot look through tablejumps. */ 2325 if (ANY_RETURN_P (this_label_or_return)(((enum rtx_code) (this_label_or_return)->code) == RETURN ||
((enum rtx_code) (this_label_or_return)->code) == SIMPLE_RETURN
)
) 2326 return this_label_or_return; 2327 2328 rtx_insn *this_label = as_a <rtx_insn *> (this_label_or_return); 2329 if (NEXT_INSN (this_label) 2330 && JUMP_TABLE_DATA_P (NEXT_INSN (this_label))(((enum rtx_code) (NEXT_INSN (this_label))->code) == JUMP_TABLE_DATA
)
) 2331 break; 2332 2333 if (!targetm.can_follow_jump (jump, insn)) 2334 break; 2335 if (!*crossing) 2336 *crossing = CROSSING_JUMP_P (jump)(__extension__ ({ __typeof ((jump)) const _rtx = ((jump)); if
(((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("CROSSING_JUMP_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2336, __FUNCTION__); _rtx; })->jump)
; 2337 value = this_label; 2338 } 2339 if (depth == 10) 2340 return label; 2341 return value; 2342} 2343 2344/* Try to find insns to place in delay slots. 2345 2346 INSN is the jump needing SLOTS_TO_FILL delay slots. It tests CONDITION 2347 or is an unconditional branch if CONDITION is const_true_rtx. 2348 *PSLOTS_FILLED is updated with the number of slots that we have filled. 2349 2350 THREAD is a flow-of-control, either the insns to be executed if the 2351 branch is true or if the branch is false, THREAD_IF_TRUE says which. 2352 2353 OPPOSITE_THREAD is the thread in the opposite direction. It is used 2354 to see if any potential delay slot insns set things needed there. 2355 2356 LIKELY is nonzero if it is extremely likely that the branch will be 2357 taken and THREAD_IF_TRUE is set. This is used for the branch at the 2358 end of a loop back up to the top. 2359 2360 OWN_THREAD is true if we are the only user of the thread, i.e. it is 2361 the target of the jump when we are the only jump going there. 2362 2363 If OWN_THREAD is false, it must be the "true" thread of a jump. In that 2364 case, we can only take insns from the head of the thread for our delay 2365 slot. We then adjust the jump to point after the insns we have taken. */ 2366 2367static void 2368fill_slots_from_thread (rtx_jump_insn *insn, rtx condition, 2369 rtx thread_or_return, rtx opposite_thread, int likely, 2370 int thread_if_true, int own_thread, int slots_to_fill, 2371 int *pslots_filled, vec<rtx_insn *> *delay_list) 2372{ 2373 rtx new_thread; 2374 struct resources opposite_needed, set, needed; 2375 rtx_insn *trial; 2376 int lose = 0; 2377 int must_annul = 0; 2378 int flags; 2379 2380 /* Validate our arguments. */ 2381 gcc_assert (condition != const_true_rtx || thread_if_true)((void)(!(condition != const_true_rtx || thread_if_true) ? fancy_abort
("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2381, __FUNCTION__), 0 : 0))
;
1. Assuming 'condition' is equal to 'const_true_rtx'
2. Assuming the condition is false
3. '?' condition is false
2382   gcc_assert (own_thread || thread_if_true);
4. Assuming 'own_thread' is 0
5. '?' condition is false
2383 2384 flags = get_jump_flags (insn, JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx)); 2385 2386 /* If our thread is the end of subroutine, we can't get any delay 2387 insns from that. */ 2388 if (thread_or_return == NULL_RTX(rtx) 0 || ANY_RETURN_P (thread_or_return)(((enum rtx_code) (thread_or_return)->code) == RETURN || (
(enum rtx_code) (thread_or_return)->code) == SIMPLE_RETURN
)
)
6. Assuming 'thread_or_return' is not equal to NULL_RTX
7. Assuming field 'code' is not equal to RETURN
8. Assuming field 'code' is not equal to SIMPLE_RETURN
9. Taking false branch
2389 return; 2390 2391 rtx_insn *thread = as_a <rtx_insn *> (thread_or_return); 2392 2393 /* If this is an unconditional branch, nothing is needed at the 2394 opposite thread. Otherwise, compute what is needed there. */ 2395 if (condition == const_true_rtx)
10. Assuming 'condition' is not equal to 'const_true_rtx'
11. Taking false branch
2396 CLEAR_RESOURCE (&opposite_needed)do { (&opposite_needed)->memory = (&opposite_needed
)->volatil = (&opposite_needed)->cc = 0; CLEAR_HARD_REG_SET
((&opposite_needed)->regs); } while (0)
; 2397 else 2398 mark_target_live_regs (get_insns (), opposite_thread, &opposite_needed); 2399 2400 /* If the insn at THREAD can be split, do it here to avoid having to 2401 update THREAD and NEW_THREAD if it is done in the loop below. Also 2402 initialize NEW_THREAD. */ 2403 2404 new_thread = thread = try_split (PATTERN (thread), thread, 0); 2405 2406 /* Scan insns at THREAD. We are looking for an insn that can be removed 2407 from THREAD (it neither sets nor references resources that were set 2408 ahead of it and it doesn't set anything needs by the insns ahead of 2409 it) and that either can be placed in an annulling insn or aren't 2410 needed at OPPOSITE_THREAD. */ 2411 2412 CLEAR_RESOURCE (&needed)do { (&needed)->memory = (&needed)->volatil = (
&needed)->cc = 0; CLEAR_HARD_REG_SET ((&needed)->
regs); } while (0)
;
12. Loop condition is false. Exiting loop
2413 CLEAR_RESOURCE (&set)do { (&set)->memory = (&set)->volatil = (&set
)->cc = 0; CLEAR_HARD_REG_SET ((&set)->regs); } while
(0)
;
13. Loop condition is false. Exiting loop
2414 2415 /* Handle the flags register specially, to be able to accept a 2416 candidate that clobbers it. See also fill_simple_delay_slots. */ 2417 bool filter_flags 2418 = (slots_to_fill == 1
14. Assuming 'slots_to_fill' is not equal to 1
2419 && targetm.flags_regnum != INVALID_REGNUM(~(unsigned int) 0) 2420 && find_regno_note (insn, REG_DEAD, targetm.flags_regnum)); 2421 struct resources fset; 2422 struct resources flags_res; 2423 if (filter_flags
14.1. 'filter_flags' is false
)
15. Taking false branch
2424 { 2425 CLEAR_RESOURCE (&fset)do { (&fset)->memory = (&fset)->volatil = (&
fset)->cc = 0; CLEAR_HARD_REG_SET ((&fset)->regs); }
while (0)
; 2426 CLEAR_RESOURCE (&flags_res)do { (&flags_res)->memory = (&flags_res)->volatil
= (&flags_res)->cc = 0; CLEAR_HARD_REG_SET ((&flags_res
)->regs); } while (0)
; 2427 SET_HARD_REG_BIT (flags_res.regs, targetm.flags_regnum); 2428 } 2429 2430 /* If we do not own this thread, we must stop as soon as we find 2431 something that we can't put in a delay slot, since all we can do 2432 is branch into THREAD at a later point. Therefore, labels stop 2433 the search if this is not the `true' thread. */ 2434 2435 for (trial = thread; 2436 ! stop_search_p (trial, ! thread_if_true
15.1. 'thread_if_true' is not equal to 0
)
           && (! lose || own_thread);
16. Assuming the condition is false
2437 trial = next_nonnote_insn (trial)) 2438 { 2439 rtx pat, old_trial; 2440 2441 /* If we have passed a label, we no longer own this thread. */ 2442 if (LABEL_P (trial)(((enum rtx_code) (trial)->code) == CODE_LABEL)) 2443 { 2444 own_thread = 0; 2445 continue; 2446 } 2447 2448 pat = PATTERN (trial); 2449 if (GET_CODE (pat)((enum rtx_code) (pat)->code) == USE || GET_CODE (pat)((enum rtx_code) (pat)->code) == CLOBBER) 2450 continue; 2451 2452 if (GET_CODE (trial)((enum rtx_code) (trial)->code) == DEBUG_INSN) 2453 continue; 2454 2455 /* If TRIAL conflicts with the insns ahead of it, we lose. Also, 2456 don't separate or copy insns that set and use CC0. */ 2457 if (! insn_references_resource_p (trial, &set, true) 2458 && ! insn_sets_resource_p (trial, filter_flags ? &fset : &set, true) 2459 && ! insn_sets_resource_p (trial, &needed, true) 2460 /* If we're handling sets to the flags register specially, we 2461 only allow an insn into a delay-slot, if it either: 2462 - doesn't set the flags register, 2463 - the "set" of the flags register isn't used (clobbered), 2464 - insns between the delay-slot insn and the trial-insn 2465 as accounted in "set", have not affected the flags register. */ 2466 && (! filter_flags 2467 || ! insn_sets_resource_p (trial, &flags_res, true) 2468 || find_regno_note (trial, REG_UNUSED, targetm.flags_regnum) 2469 || ! TEST_HARD_REG_BIT (set.regs, targetm.flags_regnum)) 2470 && (!HAVE_cc00 || (! (reg_mentioned_p (cc0_rtx, pat) 2471 && (! own_thread || ! sets_cc0_p (pat))))) 2472 && ! can_throw_internal (trial)) 2473 { 2474 rtx_insn *prior_insn; 2475 2476 /* If TRIAL is redundant with some insn before INSN, we don't 2477 actually need to add it to the delay list; we can merely pretend 2478 we did. */ 2479 if ((prior_insn = redundant_insn (trial, insn, *delay_list))) 2480 { 2481 fix_reg_dead_note (prior_insn, insn); 2482 if (own_thread) 2483 { 2484 update_block (trial, thread); 2485 if (trial == thread) 2486 { 2487 thread = next_active_insn (thread); 2488 if (new_thread == trial) 2489 new_thread = thread; 2490 } 2491 2492 delete_related_insns (trial); 2493 } 2494 else 2495 { 2496 update_reg_unused_notes (prior_insn, trial); 2497 new_thread = next_active_insn (trial); 2498 } 2499 2500 continue; 2501 } 2502 2503 /* There are two ways we can win: If TRIAL doesn't set anything 2504 needed at the opposite thread and can't trap, or if it can 2505 go into an annulled delay slot. But we want neither to copy 2506 nor to speculate frame-related insns. */ 2507 if (!must_annul 2508 && ((condition == const_true_rtx 2509 && (own_thread || !RTX_FRAME_RELATED_P (trial)(__extension__ ({ __typeof ((trial)) const _rtx = ((trial)); if
(((enum rtx_code) (_rtx)->code) != DEBUG_INSN && (
(enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN && ((enum rtx_code) (
_rtx)->code) != JUMP_INSN && ((enum rtx_code) (_rtx
)->code) != BARRIER && ((enum rtx_code) (_rtx)->
code) != SET) rtl_check_failed_flag ("RTX_FRAME_RELATED_P",_rtx
, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2509, __FUNCTION__); _rtx; })->frame_related)
)) 2510 || (! insn_sets_resource_p (trial, &opposite_needed, true) 2511 && ! may_trap_or_fault_p (pat) 2512 && ! RTX_FRAME_RELATED_P (trial)(__extension__ ({ __typeof ((trial)) const _rtx = ((trial)); if
(((enum rtx_code) (_rtx)->code) != DEBUG_INSN && (
(enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN && ((enum rtx_code) (
_rtx)->code) != JUMP_INSN && ((enum rtx_code) (_rtx
)->code) != BARRIER && ((enum rtx_code) (_rtx)->
code) != SET) rtl_check_failed_flag ("RTX_FRAME_RELATED_P",_rtx
, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2512, __FUNCTION__); _rtx; })->frame_related)
))) 2513 { 2514 old_trial = trial; 2515 trial = try_split (pat, trial, 0); 2516 if (new_thread == old_trial) 2517 new_thread = trial; 2518 if (thread == old_trial) 2519 thread = trial; 2520 pat = PATTERN (trial); 2521 if (eligible_for_delay (insn, *pslots_filled, trial, flags)) 2522 goto winner; 2523 } 2524 else if (!RTX_FRAME_RELATED_P (trial)(__extension__ ({ __typeof ((trial)) const _rtx = ((trial)); if
(((enum rtx_code) (_rtx)->code) != DEBUG_INSN && (
(enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN && ((enum rtx_code) (
_rtx)->code) != JUMP_INSN && ((enum rtx_code) (_rtx
)->code) != BARRIER && ((enum rtx_code) (_rtx)->
code) != SET) rtl_check_failed_flag ("RTX_FRAME_RELATED_P",_rtx
, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2524, __FUNCTION__); _rtx; })->frame_related)
2525 && ((ANNUL_IFTRUE_SLOTS0 && ! thread_if_true) 2526 || (ANNUL_IFFALSE_SLOTS0 && thread_if_true))) 2527 { 2528 old_trial = trial; 2529 trial = try_split (pat, trial, 0); 2530 if (new_thread == old_trial) 2531 new_thread = trial; 2532 if (thread == old_trial) 2533 thread = trial; 2534 pat = PATTERN (trial); 2535 if ((must_annul || delay_list->is_empty ()) && (thread_if_true 2536 ? check_annul_list_true_false (0, *delay_list) 2537 && eligible_for_annul_false (insn, *pslots_filled, trial, flags) 2538 : check_annul_list_true_false (1, *delay_list) 2539 && eligible_for_annul_true (insn, *pslots_filled, trial, flags))) 2540 { 2541 rtx_insn *temp; 2542 2543 must_annul = 1; 2544 winner: 2545 2546 if (HAVE_cc00 && reg_mentioned_p (cc0_rtx, pat)) 2547 link_cc0_insns (trial); 2548 2549 /* If we own this thread, delete the insn. If this is the 2550 destination of a branch, show that a basic block status 2551 may have been updated. In any case, mark the new 2552 starting point of this thread. */ 2553 if (own_thread) 2554 { 2555 rtx note; 2556 2557 update_block (trial, thread); 2558 if (trial == thread) 2559 { 2560 thread = next_active_insn (thread); 2561 if (new_thread == trial) 2562 new_thread = thread; 2563 } 2564 2565 /* We are moving this insn, not deleting it. We must 2566 temporarily increment the use count on any referenced 2567 label lest it be deleted by delete_related_insns. */ 2568 for (note = REG_NOTES (trial)(((trial)->u.fld[6]).rt_rtx); 2569 note != NULL_RTX(rtx) 0; 2570 note = XEXP (note, 1)(((note)->u.fld[1]).rt_rtx)) 2571 if (REG_NOTE_KIND (note)((enum reg_note) ((machine_mode) (note)->mode)) == REG_LABEL_OPERAND 2572 || REG_NOTE_KIND (note)((enum reg_note) ((machine_mode) (note)->mode)) == REG_LABEL_TARGET) 2573 { 2574 /* REG_LABEL_OPERAND could be 2575 NOTE_INSN_DELETED_LABEL too. */ 2576 if (LABEL_P (XEXP (note, 0))(((enum rtx_code) ((((note)->u.fld[0]).rt_rtx))->code) ==
CODE_LABEL)
) 2577 LABEL_NUSES (XEXP (note, 0))((((((note)->u.fld[0]).rt_rtx))->u.fld[4]).rt_int)++; 2578 else 2579 gcc_assert (REG_NOTE_KIND (note)((void)(!(((enum reg_note) ((machine_mode) (note)->mode)) ==
REG_LABEL_OPERAND) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2580, __FUNCTION__), 0 : 0))
2580 == REG_LABEL_OPERAND)((void)(!(((enum reg_note) ((machine_mode) (note)->mode)) ==
REG_LABEL_OPERAND) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2580, __FUNCTION__), 0 : 0))
; 2581 } 2582 if (jump_to_label_p (trial)) 2583 LABEL_NUSES (JUMP_LABEL (trial))((((((trial)->u.fld[7]).rt_rtx))->u.fld[4]).rt_int)++; 2584 2585 delete_related_insns (trial); 2586 2587 for (note = REG_NOTES (trial)(((trial)->u.fld[6]).rt_rtx); 2588 note != NULL_RTX(rtx) 0; 2589 note = XEXP (note, 1)(((note)->u.fld[1]).rt_rtx)) 2590 if (REG_NOTE_KIND (note)((enum reg_note) ((machine_mode) (note)->mode)) == REG_LABEL_OPERAND 2591 || REG_NOTE_KIND (note)((enum reg_note) ((machine_mode) (note)->mode)) == REG_LABEL_TARGET) 2592 { 2593 /* REG_LABEL_OPERAND could be 2594 NOTE_INSN_DELETED_LABEL too. */ 2595 if (LABEL_P (XEXP (note, 0))(((enum rtx_code) ((((note)->u.fld[0]).rt_rtx))->code) ==
CODE_LABEL)
) 2596 LABEL_NUSES (XEXP (note, 0))((((((note)->u.fld[0]).rt_rtx))->u.fld[4]).rt_int)--; 2597 else 2598 gcc_assert (REG_NOTE_KIND (note)((void)(!(((enum reg_note) ((machine_mode) (note)->mode)) ==
REG_LABEL_OPERAND) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2599, __FUNCTION__), 0 : 0))
2599 == REG_LABEL_OPERAND)((void)(!(((enum reg_note) ((machine_mode) (note)->mode)) ==
REG_LABEL_OPERAND) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2599, __FUNCTION__), 0 : 0))
; 2600 } 2601 if (jump_to_label_p (trial)) 2602 LABEL_NUSES (JUMP_LABEL (trial))((((((trial)->u.fld[7]).rt_rtx))->u.fld[4]).rt_int)--; 2603 } 2604 else 2605 new_thread = next_active_insn (trial); 2606 2607 temp = own_thread ? trial : copy_delay_slot_insn (trial); 2608 if (thread_if_true) 2609 INSN_FROM_TARGET_P (temp)(__extension__ ({ __typeof ((temp)) const _rtx = ((temp)); if
(((enum rtx_code) (_rtx)->code) != INSN && ((enum
rtx_code) (_rtx)->code) != JUMP_INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2609, __FUNCTION__); _rtx; })->in_struct)
= 1; 2610 2611 add_to_delay_list (temp, delay_list); 2612 2613 if (slots_to_fill == ++(*pslots_filled)) 2614 { 2615 /* Even though we have filled all the slots, we 2616 may be branching to a location that has a 2617 redundant insn. Skip any if so. */ 2618 while (new_thread && ! own_thread 2619 && ! insn_sets_resource_p (new_thread, &set, true) 2620 && ! insn_sets_resource_p (new_thread, &needed, 2621 true) 2622 && ! insn_references_resource_p (new_thread, 2623 &set, true) 2624 && (prior_insn 2625 = redundant_insn (new_thread, insn, 2626 *delay_list))) 2627 { 2628 /* We know we do not own the thread, so no need 2629 to call update_block and delete_insn. */ 2630 fix_reg_dead_note (prior_insn, insn); 2631 update_reg_unused_notes (prior_insn, new_thread); 2632 new_thread 2633 = next_active_insn (as_a<rtx_insn *> (new_thread)); 2634 } 2635 break; 2636 } 2637 2638 continue; 2639 } 2640 } 2641 } 2642 2643 /* This insn can't go into a delay slot. */ 2644 lose = 1; 2645 mark_set_resources (trial, &set, 0, MARK_SRC_DEST_CALL); 2646 mark_referenced_resources (trial, &needed, true); 2647 if (filter_flags) 2648 { 2649 mark_set_resources (trial, &fset, 0, MARK_SRC_DEST_CALL); 2650 2651 /* Groups of flags-register setters with users should not 2652 affect opportunities to move flags-register-setting insns 2653 (clobbers) into the delay-slot. */ 2654 CLEAR_HARD_REG_BIT (needed.regs, targetm.flags_regnum); 2655 CLEAR_HARD_REG_BIT (fset.regs, targetm.flags_regnum); 2656 } 2657 2658 /* Ensure we don't put insns between the setting of cc and the comparison 2659 by moving a setting of cc into an earlier delay slot since these insns 2660 could clobber the condition code. */ 2661 set.cc = 1; 2662 2663 /* If this insn is a register-register copy and the next insn has 2664 a use of our destination, change it to use our source. That way, 2665 it will become a candidate for our delay slot the next time 2666 through this loop. This case occurs commonly in loops that 2667 scan a list. 2668 2669 We could check for more complex cases than those tested below, 2670 but it doesn't seem worth it. It might also be a good idea to try 2671 to swap the two insns. That might do better. 2672 2673 We can't do this if the next insn modifies our destination, because 2674 that would make the replacement into the insn invalid. We also can't 2675 do this if it modifies our source, because it might be an earlyclobber 2676 operand. This latter test also prevents updating the contents of 2677 a PRE_INC. We also can't do this if there's overlap of source and 2678 destination. Overlap may happen for larger-than-register-size modes. */ 2679 2680 if (NONJUMP_INSN_P (trial)(((enum rtx_code) (trial)->code) == INSN) && GET_CODE (pat)((enum rtx_code) (pat)->code) == SET 2681 && REG_P (SET_SRC (pat))(((enum rtx_code) ((((pat)->u.fld[1]).rt_rtx))->code) ==
REG)
2682 && REG_P (SET_DEST (pat))(((enum rtx_code) ((((pat)->u.fld[0]).rt_rtx))->code) ==
REG)
2683 && !reg_overlap_mentioned_p (SET_DEST (pat)(((pat)->u.fld[0]).rt_rtx), SET_SRC (pat)(((pat)->u.fld[1]).rt_rtx))) 2684 { 2685 rtx_insn *next = next_nonnote_insn (trial); 2686 2687 if (next && NONJUMP_INSN_P (next)(((enum rtx_code) (next)->code) == INSN) 2688 && GET_CODE (PATTERN (next))((enum rtx_code) (PATTERN (next))->code) != USE 2689 && ! reg_set_p (SET_DEST (pat)(((pat)->u.fld[0]).rt_rtx), next) 2690 && ! reg_set_p (SET_SRC (pat)(((pat)->u.fld[1]).rt_rtx), next) 2691 && reg_referenced_p (SET_DEST (pat)(((pat)->u.fld[0]).rt_rtx), PATTERN (next)) 2692 && ! modified_in_p (SET_DEST (pat)(((pat)->u.fld[0]).rt_rtx), next)) 2693 validate_replace_rtx (SET_DEST (pat)(((pat)->u.fld[0]).rt_rtx), SET_SRC (pat)(((pat)->u.fld[1]).rt_rtx), next); 2694 } 2695 } 2696 2697 /* If we stopped on a branch insn that has delay slots, see if we can 2698 steal some of the insns in those slots. */ 2699 if (trial && NONJUMP_INSN_P (trial)(((enum rtx_code) (trial)->code) == INSN)
17. Assuming 'trial' is non-null
18. Assuming field 'code' is equal to INSN
21. Taking true branch
2700       && GET_CODE (PATTERN (trial)) == SEQUENCE
19. Assuming field 'code' is equal to SEQUENCE
2701       && JUMP_P (XVECEXP (PATTERN (trial), 0, 0)))
20. Assuming field 'code' is equal to JUMP_INSN
2702 { 2703 rtx_sequence *sequence = as_a <rtx_sequence *> (PATTERN (trial)); 2704 /* If this is the `true' thread, we will want to follow the jump, 2705 so we can only do this if we have taken everything up to here. */ 2706 if (thread_if_true
21.1. 'thread_if_true' is not equal to 0
 && trial
21.2. 'trial' is equal to 'new_thread'
 == new_thread)
22. Taking true branch
2707    {
2708      steal_delay_list_from_target (insn, condition, sequence,
23. Calling 'steal_delay_list_from_target'
2709 delay_list, &set, &needed, 2710 &opposite_needed, slots_to_fill, 2711 pslots_filled, &must_annul, 2712 &new_thread); 2713 /* If we owned the thread and are told that it branched 2714 elsewhere, make sure we own the thread at the new location. */ 2715 if (own_thread && trial != new_thread) 2716 own_thread = own_thread_p (new_thread, new_thread, 0); 2717 } 2718 else if (! thread_if_true) 2719 steal_delay_list_from_fallthrough (insn, condition, sequence, 2720 delay_list, &set, &needed, 2721 &opposite_needed, slots_to_fill, 2722 pslots_filled, &must_annul); 2723 } 2724 2725 /* If we haven't found anything for this delay slot and it is very 2726 likely that the branch will be taken, see if the insn at our target 2727 increments or decrements a register with an increment that does not 2728 depend on the destination register. If so, try to place the opposite 2729 arithmetic insn after the jump insn and put the arithmetic insn in the 2730 delay slot. If we can't do this, return. */ 2731 if (delay_list->is_empty () && likely 2732 && new_thread && !ANY_RETURN_P (new_thread)(((enum rtx_code) (new_thread)->code) == RETURN || ((enum rtx_code
) (new_thread)->code) == SIMPLE_RETURN)
2733 && NONJUMP_INSN_P (new_thread)(((enum rtx_code) (new_thread)->code) == INSN) 2734 && !RTX_FRAME_RELATED_P (new_thread)(__extension__ ({ __typeof ((new_thread)) const _rtx = ((new_thread
)); if (((enum rtx_code) (_rtx)->code) != DEBUG_INSN &&
((enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN && ((enum rtx_code) (
_rtx)->code) != JUMP_INSN && ((enum rtx_code) (_rtx
)->code) != BARRIER && ((enum rtx_code) (_rtx)->
code) != SET) rtl_check_failed_flag ("RTX_FRAME_RELATED_P",_rtx
, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2734, __FUNCTION__); _rtx; })->frame_related)
2735 && GET_CODE (PATTERN (new_thread))((enum rtx_code) (PATTERN (new_thread))->code) != ASM_INPUT 2736 && asm_noperands (PATTERN (new_thread)) < 0) 2737 { 2738 rtx dest; 2739 rtx src; 2740 2741 /* We know "new_thread" is an insn due to NONJUMP_INSN_P (new_thread) 2742 above. */ 2743 trial = as_a <rtx_insn *> (new_thread); 2744 rtx pat = PATTERN (trial); 2745 2746 if (!NONJUMP_INSN_P (trial)(((enum rtx_code) (trial)->code) == INSN) 2747 || GET_CODE (pat)((enum rtx_code) (pat)->code) != SET 2748 || ! eligible_for_delay (insn, 0, trial, flags) 2749 || can_throw_internal (trial)) 2750 return; 2751 2752 dest = SET_DEST (pat)(((pat)->u.fld[0]).rt_rtx), src = SET_SRC (pat)(((pat)->u.fld[1]).rt_rtx); 2753 if ((GET_CODE (src)((enum rtx_code) (src)->code) == PLUS || GET_CODE (src)((enum rtx_code) (src)->code) == MINUS) 2754 && rtx_equal_p (XEXP (src, 0)(((src)->u.fld[0]).rt_rtx), dest) 2755 && (!FLOAT_MODE_P (GET_MODE (src))(((enum mode_class) mode_class[((machine_mode) (src)->mode
)]) == MODE_FLOAT || ((enum mode_class) mode_class[((machine_mode
) (src)->mode)]) == MODE_DECIMAL_FLOAT || ((enum mode_class
) mode_class[((machine_mode) (src)->mode)]) == MODE_COMPLEX_FLOAT
|| ((enum mode_class) mode_class[((machine_mode) (src)->mode
)]) == MODE_VECTOR_FLOAT)
2756 || flag_unsafe_math_optimizationsglobal_options.x_flag_unsafe_math_optimizations) 2757 && ! reg_overlap_mentioned_p (dest, XEXP (src, 1)(((src)->u.fld[1]).rt_rtx)) 2758 && ! side_effects_p (pat)) 2759 { 2760 rtx other = XEXP (src, 1)(((src)->u.fld[1]).rt_rtx); 2761 rtx new_arith; 2762 rtx_insn *ninsn; 2763 2764 /* If this is a constant adjustment, use the same code with 2765 the negated constant. Otherwise, reverse the sense of the 2766 arithmetic. */ 2767 if (CONST_INT_P (other)(((enum rtx_code) (other)->code) == CONST_INT)) 2768 new_arith = gen_rtx_fmt_ee (GET_CODE (src), GET_MODE (src), dest,gen_rtx_fmt_ee_stat ((((enum rtx_code) (src)->code)), (((machine_mode
) (src)->mode)), (dest), (negate_rtx (((machine_mode) (src
)->mode), other)) )
2769 negate_rtx (GET_MODE (src), other))gen_rtx_fmt_ee_stat ((((enum rtx_code) (src)->code)), (((machine_mode
) (src)->mode)), (dest), (negate_rtx (((machine_mode) (src
)->mode), other)) )
; 2770 else 2771 new_arith = gen_rtx_fmt_ee (GET_CODE (src) == PLUS ? MINUS : PLUS,gen_rtx_fmt_ee_stat ((((enum rtx_code) (src)->code) == PLUS
? MINUS : PLUS), (((machine_mode) (src)->mode)), (dest), (
other) )
2772 GET_MODE (src), dest, other)gen_rtx_fmt_ee_stat ((((enum rtx_code) (src)->code) == PLUS
? MINUS : PLUS), (((machine_mode) (src)->mode)), (dest), (
other) )
; 2773 2774 ninsn = emit_insn_after (gen_rtx_SET (dest, new_arith)gen_rtx_fmt_ee_stat ((SET), (((void) 0, E_VOIDmode)), ((dest)
), ((new_arith)) )
, insn); 2775 2776 if (recog_memoized (ninsn) < 0 2777 || (extract_insn (ninsn), 2778 !constrain_operands (1, get_preferred_alternatives (ninsn)))) 2779 { 2780 delete_related_insns (ninsn); 2781 return; 2782 } 2783 2784 if (own_thread) 2785 { 2786 update_block (trial, thread); 2787 if (trial == thread) 2788 { 2789 thread = next_active_insn (thread); 2790 if (new_thread == trial) 2791 new_thread = thread; 2792 } 2793 delete_related_insns (trial); 2794 } 2795 else 2796 new_thread = next_active_insn (trial); 2797 2798 ninsn = own_thread ? trial : copy_delay_slot_insn (trial); 2799 if (thread_if_true) 2800 INSN_FROM_TARGET_P (ninsn)(__extension__ ({ __typeof ((ninsn)) const _rtx = ((ninsn)); if
(((enum rtx_code) (_rtx)->code) != INSN && ((enum
rtx_code) (_rtx)->code) != JUMP_INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2800, __FUNCTION__); _rtx; })->in_struct)
= 1; 2801 2802 add_to_delay_list (ninsn, delay_list); 2803 (*pslots_filled)++; 2804 } 2805 } 2806 2807 if (!delay_list->is_empty () && must_annul) 2808 INSN_ANNULLED_BRANCH_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if
(((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2808, __FUNCTION__); _rtx; })->unchanging)
= 1; 2809 2810 /* If we are to branch into the middle of this thread, find an appropriate 2811 label or make a new one if none, and redirect INSN to it. If we hit the 2812 end of the function, use the end-of-function label. */ 2813 if (new_thread != thread) 2814 { 2815 rtx label; 2816 bool crossing = false; 2817 2818 gcc_assert (thread_if_true)((void)(!(thread_if_true) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2818, __FUNCTION__), 0 : 0))
; 2819 2820 if (new_thread && simplejump_or_return_p (new_thread) 2821 && redirect_with_delay_list_safe_p (insn, 2822 JUMP_LABEL (new_thread)(((new_thread)->u.fld[7]).rt_rtx), 2823 *delay_list)) 2824 new_thread = follow_jumps (JUMP_LABEL (new_thread)(((new_thread)->u.fld[7]).rt_rtx), insn, 2825 &crossing); 2826 2827 if (ANY_RETURN_P (new_thread)(((enum rtx_code) (new_thread)->code) == RETURN || ((enum rtx_code
) (new_thread)->code) == SIMPLE_RETURN)
) 2828 label = find_end_label (new_thread); 2829 else if (LABEL_P (new_thread)(((enum rtx_code) (new_thread)->code) == CODE_LABEL)) 2830 label = new_thread; 2831 else 2832 label = get_label_before (as_a <rtx_insn *> (new_thread), 2833 JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx)); 2834 2835 if (label) 2836 { 2837 reorg_redirect_jump (insn, label); 2838 if (crossing) 2839 CROSSING_JUMP_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if
(((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("CROSSING_JUMP_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2839, __FUNCTION__); _rtx; })->jump)
= 1; 2840 } 2841 } 2842} 2843
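fill_slots_from_thread, which ends above, may re-target INSN through follow_jumps, the helper defined earlier in this section: it walks a chain of unconditional jumps, gives up after ten hops or when the chain loops back to the starting label, and otherwise returns the last label reached. Below is a minimal standalone model of that bounded walk; the node type is invented for illustration, it is not GCC's rtx, and the crossing-jump and tablejump checks are omitted.

#include <cstdio>

/* Invented node type: each label either falls into real code
   (uncond_target == nullptr) or is immediately followed by an
   unconditional jump to another label.  */
struct node { node *uncond_target; };

/* Follow at most 10 unconditional jumps starting at LABEL.  Return the
   final label reached, or LABEL itself if the chain loops or the depth
   limit is hit, which tells the caller not to redirect anything.  */
static node *
follow_chain (node *label)
{
  node *value = label;
  int depth;
  for (depth = 0; depth < 10 && value->uncond_target != nullptr; depth++)
    {
      if (value->uncond_target == label)  /* chain loops back to the start  */
        return label;
      value = value->uncond_target;
    }
  return depth == 10 ? label : value;
}

int main ()
{
  node a = { nullptr }, b = { &a }, c = { &b };   /* c -> b -> a  */
  std::printf ("%s\n", follow_chain (&c) == &a ? "reached a" : "gave up");
  return 0;
}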
2844/* Make another attempt to find insns to place in delay slots. 2845 2846 We previously looked for insns located in front of the delay insn 2847 and, for non-jump delay insns, located behind the delay insn. 2848 2849 Here only try to schedule jump insns and try to move insns from either 2850 the target or the following insns into the delay slot. If annulling is 2851 supported, we will be likely to do this. Otherwise, we can do this only 2852 if safe. */ 2853 2854static void 2855fill_eager_delay_slots (void) 2856{ 2857 rtx_insn *insn; 2858 int i; 2859 int num_unfilled_slots = unfilled_slots_next((rtx_insn **) ((void *) (&unfilled_slots_obstack)->next_free
))
- unfilled_slots_base((rtx_insn **) ((void *) (&unfilled_slots_obstack)->object_base
))
; 2860 2861 for (i = 0; i < num_unfilled_slots; i++) 2862 { 2863 rtx condition; 2864 rtx target_label, insn_at_target; 2865 rtx_insn *fallthrough_insn; 2866 auto_vec<rtx_insn *, 5> delay_list; 2867 rtx_jump_insn *jump_insn; 2868 int own_target; 2869 int own_fallthrough; 2870 int prediction, slots_to_fill, slots_filled; 2871 2872 insn = unfilled_slots_base((rtx_insn **) ((void *) (&unfilled_slots_obstack)->object_base
))
[i]; 2873 if (insn == 0 2874 || insn->deleted () 2875 || ! (jump_insn = dyn_cast <rtx_jump_insn *> (insn)) 2876 || ! (condjump_p (jump_insn) || condjump_in_parallel_p (jump_insn))) 2877 continue; 2878 2879 slots_to_fill = num_delay_slots (jump_insn); 2880 /* Some machine description have defined instructions to have 2881 delay slots only in certain circumstances which may depend on 2882 nearby insns (which change due to reorg's actions). 2883 2884 For example, the PA port normally has delay slots for unconditional 2885 jumps. 2886 2887 However, the PA port claims such jumps do not have a delay slot 2888 if they are immediate successors of certain CALL_INSNs. This 2889 allows the port to favor filling the delay slot of the call with 2890 the unconditional jump. */ 2891 if (slots_to_fill == 0) 2892 continue; 2893 2894 slots_filled = 0; 2895 target_label = JUMP_LABEL (jump_insn)(((jump_insn)->u.fld[7]).rt_rtx); 2896 condition = get_branch_condition (jump_insn, target_label); 2897 2898 if (condition == 0) 2899 continue; 2900 2901 /* Get the next active fallthrough and target insns and see if we own 2902 them. Then see whether the branch is likely true. We don't need 2903 to do a lot of this for unconditional branches. */ 2904 2905 insn_at_target = first_active_target_insn (target_label); 2906 own_target = own_thread_p (target_label, target_label, 0); 2907 2908 if (condition == const_true_rtx) 2909 { 2910 own_fallthrough = 0; 2911 fallthrough_insn = 0; 2912 prediction = 2; 2913 } 2914 else 2915 { 2916 fallthrough_insn = next_active_insn (jump_insn); 2917 own_fallthrough = own_thread_p (NEXT_INSN (jump_insn), NULL_RTX(rtx) 0, 1); 2918 prediction = mostly_true_jump (jump_insn); 2919 } 2920 2921 /* If this insn is expected to branch, first try to get insns from our 2922 target, then our fallthrough insns. If it is not expected to branch, 2923 try the other order. */ 2924 2925 if (prediction > 0) 2926 { 2927 fill_slots_from_thread (jump_insn, condition, insn_at_target, 2928 fallthrough_insn, prediction == 2, 1, 2929 own_target, 2930 slots_to_fill, &slots_filled, &delay_list); 2931 2932 if (delay_list.is_empty () && own_fallthrough) 2933 { 2934 /* Even though we didn't find anything for delay slots, 2935 we might have found a redundant insn which we deleted 2936 from the thread that was filled. So we have to recompute 2937 the next insn at the target. */ 2938 target_label = JUMP_LABEL (jump_insn)(((jump_insn)->u.fld[7]).rt_rtx); 2939 insn_at_target = first_active_target_insn (target_label); 2940 2941 fill_slots_from_thread (jump_insn, condition, fallthrough_insn, 2942 insn_at_target, 0, 0, own_fallthrough, 2943 slots_to_fill, &slots_filled, 2944 &delay_list); 2945 } 2946 } 2947 else 2948 { 2949 if (own_fallthrough) 2950 fill_slots_from_thread (jump_insn, condition, fallthrough_insn, 2951 insn_at_target, 0, 0, own_fallthrough, 2952 slots_to_fill, &slots_filled, &delay_list); 2953 2954 if (delay_list.is_empty ()) 2955 fill_slots_from_thread (jump_insn, condition, insn_at_target, 2956 next_active_insn (insn), 0, 1, own_target, 2957 slots_to_fill, &slots_filled, &delay_list); 2958 } 2959 2960 if (!delay_list.is_empty ()) 2961 unfilled_slots_base((rtx_insn **) ((void *) (&unfilled_slots_obstack)->object_base
))
[i] 2962 = emit_delay_sequence (jump_insn, delay_list, slots_filled); 2963 2964 if (slots_to_fill == slots_filled) 2965 unfilled_slots_base((rtx_insn **) ((void *) (&unfilled_slots_obstack)->object_base
))
[i] = 0; 2966 2967 note_delay_statistics (slots_filled, 1); 2968 } 2969} 2970
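fill_eager_delay_slots above orders the two threads by the branch prediction from mostly_true_jump: a branch that is expected to be taken has its target thread tried first with the fallthrough thread as the fallback, and an unlikely branch is handled the other way around. Here is a compact, hypothetical model of just that ordering decision; the real pass additionally checks thread ownership and recomputes the target after redundant insns are removed.

#include <cstdio>

enum thread_kind { TARGET_THREAD, FALLTHROUGH_THREAD };

/* Toy placeholder: pretend to look for delay-slot candidates in WHICH
   and report whether anything was placed.  */
static bool
try_fill_from (thread_kind which)
{
  std::printf ("trying the %s thread\n",
               which == TARGET_THREAD ? "target" : "fallthrough");
  return false;   /* nothing fit, so the caller tries the other thread  */
}

/* PREDICTION > 0 means the branch is expected to be taken.  */
static void
fill_eager (int prediction)
{
  if (prediction > 0)
    {
      if (!try_fill_from (TARGET_THREAD))
        try_fill_from (FALLTHROUGH_THREAD);
    }
  else
    {
      if (!try_fill_from (FALLTHROUGH_THREAD))
        try_fill_from (TARGET_THREAD);
    }
}

int main ()
{
  fill_eager (1);    /* likely taken: target first  */
  fill_eager (-1);   /* likely not taken: fallthrough first  */
  return 0;
}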
2971static void delete_computation (rtx_insn *insn); 2972 2973/* Recursively delete prior insns that compute the value (used only by INSN 2974 which the caller is deleting) stored in the register mentioned by NOTE 2975 which is a REG_DEAD note associated with INSN. */ 2976 2977static void 2978delete_prior_computation (rtx note, rtx_insn *insn) 2979{ 2980 rtx_insn *our_prev; 2981 rtx reg = XEXP (note, 0)(((note)->u.fld[0]).rt_rtx); 2982 2983 for (our_prev = prev_nonnote_insn (insn); 2984 our_prev && (NONJUMP_INSN_P (our_prev)(((enum rtx_code) (our_prev)->code) == INSN) 2985 || CALL_P (our_prev)(((enum rtx_code) (our_prev)->code) == CALL_INSN)); 2986 our_prev = prev_nonnote_insn (our_prev)) 2987 { 2988 rtx pat = PATTERN (our_prev); 2989 2990 /* If we reach a CALL which is not calling a const function 2991 or the callee pops the arguments, then give up. */ 2992 if (CALL_P (our_prev)(((enum rtx_code) (our_prev)->code) == CALL_INSN) 2993 && (! RTL_CONST_CALL_P (our_prev)(__extension__ ({ __typeof ((our_prev)) const _rtx = ((our_prev
)); if (((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("RTL_CONST_CALL_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 2993, __FUNCTION__); _rtx; })->unchanging)
2994 || GET_CODE (pat)((enum rtx_code) (pat)->code) != SET || GET_CODE (SET_SRC (pat))((enum rtx_code) ((((pat)->u.fld[1]).rt_rtx))->code) != CALL)) 2995 break; 2996 2997 /* If we reach a SEQUENCE, it is too complex to try to 2998 do anything with it, so give up. We can be run during 2999 and after reorg, so SEQUENCE rtl can legitimately show 3000 up here. */ 3001 if (GET_CODE (pat)((enum rtx_code) (pat)->code) == SEQUENCE) 3002 break; 3003 3004 if (GET_CODE (pat)((enum rtx_code) (pat)->code) == USE 3005 && NONJUMP_INSN_P (XEXP (pat, 0))(((enum rtx_code) ((((pat)->u.fld[0]).rt_rtx))->code) ==
INSN)
) 3006 /* reorg creates USEs that look like this. We leave them 3007 alone because reorg needs them for its own purposes. */ 3008 break; 3009 3010 if (reg_set_p (reg, pat)) 3011 { 3012 if (side_effects_p (pat) && !CALL_P (our_prev)(((enum rtx_code) (our_prev)->code) == CALL_INSN)) 3013 break; 3014 3015 if (GET_CODE (pat)((enum rtx_code) (pat)->code) == PARALLEL) 3016 { 3017 /* If we find a SET of something else, we can't 3018 delete the insn. */ 3019 3020 int i; 3021 3022 for (i = 0; i < XVECLEN (pat, 0)(((((pat)->u.fld[0]).rt_rtvec))->num_elem); i++) 3023 { 3024 rtx part = XVECEXP (pat, 0, i)(((((pat)->u.fld[0]).rt_rtvec))->elem[i]); 3025 3026 if (GET_CODE (part)((enum rtx_code) (part)->code) == SET 3027 && SET_DEST (part)(((part)->u.fld[0]).rt_rtx) != reg) 3028 break; 3029 } 3030 3031 if (i == XVECLEN (pat, 0)(((((pat)->u.fld[0]).rt_rtvec))->num_elem)) 3032 delete_computation (our_prev); 3033 } 3034 else if (GET_CODE (pat)((enum rtx_code) (pat)->code) == SET 3035 && REG_P (SET_DEST (pat))(((enum rtx_code) ((((pat)->u.fld[0]).rt_rtx))->code) ==
REG)
) 3036 { 3037 int dest_regno = REGNO (SET_DEST (pat))(rhs_regno((((pat)->u.fld[0]).rt_rtx))); 3038 int dest_endregno = END_REGNO (SET_DEST (pat)(((pat)->u.fld[0]).rt_rtx)); 3039 int regno = REGNO (reg)(rhs_regno(reg)); 3040 int endregno = END_REGNO (reg); 3041 3042 if (dest_regno >= regno 3043 && dest_endregno <= endregno) 3044 delete_computation (our_prev); 3045 3046 /* We may have a multi-word hard register and some, but not 3047 all, of the words of the register are needed in subsequent 3048 insns. Write REG_UNUSED notes for those parts that were not 3049 needed. */ 3050 else if (dest_regno <= regno 3051 && dest_endregno >= endregno) 3052 { 3053 int i; 3054 3055 add_reg_note (our_prev, REG_UNUSED, reg); 3056 3057 for (i = dest_regno; i < dest_endregno; i++) 3058 if (! find_regno_note (our_prev, REG_UNUSED, i)) 3059 break; 3060 3061 if (i == dest_endregno) 3062 delete_computation (our_prev); 3063 } 3064 } 3065 3066 break; 3067 } 3068 3069 /* If PAT references the register that dies here, it is an 3070 additional use. Hence any prior SET isn't dead. However, this 3071 insn becomes the new place for the REG_DEAD note. */ 3072 if (reg_overlap_mentioned_p (reg, pat)) 3073 { 3074 XEXP (note, 1)(((note)->u.fld[1]).rt_rtx) = REG_NOTES (our_prev)(((our_prev)->u.fld[6]).rt_rtx); 3075 REG_NOTES (our_prev)(((our_prev)->u.fld[6]).rt_rtx) = note; 3076 break; 3077 } 3078 } 3079} 3080 3081/* Delete INSN and recursively delete insns that compute values used only 3082 by INSN. This uses the REG_DEAD notes computed during flow analysis. 3083 3084 Look at all our REG_DEAD notes. If a previous insn does nothing other 3085 than set a register that dies in this insn, we can delete that insn 3086 as well. 3087 3088 On machines with CC0, if CC0 is used in this insn, we may be able to 3089 delete the insn that set it. */ 3090 3091static void 3092delete_computation (rtx_insn *insn) 3093{ 3094 rtx note, next; 3095 3096 if (HAVE_cc00 && reg_referenced_p (cc0_rtx, PATTERN (insn))) 3097 { 3098 rtx_insn *prev = prev_nonnote_insn (insn); 3099 /* We assume that at this stage 3100 CC's are always set explicitly 3101 and always immediately before the jump that 3102 will use them. So if the previous insn 3103 exists to set the CC's, delete it 3104 (unless it performs auto-increments, etc.). */ 3105 if (prev && NONJUMP_INSN_P (prev)(((enum rtx_code) (prev)->code) == INSN) 3106 && sets_cc0_p (PATTERN (prev))) 3107 { 3108 if (sets_cc0_p (PATTERN (prev)) > 0 3109 && ! side_effects_p (PATTERN (prev))) 3110 delete_computation (prev); 3111 else 3112 /* Otherwise, show that cc0 won't be used. */ 3113 add_reg_note (prev, REG_UNUSED, cc0_rtx); 3114 } 3115 } 3116 3117 for (note = REG_NOTES (insn)(((insn)->u.fld[6]).rt_rtx); note; note = next) 3118 { 3119 next = XEXP (note, 1)(((note)->u.fld[1]).rt_rtx); 3120 3121 if (REG_NOTE_KIND (note)((enum reg_note) ((machine_mode) (note)->mode)) != REG_DEAD 3122 /* Verify that the REG_NOTE is legitimate. */ 3123 || !REG_P (XEXP (note, 0))(((enum rtx_code) ((((note)->u.fld[0]).rt_rtx))->code) ==
REG)
) 3124 continue; 3125 3126 delete_prior_computation (note, insn); 3127 } 3128 3129 delete_related_insns (insn); 3130} 3131 3132/* If all INSN does is set the pc, delete it, 3133 and delete the insn that set the condition codes for it 3134 if that's what the previous thing was. */ 3135 3136static void 3137delete_jump (rtx_insn *insn) 3138{ 3139 rtx set = single_set (insn); 3140 3141 if (set && GET_CODE (SET_DEST (set))((enum rtx_code) ((((set)->u.fld[0]).rt_rtx))->code) == PC) 3142 delete_computation (insn); 3143} 3144 3145static rtx_insn * 3146label_before_next_insn (rtx_insn *x, rtx scan_limit) 3147{ 3148 rtx_insn *insn = next_active_insn (x); 3149 while (insn) 3150 { 3151 insn = PREV_INSN (insn); 3152 if (insn == scan_limit || insn == NULL_RTX(rtx) 0) 3153 return NULLnullptr; 3154 if (LABEL_P (insn)(((enum rtx_code) (insn)->code) == CODE_LABEL)) 3155 break; 3156 } 3157 return insn; 3158} 3159 3160/* Return TRUE if there is a NOTE_INSN_SWITCH_TEXT_SECTIONS note in between 3161 BEG and END. */ 3162 3163static bool 3164switch_text_sections_between_p (const rtx_insn *beg, const rtx_insn *end) 3165{ 3166 const rtx_insn *p; 3167 for (p = beg; p != end; p = NEXT_INSN (p)) 3168 if (NOTE_P (p)(((enum rtx_code) (p)->code) == NOTE) && NOTE_KIND (p)(((p)->u.fld[4]).rt_int) == NOTE_INSN_SWITCH_TEXT_SECTIONS) 3169 return true; 3170 return false; 3171} 3172 3173
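delete_prior_computation and delete_computation above scan backwards from a deleted instruction and, guided by its REG_DEAD notes, remove an earlier instruction whose only effect was to compute the register value that dies there. The sketch below is a deliberately stripped-down model over a straight-line list of toy records; the real routines also handle PARALLELs, multi-word hard registers, CC0, and recurse into the deleted setter's own inputs.

#include <cstdio>
#include <vector>

/* Invented record: each toy insn defines at most one register and reads
   at most one register (-1 means none).  */
struct toy_insn
{
  int sets;
  int uses;
  bool deleted;
};

/* Delete INSNS[POS], in which register REG dies, then walk backwards and
   delete the nearest earlier insn that sets REG, unless an intervening
   insn also uses REG and therefore keeps that setter alive.  */
static void
delete_computation (std::vector<toy_insn> &insns, size_t pos, int reg)
{
  insns[pos].deleted = true;
  for (size_t i = pos; i-- > 0; )
    {
      if (insns[i].uses == reg)       /* another user: the setter stays  */
        break;
      if (insns[i].sets == reg)
        {
          insns[i].deleted = true;    /* its only purpose was the dead value  */
          break;
        }
    }
}

int main ()
{
  /* insn 0: r1 = ...; insn 1: r2 = ...; insn 2: use r1 (r1 dies here).
     Deleting insn 2 also deletes insn 0 but leaves insn 1 alone.  */
  std::vector<toy_insn> insns = { { 1, -1, false },
                                  { 2, -1, false },
                                  { -1, 1, false } };
  delete_computation (insns, 2, 1);
  for (size_t i = 0; i < insns.size (); i++)
    std::printf ("insn %zu: deleted=%d\n", i, (int) insns[i].deleted);
  return 0;
}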
3174/* Once we have tried two ways to fill a delay slot, make a pass over the 3175 code to try to improve the results and to do such things as more jump 3176 threading. */ 3177 3178static void 3179relax_delay_slots (rtx_insn *first) 3180{ 3181 rtx_insn *insn, *next; 3182 rtx_sequence *pat; 3183 rtx_insn *delay_insn; 3184 rtx target_label; 3185 3186 /* Look at every JUMP_INSN and see if we can improve it. */ 3187 for (insn = first; insn; insn = next) 3188 { 3189 rtx_insn *other, *prior_insn; 3190 bool crossing; 3191 3192 next = next_active_insn (insn); 3193 3194 /* If this is a jump insn, see if it now jumps to a jump, jumps to 3195 the next insn, or jumps to a label that is not the last of a 3196 group of consecutive labels. */ 3197 if (is_a <rtx_jump_insn *> (insn) 3198 && (condjump_p (insn) || condjump_in_parallel_p (insn)) 3199 && !ANY_RETURN_P (target_label = JUMP_LABEL (insn))(((enum rtx_code) (target_label = (((insn)->u.fld[7]).rt_rtx
))->code) == RETURN || ((enum rtx_code) (target_label = ((
(insn)->u.fld[7]).rt_rtx))->code) == SIMPLE_RETURN)
) 3200 { 3201 rtx_jump_insn *jump_insn = as_a <rtx_jump_insn *> (insn); 3202 target_label 3203 = skip_consecutive_labels (follow_jumps (target_label, jump_insn, 3204 &crossing)); 3205 if (ANY_RETURN_P (target_label)(((enum rtx_code) (target_label)->code) == RETURN || ((enum
rtx_code) (target_label)->code) == SIMPLE_RETURN)
) 3206 target_label = find_end_label (target_label); 3207 3208 if (target_label 3209 && next_active_insn (as_a<rtx_insn *> (target_label)) == next 3210 && ! condjump_in_parallel_p (jump_insn) 3211 && ! (next && switch_text_sections_between_p (jump_insn, next))) 3212 { 3213 delete_jump (jump_insn); 3214 continue; 3215 } 3216 3217 if (target_label && target_label != JUMP_LABEL (jump_insn)(((jump_insn)->u.fld[7]).rt_rtx)) 3218 { 3219 reorg_redirect_jump (jump_insn, target_label); 3220 if (crossing) 3221 CROSSING_JUMP_P (jump_insn)(__extension__ ({ __typeof ((jump_insn)) const _rtx = ((jump_insn
)); if (((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("CROSSING_JUMP_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3221, __FUNCTION__); _rtx; })->jump)
= 1; 3222 } 3223 3224 /* See if this jump conditionally branches around an unconditional 3225 jump. If so, invert this jump and point it to the target of the 3226 second jump. Check if it's possible on the target. */ 3227 if (next && simplejump_or_return_p (next) 3228 && any_condjump_p (jump_insn) 3229 && target_label 3230 && (next_active_insn (as_a<rtx_insn *> (target_label)) 3231 == next_active_insn (next)) 3232 && no_labels_between_p (jump_insn, next) 3233 && targetm.can_follow_jump (jump_insn, next)) 3234 { 3235 rtx label = JUMP_LABEL (next)(((next)->u.fld[7]).rt_rtx); 3236 3237 /* Be careful how we do this to avoid deleting code or 3238 labels that are momentarily dead. See similar optimization 3239 in jump.c. 3240 3241 We also need to ensure we properly handle the case when 3242 invert_jump fails. */ 3243 3244 ++LABEL_NUSES (target_label)(((target_label)->u.fld[4]).rt_int); 3245 if (!ANY_RETURN_P (label)(((enum rtx_code) (label)->code) == RETURN || ((enum rtx_code
) (label)->code) == SIMPLE_RETURN)
) 3246 ++LABEL_NUSES (label)(((label)->u.fld[4]).rt_int); 3247 3248 if (invert_jump (jump_insn, label, 1)) 3249 { 3250 rtx_insn *from = delete_related_insns (next); 3251 3252 /* We have just removed a BARRIER, which means that the block 3253 number of the next insns has effectively been changed (see 3254 find_basic_block in resource.c), so clear it. */ 3255 if (from) 3256 clear_hashed_info_until_next_barrier (from); 3257 3258 next = jump_insn; 3259 } 3260 3261 if (!ANY_RETURN_P (label)(((enum rtx_code) (label)->code) == RETURN || ((enum rtx_code
) (label)->code) == SIMPLE_RETURN)
) 3262 --LABEL_NUSES (label)(((label)->u.fld[4]).rt_int); 3263 3264 if (--LABEL_NUSES (target_label)(((target_label)->u.fld[4]).rt_int) == 0) 3265 delete_related_insns (target_label); 3266 3267 continue; 3268 } 3269 } 3270 3271 /* If this is an unconditional jump and the previous insn is a 3272 conditional jump, try reversing the condition of the previous 3273 insn and swapping our targets. The next pass might be able to 3274 fill the slots. 3275 3276 Don't do this if we expect the conditional branch to be true, because 3277 we would then be making the more common case longer. */ 3278 3279 if (simplejump_or_return_p (insn) 3280 && (other = prev_active_insn (insn)) != 0 3281 && any_condjump_p (other) 3282 && no_labels_between_p (other, insn) 3283 && mostly_true_jump (other) < 0) 3284 { 3285 rtx other_target = JUMP_LABEL (other)(((other)->u.fld[7]).rt_rtx); 3286 target_label = JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx); 3287 3288 if (invert_jump (as_a <rtx_jump_insn *> (other), target_label, 0)) 3289 reorg_redirect_jump (as_a <rtx_jump_insn *> (insn), other_target); 3290 } 3291 3292 /* Now look only at cases where we have a filled delay slot. */ 3293 if (!NONJUMP_INSN_P (insn)(((enum rtx_code) (insn)->code) == INSN) || GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) != SEQUENCE) 3294 continue; 3295 3296 pat = as_a <rtx_sequence *> (PATTERN (insn)); 3297 delay_insn = pat->insn (0); 3298 3299 /* See if the first insn in the delay slot is redundant with some 3300 previous insn. Remove it from the delay slot if so; then set up 3301 to reprocess this insn. */ 3302 if ((prior_insn = redundant_insn (pat->insn (1), delay_insn, vNULL))) 3303 { 3304 fix_reg_dead_note (prior_insn, insn); 3305 update_block (pat->insn (1), insn); 3306 delete_from_delay_slot (pat->insn (1)); 3307 next = prev_active_insn (next); 3308 continue; 3309 } 3310 3311 /* See if we have a RETURN insn with a filled delay slot followed 3312 by a RETURN insn with an unfilled a delay slot. If so, we can delete 3313 the first RETURN (but not its delay insn). This gives the same 3314 effect in fewer instructions. 3315 3316 Only do so if optimizing for size since this results in slower, but 3317 smaller code. */ 3318 if (optimize_function_for_size_p (cfun(cfun + 0)) 3319 && ANY_RETURN_P (PATTERN (delay_insn))(((enum rtx_code) (PATTERN (delay_insn))->code) == RETURN ||
((enum rtx_code) (PATTERN (delay_insn))->code) == SIMPLE_RETURN
)
3320 && next 3321 && JUMP_P (next)(((enum rtx_code) (next)->code) == JUMP_INSN) 3322 && PATTERN (next) == PATTERN (delay_insn)) 3323 { 3324 rtx_insn *after; 3325 int i; 3326 3327 /* Delete the RETURN and just execute the delay list insns. 3328 3329 We do this by deleting the INSN containing the SEQUENCE, then 3330 re-emitting the insns separately, and then deleting the RETURN. 3331 This allows the count of the jump target to be properly 3332 decremented. 3333 3334 Note that we need to change the INSN_UID of the re-emitted insns 3335 since it is used to hash the insns for mark_target_live_regs and 3336 the re-emitted insns will no longer be wrapped up in a SEQUENCE. 3337 3338 Clear the from target bit, since these insns are no longer 3339 in delay slots. */ 3340 for (i = 0; i < XVECLEN (pat, 0)(((((pat)->u.fld[0]).rt_rtvec))->num_elem); i++) 3341 INSN_FROM_TARGET_P (XVECEXP (pat, 0, i))(__extension__ ({ __typeof (((((((pat)->u.fld[0]).rt_rtvec
))->elem[i]))) const _rtx = (((((((pat)->u.fld[0]).rt_rtvec
))->elem[i]))); if (((enum rtx_code) (_rtx)->code) != INSN
&& ((enum rtx_code) (_rtx)->code) != JUMP_INSN &&
((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3341, __FUNCTION__); _rtx; })->in_struct)
= 0; 3342 3343 rtx_insn *prev = PREV_INSN (insn); 3344 delete_related_insns (insn); 3345 gcc_assert (GET_CODE (pat) == SEQUENCE)((void)(!(((enum rtx_code) (pat)->code) == SEQUENCE) ? fancy_abort
("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3345, __FUNCTION__), 0 : 0))
; 3346 add_insn_after (delay_insn, prev, NULLnullptr); 3347 after = delay_insn; 3348 for (i = 1; i < pat->len (); i++) 3349 after = emit_copy_of_insn_after (pat->insn (i), after); 3350 delete_scheduled_jump (delay_insn); 3351 continue; 3352 } 3353 3354 /* Now look only at the cases where we have a filled JUMP_INSN. */ 3355 rtx_jump_insn *delay_jump_insn = 3356 dyn_cast <rtx_jump_insn *> (delay_insn); 3357 if (! delay_jump_insn || !(condjump_p (delay_jump_insn) 3358 || condjump_in_parallel_p (delay_jump_insn))) 3359 continue; 3360 3361 target_label = JUMP_LABEL (delay_jump_insn)(((delay_jump_insn)->u.fld[7]).rt_rtx); 3362 if (target_label && ANY_RETURN_P (target_label)(((enum rtx_code) (target_label)->code) == RETURN || ((enum
rtx_code) (target_label)->code) == SIMPLE_RETURN)
) 3363 continue; 3364 3365 /* If this jump goes to another unconditional jump, thread it, but 3366 don't convert a jump into a RETURN here. */ 3367 rtx trial = skip_consecutive_labels (follow_jumps (target_label, 3368 delay_jump_insn, 3369 &crossing)); 3370 if (ANY_RETURN_P (trial)(((enum rtx_code) (trial)->code) == RETURN || ((enum rtx_code
) (trial)->code) == SIMPLE_RETURN)
) 3371 trial = find_end_label (trial); 3372 3373 if (trial && trial != target_label 3374 && redirect_with_delay_slots_safe_p (delay_jump_insn, trial, insn)) 3375 { 3376 reorg_redirect_jump (delay_jump_insn, trial); 3377 target_label = trial; 3378 if (crossing) 3379 CROSSING_JUMP_P (delay_jump_insn)(__extension__ ({ __typeof ((delay_jump_insn)) const _rtx = (
(delay_jump_insn)); if (((enum rtx_code) (_rtx)->code) != JUMP_INSN
) rtl_check_failed_flag ("CROSSING_JUMP_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3379, __FUNCTION__); _rtx; })->jump)
= 1; 3380 } 3381 3382 /* If the first insn at TARGET_LABEL is redundant with a previous 3383 insn, redirect the jump to the following insn and process again. 3384 We use next_real_nondebug_insn instead of next_active_insn so we 3385 don't skip USE-markers, or we'll end up with incorrect 3386 liveness info. */ 3387 trial = next_real_nondebug_insn (target_label); 3388 if (trial && GET_CODE (PATTERN (trial))((enum rtx_code) (PATTERN (trial))->code) != SEQUENCE 3389 && redundant_insn (trial, insn, vNULL) 3390 && ! can_throw_internal (trial)) 3391 { 3392 /* Figure out where to emit the special USE insn so we don't 3393 later incorrectly compute register live/death info. */ 3394 rtx_insn *tmp = next_active_insn (as_a<rtx_insn *> (trial)); 3395 if (tmp == 0) 3396 tmp = find_end_label (simple_return_rtx); 3397 3398 if (tmp) 3399 { 3400 /* Insert the special USE insn and update dataflow info. 3401 We know "trial" is an insn here as it is the output of 3402 next_real_nondebug_insn () above. */ 3403 update_block (as_a <rtx_insn *> (trial), tmp); 3404 3405 /* Now emit a label before the special USE insn, and 3406 redirect our jump to the new label. */ 3407 target_label = get_label_before (PREV_INSN (tmp), target_label); 3408 reorg_redirect_jump (delay_jump_insn, target_label); 3409 next = insn; 3410 continue; 3411 } 3412 } 3413 3414 /* Similarly, if it is an unconditional jump with one insn in its 3415 delay list and that insn is redundant, thread the jump. */ 3416 rtx_sequence *trial_seq = 3417 trial ? dyn_cast <rtx_sequence *> (PATTERN (trial)) : NULLnullptr; 3418 if (trial_seq 3419 && trial_seq->len () == 2 3420 && JUMP_P (trial_seq->insn (0))(((enum rtx_code) (trial_seq->insn (0))->code) == JUMP_INSN
)
3421 && simplejump_or_return_p (trial_seq->insn (0)) 3422 && redundant_insn (trial_seq->insn (1), insn, vNULL)) 3423 { 3424 rtx temp_label = JUMP_LABEL (trial_seq->insn (0))(((trial_seq->insn (0))->u.fld[7]).rt_rtx); 3425 if (ANY_RETURN_P (temp_label)(((enum rtx_code) (temp_label)->code) == RETURN || ((enum rtx_code
) (temp_label)->code) == SIMPLE_RETURN)
) 3426 temp_label = find_end_label (temp_label); 3427 3428 if (temp_label 3429 && redirect_with_delay_slots_safe_p (delay_jump_insn, 3430 temp_label, insn)) 3431 { 3432 update_block (trial_seq->insn (1), insn); 3433 reorg_redirect_jump (delay_jump_insn, temp_label); 3434 next = insn; 3435 continue; 3436 } 3437 } 3438 3439 /* See if we have a simple (conditional) jump that is useless. */ 3440 if (!CROSSING_JUMP_P (delay_jump_insn)(__extension__ ({ __typeof ((delay_jump_insn)) const _rtx = (
(delay_jump_insn)); if (((enum rtx_code) (_rtx)->code) != JUMP_INSN
) rtl_check_failed_flag ("CROSSING_JUMP_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3440, __FUNCTION__); _rtx; })->jump)
3441 && !INSN_ANNULLED_BRANCH_P (delay_jump_insn)(__extension__ ({ __typeof ((delay_jump_insn)) const _rtx = (
(delay_jump_insn)); if (((enum rtx_code) (_rtx)->code) != JUMP_INSN
) rtl_check_failed_flag ("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3441, __FUNCTION__); _rtx; })->unchanging)
3442 && !condjump_in_parallel_p (delay_jump_insn) 3443 && prev_active_insn (as_a<rtx_insn *> (target_label)) == insn 3444 && !BARRIER_P (prev_nonnote_insn (as_a<rtx_insn *> (target_label)))(((enum rtx_code) (prev_nonnote_insn (as_a<rtx_insn *> (
target_label)))->code) == BARRIER)
3445 /* If the last insn in the delay slot sets CC0 for some insn, 3446 various code assumes that it is in a delay slot. We could 3447 put it back where it belonged and delete the register notes, 3448 but it doesn't seem worthwhile in this uncommon case. */ 3449 && (!HAVE_cc00 3450 || ! find_reg_note (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1)(((((pat)->u.fld[0]).rt_rtvec))->elem[(((((pat)->u.fld
[0]).rt_rtvec))->num_elem) - 1])
, 3451 REG_CC_USER, NULL_RTX(rtx) 0))) 3452 { 3453 rtx_insn *after; 3454 int i; 3455 3456 /* All this insn does is execute its delay list and jump to the 3457 following insn. So delete the jump and just execute the delay 3458 list insns. 3459 3460 We do this by deleting the INSN containing the SEQUENCE, then 3461 re-emitting the insns separately, and then deleting the jump. 3462 This allows the count of the jump target to be properly 3463 decremented. 3464 3465 Note that we need to change the INSN_UID of the re-emitted insns 3466 since it is used to hash the insns for mark_target_live_regs and 3467 the re-emitted insns will no longer be wrapped up in a SEQUENCE. 3468 3469 Clear the from target bit, since these insns are no longer 3470 in delay slots. */ 3471 for (i = 0; i < XVECLEN (pat, 0)(((((pat)->u.fld[0]).rt_rtvec))->num_elem); i++) 3472 INSN_FROM_TARGET_P (XVECEXP (pat, 0, i))(__extension__ ({ __typeof (((((((pat)->u.fld[0]).rt_rtvec
))->elem[i]))) const _rtx = (((((((pat)->u.fld[0]).rt_rtvec
))->elem[i]))); if (((enum rtx_code) (_rtx)->code) != INSN
&& ((enum rtx_code) (_rtx)->code) != JUMP_INSN &&
((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3472, __FUNCTION__); _rtx; })->in_struct)
= 0; 3473 3474 rtx_insn *prev = PREV_INSN (insn); 3475 delete_related_insns (insn); 3476 gcc_assert (GET_CODE (pat) == SEQUENCE)((void)(!(((enum rtx_code) (pat)->code) == SEQUENCE) ? fancy_abort
("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3476, __FUNCTION__), 0 : 0))
; 3477 add_insn_after (delay_jump_insn, prev, NULLnullptr); 3478 after = delay_jump_insn; 3479 for (i = 1; i < pat->len (); i++) 3480 after = emit_copy_of_insn_after (pat->insn (i), after); 3481 delete_scheduled_jump (delay_jump_insn); 3482 continue; 3483 } 3484 3485 /* See if this is an unconditional jump around a single insn which is 3486 identical to the one in its delay slot. In this case, we can just 3487 delete the branch and the insn in its delay slot. */ 3488 if (next && NONJUMP_INSN_P (next)(((enum rtx_code) (next)->code) == INSN) 3489 && label_before_next_insn (next, insn) == target_label 3490 && simplejump_p (insn) 3491 && XVECLEN (pat, 0)(((((pat)->u.fld[0]).rt_rtvec))->num_elem) == 2 3492 && rtx_equal_p (PATTERN (next), PATTERN (pat->insn (1)))) 3493 { 3494 delete_related_insns (insn); 3495 continue; 3496 } 3497 3498 /* See if this jump (with its delay slots) conditionally branches 3499 around an unconditional jump (without delay slots). If so, invert 3500 this jump and point it to the target of the second jump. We cannot 3501 do this for annulled jumps, though. Again, don't convert a jump to 3502 a RETURN here. */ 3503 if (! INSN_ANNULLED_BRANCH_P (delay_jump_insn)(__extension__ ({ __typeof ((delay_jump_insn)) const _rtx = (
(delay_jump_insn)); if (((enum rtx_code) (_rtx)->code) != JUMP_INSN
) rtl_check_failed_flag ("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3503, __FUNCTION__); _rtx; })->unchanging)
3504 && any_condjump_p (delay_jump_insn) 3505 && next && simplejump_or_return_p (next) 3506 && (next_active_insn (as_a<rtx_insn *> (target_label)) 3507 == next_active_insn (next)) 3508 && no_labels_between_p (insn, next)) 3509 { 3510 rtx label = JUMP_LABEL (next)(((next)->u.fld[7]).rt_rtx); 3511 rtx old_label = JUMP_LABEL (delay_jump_insn)(((delay_jump_insn)->u.fld[7]).rt_rtx); 3512 3513 if (ANY_RETURN_P (label)(((enum rtx_code) (label)->code) == RETURN || ((enum rtx_code
) (label)->code) == SIMPLE_RETURN)
) 3514 label = find_end_label (label); 3515 3516 /* find_end_label can generate a new label. Check this first. */ 3517 if (label 3518 && no_labels_between_p (insn, next) 3519 && redirect_with_delay_slots_safe_p (delay_jump_insn, 3520 label, insn)) 3521 { 3522 /* Be careful how we do this to avoid deleting code or labels 3523 that are momentarily dead. See similar optimization in 3524 jump.c */ 3525 if (old_label) 3526 ++LABEL_NUSES (old_label)(((old_label)->u.fld[4]).rt_int); 3527 3528 if (invert_jump (delay_jump_insn, label, 1)) 3529 { 3530 /* Must update the INSN_FROM_TARGET_P bits now that 3531 the branch is reversed, so that mark_target_live_regs 3532 will handle the delay slot insn correctly. */ 3533 for (int i = 1; i < XVECLEN (PATTERN (insn), 0)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->num_elem); i++) 3534 { 3535 rtx slot = XVECEXP (PATTERN (insn), 0, i)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->elem[i]); 3536 INSN_FROM_TARGET_P (slot)(__extension__ ({ __typeof ((slot)) const _rtx = ((slot)); if
(((enum rtx_code) (_rtx)->code) != INSN && ((enum
rtx_code) (_rtx)->code) != JUMP_INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3536, __FUNCTION__); _rtx; })->in_struct)
= ! INSN_FROM_TARGET_P (slot)(__extension__ ({ __typeof ((slot)) const _rtx = ((slot)); if
(((enum rtx_code) (_rtx)->code) != INSN && ((enum
rtx_code) (_rtx)->code) != JUMP_INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3536, __FUNCTION__); _rtx; })->in_struct)
; 3537 } 3538 3539 /* We have just removed a BARRIER, which means that the block 3540 number of the next insns has effectively been changed (see 3541 find_basic_block in resource.c), so clear it. */ 3542 rtx_insn *from = delete_related_insns (next); 3543 if (from) 3544 clear_hashed_info_until_next_barrier (from); 3545 3546 next = insn; 3547 } 3548 3549 if (old_label && --LABEL_NUSES (old_label)(((old_label)->u.fld[4]).rt_int) == 0) 3550 delete_related_insns (old_label); 3551 continue; 3552 } 3553 } 3554 3555 /* If we own the thread opposite the way this insn branches, see if we 3556 can merge its delay slots with following insns. */ 3557 if (INSN_FROM_TARGET_P (pat->insn (1))(__extension__ ({ __typeof ((pat->insn (1))) const _rtx = (
(pat->insn (1))); if (((enum rtx_code) (_rtx)->code) !=
INSN && ((enum rtx_code) (_rtx)->code) != JUMP_INSN
&& ((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3557, __FUNCTION__); _rtx; })->in_struct)
3558 && own_thread_p (NEXT_INSN (insn), 0, 1)) 3559 try_merge_delay_insns (insn, next); 3560 else if (! INSN_FROM_TARGET_P (pat->insn (1))(__extension__ ({ __typeof ((pat->insn (1))) const _rtx = (
(pat->insn (1))); if (((enum rtx_code) (_rtx)->code) !=
INSN && ((enum rtx_code) (_rtx)->code) != JUMP_INSN
&& ((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3560, __FUNCTION__); _rtx; })->in_struct)
3561 && own_thread_p (target_label, target_label, 0)) 3562 try_merge_delay_insns (insn, 3563 next_active_insn (as_a<rtx_insn *> (target_label))); 3564 3565 /* If we get here, we haven't deleted INSN. But we may have deleted 3566 NEXT, so recompute it. */ 3567 next = next_active_insn (insn); 3568 } 3569} 3570
3571
3572/* Look for filled jumps to the end of function label. We can try to convert
3573 them into RETURN insns if the insns in the delay slot are valid for the
3574 RETURN as well. */
3575
3576static void
3577make_return_insns (rtx_insn *first)
3578{
3579 rtx_insn *insn;
3580 rtx_jump_insn *jump_insn;
3581 rtx real_return_label = function_return_label;
3582 rtx real_simple_return_label = function_simple_return_label;
3583 int slots, i;
3584
3585 /* See if there is a RETURN insn in the function other than the one we
3586 made for END_OF_FUNCTION_LABEL. If so, set up anything we can't change
3587 into a RETURN to jump to it. */
3588 for (insn = first; insn; insn = NEXT_INSN (insn))
3589 if (JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN) && ANY_RETURN_P (PATTERN (insn))(((enum rtx_code) (PATTERN (insn))->code) == RETURN || ((enum rtx_code) (PATTERN (insn))->code) == SIMPLE_RETURN)
) 3590 { 3591 rtx t = get_label_before (insn, NULL_RTX(rtx) 0); 3592 if (PATTERN (insn) == ret_rtx) 3593 real_return_label = t; 3594 else 3595 real_simple_return_label = t; 3596 break; 3597 } 3598 3599 /* Show an extra usage of REAL_RETURN_LABEL so it won't go away if it 3600 was equal to END_OF_FUNCTION_LABEL. */ 3601 if (real_return_label) 3602 LABEL_NUSES (real_return_label)(((real_return_label)->u.fld[4]).rt_int)++; 3603 if (real_simple_return_label) 3604 LABEL_NUSES (real_simple_return_label)(((real_simple_return_label)->u.fld[4]).rt_int)++; 3605 3606 /* Clear the list of insns to fill so we can use it. */ 3607 obstack_free (&unfilled_slots_obstack, unfilled_firstobj)__extension__ ({ struct obstack *__o = (&unfilled_slots_obstack
); void *__obj = (void *) (unfilled_firstobj); if (__obj >
(void *) __o->chunk && __obj < (void *) __o->
chunk_limit) __o->next_free = __o->object_base = (char *
) __obj; else _obstack_free (__o, __obj); })
; 3608 3609 for (insn = first; insn; insn = NEXT_INSN (insn)) 3610 { 3611 int flags; 3612 rtx kind, real_label; 3613 3614 /* Only look at filled JUMP_INSNs that go to the end of function 3615 label. */ 3616 if (!NONJUMP_INSN_P (insn)(((enum rtx_code) (insn)->code) == INSN)) 3617 continue; 3618 3619 if (GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) != SEQUENCE) 3620 continue; 3621 3622 rtx_sequence *pat = as_a <rtx_sequence *> (PATTERN (insn)); 3623 3624 if (!jump_to_label_p (pat->insn (0))) 3625 continue; 3626 3627 if (JUMP_LABEL (pat->insn (0))(((pat->insn (0))->u.fld[7]).rt_rtx) == function_return_label) 3628 { 3629 kind = ret_rtx; 3630 real_label = real_return_label; 3631 } 3632 else if (JUMP_LABEL (pat->insn (0))(((pat->insn (0))->u.fld[7]).rt_rtx) == function_simple_return_label) 3633 { 3634 kind = simple_return_rtx; 3635 real_label = real_simple_return_label; 3636 } 3637 else 3638 continue; 3639 3640 jump_insn = as_a <rtx_jump_insn *> (pat->insn (0)); 3641 3642 /* If we can't make the jump into a RETURN, try to redirect it to the best 3643 RETURN and go on to the next insn. */ 3644 if (!reorg_redirect_jump (jump_insn, kind)) 3645 { 3646 /* Make sure redirecting the jump will not invalidate the delay 3647 slot insns. */ 3648 if (redirect_with_delay_slots_safe_p (jump_insn, real_label, insn)) 3649 reorg_redirect_jump (jump_insn, real_label); 3650 continue; 3651 } 3652 3653 /* See if this RETURN can accept the insns current in its delay slot. 3654 It can if it has more or an equal number of slots and the contents 3655 of each is valid. */ 3656 3657 flags = get_jump_flags (jump_insn, JUMP_LABEL (jump_insn)(((jump_insn)->u.fld[7]).rt_rtx)); 3658 slots = num_delay_slots (jump_insn); 3659 if (slots >= XVECLEN (pat, 0)(((((pat)->u.fld[0]).rt_rtvec))->num_elem) - 1) 3660 { 3661 for (i = 1; i < XVECLEN (pat, 0)(((((pat)->u.fld[0]).rt_rtvec))->num_elem); i++) 3662 if (! ( 3663#if ANNUL_IFFALSE_SLOTS0 3664 (INSN_ANNULLED_BRANCH_P (jump_insn)(__extension__ ({ __typeof ((jump_insn)) const _rtx = ((jump_insn
)); if (((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3664, __FUNCTION__); _rtx; })->unchanging)
3665 && INSN_FROM_TARGET_P (pat->insn (i))(__extension__ ({ __typeof ((pat->insn (i))) const _rtx = (
(pat->insn (i))); if (((enum rtx_code) (_rtx)->code) !=
INSN && ((enum rtx_code) (_rtx)->code) != JUMP_INSN
&& ((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3665, __FUNCTION__); _rtx; })->in_struct)
) 3666 ? eligible_for_annul_false (jump_insn, i - 1, 3667 pat->insn (i), flags) : 3668#endif 3669#if ANNUL_IFTRUE_SLOTS0 3670 (INSN_ANNULLED_BRANCH_P (jump_insn)(__extension__ ({ __typeof ((jump_insn)) const _rtx = ((jump_insn
)); if (((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3670, __FUNCTION__); _rtx; })->unchanging)
3671 && ! INSN_FROM_TARGET_P (pat->insn (i))(__extension__ ({ __typeof ((pat->insn (i))) const _rtx = (
(pat->insn (i))); if (((enum rtx_code) (_rtx)->code) !=
INSN && ((enum rtx_code) (_rtx)->code) != JUMP_INSN
&& ((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag
("INSN_FROM_TARGET_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3671, __FUNCTION__); _rtx; })->in_struct)
) 3672 ? eligible_for_annul_true (jump_insn, i - 1, 3673 pat->insn (i), flags) : 3674#endif 3675 eligible_for_delay (jump_insn, i - 1, 3676 pat->insn (i), flags))) 3677 break; 3678 } 3679 else 3680 i = 0; 3681 3682 if (i == XVECLEN (pat, 0)(((((pat)->u.fld[0]).rt_rtvec))->num_elem)) 3683 continue; 3684 3685 /* We have to do something with this insn. If it is an unconditional 3686 RETURN, delete the SEQUENCE and output the individual insns, 3687 followed by the RETURN. Then set things up so we try to find 3688 insns for its delay slots, if it needs some. */ 3689 if (ANY_RETURN_P (PATTERN (jump_insn))(((enum rtx_code) (PATTERN (jump_insn))->code) == RETURN ||
((enum rtx_code) (PATTERN (jump_insn))->code) == SIMPLE_RETURN
)
) 3690 { 3691 rtx_insn *after = PREV_INSN (insn); 3692 3693 delete_related_insns (insn); 3694 insn = jump_insn; 3695 for (i = 1; i < pat->len (); i++) 3696 after = emit_copy_of_insn_after (pat->insn (i), after); 3697 add_insn_after (insn, after, NULLnullptr); 3698 emit_barrier_after (insn); 3699 3700 if (slots) 3701 obstack_ptr_grow (&unfilled_slots_obstack, insn)__extension__ ({ struct obstack *__o = (&unfilled_slots_obstack
); if (__extension__ ({ struct obstack const *__o1 = (__o); (
size_t) (__o1->chunk_limit - __o1->next_free); }) < sizeof
(void *)) _obstack_newchunk (__o, sizeof (void *)); __extension__
({ struct obstack *__o1 = (__o); void *__p1 = __o1->next_free
; *(const void **) __p1 = (insn); __o1->next_free += sizeof
(const void *); (void) 0; }); })
; 3702 } 3703 else 3704 /* It is probably more efficient to keep this with its current 3705 delay slot as a branch to a RETURN. */ 3706 reorg_redirect_jump (jump_insn, real_label); 3707 } 3708 3709 /* Now delete REAL_RETURN_LABEL if we never used it. Then try to fill any 3710 new delay slots we have created. */ 3711 if (real_return_label != NULL_RTX(rtx) 0 && --LABEL_NUSES (real_return_label)(((real_return_label)->u.fld[4]).rt_int) == 0) 3712 delete_related_insns (real_return_label); 3713 if (real_simple_return_label != NULL_RTX(rtx) 0 3714 && --LABEL_NUSES (real_simple_return_label)(((real_simple_return_label)->u.fld[4]).rt_int) == 0) 3715 delete_related_insns (real_simple_return_label); 3716 3717 fill_simple_delay_slots (1); 3718 fill_simple_delay_slots (0); 3719} 3720
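
Editorial aside, not part of the reorg.c listing: make_return_insns bumps LABEL_NUSES on the real return labels up front and only deletes a label at the end if its count drops back to zero. A minimal standalone C++ sketch of that protect/release counting idiom, with a hypothetical ToyLabel type rather than a CODE_LABEL rtx:

// Illustrative sketch only: "bump the use count first, delete the label only
// if the count returns to zero", as done around real_return_label above.
#include <iostream>

struct ToyLabel {
  const char *name;
  int nuses;          // stand-in for LABEL_NUSES
  bool deleted;
};

void maybe_delete (ToyLabel &label)
{
  if (--label.nuses == 0)
    {
      label.deleted = true;
      std::cout << label.name << " deleted\n";
    }
}

int main ()
{
  ToyLabel ret = { ".Lreturn", 1, false };   // one real user so far
  ret.nuses++;                               // protect it while jumps are rewritten
  /* ... rewriting jumps could add or remove users here ... */
  maybe_delete (ret);                        // drop the protection; one user remains
  std::cout << ret.name << (ret.deleted ? " gone" : " kept") << "\n";
  return 0;
}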
3721/* Try to find insns to place in delay slots. */
3722
3723static void
3724dbr_schedule (rtx_insn *first)
3725{
3726 rtx_insn *insn, *next, *epilogue_insn = 0;
3727 int i;
3728 bool need_return_insns;
3729
3730 /* If the current function has no insns other than the prologue and
3731 epilogue, then do not try to fill any delay slots. */
3732 if (n_basic_blocks_for_fn (cfun)(((cfun + 0))->cfg->x_n_basic_blocks) == NUM_FIXED_BLOCKS(2))
3733 return;
3734
3735 /* Find the highest INSN_UID and allocate and initialize our map from
3736 INSN_UID's to position in code. */
3737 for (max_uid = 0, insn = first; insn; insn = NEXT_INSN (insn))
3738 {
3739 if (INSN_UID (insn) > max_uid)
3740 max_uid = INSN_UID (insn);
3741 if (NOTE_P (insn)(((enum rtx_code) (insn)->code) == NOTE)
3742 && NOTE_KIND (insn)(((insn)->u.fld[4]).rt_int) == NOTE_INSN_EPILOGUE_BEG)
3743 epilogue_insn = insn;
3744 }
3745
3746 uid_to_ruid = XNEWVEC (int, max_uid + 1)((int *) xmalloc (sizeof (int) * (max_uid + 1)));
3747 for (i = 0, insn = first; insn; i++, insn = NEXT_INSN (insn))
3748 uid_to_ruid[INSN_UID (insn)] = i;
3749
3750 /* Initialize the list of insns that need filling. */
3751 if (unfilled_firstobj == 0)
3752 {
3753 gcc_obstack_init (&unfilled_slots_obstack)_obstack_begin (((&unfilled_slots_obstack)), (memory_block_pool::block_size), (0), (mempool_obstack_chunk_alloc), (mempool_obstack_chunk_free))
; 3754 unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0)((rtx *) __extension__ ({ struct obstack *__h = ((&unfilled_slots_obstack
)); __extension__ ({ struct obstack *__o = (__h); size_t __len
= (((0))); if (__extension__ ({ struct obstack const *__o1 =
(__o); (size_t) (__o1->chunk_limit - __o1->next_free);
}) < __len) _obstack_newchunk (__o, __len); ((void) ((__o
)->next_free += (__len))); }); __extension__ ({ struct obstack
*__o1 = (__h); void *__value = (void *) __o1->object_base
; if (__o1->next_free == __value) __o1->maybe_empty_object
= 1; __o1->next_free = ((sizeof (ptrdiff_t) < sizeof (
void *) ? (__o1->object_base) : (char *) 0) + (((__o1->
next_free) - (sizeof (ptrdiff_t) < sizeof (void *) ? (__o1
->object_base) : (char *) 0) + (__o1->alignment_mask)) &
~(__o1->alignment_mask))); if ((size_t) (__o1->next_free
- (char *) __o1->chunk) > (size_t) (__o1->chunk_limit
- (char *) __o1->chunk)) __o1->next_free = __o1->chunk_limit
; __o1->object_base = __o1->next_free; __value; }); }))
; 3755 } 3756 3757 for (insn = next_active_insn (first); insn; insn = next_active_insn (insn)) 3758 { 3759 rtx target; 3760 3761 /* Skip vector tables. We can't get attributes for them. */ 3762 if (JUMP_TABLE_DATA_P (insn)(((enum rtx_code) (insn)->code) == JUMP_TABLE_DATA)) 3763 continue; 3764 3765 if (JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN)) 3766 INSN_ANNULLED_BRANCH_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if
(((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3766, __FUNCTION__); _rtx; })->unchanging)
= 0; 3767 INSN_FROM_TARGET_P (insn)(__extension__ ({ __typeof ((insn)) const _rtx = ((insn)); if
(((enum rtx_code) (_rtx)->code) != INSN && ((enum
rtx_code) (_rtx)->code) != JUMP_INSN && ((enum rtx_code
) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("INSN_FROM_TARGET_P"
, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3767, __FUNCTION__); _rtx; })->in_struct)
= 0; 3768 3769 if (num_delay_slots (insn) > 0) 3770 obstack_ptr_grow (&unfilled_slots_obstack, insn)__extension__ ({ struct obstack *__o = (&unfilled_slots_obstack
); if (__extension__ ({ struct obstack const *__o1 = (__o); (
size_t) (__o1->chunk_limit - __o1->next_free); }) < sizeof
(void *)) _obstack_newchunk (__o, sizeof (void *)); __extension__
({ struct obstack *__o1 = (__o); void *__p1 = __o1->next_free
; *(const void **) __p1 = (insn); __o1->next_free += sizeof
(const void *); (void) 0; }); })
; 3771 3772 /* Ensure all jumps go to the last of a set of consecutive labels. */ 3773 if (JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN) 3774 && (condjump_p (insn) || condjump_in_parallel_p (insn)) 3775 && !ANY_RETURN_P (JUMP_LABEL (insn))(((enum rtx_code) ((((insn)->u.fld[7]).rt_rtx))->code) ==
RETURN || ((enum rtx_code) ((((insn)->u.fld[7]).rt_rtx))->
code) == SIMPLE_RETURN)
3776 && ((target = skip_consecutive_labels (JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx))) 3777 != JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx))) 3778 redirect_jump (as_a <rtx_jump_insn *> (insn), target, 1); 3779 } 3780 3781 init_resource_info (epilogue_insn); 3782 3783 /* Show we haven't computed an end-of-function label yet. */ 3784 function_return_label = function_simple_return_label = NULLnullptr; 3785 3786 /* Initialize the statistics for this function. */ 3787 memset (num_insns_needing_delays, 0, sizeof num_insns_needing_delays); 3788 memset (num_filled_delays, 0, sizeof num_filled_delays); 3789 3790 /* Now do the delay slot filling. Try everything twice in case earlier 3791 changes make more slots fillable. */ 3792 3793 for (reorg_pass_number = 0; 3794 reorg_pass_number < MAX_REORG_PASSES2; 3795 reorg_pass_number++) 3796 { 3797 fill_simple_delay_slots (1); 3798 fill_simple_delay_slots (0); 3799 if (!targetm.no_speculation_in_delay_slots_p ()) 3800 fill_eager_delay_slots (); 3801 relax_delay_slots (first); 3802 } 3803 3804 /* If we made an end of function label, indicate that it is now 3805 safe to delete it by undoing our prior adjustment to LABEL_NUSES. 3806 If it is now unused, delete it. */ 3807 if (function_return_label && --LABEL_NUSES (function_return_label)(((function_return_label)->u.fld[4]).rt_int) == 0) 3808 delete_related_insns (function_return_label); 3809 if (function_simple_return_label 3810 && --LABEL_NUSES (function_simple_return_label)(((function_simple_return_label)->u.fld[4]).rt_int) == 0) 3811 delete_related_insns (function_simple_return_label); 3812 3813 need_return_insns = false; 3814 need_return_insns |= targetm.have_return () && function_return_label != 0; 3815 need_return_insns |= (targetm.have_simple_return () 3816 && function_simple_return_label != 0); 3817 if (need_return_insns) 3818 make_return_insns (first); 3819 3820 /* Delete any USE insns made by update_block; subsequent passes don't need 3821 them or know how to deal with them. */ 3822 for (insn = first; insn; insn = next) 3823 { 3824 next = NEXT_INSN (insn); 3825 3826 if (NONJUMP_INSN_P (insn)(((enum rtx_code) (insn)->code) == INSN) && GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) == USE 3827 && INSN_P (XEXP (PATTERN (insn), 0))(((((enum rtx_code) ((((PATTERN (insn))->u.fld[0]).rt_rtx)
)->code) == INSN) || (((enum rtx_code) ((((PATTERN (insn))
->u.fld[0]).rt_rtx))->code) == JUMP_INSN) || (((enum rtx_code
) ((((PATTERN (insn))->u.fld[0]).rt_rtx))->code) == CALL_INSN
)) || (((enum rtx_code) ((((PATTERN (insn))->u.fld[0]).rt_rtx
))->code) == DEBUG_INSN))
) 3828 next = delete_related_insns (insn); 3829 } 3830 3831 obstack_free (&unfilled_slots_obstack, unfilled_firstobj)__extension__ ({ struct obstack *__o = (&unfilled_slots_obstack
); void *__obj = (void *) (unfilled_firstobj); if (__obj >
(void *) __o->chunk && __obj < (void *) __o->
chunk_limit) __o->next_free = __o->object_base = (char *
) __obj; else _obstack_free (__o, __obj); })
; 3832 3833 /* It is not clear why the line below is needed, but it does seem to be. */ 3834 unfilled_firstobj = XOBNEWVAR (&unfilled_slots_obstack, rtx, 0)((rtx *) __extension__ ({ struct obstack *__h = ((&unfilled_slots_obstack
)); __extension__ ({ struct obstack *__o = (__h); size_t __len
= (((0))); if (__extension__ ({ struct obstack const *__o1 =
(__o); (size_t) (__o1->chunk_limit - __o1->next_free);
}) < __len) _obstack_newchunk (__o, __len); ((void) ((__o
)->next_free += (__len))); }); __extension__ ({ struct obstack
*__o1 = (__h); void *__value = (void *) __o1->object_base
; if (__o1->next_free == __value) __o1->maybe_empty_object
= 1; __o1->next_free = ((sizeof (ptrdiff_t) < sizeof (
void *) ? (__o1->object_base) : (char *) 0) + (((__o1->
next_free) - (sizeof (ptrdiff_t) < sizeof (void *) ? (__o1
->object_base) : (char *) 0) + (__o1->alignment_mask)) &
~(__o1->alignment_mask))); if ((size_t) (__o1->next_free
- (char *) __o1->chunk) > (size_t) (__o1->chunk_limit
- (char *) __o1->chunk)) __o1->next_free = __o1->chunk_limit
; __o1->object_base = __o1->next_free; __value; }); }))
; 3835 3836 if (dump_file) 3837 { 3838 int i, j, need_comma; 3839 int total_delay_slots[MAX_DELAY_HISTOGRAM3 + 1]; 3840 int total_annul_slots[MAX_DELAY_HISTOGRAM3 + 1]; 3841 3842 for (reorg_pass_number = 0; 3843 reorg_pass_number < MAX_REORG_PASSES2; 3844 reorg_pass_number++) 3845 { 3846 fprintf (dump_file, ";; Reorg pass #%d:\n", reorg_pass_number + 1); 3847 for (i = 0; i < NUM_REORG_FUNCTIONS2; i++) 3848 { 3849 need_comma = 0; 3850 fprintf (dump_file, ";; Reorg function #%d\n", i); 3851 3852 fprintf (dump_file, ";; %d insns needing delay slots\n;; ", 3853 num_insns_needing_delays[i][reorg_pass_number]); 3854 3855 for (j = 0; j < MAX_DELAY_HISTOGRAM3 + 1; j++) 3856 if (num_filled_delays[i][j][reorg_pass_number]) 3857 { 3858 if (need_comma) 3859 fprintf (dump_file, ", "); 3860 need_comma = 1; 3861 fprintf (dump_file, "%d got %d delays", 3862 num_filled_delays[i][j][reorg_pass_number], j); 3863 } 3864 fprintf (dump_file, "\n"); 3865 } 3866 } 3867 memset (total_delay_slots, 0, sizeof total_delay_slots); 3868 memset (total_annul_slots, 0, sizeof total_annul_slots); 3869 for (insn = first; insn; insn = NEXT_INSN (insn)) 3870 { 3871 if (! insn->deleted () 3872 && NONJUMP_INSN_P (insn)(((enum rtx_code) (insn)->code) == INSN) 3873 && GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) != USE 3874 && GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) != CLOBBER) 3875 { 3876 if (GET_CODE (PATTERN (insn))((enum rtx_code) (PATTERN (insn))->code) == SEQUENCE) 3877 { 3878 rtx control; 3879 j = XVECLEN (PATTERN (insn), 0)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->num_elem) - 1; 3880 if (j > MAX_DELAY_HISTOGRAM3) 3881 j = MAX_DELAY_HISTOGRAM3; 3882 control = XVECEXP (PATTERN (insn), 0, 0)(((((PATTERN (insn))->u.fld[0]).rt_rtvec))->elem[0]); 3883 if (JUMP_P (control)(((enum rtx_code) (control)->code) == JUMP_INSN) && INSN_ANNULLED_BRANCH_P (control)(__extension__ ({ __typeof ((control)) const _rtx = ((control
)); if (((enum rtx_code) (_rtx)->code) != JUMP_INSN) rtl_check_failed_flag
("INSN_ANNULLED_BRANCH_P", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/reorg.c"
, 3883, __FUNCTION__); _rtx; })->unchanging)
) 3884 total_annul_slots[j]++; 3885 else 3886 total_delay_slots[j]++; 3887 } 3888 else if (num_delay_slots (insn) > 0) 3889 total_delay_slots[0]++; 3890 } 3891 } 3892 fprintf (dump_file, ";; Reorg totals: "); 3893 need_comma = 0; 3894 for (j = 0; j < MAX_DELAY_HISTOGRAM3 + 1; j++) 3895 { 3896 if (total_delay_slots[j]) 3897 { 3898 if (need_comma) 3899 fprintf (dump_file, ", "); 3900 need_comma = 1; 3901 fprintf (dump_file, "%d got %d delays", total_delay_slots[j], j); 3902 } 3903 } 3904 fprintf (dump_file, "\n"); 3905 3906 if (ANNUL_IFTRUE_SLOTS0 || ANNUL_IFFALSE_SLOTS0) 3907 { 3908 fprintf (dump_file, ";; Reorg annuls: "); 3909 need_comma = 0; 3910 for (j = 0; j < MAX_DELAY_HISTOGRAM3 + 1; j++) 3911 { 3912 if (total_annul_slots[j]) 3913 { 3914 if (need_comma) 3915 fprintf (dump_file, ", "); 3916 need_comma = 1; 3917 fprintf (dump_file, "%d got %d delays", total_annul_slots[j], j); 3918 } 3919 } 3920 fprintf (dump_file, "\n"); 3921 } 3922 3923 fprintf (dump_file, "\n"); 3924 } 3925 3926 if (!sibling_labels.is_empty ()) 3927 { 3928 update_alignments (sibling_labels); 3929 sibling_labels.release (); 3930 } 3931 3932 free_resource_info (); 3933 free (uid_to_ruid); 3934 crtl(&x_rtl)->dbr_scheduled_p = true; 3935} 3936
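
Editorial aside, not part of the reorg.c listing: the dump code near the end of dbr_schedule prints a comma-separated histogram of how many insns received how many delay slots. A standalone C++ sketch of that "N got M delays" printing loop; the array contents below are made up for the example:

// Illustrative sketch only: the comma-separated histogram dump written to the
// RTL dump file by dbr_schedule.  The counts are invented example data.
#include <cstdio>

int main ()
{
  const int MAX_DELAY_HISTOGRAM = 3;
  int total_delay_slots[MAX_DELAY_HISTOGRAM + 1] = { 5, 12, 0, 2 };

  std::printf (";; Reorg totals: ");
  int need_comma = 0;
  for (int j = 0; j < MAX_DELAY_HISTOGRAM + 1; j++)
    if (total_delay_slots[j])
      {
        if (need_comma)
          std::printf (", ");
        need_comma = 1;
        std::printf ("%d got %d delays", total_delay_slots[j], j);
      }
  std::printf ("\n");
  return 0;
}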
3937/* Run delay slot optimization. */
3938static unsigned int
3939rest_of_handle_delay_slots (void)
3940{
3941 if (DELAY_SLOTS0)
3942 dbr_schedule (get_insns ());
3943
3944 return 0;
3945}
3946
3947namespace {
3948
3949const pass_data pass_data_delay_slots =
3950{
3951 RTL_PASS, /* type */
3952 "dbr", /* name */
3953 OPTGROUP_NONE, /* optinfo_flags */
3954 TV_DBR_SCHED, /* tv_id */
3955 0, /* properties_required */
3956 0, /* properties_provided */
3957 0, /* properties_destroyed */
3958 0, /* todo_flags_start */
3959 0, /* todo_flags_finish */
3960};
3961
3962class pass_delay_slots : public rtl_opt_pass
3963{
3964public:
3965 pass_delay_slots (gcc::context *ctxt)
3966 : rtl_opt_pass (pass_data_delay_slots, ctxt)
3967 {}
3968
3969 /* opt_pass methods: */
3970 virtual bool gate (function *);
3971 virtual unsigned int execute (function *)
3972 {
3973 return rest_of_handle_delay_slots ();
3974 }
3975
3976}; // class pass_delay_slots
3977
3978bool
3979pass_delay_slots::gate (function *)
3980{
3981 /* At -O0 dataflow info isn't updated after RA. */
3982 if (DELAY_SLOTS0)
3983 return optimizeglobal_options.x_optimize > 0 && flag_delayed_branchglobal_options.x_flag_delayed_branch && !crtl(&x_rtl)->dbr_scheduled_p;
3984
3985 return false;
3986}
3987
3988} // anon namespace
3989
3990rtl_opt_pass *
3991make_pass_delay_slots (gcc::context *ctxt)
3992{
3993 return new pass_delay_slots (ctxt);
3994}
3995
3996/* Machine dependent reorg pass. */
3997
3998namespace {
3999
4000const pass_data pass_data_machine_reorg =
4001{
4002 RTL_PASS, /* type */
4003 "mach", /* name */
4004 OPTGROUP_NONE, /* optinfo_flags */
4005 TV_MACH_DEP, /* tv_id */
4006 0, /* properties_required */
4007 0, /* properties_provided */
4008 0, /* properties_destroyed */
4009 0, /* todo_flags_start */
4010 0, /* todo_flags_finish */
4011};
4012
4013class pass_machine_reorg : public rtl_opt_pass
4014{
4015public:
4016 pass_machine_reorg (gcc::context *ctxt)
4017 : rtl_opt_pass (pass_data_machine_reorg, ctxt)
4018 {}
4019
4020 /* opt_pass methods: */
4021 virtual bool gate (function *)
4022 {
4023 return targetm.machine_dependent_reorg != 0;
4024 }
4025
4026 virtual unsigned int execute (function *)
4027 {
4028 targetm.machine_dependent_reorg ();
4029 return 0;
4030 }
4031
4032}; // class pass_machine_reorg
4033
4034} // anon namespace
4035
4036rtl_opt_pass *
4037make_pass_machine_reorg (gcc::context *ctxt)
4038{
4039 return new pass_machine_reorg (ctxt);
4040}
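
Editorial aside, not part of the reorg.c listing: both passes above follow the rtl_opt_pass gate()/execute() protocol — gate() decides whether the pass runs for the current function, execute() does the work. A minimal standalone C++ sketch of that shape, with hypothetical opt_pass_like/toy_function stand-ins rather than GCC's opt_pass and gcc::context classes:

// Illustrative sketch only: the gate()/execute() shape that pass_delay_slots
// and pass_machine_reorg follow.  All names here are hypothetical.
#include <iostream>

struct toy_function {};

struct opt_pass_like {
  virtual ~opt_pass_like () {}
  virtual bool gate (toy_function *) { return true; }
  virtual unsigned int execute (toy_function *) = 0;
};

struct toy_delay_slots_pass : opt_pass_like {
  bool delay_slots_supported;
  explicit toy_delay_slots_pass (bool supported)
    : delay_slots_supported (supported) {}

  // Run only when the target has delay slots at all (DELAY_SLOTS in reorg.c).
  bool gate (toy_function *) override { return delay_slots_supported; }

  unsigned int execute (toy_function *) override
  {
    std::cout << "filling delay slots\n";
    return 0;
  }
};

int main ()
{
  toy_function fn;
  toy_delay_slots_pass pass (/*supported=*/true);
  if (pass.gate (&fn))
    pass.execute (&fn);
  return 0;
}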

/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h

1/* Register Transfer Language (RTL) definitions for GCC
2 Copyright (C) 1987-2021 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#ifndef GCC_RTL_H
21#define GCC_RTL_H
22
23/* This file is occasionally included by generator files which expect
24 machmode.h and other files to exist and would not normally have been
25 included by coretypes.h. */
26#ifdef GENERATOR_FILE
27#include "real.h"
28#include "fixed-value.h"
29#include "statistics.h"
30#include "vec.h"
31#include "hash-table.h"
32#include "hash-set.h"
33#include "input.h"
34#include "is-a.h"
35#endif /* GENERATOR_FILE */
36
37#include "hard-reg-set.h"
38
39class predefined_function_abi;
40
41/* Value used by some passes to "recognize" noop moves as valid
42 instructions. */
43#define NOOP_MOVE_INSN_CODE2147483647 INT_MAX2147483647
44
45/* Register Transfer Language EXPRESSIONS CODES */
46
47#define RTX_CODEenum rtx_code enum rtx_code
48enum rtx_code {
49
50#define DEF_RTL_EXPR(ENUM, NAME, FORMAT, CLASS) ENUM ,
51#include "rtl.def" /* rtl expressions are documented here */
52#undef DEF_RTL_EXPR
53
54 LAST_AND_UNUSED_RTX_CODE}; /* A convenient way to get a value for
55 NUM_RTX_CODE.
56 Assumes default enum value assignment. */
57
58/* The cast here, saves many elsewhere. */
59#define NUM_RTX_CODE((int) LAST_AND_UNUSED_RTX_CODE) ((int) LAST_AND_UNUSED_RTX_CODE)
60
61/* Similar, but since generator files get more entries... */
62#ifdef GENERATOR_FILE
63# define NON_GENERATOR_NUM_RTX_CODE ((int) MATCH_OPERAND)
64#endif
65
66/* Register Transfer Language EXPRESSIONS CODE CLASSES */
67
68enum rtx_class {
69 /* We check bit 0-1 of some rtx class codes in the predicates below. */
70
71 /* Bit 0 = comparison if 0, arithmetic is 1
72 Bit 1 = 1 if commutative. */
73 RTX_COMPARE, /* 0 */
74 RTX_COMM_COMPARE,
75 RTX_BIN_ARITH,
76 RTX_COMM_ARITH,
77
78 /* Must follow the four preceding values. */
79 RTX_UNARY, /* 4 */
80
81 RTX_EXTRA,
82 RTX_MATCH,
83 RTX_INSN,
84
85 /* Bit 0 = 1 if constant. */
86 RTX_OBJ, /* 8 */
87 RTX_CONST_OBJ,
88
89 RTX_TERNARY,
90 RTX_BITFIELD_OPS,
91 RTX_AUTOINC
92};
93
94#define RTX_OBJ_MASK(~1) (~1)
95#define RTX_OBJ_RESULT(RTX_OBJ & (~1)) (RTX_OBJ & RTX_OBJ_MASK(~1))
96#define RTX_COMPARE_MASK(~1) (~1)
97#define RTX_COMPARE_RESULT(RTX_COMPARE & (~1)) (RTX_COMPARE & RTX_COMPARE_MASK(~1))
98#define RTX_ARITHMETIC_MASK(~1) (~1)
99#define RTX_ARITHMETIC_RESULT(RTX_COMM_ARITH & (~1)) (RTX_COMM_ARITH & RTX_ARITHMETIC_MASK(~1))
100#define RTX_BINARY_MASK(~3) (~3)
101#define RTX_BINARY_RESULT(RTX_COMPARE & (~3)) (RTX_COMPARE & RTX_BINARY_MASK(~3))
102#define RTX_COMMUTATIVE_MASK(~2) (~2)
103#define RTX_COMMUTATIVE_RESULT(RTX_COMM_COMPARE & (~2)) (RTX_COMM_COMPARE & RTX_COMMUTATIVE_MASK(~2))
104#define RTX_NON_COMMUTATIVE_RESULT(RTX_COMPARE & (~2)) (RTX_COMPARE & RTX_COMMUTATIVE_MASK(~2))
105
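
Editorial aside, not part of the rtl.h listing: the masks above rely on the enum layout shown (RTX_COMPARE = 0, RTX_COMM_COMPARE = 1, RTX_BIN_ARITH = 2, RTX_COMM_ARITH = 3, RTX_UNARY = 4), where bit 1 marks commutativity and clearing bits 0-1 identifies the binary group. A standalone C++ sketch re-deriving those tests with hypothetical TOY_* names:

// Illustrative sketch only: the bit tricks behind RTX_BINARY_MASK (~3) and
// RTX_COMMUTATIVE_MASK (~2), using the enum values listed above.
#include <iostream>

enum toy_rtx_class {
  TOY_COMPARE,       /* 0: binary, non-commutative */
  TOY_COMM_COMPARE,  /* 1: binary, commutative */
  TOY_BIN_ARITH,     /* 2: binary, non-commutative */
  TOY_COMM_ARITH,    /* 3: binary, commutative */
  TOY_UNARY          /* 4: not binary */
};

// Clearing bits 0-1 lumps the four binary classes together; bit 1 alone says
// whether the class is commutative.
bool is_binary (toy_rtx_class c)      { return (c & ~3) == (TOY_COMPARE & ~3); }
bool is_commutative (toy_rtx_class c) { return (c & ~2) == (TOY_COMM_COMPARE & ~2); }

int main ()
{
  for (int c = TOY_COMPARE; c <= TOY_UNARY; c++)
    std::cout << c << ": binary=" << is_binary ((toy_rtx_class) c)
              << " commutative=" << is_commutative ((toy_rtx_class) c) << "\n";
  return 0;
}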
106extern const unsigned char rtx_length[NUM_RTX_CODE((int) LAST_AND_UNUSED_RTX_CODE)];
107#define GET_RTX_LENGTH(CODE)(rtx_length[(int) (CODE)]) (rtx_length[(int) (CODE)])
108
109extern const char * const rtx_name[NUM_RTX_CODE((int) LAST_AND_UNUSED_RTX_CODE)];
110#define GET_RTX_NAME(CODE)(rtx_name[(int) (CODE)]) (rtx_name[(int) (CODE)])
111
112extern const char * const rtx_format[NUM_RTX_CODE((int) LAST_AND_UNUSED_RTX_CODE)];
113#define GET_RTX_FORMAT(CODE)(rtx_format[(int) (CODE)]) (rtx_format[(int) (CODE)])
114
115extern const enum rtx_class rtx_class[NUM_RTX_CODE((int) LAST_AND_UNUSED_RTX_CODE)];
116#define GET_RTX_CLASS(CODE)(rtx_class[(int) (CODE)]) (rtx_class[(int) (CODE)])
117
118/* True if CODE is part of the insn chain (i.e. has INSN_UID, PREV_INSN
119 and NEXT_INSN fields). */
120#define INSN_CHAIN_CODE_P(CODE)((unsigned long) (CODE) - (unsigned long) (DEBUG_INSN) <= (
unsigned long) (NOTE) - (unsigned long) (DEBUG_INSN))
IN_RANGE (CODE, DEBUG_INSN, NOTE)((unsigned long) (CODE) - (unsigned long) (DEBUG_INSN) <= (
unsigned long) (NOTE) - (unsigned long) (DEBUG_INSN))
121
122extern const unsigned char rtx_code_size[NUM_RTX_CODE((int) LAST_AND_UNUSED_RTX_CODE)];
123extern const unsigned char rtx_next[NUM_RTX_CODE((int) LAST_AND_UNUSED_RTX_CODE)];
124
125/* The flags and bitfields of an ADDR_DIFF_VEC. BASE is the base label
126 relative to which the offsets are calculated, as explained in rtl.def. */
127struct addr_diff_vec_flags
128{
129 /* Set at the start of shorten_branches - ONLY WHEN OPTIMIZING - : */
130 unsigned min_align: 8;
131 /* Flags: */
132 unsigned base_after_vec: 1; /* BASE is after the ADDR_DIFF_VEC. */
133 unsigned min_after_vec: 1; /* minimum address target label is
134 after the ADDR_DIFF_VEC. */
135 unsigned max_after_vec: 1; /* maximum address target label is
136 after the ADDR_DIFF_VEC. */
137 unsigned min_after_base: 1; /* minimum address target label is
138 after BASE. */
139 unsigned max_after_base: 1; /* maximum address target label is
140 after BASE. */
141 /* Set by the actual branch shortening process - ONLY WHEN OPTIMIZING - : */
142 unsigned offset_unsigned: 1; /* offsets have to be treated as unsigned. */
143 unsigned : 2;
144 unsigned scale : 8;
145};
146
147/* Structure used to describe the attributes of a MEM. These are hashed
148 so MEMs that have the same attributes share a data structure. This means
149 they cannot be modified in place. */
150class GTY(()) mem_attrs
151{
152public:
153 mem_attrs ();
154
155 /* The expression that the MEM accesses, or null if not known.
156 This expression might be larger than the memory reference itself.
157 (In other words, the MEM might access only part of the object.) */
158 tree expr;
159
160 /* The offset of the memory reference from the start of EXPR.
161 Only valid if OFFSET_KNOWN_P. */
162 poly_int64 offset;
163
164 /* The size of the memory reference in bytes. Only valid if
165 SIZE_KNOWN_P. */
166 poly_int64 size;
167
168 /* The alias set of the memory reference. */
169 alias_set_type alias;
170
171 /* The alignment of the reference in bits. Always a multiple of
172 BITS_PER_UNIT. Note that EXPR may have a stricter alignment
173 than the memory reference itself. */
174 unsigned int align;
175
176 /* The address space that the memory reference uses. */
177 unsigned char addrspace;
178
179 /* True if OFFSET is known. */
180 bool offset_known_p;
181
182 /* True if SIZE is known. */
183 bool size_known_p;
184};
185
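
Editorial aside, not part of the rtl.h listing: the comment above says mem_attrs records are hashed so MEMs with equal attributes share one structure, which is why they cannot be modified in place. A standalone C++ sketch of that hash-consing (interning) idea with a simplified, hypothetical attribute record:

// Illustrative sketch only: interning attribute records so equal attributes
// share one immutable object.  toy_attrs and this table are hypothetical.
#include <cstdint>
#include <iostream>
#include <unordered_set>

struct toy_attrs {
  int64_t offset;
  unsigned int align;
  bool operator== (const toy_attrs &o) const
  { return offset == o.offset && align == o.align; }
};

struct toy_attrs_hash {
  size_t operator() (const toy_attrs &a) const
  { return std::hash<int64_t> () (a.offset) ^ (a.align * 0x9e3779b9u); }
};

// Shared, immutable copies live in the table; callers keep const pointers.
const toy_attrs *intern (std::unordered_set<toy_attrs, toy_attrs_hash> &table,
                         const toy_attrs &a)
{
  return &*table.insert (a).first;
}

int main ()
{
  std::unordered_set<toy_attrs, toy_attrs_hash> table;
  const toy_attrs *p = intern (table, { 8, 32 });
  const toy_attrs *q = intern (table, { 8, 32 });
  std::cout << (p == q ? "shared\n" : "distinct\n");   // prints "shared"
  return 0;
}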
186/* Structure used to describe the attributes of a REG in similar way as
187 mem_attrs does for MEM above. Note that the OFFSET field is calculated
188 in the same way as for mem_attrs, rather than in the same way as a
189 SUBREG_BYTE. For example, if a big-endian target stores a byte
190 object in the low part of a 4-byte register, the OFFSET field
191 will be -3 rather than 0. */
192
193class GTY((for_user)) reg_attrs {
194public:
195 tree decl; /* decl corresponding to REG. */
196 poly_int64 offset; /* Offset from start of DECL. */
197};
198
199/* Common union for an element of an rtx. */
200
201union rtunion
202{
203 int rt_int;
204 unsigned int rt_uint;
205 poly_uint16_pod rt_subreg;
206 const char *rt_str;
207 rtx rt_rtx;
208 rtvec rt_rtvec;
209 machine_mode rt_type;
210 addr_diff_vec_flags rt_addr_diff_vec_flags;
211 struct cselib_val *rt_cselib;
212 tree rt_tree;
213 basic_block rt_bb;
214 mem_attrs *rt_mem;
215 class constant_descriptor_rtx *rt_constant;
216 struct dw_cfi_node *rt_cfi;
217};
218
219/* Describes the properties of a REG. */
220struct GTY(()) reg_info {
221 /* The value of REGNO. */
222 unsigned int regno;
223
224 /* The value of REG_NREGS. */
225 unsigned int nregs : 8;
226 unsigned int unused : 24;
227
228 /* The value of REG_ATTRS. */
229 reg_attrs *attrs;
230};
231
232/* This structure remembers the position of a SYMBOL_REF within an
233 object_block structure. A SYMBOL_REF only provides this information
234 if SYMBOL_REF_HAS_BLOCK_INFO_P is true. */
235struct GTY(()) block_symbol {
236 /* The usual SYMBOL_REF fields. */
237 rtunion GTY ((skip)) fld[2];
238
239 /* The block that contains this object. */
240 struct object_block *block;
241
242 /* The offset of this object from the start of its block. It is negative
243 if the symbol has not yet been assigned an offset. */
244 HOST_WIDE_INTlong offset;
245};
246
247/* Describes a group of objects that are to be placed together in such
248 a way that their relative positions are known. */
249struct GTY((for_user)) object_block {
250 /* The section in which these objects should be placed. */
251 section *sect;
252
253 /* The alignment of the first object, measured in bits. */
254 unsigned int alignment;
255
256 /* The total size of the objects, measured in bytes. */
257 HOST_WIDE_INTlong size;
258
259 /* The SYMBOL_REFs for each object. The vector is sorted in
260 order of increasing offset and the following conditions will
261 hold for each element X:
262
263 SYMBOL_REF_HAS_BLOCK_INFO_P (X)
264 !SYMBOL_REF_ANCHOR_P (X)
265 SYMBOL_REF_BLOCK (X) == [address of this structure]
266 SYMBOL_REF_BLOCK_OFFSET (X) >= 0. */
267 vec<rtx, va_gc> *objects;
268
269 /* All the anchor SYMBOL_REFs used to address these objects, sorted
270 in order of increasing offset, and then increasing TLS model.
271 The following conditions will hold for each element X in this vector:
272
273 SYMBOL_REF_HAS_BLOCK_INFO_P (X)
274 SYMBOL_REF_ANCHOR_P (X)
275 SYMBOL_REF_BLOCK (X) == [address of this structure]
276 SYMBOL_REF_BLOCK_OFFSET (X) >= 0. */
277 vec<rtx, va_gc> *anchors;
278};
279
280struct GTY((variable_size)) hwivec_def {
281 HOST_WIDE_INTlong elem[1];
282};
283
284/* Number of elements of the HWIVEC if RTX is a CONST_WIDE_INT. */
285#define CWI_GET_NUM_ELEM(RTX)((int)__extension__ ({ __typeof ((RTX)) const _rtx = ((RTX));
if (((enum rtx_code) (_rtx)->code) != CONST_WIDE_INT) rtl_check_failed_flag
("CWI_GET_NUM_ELEM", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h"
, 285, __FUNCTION__); _rtx; })->u2.num_elem)
\
286 ((int)RTL_FLAG_CHECK1("CWI_GET_NUM_ELEM", (RTX), CONST_WIDE_INT)__extension__ ({ __typeof ((RTX)) const _rtx = ((RTX)); if ((
(enum rtx_code) (_rtx)->code) != CONST_WIDE_INT) rtl_check_failed_flag
("CWI_GET_NUM_ELEM", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h"
, 286, __FUNCTION__); _rtx; })
->u2.num_elem)
287#define CWI_PUT_NUM_ELEM(RTX, NUM)(__extension__ ({ __typeof ((RTX)) const _rtx = ((RTX)); if (
((enum rtx_code) (_rtx)->code) != CONST_WIDE_INT) rtl_check_failed_flag
("CWI_PUT_NUM_ELEM", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h"
, 287, __FUNCTION__); _rtx; })->u2.num_elem = (NUM))
\
288 (RTL_FLAG_CHECK1("CWI_PUT_NUM_ELEM", (RTX), CONST_WIDE_INT)__extension__ ({ __typeof ((RTX)) const _rtx = ((RTX)); if ((
(enum rtx_code) (_rtx)->code) != CONST_WIDE_INT) rtl_check_failed_flag
("CWI_PUT_NUM_ELEM", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h"
, 288, __FUNCTION__); _rtx; })
->u2.num_elem = (NUM))
289
290struct GTY((variable_size)) const_poly_int_def {
291 trailing_wide_ints<NUM_POLY_INT_COEFFS1> coeffs;
292};
293
294/* RTL expression ("rtx"). */
295
296/* The GTY "desc" and "tag" options below are a kludge: we need a desc
297 field for gengtype to recognize that inheritance is occurring,
298 so that all subclasses are redirected to the traversal hook for the
299 base class.
300 However, all of the fields are in the base class, and special-casing
301 is at work. Hence we use desc and tag of 0, generating a switch
302 statement of the form:
303 switch (0)
304 {
305 case 0: // all the work happens here
306 }
307 in order to work with the existing special-casing in gengtype. */
308
309struct GTY((desc("0"), tag("0"),
310 chain_next ("RTX_NEXT (&%h)"),
311 chain_prev ("RTX_PREV (&%h)"))) rtx_def {
312 /* The kind of expression this is. */
313 ENUM_BITFIELD(rtx_code)enum rtx_code code: 16;
314
315 /* The kind of value the expression has. */
316 ENUM_BITFIELD(machine_mode)enum machine_mode mode : 8;
317
318 /* 1 in a MEM if we should keep the alias set for this mem unchanged
319 when we access a component.
320 1 in a JUMP_INSN if it is a crossing jump.
321 1 in a CALL_INSN if it is a sibling call.
322 1 in a SET that is for a return.
323 In a CODE_LABEL, part of the two-bit alternate entry field.
324 1 in a CONCAT is VAL_EXPR_IS_COPIED in var-tracking.c.
325 1 in a VALUE is SP_BASED_VALUE_P in cselib.c.
326 1 in a SUBREG generated by LRA for reload insns.
327 1 in a REG if this is a static chain register.
328 Dumped as "/j" in RTL dumps. */
329 unsigned int jump : 1;
330 /* In a CODE_LABEL, part of the two-bit alternate entry field.
331 1 in a MEM if it cannot trap.
332 1 in a CALL_INSN logically equivalent to
333 ECF_LOOPING_CONST_OR_PURE and DECL_LOOPING_CONST_OR_PURE_P.
334 1 in a VALUE is SP_DERIVED_VALUE_P in cselib.c.
335 Dumped as "/c" in RTL dumps. */
336 unsigned int call : 1;
337 /* 1 in a REG, MEM, or CONCAT if the value is set at most once, anywhere.
338 1 in a SUBREG used for SUBREG_PROMOTED_UNSIGNED_P.
339 1 in a SYMBOL_REF if it addresses something in the per-function
340 constants pool.
341 1 in a CALL_INSN logically equivalent to ECF_CONST and TREE_READONLY.
342 1 in a NOTE, or EXPR_LIST for a const call.
343 1 in a JUMP_INSN of an annulling branch.
344 1 in a CONCAT is VAL_EXPR_IS_CLOBBERED in var-tracking.c.
345 1 in a preserved VALUE is PRESERVED_VALUE_P in cselib.c.
346 1 in a clobber temporarily created for LRA.
347 Dumped as "/u" in RTL dumps. */
348 unsigned int unchanging : 1;
349 /* 1 in a MEM or ASM_OPERANDS expression if the memory reference is volatile.
350 1 in an INSN, CALL_INSN, JUMP_INSN, CODE_LABEL, BARRIER, or NOTE
351 if it has been deleted.
352 1 in a REG expression if corresponds to a variable declared by the user,
353 0 for an internally generated temporary.
354 1 in a SUBREG used for SUBREG_PROMOTED_UNSIGNED_P.
355 1 in a LABEL_REF, REG_LABEL_TARGET or REG_LABEL_OPERAND note for a
356 non-local label.
357 In a SYMBOL_REF, this flag is used for machine-specific purposes.
358 In a PREFETCH, this flag indicates that it should be considered a
359 scheduling barrier.
360 1 in a CONCAT is VAL_NEEDS_RESOLUTION in var-tracking.c.
361 Dumped as "/v" in RTL dumps. */
362 unsigned int volatil : 1;
363 /* 1 in a REG if the register is used only in exit code of a loop.
364 1 in a SUBREG expression if it was generated from a variable with a
365 promoted mode.
366 1 in a CODE_LABEL if the label is used for nonlocal gotos
367 and must not be deleted even if its count is zero.
368 1 in an INSN, JUMP_INSN or CALL_INSN if this insn must be scheduled
369 together with the preceding insn. Valid only within sched.
370 1 in an INSN, JUMP_INSN, or CALL_INSN if insn is in a delay slot and
371 from the target of a branch. Valid from reorg until end of compilation;
372 cleared before used.
373
374 The name of the field is historical. It used to be used in MEMs
375 to record whether the MEM accessed part of a structure.
376 Dumped as "/s" in RTL dumps. */
377 unsigned int in_struct : 1;
378 /* At the end of RTL generation, 1 if this rtx is used. This is used for
379 copying shared structure. See `unshare_all_rtl'.
380 In a REG, this is not needed for that purpose, and used instead
381 in `leaf_renumber_regs_insn'.
382 1 in a SYMBOL_REF, means that emit_library_call
383 has used it as the function.
384 1 in a CONCAT is VAL_HOLDS_TRACK_EXPR in var-tracking.c.
385 1 in a VALUE or DEBUG_EXPR is VALUE_RECURSED_INTO in var-tracking.c. */
386 unsigned int used : 1;
387 /* 1 in an INSN or a SET if this rtx is related to the call frame,
388 either changing how we compute the frame address or saving and
389 restoring registers in the prologue and epilogue.
390 1 in a REG or MEM if it is a pointer.
391 1 in a SYMBOL_REF if it addresses something in the per-function
392 constant string pool.
393 1 in a VALUE is VALUE_CHANGED in var-tracking.c.
394 Dumped as "/f" in RTL dumps. */
395 unsigned frame_related : 1;
396 /* 1 in a REG or PARALLEL that is the current function's return value.
397 1 in a SYMBOL_REF for a weak symbol.
398 1 in a CALL_INSN logically equivalent to ECF_PURE and DECL_PURE_P.
399 1 in a CONCAT is VAL_EXPR_HAS_REVERSE in var-tracking.c.
400 1 in a VALUE or DEBUG_EXPR is NO_LOC_P in var-tracking.c.
401 Dumped as "/i" in RTL dumps. */
402 unsigned return_val : 1;
403
404 union {
405 /* The final union field is aligned to 64 bits on LP64 hosts,
406 giving a 32-bit gap after the fields above. We optimize the
407 layout for that case and use the gap for extra code-specific
408 information. */
409
410 /* The ORIGINAL_REGNO of a REG. */
411 unsigned int original_regno;
412
413 /* The INSN_UID of an RTX_INSN-class code. */
414 int insn_uid;
415
416 /* The SYMBOL_REF_FLAGS of a SYMBOL_REF. */
417 unsigned int symbol_ref_flags;
418
419 /* The PAT_VAR_LOCATION_STATUS of a VAR_LOCATION. */
420 enum var_init_status var_location_status;
421
422 /* In a CONST_WIDE_INT (aka hwivec_def), this is the number of
423 HOST_WIDE_INTs in the hwivec_def. */
424 unsigned int num_elem;
425
426 /* Information about a CONST_VECTOR. */
427 struct
428 {
429 /* The value of CONST_VECTOR_NPATTERNS. */
430 unsigned int npatterns : 16;
431
432 /* The value of CONST_VECTOR_NELTS_PER_PATTERN. */
433 unsigned int nelts_per_pattern : 8;
434
435 /* For future expansion. */
436 unsigned int unused : 8;
437 } const_vector;
438 } GTY ((skip)) u2;
439
440 /* The first element of the operands of this rtx.
441 The number of operands and their types are controlled
442 by the `code' field, according to rtl.def. */
443 union u {
444 rtunion fld[1];
445 HOST_WIDE_INTlong hwint[1];
446 struct reg_info reg;
447 struct block_symbol block_sym;
448 struct real_value rv;
449 struct fixed_value fv;
450 struct hwivec_def hwiv;
451 struct const_poly_int_def cpi;
452 } GTY ((special ("rtx_def"), desc ("GET_CODE (&%0)"))) u;
453};
454
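
Editorial aside, not part of the rtl.h listing: the flag bits documented above (jump, call, unchanging, volatil, in_struct, ...) are heavily overloaded, with a different meaning per rtx code — in_struct, for instance, is INSN_FROM_TARGET_P only on INSN, JUMP_INSN and CALL_INSN. That is why the macro expansions seen in the reorg.c listing verify GET_CODE before touching a bit. A standalone C++ sketch of that checked-accessor pattern, with a hypothetical toy_rtx rather than rtx_def:

// Illustrative sketch only: why INSN_FROM_TARGET_P-style accessors check the
// rtx code (rtl_check_failed_flag) before reading an overloaded flag bit.
#include <cstdlib>
#include <iostream>

enum toy_code { TOY_INSN, TOY_JUMP_INSN, TOY_CALL_INSN, TOY_MEM };

struct toy_rtx {
  toy_code code;
  unsigned int in_struct : 1;   // meaning depends on code, as in rtx_def
};

// Refuse to interpret the bit for a code where it means something else.
bool insn_from_target_p (const toy_rtx &x)
{
  if (x.code != TOY_INSN && x.code != TOY_JUMP_INSN && x.code != TOY_CALL_INSN)
    {
      std::cerr << "flag checked on wrong rtx code\n";
      std::abort ();
    }
  return x.in_struct;
}

int main ()
{
  toy_rtx insn = { TOY_INSN, 1 };
  std::cout << insn_from_target_p (insn) << "\n";   // 1
  return 0;
}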
455/* A node for constructing singly-linked lists of rtx. */
456
457struct GTY(()) rtx_expr_list : public rtx_def
458{
459private:
460 /* No extra fields, but adds invariant: (GET_CODE (X) == EXPR_LIST). */
461
462public:
463 /* Get next in list. */
464 rtx_expr_list *next () const;
465
466 /* Get at the underlying rtx. */
467 rtx element () const;
468};
469
470template <>
471template <>
472inline bool
473is_a_helper <rtx_expr_list *>::test (rtx rt)
474{
475 return rt->code == EXPR_LIST;
476}
477
478struct GTY(()) rtx_insn_list : public rtx_def
479{
480private:
481 /* No extra fields, but adds invariant: (GET_CODE (X) == INSN_LIST).
482
483 This is an instance of:
484
485 DEF_RTL_EXPR(INSN_LIST, "insn_list", "ue", RTX_EXTRA)
486
487 i.e. a node for constructing singly-linked lists of rtx_insn *, where
488 the list is "external" to the insn (as opposed to the doubly-linked
489 list embedded within rtx_insn itself). */
490
491public:
492 /* Get next in list. */
493 rtx_insn_list *next () const;
494
495 /* Get at the underlying instruction. */
496 rtx_insn *insn () const;
497
498};
499
500template <>
501template <>
502inline bool
503is_a_helper <rtx_insn_list *>::test (rtx rt)
504{
505 return rt->code == INSN_LIST;
506}
507
508/* A node with invariant GET_CODE (X) == SEQUENCE i.e. a vector of rtx,
509 typically (but not always) of rtx_insn *, used in the late passes. */
510
511struct GTY(()) rtx_sequence : public rtx_def
512{
513private:
514 /* No extra fields, but adds invariant: (GET_CODE (X) == SEQUENCE). */
515
516public:
517 /* Get number of elements in sequence. */
518 int len () const;
519
520 /* Get i-th element of the sequence. */
521 rtx element (int index) const;
522
523 /* Get i-th element of the sequence, with a checked cast to
524 rtx_insn *. */
525 rtx_insn *insn (int index) const;
526};
527
528template <>
529template <>
530inline bool
531is_a_helper <rtx_sequence *>::test (rtx rt)
532{
533 return rt->code == SEQUENCE;
534}
535
536template <>
537template <>
538inline bool
539is_a_helper <const rtx_sequence *>::test (const_rtx rt)
540{
541 return rt->code == SEQUENCE;
542}
543
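
Editorial aside, not part of the rtl.h listing: the is_a_helper specializations above supply the code-tag test used by the as_a<>/dyn_cast<>-style casts that appear throughout the reorg.c listing (e.g. as_a <rtx_sequence *> (PATTERN (insn))). A standalone C++ sketch of a reduced version of that pattern, with hypothetical toy_* names rather than GCC's is-a.h machinery:

// Illustrative sketch only: a minimal is_a_helper::test / as_a<> pairing.
#include <cassert>
#include <iostream>

enum toy_code { TOY_INSN, TOY_SEQUENCE, TOY_EXPR_LIST };

struct toy_rtx { toy_code code; };
struct toy_sequence : toy_rtx { int len; };

template <typename T> struct toy_is_a_helper;

template <> struct toy_is_a_helper<toy_sequence *> {
  static bool test (toy_rtx *rt) { return rt->code == TOY_SEQUENCE; }
};

// as_a: the caller asserts the dynamic type; checked here in the toy version.
template <typename T> T toy_as_a (toy_rtx *rt)
{
  assert (toy_is_a_helper<T>::test (rt));
  return static_cast<T> (rt);
}

int main ()
{
  toy_sequence seq;
  seq.code = TOY_SEQUENCE;
  seq.len = 3;
  toy_rtx *generic = &seq;
  std::cout << toy_as_a<toy_sequence *> (generic)->len << "\n";   // 3
  return 0;
}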
544struct GTY(()) rtx_insn : public rtx_def
545{
546public:
547 /* No extra fields, but adds the invariant:
548
549 (INSN_P (X)
550 || NOTE_P (X)
551 || JUMP_TABLE_DATA_P (X)
552 || BARRIER_P (X)
553 || LABEL_P (X))
554
555 i.e. that we must be able to use the following:
556 INSN_UID ()
557 NEXT_INSN ()
558 PREV_INSN ()
559 i.e. we have an rtx that has an INSN_UID field and can be part of
560 a linked list of insns.
561 */
562
563 /* Returns true if this insn has been deleted. */
564
565 bool deleted () const { return volatil; }
566
567 /* Mark this insn as deleted. */
568
569 void set_deleted () { volatil = true; }
570
571 /* Mark this insn as not deleted. */
572
573 void set_undeleted () { volatil = false; }
574};
575
576/* Subclasses of rtx_insn. */
577
578struct GTY(()) rtx_debug_insn : public rtx_insn
579{
580 /* No extra fields, but adds the invariant:
581 DEBUG_INSN_P (X) aka (GET_CODE (X) == DEBUG_INSN)
582 i.e. an annotation for tracking variable assignments.
583
584 This is an instance of:
585 DEF_RTL_EXPR(DEBUG_INSN, "debug_insn", "uuBeiie", RTX_INSN)
586 from rtl.def. */
587};
588
589struct GTY(()) rtx_nonjump_insn : public rtx_insn
590{
591 /* No extra fields, but adds the invariant:
592 NONJUMP_INSN_P (X) aka (GET_CODE (X) == INSN)
593 i.e an instruction that cannot jump.
594
595 This is an instance of:
596 DEF_RTL_EXPR(INSN, "insn", "uuBeiie", RTX_INSN)
597 from rtl.def. */
598};
599
600struct GTY(()) rtx_jump_insn : public rtx_insn
601{
602public:
603 /* No extra fields, but adds the invariant:
604 JUMP_P (X) aka (GET_CODE (X) == JUMP_INSN)
605 i.e. an instruction that can possibly jump.
606
607 This is an instance of:
608 DEF_RTL_EXPR(JUMP_INSN, "jump_insn", "uuBeiie0", RTX_INSN)
609 from rtl.def. */
610
611 /* Returns jump target of this instruction. The returned value is not
612 necessarily a code label: it may also be a RETURN or SIMPLE_RETURN
613 expression. Also, when the code label is marked "deleted", it is
614 replaced by a NOTE. In some cases the value is NULL_RTX. */
615
616 inline rtx jump_label () const;
617
618 /* Returns jump target cast to rtx_code_label *. */
619
620 inline rtx_code_label *jump_target () const;
621
622 /* Set jump target. */
623
624 inline void set_jump_target (rtx_code_label *);
625};
626
627struct GTY(()) rtx_call_insn : public rtx_insn
628{
629 /* No extra fields, but adds the invariant:
630 CALL_P (X) aka (GET_CODE (X) == CALL_INSN)
631 i.e. an instruction that can possibly call a subroutine
632 but which will not change which instruction comes next
633 in the current function.
634
635 This is an instance of:
636 DEF_RTL_EXPR(CALL_INSN, "call_insn", "uuBeiiee", RTX_INSN)
637 from rtl.def. */
638};
639
640struct GTY(()) rtx_jump_table_data : public rtx_insn
641{
642 /* No extra fields, but adds the invariant:
643 JUMP_TABLE_DATA_P (X) aka (GET_CODE (INSN) == JUMP_TABLE_DATA)
644 i.e. a data for a jump table, considered an instruction for
645 historical reasons.
646
647 This is an instance of:
648 DEF_RTL_EXPR(JUMP_TABLE_DATA, "jump_table_data", "uuBe0000", RTX_INSN)
649 from rtl.def. */
650
651 /* This can be either:
652
653 (a) a table of absolute jumps, in which case PATTERN (this) is an
654 ADDR_VEC with arg 0 a vector of labels, or
655
656 (b) a table of relative jumps (e.g. for -fPIC), in which case
657 PATTERN (this) is an ADDR_DIFF_VEC, with arg 0 a LABEL_REF and
658 arg 1 the vector of labels.
659
660 This method gets the underlying vec. */
661
662 inline rtvec get_labels () const;
663 inline scalar_int_mode get_data_mode () const;
664};
665
666struct GTY(()) rtx_barrier : public rtx_insn
667{
668 /* No extra fields, but adds the invariant:
669 BARRIER_P (X) aka (GET_CODE (X) == BARRIER)
670 i.e. a marker that indicates that control will not flow through.
671
672 This is an instance of:
673 DEF_RTL_EXPR(BARRIER, "barrier", "uu00000", RTX_EXTRA)
674 from rtl.def. */
675};
676
677struct GTY(()) rtx_code_label : public rtx_insn
678{
679 /* No extra fields, but adds the invariant:
680 LABEL_P (X) aka (GET_CODE (X) == CODE_LABEL)
681 i.e. a label in the assembler.
682
683 This is an instance of:
684 DEF_RTL_EXPR(CODE_LABEL, "code_label", "uuB00is", RTX_EXTRA)
685 from rtl.def. */
686};
687
688struct GTY(()) rtx_note : public rtx_insn
689{
690 /* No extra fields, but adds the invariant:
691 NOTE_P(X) aka (GET_CODE (X) == NOTE)
692 i.e. a note about the corresponding source code.
693
694 This is an instance of:
695 DEF_RTL_EXPR(NOTE, "note", "uuB0ni", RTX_EXTRA)
696 from rtl.def. */
697};
698
699/* The size in bytes of an rtx header (code, mode and flags). */
700#define RTX_HDR_SIZE__builtin_offsetof(struct rtx_def, u) offsetof (struct rtx_def, u)__builtin_offsetof(struct rtx_def, u)
701
702/* The size in bytes of an rtx with code CODE. */
703#define RTX_CODE_SIZE(CODE)rtx_code_size[CODE] rtx_code_size[CODE]
704
705#define NULL_RTX(rtx) 0 (rtx) 0
706
707/* The "next" and "previous" RTX, relative to this one. */
708
709#define RTX_NEXT(X)(rtx_next[((enum rtx_code) (X)->code)] == 0 ? nullptr : *(
rtx *)(((char *)X) + rtx_next[((enum rtx_code) (X)->code)]
))
(rtx_next[GET_CODE (X)((enum rtx_code) (X)->code)] == 0 ? NULLnullptr \
710 : *(rtx *)(((char *)X) + rtx_next[GET_CODE (X)((enum rtx_code) (X)->code)]))
711
712/* FIXME: the "NEXT_INSN (PREV_INSN (X)) == X" condition shouldn't be needed.
713 */
714#define RTX_PREV(X)(((((((enum rtx_code) (X)->code) == INSN) || (((enum rtx_code
) (X)->code) == JUMP_INSN) || (((enum rtx_code) (X)->code
) == CALL_INSN)) || (((enum rtx_code) (X)->code) == DEBUG_INSN
)) || (((enum rtx_code) (X)->code) == NOTE) || (((enum rtx_code
) (X)->code) == JUMP_TABLE_DATA) || (((enum rtx_code) (X)->
code) == BARRIER) || (((enum rtx_code) (X)->code) == CODE_LABEL
)) && PREV_INSN (as_a <rtx_insn *> (X)) != nullptr
&& NEXT_INSN (PREV_INSN (as_a <rtx_insn *> (X)
)) == X ? PREV_INSN (as_a <rtx_insn *> (X)) : nullptr)
((INSN_P (X)(((((enum rtx_code) (X)->code) == INSN) || (((enum rtx_code
) (X)->code) == JUMP_INSN) || (((enum rtx_code) (X)->code
) == CALL_INSN)) || (((enum rtx_code) (X)->code) == DEBUG_INSN
))
\
715 || NOTE_P (X)(((enum rtx_code) (X)->code) == NOTE) \
716 || JUMP_TABLE_DATA_P (X)(((enum rtx_code) (X)->code) == JUMP_TABLE_DATA) \
717 || BARRIER_P (X)(((enum rtx_code) (X)->code) == BARRIER) \
718 || LABEL_P (X)(((enum rtx_code) (X)->code) == CODE_LABEL)) \
719 && PREV_INSN (as_a <rtx_insn *> (X)) != NULLnullptr \
720 && NEXT_INSN (PREV_INSN (as_a <rtx_insn *> (X))) == X \
721 ? PREV_INSN (as_a <rtx_insn *> (X)) : NULLnullptr)
722
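
Editorial aside, not part of the rtl.h listing: RTX_PREV only yields a previous insn when X is an insn-chain code and the doubly-linked invariant NEXT_INSN (PREV_INSN (X)) == X holds; otherwise it returns null. A standalone C++ sketch of that consistency check on a hypothetical toy insn chain:

// Illustrative sketch only: the NEXT_INSN (PREV_INSN (X)) == X guard in
// RTX_PREV, modelled on a toy doubly-linked insn list.
#include <iostream>

struct toy_insn {
  int uid;
  toy_insn *prev;
  toy_insn *next;
};

// Return the previous insn only when the chain really is consistent.
toy_insn *toy_rtx_prev (toy_insn *x)
{
  if (x->prev != nullptr && x->prev->next == x)
    return x->prev;
  return nullptr;
}

int main ()
{
  toy_insn a = { 1, nullptr, nullptr };
  toy_insn b = { 2, &a, nullptr };
  a.next = &b;
  toy_insn *p = toy_rtx_prev (&b);
  std::cout << (p ? p->uid : -1) << "\n";   // 1: chain is consistent
  a.next = nullptr;                         // break the invariant
  p = toy_rtx_prev (&b);
  std::cout << (p ? p->uid : -1) << "\n";   // -1: RTX_PREV would give null
  return 0;
}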
723/* Define macros to access the `code' field of the rtx. */
724
725#define GET_CODE(RTX)((enum rtx_code) (RTX)->code) ((enum rtx_code) (RTX)->code)
726#define PUT_CODE(RTX, CODE)((RTX)->code = (CODE)) ((RTX)->code = (CODE))
727
728#define GET_MODE(RTX)((machine_mode) (RTX)->mode) ((machine_mode) (RTX)->mode)
729#define PUT_MODE_RAW(RTX, MODE)((RTX)->mode = (MODE)) ((RTX)->mode = (MODE))
730
731/* RTL vector. These appear inside RTX's when there is a need
732 for a variable number of things. The principle use is inside
733 PARALLEL expressions. */
734
735struct GTY(()) rtvec_def {
736 int num_elem; /* number of elements */
737 rtx GTY ((length ("%h.num_elem"))) elem[1];
738};
739
740#define NULL_RTVEC(rtvec) 0 (rtvec) 0
741
742#define GET_NUM_ELEM(RTVEC)((RTVEC)->num_elem) ((RTVEC)->num_elem)
743#define PUT_NUM_ELEM(RTVEC, NUM)((RTVEC)->num_elem = (NUM)) ((RTVEC)->num_elem = (NUM))
744
745/* Predicate yielding nonzero iff X is an rtx for a register. */
746#define REG_P(X)(((enum rtx_code) (X)->code) == REG) (GET_CODE (X)((enum rtx_code) (X)->code) == REG)
747
748/* Predicate yielding nonzero iff X is an rtx for a memory location. */
749#define MEM_P(X)(((enum rtx_code) (X)->code) == MEM) (GET_CODE (X)((enum rtx_code) (X)->code) == MEM)
750
751#if TARGET_SUPPORTS_WIDE_INT1
752
753/* Match CONST_*s that can represent compile-time constant integers. */
754#define CASE_CONST_SCALAR_INTcase CONST_INT: case CONST_WIDE_INT \
755 case CONST_INT: \
756 case CONST_WIDE_INT
757
758/* Match CONST_*s for which pointer equality corresponds to value
759 equality. */
760#define CASE_CONST_UNIQUEcase CONST_INT: case CONST_WIDE_INT: case CONST_POLY_INT: case
CONST_DOUBLE: case CONST_FIXED
\
761 case CONST_INT: \
762 case CONST_WIDE_INT: \
763 case CONST_POLY_INT: \
764 case CONST_DOUBLE: \
765 case CONST_FIXED
766
767/* Match all CONST_* rtxes. */
768#define CASE_CONST_ANYcase CONST_INT: case CONST_WIDE_INT: case CONST_POLY_INT: case
CONST_DOUBLE: case CONST_FIXED: case CONST_VECTOR
\
769 case CONST_INT: \
770 case CONST_WIDE_INT: \
771 case CONST_POLY_INT: \
772 case CONST_DOUBLE: \
773 case CONST_FIXED: \
774 case CONST_VECTOR
775
776#else
777
778/* Match CONST_*s that can represent compile-time constant integers. */
779#define CASE_CONST_SCALAR_INTcase CONST_INT: case CONST_WIDE_INT \
780 case CONST_INT: \
781 case CONST_DOUBLE
782
783/* Match CONST_*s for which pointer equality corresponds to value
784 equality. */
785#define CASE_CONST_UNIQUEcase CONST_INT: case CONST_WIDE_INT: case CONST_POLY_INT: case
CONST_DOUBLE: case CONST_FIXED
\
786 case CONST_INT: \
787 case CONST_DOUBLE: \
788 case CONST_FIXED
789
790/* Match all CONST_* rtxes. */
791#define CASE_CONST_ANYcase CONST_INT: case CONST_WIDE_INT: case CONST_POLY_INT: case
CONST_DOUBLE: case CONST_FIXED: case CONST_VECTOR
\
792 case CONST_INT: \
793 case CONST_DOUBLE: \
794 case CONST_FIXED: \
795 case CONST_VECTOR
796#endif
797
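
Editorial aside, not part of the rtl.h listing: the CASE_CONST_* macros above expand to a group of case labels, so a single macro keeps every switch in sync with the full set of constant codes for the configuration. A standalone C++ sketch of that usage with a hypothetical TOY_CASE_CONST_ANY macro:

// Illustrative sketch only: grouping case labels behind one macro, the way
// CASE_CONST_ANY is used in switches over rtx codes.
#include <iostream>

enum toy_code { TOY_CONST_INT, TOY_CONST_DOUBLE, TOY_CONST_VECTOR, TOY_REG };

#define TOY_CASE_CONST_ANY \
  case TOY_CONST_INT:      \
  case TOY_CONST_DOUBLE:   \
  case TOY_CONST_VECTOR

bool toy_constant_p (toy_code code)
{
  switch (code)
    {
    TOY_CASE_CONST_ANY:
      return true;
    default:
      return false;
    }
}

int main ()
{
  std::cout << toy_constant_p (TOY_CONST_INT) << " "
            << toy_constant_p (TOY_REG) << "\n";   // 1 0
  return 0;
}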
798/* Predicate yielding nonzero iff X is an rtx for a constant integer. */
799#define CONST_INT_P(X)(((enum rtx_code) (X)->code) == CONST_INT) (GET_CODE (X)((enum rtx_code) (X)->code) == CONST_INT)
800
801/* Predicate yielding nonzero iff X is an rtx for a constant wide integer. */
802#define CONST_WIDE_INT_P(X)(((enum rtx_code) (X)->code) == CONST_WIDE_INT) (GET_CODE (X)((enum rtx_code) (X)->code) == CONST_WIDE_INT)
803
804/* Predicate yielding nonzero iff X is an rtx for a polynomial constant
805 integer. */
806#define CONST_POLY_INT_P(X)(1 > 1 && ((enum rtx_code) (X)->code) == CONST_POLY_INT) \
807 (NUM_POLY_INT_COEFFS1 > 1 && GET_CODE (X)((enum rtx_code) (X)->code) == CONST_POLY_INT)
808
809/* Predicate yielding nonzero iff X is an rtx for a constant fixed-point. */
810#define CONST_FIXED_P(X)(((enum rtx_code) (X)->code) == CONST_FIXED) (GET_CODE (X)((enum rtx_code) (X)->code) == CONST_FIXED)
811
812/* Predicate yielding true iff X is an rtx for a double-int
813 or floating point constant. */
814#define CONST_DOUBLE_P(X)(((enum rtx_code) (X)->code) == CONST_DOUBLE) (GET_CODE (X)((enum rtx_code) (X)->code) == CONST_DOUBLE)
815
816/* Predicate yielding true iff X is an rtx for a double-int. */
817#define CONST_DOUBLE_AS_INT_P(X)(((enum rtx_code) (X)->code) == CONST_DOUBLE && ((machine_mode) (X)->mode) == ((void) 0, E_VOIDmode)) \
818 (GET_CODE (X)((enum rtx_code) (X)->code) == CONST_DOUBLE && GET_MODE (X)((machine_mode) (X)->mode) == VOIDmode((void) 0, E_VOIDmode))
819
820/* Predicate yielding true iff X is an rtx for an integer constant. */
821#if TARGET_SUPPORTS_WIDE_INT1
822#define CONST_SCALAR_INT_P(X)((((enum rtx_code) (X)->code) == CONST_INT) || (((enum rtx_code) (X)->code) == CONST_WIDE_INT)) \
823 (CONST_INT_P (X)(((enum rtx_code) (X)->code) == CONST_INT) || CONST_WIDE_INT_P (X)(((enum rtx_code) (X)->code) == CONST_WIDE_INT))
824#else
825#define CONST_SCALAR_INT_P(X)((((enum rtx_code) (X)->code) == CONST_INT) || (((enum rtx_code) (X)->code) == CONST_WIDE_INT)) \
826 (CONST_INT_P (X)(((enum rtx_code) (X)->code) == CONST_INT) || CONST_DOUBLE_AS_INT_P (X)(((enum rtx_code) (X)->code) == CONST_DOUBLE && ((machine_mode) (X)->mode) == ((void) 0, E_VOIDmode)))
827#endif
828
829/* Predicate yielding true iff X is an rtx for a floating point constant. */
830#define CONST_DOUBLE_AS_FLOAT_P(X)(((enum rtx_code) (X)->code) == CONST_DOUBLE && ((machine_mode) (X)->mode) != ((void) 0, E_VOIDmode)) \
831 (GET_CODE (X)((enum rtx_code) (X)->code) == CONST_DOUBLE && GET_MODE (X)((machine_mode) (X)->mode) != VOIDmode((void) 0, E_VOIDmode))
832
833/* Predicate yielding nonzero iff X is a label insn. */
834#define LABEL_P(X)(((enum rtx_code) (X)->code) == CODE_LABEL) (GET_CODE (X)((enum rtx_code) (X)->code) == CODE_LABEL)
835
836/* Predicate yielding nonzero iff X is a jump insn. */
837#define JUMP_P(X)(((enum rtx_code) (X)->code) == JUMP_INSN) (GET_CODE (X)((enum rtx_code) (X)->code) == JUMP_INSN)
838
839/* Predicate yielding nonzero iff X is a call insn. */
840#define CALL_P(X)(((enum rtx_code) (X)->code) == CALL_INSN) (GET_CODE (X)((enum rtx_code) (X)->code) == CALL_INSN)
841
842/* Predicate yielding nonzero iff X is an insn that cannot jump. */
843#define NONJUMP_INSN_P(X)(((enum rtx_code) (X)->code) == INSN) (GET_CODE (X)((enum rtx_code) (X)->code) == INSN)
844
845/* Predicate yielding nonzero iff X is a debug note/insn. */
846#define DEBUG_INSN_P(X)(((enum rtx_code) (X)->code) == DEBUG_INSN) (GET_CODE (X)((enum rtx_code) (X)->code) == DEBUG_INSN)
847
848/* Predicate yielding nonzero iff X is an insn that is not a debug insn. */
849#define NONDEBUG_INSN_P(X)((((enum rtx_code) (X)->code) == INSN) || (((enum rtx_code) (X)->code) == JUMP_INSN) || (((enum rtx_code) (X)->code) == CALL_INSN)) (NONJUMP_INSN_P (X)(((enum rtx_code) (X)->code) == INSN) || JUMP_P (X)(((enum rtx_code) (X)->code) == JUMP_INSN) || CALL_P (X)(((enum rtx_code) (X)->code) == CALL_INSN))
850
851/* Nonzero if DEBUG_MARKER_INSN_P may possibly hold. */
852#define MAY_HAVE_DEBUG_MARKER_INSNSglobal_options.x_debug_nonbind_markers_p debug_nonbind_markers_pglobal_options.x_debug_nonbind_markers_p
853/* Nonzero if DEBUG_BIND_INSN_P may possibly hold. */
854#define MAY_HAVE_DEBUG_BIND_INSNSglobal_options.x_flag_var_tracking_assignments flag_var_tracking_assignmentsglobal_options.x_flag_var_tracking_assignments
855/* Nonzero if DEBUG_INSN_P may possibly hold. */
856#define MAY_HAVE_DEBUG_INSNS(global_options.x_debug_nonbind_markers_p || global_options.x_flag_var_tracking_assignments) \
857 (MAY_HAVE_DEBUG_MARKER_INSNSglobal_options.x_debug_nonbind_markers_p || MAY_HAVE_DEBUG_BIND_INSNSglobal_options.x_flag_var_tracking_assignments)
858
859/* Predicate yielding nonzero iff X is a real insn. */
860#define INSN_P(X)(((((enum rtx_code) (X)->code) == INSN) || (((enum rtx_code) (X)->code) == JUMP_INSN) || (((enum rtx_code) (X)->code) == CALL_INSN)) || (((enum rtx_code) (X)->code) == DEBUG_INSN)) (NONDEBUG_INSN_P (X)((((enum rtx_code) (X)->code) == INSN) || (((enum rtx_code) (X)->code) == JUMP_INSN) || (((enum rtx_code) (X)->code) == CALL_INSN)) || DEBUG_INSN_P (X)(((enum rtx_code) (X)->code) == DEBUG_INSN))
861
862/* Predicate yielding nonzero iff X is a note insn. */
863#define NOTE_P(X)(((enum rtx_code) (X)->code) == NOTE) (GET_CODE (X)((enum rtx_code) (X)->code) == NOTE)
864
865/* Predicate yielding nonzero iff X is a barrier insn. */
866#define BARRIER_P(X)(((enum rtx_code) (X)->code) == BARRIER) (GET_CODE (X)((enum rtx_code) (X)->code) == BARRIER)
867
868/* Predicate yielding nonzero iff X is data for a jump table. */
869#define JUMP_TABLE_DATA_P(INSN)(((enum rtx_code) (INSN)->code) == JUMP_TABLE_DATA) (GET_CODE (INSN)((enum rtx_code) (INSN)->code) == JUMP_TABLE_DATA)
870
871/* Predicate yielding nonzero iff RTX is a subreg. */
872#define SUBREG_P(RTX)(((enum rtx_code) (RTX)->code) == SUBREG) (GET_CODE (RTX)((enum rtx_code) (RTX)->code) == SUBREG)
873
874/* Predicate yielding true iff RTX is a symbol ref. */
875#define SYMBOL_REF_P(RTX)(((enum rtx_code) (RTX)->code) == SYMBOL_REF) (GET_CODE (RTX)((enum rtx_code) (RTX)->code) == SYMBOL_REF)
876
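/* Illustrative sketch, not part of rtl.h: the insn predicates above are
   typically used while walking the insn chain.  NEXT_INSN is declared
   elsewhere in this header; count_active_insns is a hypothetical
   helper that skips notes, barriers, labels, jump table data and
   debug insns.  */
static int
count_active_insns (rtx_insn *first)
{
  int n = 0;
  for (rtx_insn *insn = first; insn != NULL; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn))
      n++;
  return n;
}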
877template <>
878template <>
879inline bool
880is_a_helper <rtx_insn *>::test (rtx rt)
881{
882 return (INSN_P (rt)(((((enum rtx_code) (rt)->code) == INSN) || (((enum rtx_code) (rt)->code) == JUMP_INSN) || (((enum rtx_code) (rt)->code) == CALL_INSN)) || (((enum rtx_code) (rt)->code) == DEBUG_INSN))
883 || NOTE_P (rt)(((enum rtx_code) (rt)->code) == NOTE)
884 || JUMP_TABLE_DATA_P (rt)(((enum rtx_code) (rt)->code) == JUMP_TABLE_DATA)
885 || BARRIER_P (rt)(((enum rtx_code) (rt)->code) == BARRIER)
886 || LABEL_P (rt)(((enum rtx_code) (rt)->code) == CODE_LABEL));
887}
888
889template <>
890template <>
891inline bool
892is_a_helper <const rtx_insn *>::test (const_rtx rt)
893{
894 return (INSN_P (rt)(((((enum rtx_code) (rt)->code) == INSN) || (((enum rtx_code) (rt)->code) == JUMP_INSN) || (((enum rtx_code) (rt)->code) == CALL_INSN)) || (((enum rtx_code) (rt)->code) == DEBUG_INSN))
895 || NOTE_P (rt)(((enum rtx_code) (rt)->code) == NOTE)
896 || JUMP_TABLE_DATA_P (rt)(((enum rtx_code) (rt)->code) == JUMP_TABLE_DATA)
897 || BARRIER_P (rt)(((enum rtx_code) (rt)->code) == BARRIER)
898 || LABEL_P (rt)(((enum rtx_code) (rt)->code) == CODE_LABEL));
899}
900
901template <>
902template <>
903inline bool
904is_a_helper <rtx_debug_insn *>::test (rtx rt)
905{
906 return DEBUG_INSN_P (rt)(((enum rtx_code) (rt)->code) == DEBUG_INSN);
907}
908
909template <>
910template <>
911inline bool
912is_a_helper <rtx_nonjump_insn *>::test (rtx rt)
913{
914 return NONJUMP_INSN_P (rt)(((enum rtx_code) (rt)->code) == INSN);
915}
916
917template <>
918template <>
919inline bool
920is_a_helper <rtx_jump_insn *>::test (rtx rt)
921{
922 return JUMP_P (rt)(((enum rtx_code) (rt)->code) == JUMP_INSN);
923}
924
925template <>
926template <>
927inline bool
928is_a_helper <rtx_jump_insn *>::test (rtx_insn *insn)
929{
930 return JUMP_P (insn)(((enum rtx_code) (insn)->code) == JUMP_INSN);
931}
932
933template <>
934template <>
935inline bool
936is_a_helper <rtx_call_insn *>::test (rtx rt)
937{
938 return CALL_P (rt)(((enum rtx_code) (rt)->code) == CALL_INSN);
939}
940
941template <>
942template <>
943inline bool
944is_a_helper <rtx_call_insn *>::test (rtx_insn *insn)
945{
946 return CALL_P (insn)(((enum rtx_code) (insn)->code) == CALL_INSN);
947}
948
949template <>
950template <>
951inline bool
952is_a_helper <rtx_jump_table_data *>::test (rtx rt)
953{
954 return JUMP_TABLE_DATA_P (rt)(((enum rtx_code) (rt)->code) == JUMP_TABLE_DATA);
955}
956
957template <>
958template <>
959inline bool
960is_a_helper <rtx_jump_table_data *>::test (rtx_insn *insn)
961{
962 return JUMP_TABLE_DATA_P (insn)(((enum rtx_code) (insn)->code) == JUMP_TABLE_DATA);
963}
964
965template <>
966template <>
967inline bool
968is_a_helper <rtx_barrier *>::test (rtx rt)
969{
970 return BARRIER_P (rt)(((enum rtx_code) (rt)->code) == BARRIER);
971}
972
973template <>
974template <>
975inline bool
976is_a_helper <rtx_code_label *>::test (rtx rt)
977{
978 return LABEL_P (rt)(((enum rtx_code) (rt)->code) == CODE_LABEL);
979}
980
981template <>
982template <>
983inline bool
984is_a_helper <rtx_code_label *>::test (rtx_insn *insn)
985{
986 return LABEL_P (insn)(((enum rtx_code) (insn)->code) == CODE_LABEL);
987}
988
989template <>
990template <>
991inline bool
992is_a_helper <rtx_note *>::test (rtx rt)
993{
994 return NOTE_P (rt)(((enum rtx_code) (rt)->code) == NOTE);
995}
996
997template <>
998template <>
999inline bool
1000is_a_helper <rtx_note *>::test (rtx_insn *insn)
1001{
1002 return NOTE_P (insn)(((enum rtx_code) (insn)->code) == NOTE);
1003}
1004
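/* Illustrative sketch, not part of rtl.h: the is_a_helper
   specializations above are what let the generic is_a/as_a/dyn_cast
   machinery from is-a.h be applied to rtx subclasses.
   label_or_null is a hypothetical helper.  */
static rtx_code_label *
label_or_null (rtx x)
{
  if (rtx_code_label *lab = dyn_cast <rtx_code_label *> (x))
    return lab;		/* LABEL_P (x) held, so the cast is safe.  */
  return NULL;
}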
1005/* Predicate yielding nonzero iff X is a return or simple_return. */
1006#define ANY_RETURN_P(X)(((enum rtx_code) (X)->code) == RETURN || ((enum rtx_code) (X)->code) == SIMPLE_RETURN) \
1007 (GET_CODE (X)((enum rtx_code) (X)->code) == RETURN || GET_CODE (X)((enum rtx_code) (X)->code) == SIMPLE_RETURN)
1008
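/* Illustrative sketch, not part of rtl.h: ANY_RETURN_P is typically
   applied to a jump target, which may be either a code label or a
   (simple_)return rtx.  JUMP_LABEL and NULL_RTX are defined elsewhere
   in this header; jump_to_return_p is a hypothetical helper.  */
static bool
jump_to_return_p (rtx_insn *insn)
{
  return (JUMP_P (insn)
	  && JUMP_LABEL (insn) != NULL_RTX
	  && ANY_RETURN_P (JUMP_LABEL (insn)));
}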
1009/* 1 if X is a unary operator. */
1010
1011#define UNARY_P(X)((rtx_class[(int) (((enum rtx_code) (X)->code))]) == RTX_UNARY) \
1012 (GET_RTX_CLASS (GET_CODE (X))(rtx_class[(int) (((enum rtx_code) (X)->code))]) == RTX_UNARY)
1013
1014/* 1 if X is a binary operator. */
1015
1016#define BINARY_P(X)(((rtx_class[(int) (((enum rtx_code) (X)->code))]) & (~3)) == (RTX_COMPARE & (~3))) \
1017 ((GET_RTX_CLASS (GET_CODE (X))(rtx_class[(int) (((enum rtx_code) (X)->code))]) & RTX_BINARY_MASK(~3)) == RTX_BINARY_RESULT(RTX_COMPARE & (~3)))
1018
1019/* 1 if X is an arithmetic operator. */
1020
1021#define ARITHMETIC_P(X)(((rtx_class[(int) (((enum rtx_code) (X)->code))]) & (~1)) == (RTX_COMM_ARITH & (~1))) \
1022 ((GET_RTX_CLASS (GET_CODE (X))(rtx_class[(int) (((enum rtx_code) (X)->code))]) & RTX_ARITHMETIC_MASK(~1)) \
1023 == RTX_ARITHMETIC_RESULT(RTX_COMM_ARITH & (~1)))
1024
1025/* 1 if X is a commutative arithmetic operator. */
1026
1027#define COMMUTATIVE_ARITH_P(X)((rtx_class[(int) (((enum rtx_code) (X)->code))]) == RTX_COMM_ARITH) \
1028 (GET_RTX_CLASS (GET_CODE (X))(rtx_class[(int) (((enum rtx_code) (X)->code))]) == RTX_COMM_ARITH)
1029
1030/* 1 if X is a commutative arithmetic operator or a comparison operator.
1031 These two are sometimes selected together because it is possible to
1032 swap the two operands. */
1033
1034#define SWAPPABLE_OPERANDS_P(X)((1 << (rtx_class[(int) (((enum rtx_code) (X)->code))])) & ((1 << RTX_COMM_ARITH) | (1 << RTX_COMM_COMPARE) | (1 << RTX_COMPARE))) \
1035 ((1 << GET_RTX_CLASS (GET_CODE (X))(rtx_class[(int) (((enum rtx_code) (X)->code))])) \
1036 & ((1 << RTX_COMM_ARITH) | (1 << RTX_COMM_COMPARE) \
1037 | (1 << RTX_COMPARE)))
1038
1039/* 1 if X is a non-commutative operator. */
1040
1041#define NON_COMMUTATIVE_P(X)(((rtx_class[(int) (((enum rtx_code) (X)->code))]) & (~2)) == (RTX_COMPARE & (~2))) \
1042 ((GET_RTX_CLASS (GET_CODE (X))(rtx_class[(int) (((enum rtx_code) (X)->code))]) & RTX_COMMUTATIVE_MASK(~2)) \
1043 == RTX_NON_COMMUTATIVE_RESULT(RTX_COMPARE & (~2)))
1044
1045/* 1 if X is a commutative operator on integers. */
1046
1047#define COMMUTATIVE_P(X)(((rtx_class[(int) (((enum rtx_code) (X)->code))]) & (~2)) == (RTX_COMM_COMPARE & (~2))) \
1048 ((GET_RTX_CLASS (GET_CODE (X))(rtx_class[(int) (((enum rtx_code) (X)->code))]) & RTX_COMMUTATIVE_MASK(~2)) \
1049 == RTX_COMMUTATIVE_RESULT(RTX_COMM_COMPARE & (~2)))
1050
1051/* 1 if X is a relational operator. */
1052
1053#define COMPARISON_P(X)(((rtx_class[(int) (((enum rtx_code) (X)->code))]) & (~1)) == (RTX_COMPARE & (~1))) \
1054 ((GET_RTX_CLASS (GET_CODE (X))(rtx_class[(int) (((enum rtx_code) (X)->code))]) & RTX_COMPARE_MASK(~1)) == RTX_COMPARE_RESULT(RTX_COMPARE & (~1)))
1055
1056/* 1 if X is a constant value. */
1057
1058#define CONSTANT_P(X)((rtx_class[(int) (((enum rtx_code) (X)->code))]) == RTX_CONST_OBJ) \
1059 (GET_RTX_CLASS (GET_CODE (X))(rtx_class[(int) (((enum rtx_code) (X)->code))]) == RTX_CONST_OBJ)
1060
1061/* 1 if X is a LABEL_REF. */
1062#define LABEL_REF_P(X)(((enum rtx_code) (X)->code) == LABEL_REF) \
1063 (GET_CODE (X)((enum rtx_code) (X)->code) == LABEL_REF)
1064
1065/* 1 if X can be used to represent an object. */
1066#define OBJECT_P(X)(((rtx_class[(int) (((enum rtx_code) (X)->code))]) & (~1)) == (RTX_OBJ & (~1))) \
1067 ((GET_RTX_CLASS (GET_CODE (X))(rtx_class[(int) (((enum rtx_code) (X)->code))]) & RTX_OBJ_MASK(~1)) == RTX_OBJ_RESULT(RTX_OBJ & (~1)))
1068
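/* Illustrative sketch, not part of rtl.h: a typical use of the rtx
   class predicates above is canonicalizing a commutative operation so
   that a constant operand comes second.  For SWAPPABLE_OPERANDS_P
   comparisons the comparison code would also have to be reversed.
   XEXP is defined further down in this header;
   canonicalize_commutative_operands is a hypothetical helper.  */
static void
canonicalize_commutative_operands (rtx x)
{
  if (COMMUTATIVE_ARITH_P (x)
      && CONSTANT_P (XEXP (x, 0))
      && !CONSTANT_P (XEXP (x, 1)))
    {
      rtx tmp = XEXP (x, 0);
      XEXP (x, 0) = XEXP (x, 1);
      XEXP (x, 1) = tmp;
    }
}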
1069/* General accessor macros for accessing the fields of an rtx. */
1070
1071#if defined ENABLE_RTL_CHECKING && (GCC_VERSION(4 * 1000 + 2) >= 2007)
1072/* The star outside the statement expression and the & inside are there
1073 so that N is evaluated only once. */
1074#define RTL_CHECK1(RTX, N, C1)((RTX)->u.fld[N]) __extension__ \
1075(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
1076 const enum rtx_code _code = GET_CODE (_rtx)((enum rtx_code) (_rtx)->code); \
1077 if (_n < 0 || _n >= GET_RTX_LENGTH (_code)(rtx_length[(int) (_code)])) \
1078 rtl_check_failed_bounds (_rtx, _n, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1078, \
1079 __FUNCTION__); \
1080 if (GET_RTX_FORMAT (_code)(rtx_format[(int) (_code)])[_n] != C1) \
1081 rtl_check_failed_type1 (_rtx, _n, C1, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1081, \
1082 __FUNCTION__); \
1083 &_rtx->u.fld[_n]; }))
1084
1085#define RTL_CHECK2(RTX, N, C1, C2)((RTX)->u.fld[N]) __extension__ \
1086(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
1087 const enum rtx_code _code = GET_CODE (_rtx)((enum rtx_code) (_rtx)->code); \
1088 if (_n < 0 || _n >= GET_RTX_LENGTH (_code)(rtx_length[(int) (_code)])) \
1089 rtl_check_failed_bounds (_rtx, _n, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1089, \
1090 __FUNCTION__); \
1091 if (GET_RTX_FORMAT (_code)(rtx_format[(int) (_code)])[_n] != C1 \
1092 && GET_RTX_FORMAT (_code)(rtx_format[(int) (_code)])[_n] != C2) \
1093 rtl_check_failed_type2 (_rtx, _n, C1, C2, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1093, \
1094 __FUNCTION__); \
1095 &_rtx->u.fld[_n]; }))
1096
1097#define RTL_CHECKC1(RTX, N, C)((RTX)->u.fld[N]) __extension__ \
1098(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
1099 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != (C)) \
1100 rtl_check_failed_code1 (_rtx, (C), __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1100, \
1101 __FUNCTION__); \
1102 &_rtx->u.fld[_n]; }))
1103
1104#define RTL_CHECKC2(RTX, N, C1, C2)((RTX)->u.fld[N]) __extension__ \
1105(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
1106 const enum rtx_code _code = GET_CODE (_rtx)((enum rtx_code) (_rtx)->code); \
1107 if (_code != (C1) && _code != (C2)) \
1108 rtl_check_failed_code2 (_rtx, (C1), (C2), __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1108, \
1109 __FUNCTION__); \
1110 &_rtx->u.fld[_n]; }))
1111
1112#define RTL_CHECKC3(RTX, N, C1, C2, C3)((RTX)->u.fld[N]) __extension__ \
1113(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
1114 const enum rtx_code _code = GET_CODE (_rtx)((enum rtx_code) (_rtx)->code); \
1115 if (_code != (C1) && _code != (C2) && _code != (C3)) \
1116 rtl_check_failed_code3 (_rtx, (C1), (C2), (C3), __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", \
1117 __LINE__1117, __FUNCTION__); \
1118 &_rtx->u.fld[_n]; }))
1119
1120#define RTVEC_ELT(RTVEC, I)((RTVEC)->elem[I]) __extension__ \
1121(*({ __typeof (RTVEC) const _rtvec = (RTVEC); const int _i = (I); \
1122 if (_i < 0 || _i >= GET_NUM_ELEM (_rtvec)((_rtvec)->num_elem)) \
1123 rtvec_check_failed_bounds (_rtvec, _i, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1123, \
1124 __FUNCTION__); \
1125 &_rtvec->elem[_i]; }))
1126
1127#define XWINT(RTX, N)((RTX)->u.hwint[N]) __extension__ \
1128(*({ __typeof (RTX) const _rtx = (RTX); const int _n = (N); \
1129 const enum rtx_code _code = GET_CODE (_rtx)((enum rtx_code) (_rtx)->code); \
1130 if (_n < 0 || _n >= GET_RTX_LENGTH (_code)(rtx_length[(int) (_code)])) \
1131 rtl_check_failed_bounds (_rtx, _n, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1131, \
1132 __FUNCTION__); \
1133 if (GET_RTX_FORMAT (_code)(rtx_format[(int) (_code)])[_n] != 'w') \
1134 rtl_check_failed_type1 (_rtx, _n, 'w', __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1134, \
1135 __FUNCTION__); \
1136 &_rtx->u.hwint[_n]; }))
1137
1138#define CWI_ELT(RTX, I)((RTX)->u.hwiv.elem[I]) __extension__ \
1139(*({ __typeof (RTX) const _cwi = (RTX); \
1140 int _max = CWI_GET_NUM_ELEM (_cwi)((int)__extension__ ({ __typeof ((_cwi)) const _rtx = ((_cwi)); if (((enum rtx_code) (_rtx)->code) != CONST_WIDE_INT) rtl_check_failed_flag ("CWI_GET_NUM_ELEM", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", 1140, __FUNCTION__); _rtx; })->u2.num_elem); \
1141 const int _i = (I); \
1142 if (_i < 0 || _i >= _max) \
1143 cwi_check_failed_bounds (_cwi, _i, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1143, \
1144 __FUNCTION__); \
1145 &_cwi->u.hwiv.elem[_i]; }))
1146
1147#define XCWINT(RTX, N, C)((RTX)->u.hwint[N]) __extension__ \
1148(*({ __typeof (RTX) const _rtx = (RTX); \
1149 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != (C)) \
1150 rtl_check_failed_code1 (_rtx, (C), __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1150, \
1151 __FUNCTION__); \
1152 &_rtx->u.hwint[N]; }))
1153
1154#define XCMWINT(RTX, N, C, M)((RTX)->u.hwint[N]) __extension__ \
1155(*({ __typeof (RTX) const _rtx = (RTX); \
1156 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != (C) || GET_MODE (_rtx)((machine_mode) (_rtx)->mode) != (M)) \
1157 rtl_check_failed_code_mode (_rtx, (C), (M), false, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", \
1158 __LINE__1158, __FUNCTION__); \
1159 &_rtx->u.hwint[N]; }))
1160
1161#define XCNMPRV(RTX, C, M)(&(RTX)->u.rv) __extension__ \
1162({ __typeof (RTX) const _rtx = (RTX); \
1163 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != (C) || GET_MODE (_rtx)((machine_mode) (_rtx)->mode) == (M)) \
1164 rtl_check_failed_code_mode (_rtx, (C), (M), true, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", \
1165 __LINE__1165, __FUNCTION__); \
1166 &_rtx->u.rv; })
1167
1168#define XCNMPFV(RTX, C, M)(&(RTX)->u.fv) __extension__ \
1169({ __typeof (RTX) const _rtx = (RTX); \
1170 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != (C) || GET_MODE (_rtx)((machine_mode) (_rtx)->mode) == (M)) \
1171 rtl_check_failed_code_mode (_rtx, (C), (M), true, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", \
1172 __LINE__1172, __FUNCTION__); \
1173 &_rtx->u.fv; })
1174
1175#define REG_CHECK(RTX)(&(RTX)->u.reg) __extension__ \
1176({ __typeof (RTX) const _rtx = (RTX); \
1177 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != REG) \
1178 rtl_check_failed_code1 (_rtx, REG, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1178, \
1179 __FUNCTION__); \
1180 &_rtx->u.reg; })
1181
1182#define BLOCK_SYMBOL_CHECK(RTX)(&(RTX)->u.block_sym) __extension__ \
1183({ __typeof (RTX) const _symbol = (RTX); \
1184 const unsigned int flags = SYMBOL_REF_FLAGS (_symbol)(__extension__ ({ __typeof ((_symbol)) const _rtx = ((_symbol)); if (((enum rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag ("SYMBOL_REF_FLAGS", _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", 1184, __FUNCTION__); _rtx; }) ->u2.symbol_ref_flags); \
1185 if ((flags & SYMBOL_FLAG_HAS_BLOCK_INFO(1 << 7)) == 0) \
1186 rtl_check_failed_block_symbol (__FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1186, \
1187 __FUNCTION__); \
1188 &_symbol->u.block_sym; })
1189
1190#define HWIVEC_CHECK(RTX,C)(&(RTX)->u.hwiv) __extension__ \
1191({ __typeof (RTX) const _symbol = (RTX); \
1192 RTL_CHECKC1 (_symbol, 0, C)((_symbol)->u.fld[0]); \
1193 &_symbol->u.hwiv; })
1194
1195extern void rtl_check_failed_bounds (const_rtx, int, const char *, int,
1196 const char *)
1197 ATTRIBUTE_NORETURN__attribute__ ((__noreturn__)) ATTRIBUTE_COLD;
1198extern void rtl_check_failed_type1 (const_rtx, int, int, const char *, int,
1199 const char *)
1200 ATTRIBUTE_NORETURN__attribute__ ((__noreturn__)) ATTRIBUTE_COLD;
1201extern void rtl_check_failed_type2 (const_rtx, int, int, int, const char *,
1202 int, const char *)
1203 ATTRIBUTE_NORETURN__attribute__ ((__noreturn__)) ATTRIBUTE_COLD;
1204extern void rtl_check_failed_code1 (const_rtx, enum rtx_code, const char *,
1205 int, const char *)
1206 ATTRIBUTE_NORETURN__attribute__ ((__noreturn__)) ATTRIBUTE_COLD;
1207extern void rtl_check_failed_code2 (const_rtx, enum rtx_code, enum rtx_code,
1208 const char *, int, const char *)
1209 ATTRIBUTE_NORETURN__attribute__ ((__noreturn__)) ATTRIBUTE_COLD;
1210extern void rtl_check_failed_code3 (const_rtx, enum rtx_code, enum rtx_code,
1211 enum rtx_code, const char *, int,
1212 const char *)
1213 ATTRIBUTE_NORETURN__attribute__ ((__noreturn__)) ATTRIBUTE_COLD;
1214extern void rtl_check_failed_code_mode (const_rtx, enum rtx_code, machine_mode,
1215 bool, const char *, int, const char *)
1216 ATTRIBUTE_NORETURN__attribute__ ((__noreturn__)) ATTRIBUTE_COLD;
1217extern void rtl_check_failed_block_symbol (const char *, int, const char *)
1218 ATTRIBUTE_NORETURN__attribute__ ((__noreturn__)) ATTRIBUTE_COLD;
1219extern void cwi_check_failed_bounds (const_rtx, int, const char *, int,
1220 const char *)
1221 ATTRIBUTE_NORETURN__attribute__ ((__noreturn__)) ATTRIBUTE_COLD;
1222extern void rtvec_check_failed_bounds (const_rtvec, int, const char *, int,
1223 const char *)
1224 ATTRIBUTE_NORETURN__attribute__ ((__noreturn__)) ATTRIBUTE_COLD;
1225
1226#else /* not ENABLE_RTL_CHECKING */
1227
1228#define RTL_CHECK1(RTX, N, C1)((RTX)->u.fld[N]) ((RTX)->u.fld[N])
1229#define RTL_CHECK2(RTX, N, C1, C2)((RTX)->u.fld[N]) ((RTX)->u.fld[N])
1230#define RTL_CHECKC1(RTX, N, C)((RTX)->u.fld[N]) ((RTX)->u.fld[N])
1231#define RTL_CHECKC2(RTX, N, C1, C2)((RTX)->u.fld[N]) ((RTX)->u.fld[N])
1232#define RTL_CHECKC3(RTX, N, C1, C2, C3)((RTX)->u.fld[N]) ((RTX)->u.fld[N])
1233#define RTVEC_ELT(RTVEC, I)((RTVEC)->elem[I]) ((RTVEC)->elem[I])
1234#define XWINT(RTX, N)((RTX)->u.hwint[N]) ((RTX)->u.hwint[N])
1235#define CWI_ELT(RTX, I)((RTX)->u.hwiv.elem[I]) ((RTX)->u.hwiv.elem[I])
1236#define XCWINT(RTX, N, C)((RTX)->u.hwint[N]) ((RTX)->u.hwint[N])
1237#define XCMWINT(RTX, N, C, M)((RTX)->u.hwint[N]) ((RTX)->u.hwint[N])
1238#define XCNMWINT(RTX, N, C, M)((RTX)->u.hwint[N]) ((RTX)->u.hwint[N])
1239#define XCNMPRV(RTX, C, M)(&(RTX)->u.rv) (&(RTX)->u.rv)
1240#define XCNMPFV(RTX, C, M)(&(RTX)->u.fv) (&(RTX)->u.fv)
1241#define REG_CHECK(RTX)(&(RTX)->u.reg) (&(RTX)->u.reg)
1242#define BLOCK_SYMBOL_CHECK(RTX)(&(RTX)->u.block_sym) (&(RTX)->u.block_sym)
1243#define HWIVEC_CHECK(RTX,C)(&(RTX)->u.hwiv) (&(RTX)->u.hwiv)
1244
1245#endif
1246
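/* Illustrative sketch, not part of rtl.h: with ENABLE_RTL_CHECKING
   defined, the accessors above verify the code and format of the rtx
   at run time (for example XCWINT calls rtl_check_failed_code1 when
   the rtx is not the expected code); without it they reduce to plain
   field accesses.  const_int_value is a hypothetical helper mirroring
   the INTVAL macro defined further down in this header.  */
static HOST_WIDE_INT
const_int_value (const_rtx x)
{
  return XCWINT (x, 0, CONST_INT);
}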
1247/* General accessor macros for accessing the flags of an rtx. */
1248
1249/* Access an individual rtx flag, with no checking of any kind. */
1250#define RTX_FLAG(RTX, FLAG)((RTX)->FLAG) ((RTX)->FLAG)
1251
1252#if defined ENABLE_RTL_FLAG_CHECKING1 && (GCC_VERSION(4 * 1000 + 2) >= 2007)
1253#define RTL_FLAG_CHECK1(NAME, RTX, C1)__extension__ ({ __typeof (RTX) const _rtx = (RTX); if (((enum rtx_code) (_rtx)->code) != C1) rtl_check_failed_flag (NAME, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", 1253, __FUNCTION__); _rtx; }) __extension__ \
1254({ __typeof (RTX) const _rtx = (RTX); \
1255 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C1) \
1256 rtl_check_failed_flag (NAME, _rtx, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1256, \
1257 __FUNCTION__); \
1258 _rtx; })
1259
1260#define RTL_FLAG_CHECK2(NAME, RTX, C1, C2)__extension__ ({ __typeof (RTX) const _rtx = (RTX); if (((enum rtx_code) (_rtx)->code) != C1 && ((enum rtx_code) (_rtx)->code) != C2) rtl_check_failed_flag (NAME,_rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", 1260, __FUNCTION__); _rtx; }) __extension__ \
1261({ __typeof (RTX) const _rtx = (RTX); \
1262 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C1 && GET_CODE(_rtx)((enum rtx_code) (_rtx)->code) != C2) \
1263 rtl_check_failed_flag (NAME,_rtx, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1263, \
1264 __FUNCTION__); \
1265 _rtx; })
1266
1267#define RTL_FLAG_CHECK3(NAME, RTX, C1, C2, C3)__extension__ ({ __typeof (RTX) const _rtx = (RTX); if (((enum rtx_code) (_rtx)->code) != C1 && ((enum rtx_code) (_rtx)->code) != C2 && ((enum rtx_code) (_rtx)->code) != C3) rtl_check_failed_flag (NAME, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", 1267, __FUNCTION__); _rtx; }) __extension__ \
1268({ __typeof (RTX) const _rtx = (RTX); \
1269 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C1 && GET_CODE(_rtx)((enum rtx_code) (_rtx)->code) != C2 \
1270 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C3) \
1271 rtl_check_failed_flag (NAME, _rtx, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1271, \
1272 __FUNCTION__); \
1273 _rtx; })
1274
1275#define RTL_FLAG_CHECK4(NAME, RTX, C1, C2, C3, C4)__extension__ ({ __typeof (RTX) const _rtx = (RTX); if (((enum rtx_code) (_rtx)->code) != C1 && ((enum rtx_code) (_rtx)->code) != C2 && ((enum rtx_code) (_rtx)->code) != C3 && ((enum rtx_code) (_rtx)->code) != C4) rtl_check_failed_flag (NAME, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", 1275, __FUNCTION__); _rtx; }) __extension__ \
1276({ __typeof (RTX) const _rtx = (RTX); \
1277 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C1 && GET_CODE(_rtx)((enum rtx_code) (_rtx)->code) != C2 \
1278 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C3 && GET_CODE(_rtx)((enum rtx_code) (_rtx)->code) != C4) \
1279 rtl_check_failed_flag (NAME, _rtx, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1279, \
1280 __FUNCTION__); \
1281 _rtx; })
1282
1283#define RTL_FLAG_CHECK5(NAME, RTX, C1, C2, C3, C4, C5)__extension__ ({ __typeof (RTX) const _rtx = (RTX); if (((enum rtx_code) (_rtx)->code) != C1 && ((enum rtx_code) (_rtx)->code) != C2 && ((enum rtx_code) (_rtx)->code) != C3 && ((enum rtx_code) (_rtx)->code) != C4 && ((enum rtx_code) (_rtx)->code) != C5) rtl_check_failed_flag (NAME, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", 1283, __FUNCTION__); _rtx; }) __extension__ \
1284({ __typeof (RTX) const _rtx = (RTX); \
1285 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C1 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C2 \
1286 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C3 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C4 \
1287 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C5) \
1288 rtl_check_failed_flag (NAME, _rtx, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1288, \
1289 __FUNCTION__); \
1290 _rtx; })
1291
1292#define RTL_FLAG_CHECK6(NAME, RTX, C1, C2, C3, C4, C5, C6)__extension__ ({ __typeof (RTX) const _rtx = (RTX); if (((enum rtx_code) (_rtx)->code) != C1 && ((enum rtx_code) (_rtx)->code) != C2 && ((enum rtx_code) (_rtx)->code) != C3 && ((enum rtx_code) (_rtx)->code) != C4 && ((enum rtx_code) (_rtx)->code) != C5 && ((enum rtx_code) (_rtx)->code) != C6) rtl_check_failed_flag (NAME,_rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", 1292, __FUNCTION__); _rtx; }) \
1293 __extension__ \
1294({ __typeof (RTX) const _rtx = (RTX); \
1295 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C1 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C2 \
1296 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C3 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C4 \
1297 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C5 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C6) \
1298 rtl_check_failed_flag (NAME,_rtx, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1298, \
1299 __FUNCTION__); \
1300 _rtx; })
1301
1302#define RTL_FLAG_CHECK7(NAME, RTX, C1, C2, C3, C4, C5, C6, C7)__extension__ ({ __typeof (RTX) const _rtx = (RTX); if (((enum rtx_code) (_rtx)->code) != C1 && ((enum rtx_code) (_rtx)->code) != C2 && ((enum rtx_code) (_rtx)->code) != C3 && ((enum rtx_code) (_rtx)->code) != C4 && ((enum rtx_code) (_rtx)->code) != C5 && ((enum rtx_code) (_rtx)->code) != C6 && ((enum rtx_code) (_rtx)->code) != C7) rtl_check_failed_flag (NAME, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", 1302, __FUNCTION__); _rtx; }) \
1303 __extension__ \
1304({ __typeof (RTX) const _rtx = (RTX); \
1305 if (GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C1 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C2 \
1306 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C3 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C4 \
1307 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C5 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C6 \
1308 && GET_CODE (_rtx)((enum rtx_code) (_rtx)->code) != C7) \
1309 rtl_check_failed_flag (NAME, _rtx, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1309, \
1310 __FUNCTION__); \
1311 _rtx; })
1312
1313#define RTL_INSN_CHAIN_FLAG_CHECK(NAME, RTX)__extension__ ({ __typeof (RTX) const _rtx = (RTX); if (!((unsigned long) (((enum rtx_code) (_rtx)->code)) - (unsigned long) (DEBUG_INSN) <= (unsigned long) (NOTE) - (unsigned long) (DEBUG_INSN))) rtl_check_failed_flag (NAME, _rtx, "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", 1313, __FUNCTION__); _rtx; }) \
1314 __extension__ \
1315({ __typeof (RTX) const _rtx = (RTX); \
1316 if (!INSN_CHAIN_CODE_P (GET_CODE (_rtx))((unsigned long) (((enum rtx_code) (_rtx)->code)) - (unsigned long) (DEBUG_INSN) <= (unsigned long) (NOTE) - (unsigned long) (DEBUG_INSN))) \
1317 rtl_check_failed_flag (NAME, _rtx, __FILE__"/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtl.h", __LINE__1317, \
1318 __FUNCTION__); \
1319