/* This file is a part of MIR project.
Copyright (C) 2018-2024 Vladimir Makarov <[email protected]>.
*/
/* Optimization pipeline (passes in order):

     MIR --> Simplify --> Build CFG --> Build SSA --> Address Transformation
         --> Block Cloning
         --> Global Value Numbering, Constant Propagation, Redundant Load Elimination
         --> Copy Propagation --> Dead Store Elimination --> Dead Code Elimination
         --> Loop Invariant Motion --> Register Pressure Relief --> SSA Combine
         --> Out of SSA --> Jump Opts --> Machinize --> Build Live Info
         --> Build Register Conflicts --> Coalesce --> Register Allocator
         --> Combine --> DCE --> Generate Machine Insns

Simplify: Lowering MIR (in mir.c). Always.
Build CFG: Building Control Flow Graph (basic blocks and CFG edges). Always.
Build SSA: Building Static Single Assignment form by adding phi nodes and SSA edges
(for -O2 and above).
Address Transformation: Optional pass to remove or change ADDR insns (for -O2 and above).
Block Cloning: Cloning insns and BBs to improve hot path optimization opportunities
(for -O2 and above).
Global Value Numbering: Removing redundant insns through GVN. This includes constant
propagation and redundant load elimination (for -O2 and above).
Copy Propagation: SSA copy propagation and removing redundant extension insns
(for -O2 and above).
Dead store elimination: Removing redundant stores (for -O2 and above).
Dead code elimination: Removing insns with unused outputs (for -O2 and above).
Loop invariant motion (LICM): Moving invariant insns out of loops (for -O2 and above).
Pressure relief: Moving insns to decrease register pressure (for -O2 and above).
SSA combine: Combining addresses and cmp and branch pairs (for -O2 and above).
Out of SSA: Making conventional SSA and removing phi nodes and SSA edges (for -O2 and above).
Jump optimizations: Different optimizations on jumps and branches (for -O2 and above).
Machinize: Machine-dependent code (e.g. in mir-gen-x86_64.c)
transforming MIR for the call ABI, 2-op insns, etc. Always.
Building Live Info: Calculating live in and live out for the basic blocks. Always.
Build Register Conflicts: Building the conflict matrix for registers involved in moves;
it is used for register coalescing.
Coalesce: Aggressive register coalescing.
Register Allocator (RA): Priority-based linear scan RA (always) with live range splitting
(for -O2 and above).
Combine: Code selection by merging data-dependent insns into one (for -O1 and above).
Dead code elimination (DCE): Removing insns with unused outputs (for -O1 and above).
Generate machine insns: Machine-dependent code (e.g. in mir-gen-x86_64.c) creating
machine insns. Always.
-O0 and -O1 are 2-3 times faster than -O2 but generate considerably slower code.
Terminology:
reg - MIR (pseudo-)register (their numbers appear in MIR_OP_VAR and MIR_OP_VAR_MEM operands
and are greater than MAX_HARD_REG)
hard reg - MIR hard register (their numbers appear in MIR_OP_VAR and MIR_OP_VAR_MEM operands
and are less than or equal to MAX_HARD_REG)
var - a pseudo or hard register (MIR_NON_VAR means no var)
loc - a hard register or stack location (stack slot numbers start with MAX_HARD_REG + 1).
Memory aliasing rules:
* Memory operands have alias attributes which are used to recognize aliased memory
* Memory has an nloc attribute. Memory operands with the same nloc always refer to the same
memory, although operands with different nlocs may still refer to the same memory. Memory
with the same nloc has the same alias attributes
* Memory considered aliased according to the alias attributes can still be recognized as
non-aliased by using the alloca flags described below
* Memory can have the flags 'must alloca' and 'may alloca'. 'Must alloca' always goes
with 'may alloca'. 'Must alloca' means the memory is guaranteed to be allocated only by an
alloca in the func. 'May alloca' means it is not excluded that the memory is allocated by
an alloca
* Memory with the 'must alloca' flag can have a disp attribute. Using the disp attribute we
can determine that two 'must alloca' memory operands refer to the same memory
*/
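/* Illustrative note (not from the original source): e.g. two memory operands that both carry
   the 'must alloca' flag, have the same base def insn, and have known equal disps refer to
   the same memory; without 'must alloca', only an equal nloc guarantees that two operands
   refer to the same location. */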
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <assert.h>
#include "mir-code-alloc.h"
#include "mir-alloc.h"
#define gen_assert(cond) assert (cond)
typedef struct gen_ctx *gen_ctx_t;
static void util_error (gen_ctx_t gen_ctx, const char *message);
static void varr_error (const char *message) { util_error (NULL, message); }
#define MIR_VARR_ERROR varr_error
#include "mir.h"
#include "mir-dlist.h"
#include "mir-bitmap.h"
#include "mir-htab.h"
#include "mir-hash.h"
#include "mir-gen.h"
/* Functions used by target dependent code: */
static MIR_alloc_t gen_alloc (gen_ctx_t gen_ctx);
static void *gen_malloc (gen_ctx_t gen_ctx, size_t size);
static void gen_free (gen_ctx_t gen_ctx, void *ptr);
static MIR_reg_t gen_new_temp_reg (gen_ctx_t gen_ctx, MIR_type_t type, MIR_func_t func);
static int gen_nested_loop_label_p (gen_ctx_t gen_ctx, MIR_insn_t insn);
static void set_label_disp (gen_ctx_t gen_ctx, MIR_insn_t insn, size_t disp);
static size_t get_label_disp (gen_ctx_t gen_ctx, MIR_insn_t insn);
static void create_new_bb_insns (gen_ctx_t gen_ctx, MIR_insn_t before, MIR_insn_t after,
MIR_insn_t insn_for_bb);
static void gen_delete_insn (gen_ctx_t gen_ctx, MIR_insn_t insn);
static void gen_add_insn_before (gen_ctx_t gen_ctx, MIR_insn_t before, MIR_insn_t insn);
static void gen_add_insn_after (gen_ctx_t gen_ctx, MIR_insn_t after, MIR_insn_t insn);
static void setup_call_hard_reg_args (gen_ctx_t gen_ctx, MIR_insn_t call_insn, MIR_reg_t hard_reg);
static uint64_t get_ref_value (gen_ctx_t gen_ctx, const MIR_op_t *ref_op);
static void gen_setup_lrefs (gen_ctx_t gen_ctx, uint8_t *func_code);
static int64_t gen_int_log2 (int64_t i);
#define SWAP(v1, v2, temp) \
do { \
temp = v1; \
v1 = v2; \
v2 = temp; \
} while (0)
#ifndef MIR_GEN_CALL_TRACE
#define MIR_GEN_CALL_TRACE 0
#endif
#if MIR_NO_GEN_DEBUG
#define DEBUG(level, code)
#else
#define DEBUG(level, code) \
{ \
if (debug_file != NULL && debug_level >= level) code; \
}
#endif
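/* Usage illustration (not a new definition): the statement block runs only when debug_file is
   set and debug_level >= level, e.g. DEBUG (2, { fprintf (debug_file, "pass start\n"); });
   when MIR_NO_GEN_DEBUG is defined, the block is compiled out entirely. */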
typedef struct func_cfg *func_cfg_t;
struct target_ctx;
struct data_flow_ctx;
struct ssa_ctx;
struct gvn_ctx;
struct lr_ctx;
struct coalesce_ctx;
struct ra_ctx;
struct combine_ctx;
typedef struct loop_node *loop_node_t;
DEF_VARR (loop_node_t);
typedef struct dead_var *dead_var_t;
DEF_DLIST_LINK (dead_var_t);
struct dead_var {
MIR_reg_t var;
DLIST_LINK (dead_var_t) dead_var_link;
};
DEF_DLIST (dead_var_t, dead_var_link);
typedef struct bb_insn *bb_insn_t;
DEF_VARR (bb_insn_t);
typedef struct target_bb_version *target_bb_version_t;
DEF_VARR (target_bb_version_t);
typedef void *void_ptr_t;
DEF_VARR (void_ptr_t);
typedef struct {
unsigned char alloca_flag;
unsigned char disp_def_p; /* can be true only for MUST_ALLOCA */
MIR_type_t type; /* memory type */
MIR_alias_t alias, nonalias; /* memory aliases */
MIR_insn_t def_insn; /* base def insn: its value + disp form address */
int64_t disp; /* defined only when disp_def_p, otherwise disp is unknown */
} mem_attr_t;
DEF_VARR (mem_attr_t);
typedef struct spot_attr {
uint32_t spot, prop;
MIR_op_t *mem_ref; /* ref for memory if the spot is memory, NULL otherwise */
} spot_attr_t;
DEF_VARR (spot_attr_t);
DEF_VARR (MIR_op_t);
DEF_VARR (MIR_insn_t);
struct gen_ctx {
MIR_context_t ctx;
unsigned optimize_level; /* 0:fast gen; 1:RA+combiner; 2: +GVN/CCP (default); >=3: everything */
MIR_item_t curr_func_item;
#if !MIR_NO_GEN_DEBUG
FILE *debug_file;
int debug_level;
#endif
VARR (void_ptr_t) * to_free;
int addr_insn_p; /* true if we have address insns in the input func */
bitmap_t tied_regs; /* regs tied to hard reg */
bitmap_t addr_regs; /* regs in addr insns as 2nd op */
bitmap_t insn_to_consider, temp_bitmap, temp_bitmap2, temp_bitmap3;
bitmap_t call_used_hard_regs[MIR_T_BOUND];
bitmap_t func_used_hard_regs; /* before prolog: used hard regs except global var hard regs */
func_cfg_t curr_cfg;
uint32_t curr_bb_index, curr_loop_node_index;
DLIST (dead_var_t) free_dead_vars;
unsigned long long overall_bbs_num, overall_gen_bbs_num;
struct target_ctx *target_ctx;
struct data_flow_ctx *data_flow_ctx;
struct ssa_ctx *ssa_ctx;
struct gvn_ctx *gvn_ctx;
struct lr_ctx *lr_ctx;
struct coalesce_ctx *coalesce_ctx;
struct ra_ctx *ra_ctx;
struct combine_ctx *combine_ctx;
VARR (MIR_op_t) * temp_ops;
VARR (MIR_insn_t) * temp_insns, *temp_insns2;
VARR (bb_insn_t) * temp_bb_insns, *temp_bb_insns2;
VARR (loop_node_t) * loop_nodes, *queue_nodes, *loop_entries; /* used in building loop tree */
/* true when alloca memory escapes by assigning alloca address to memory: */
unsigned char full_escape_p;
VARR (mem_attr_t) * mem_attrs; /* nloc (> 0) => mem attributes */
int max_int_hard_regs, max_fp_hard_regs;
/* Number of stack slots for variables. Some variables can take several slots and can be aligned. */
size_t func_stack_slots_num;
VARR (target_bb_version_t) * target_succ_bb_versions;
VARR (void_ptr_t) * succ_bb_addrs;
void *bb_wrapper; /* to jump to lazy basic block generation */
VARR (spot_attr_t) * spot2attr; /* map: spot number -> spot_attr */
VARR (spot_attr_t) * spot_attrs; /* spot attrs with only non-zero properties */
};
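/* The macros below let the rest of the file access gen_ctx fields as if they were globals;
   every function that uses them must have a gen_ctx variable (usually a parameter) in scope. */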
#define optimize_level gen_ctx->optimize_level
#define curr_func_item gen_ctx->curr_func_item
#define debug_file gen_ctx->debug_file
#define debug_level gen_ctx->debug_level
#define to_free gen_ctx->to_free
#define addr_insn_p gen_ctx->addr_insn_p
#define tied_regs gen_ctx->tied_regs
#define addr_regs gen_ctx->addr_regs
#define insn_to_consider gen_ctx->insn_to_consider
#define temp_bitmap gen_ctx->temp_bitmap
#define temp_bitmap2 gen_ctx->temp_bitmap2
#define temp_bitmap3 gen_ctx->temp_bitmap3
#define call_used_hard_regs gen_ctx->call_used_hard_regs
#define func_used_hard_regs gen_ctx->func_used_hard_regs
#define curr_cfg gen_ctx->curr_cfg
#define curr_bb_index gen_ctx->curr_bb_index
#define curr_loop_node_index gen_ctx->curr_loop_node_index
#define full_escape_p gen_ctx->full_escape_p
#define mem_attrs gen_ctx->mem_attrs
#define free_dead_vars gen_ctx->free_dead_vars
#define overall_bbs_num gen_ctx->overall_bbs_num
#define overall_gen_bbs_num gen_ctx->overall_gen_bbs_num
#define temp_ops gen_ctx->temp_ops
#define temp_insns gen_ctx->temp_insns
#define temp_insns2 gen_ctx->temp_insns2
#define temp_bb_insns gen_ctx->temp_bb_insns
#define temp_bb_insns2 gen_ctx->temp_bb_insns2
#define loop_nodes gen_ctx->loop_nodes
#define queue_nodes gen_ctx->queue_nodes
#define loop_entries gen_ctx->loop_entries
#define max_int_hard_regs gen_ctx->max_int_hard_regs
#define max_fp_hard_regs gen_ctx->max_fp_hard_regs
#define func_stack_slots_num gen_ctx->func_stack_slots_num
#define target_succ_bb_versions gen_ctx->target_succ_bb_versions
#define succ_bb_addrs gen_ctx->succ_bb_addrs
#define bb_wrapper gen_ctx->bb_wrapper
#define spot_attrs gen_ctx->spot_attrs
#define spot2attr gen_ctx->spot2attr
#define LOOP_COST_FACTOR 5
typedef struct bb_version *bb_version_t;
struct func_or_bb {
/* full_p is used only when func_p is set and means generating machine code for the full func */
char func_p, full_p;
union {
MIR_item_t func_item;
bb_version_t bb_version;
} u;
};
typedef struct func_or_bb func_or_bb_t;
DEF_VARR (func_or_bb_t);
static inline gen_ctx_t *gen_ctx_loc (MIR_context_t ctx) { return (gen_ctx_t *) ctx; }
DEF_VARR (int);
DEF_VARR (uint8_t);
DEF_VARR (uint64_t);
DEF_VARR (MIR_code_reloc_t);
#if defined(__x86_64__) || defined(_M_AMD64)
#include "mir-gen-x86_64.c"
#elif defined(__aarch64__)
#include "mir-gen-aarch64.c"
#elif defined(__PPC64__)
#include "mir-gen-ppc64.c"
#elif defined(__s390x__)
#include "mir-gen-s390x.c"
#elif defined(__riscv)
#if __riscv_xlen != 64 || __riscv_flen < 64 || !__riscv_float_abi_double || !__riscv_mul \
|| !__riscv_div || !__riscv_compressed
#error "only 64-bit RISCV supported (at least rv64imafd)"
#endif
#if __riscv_flen == 128
#error "RISCV 128-bit floats (Q set) is not supported"
#endif
#include "mir-gen-riscv64.c"
#else
#error "undefined or unsupported generation target"
#endif
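/* A minimal sketch (not part of the original sources) illustrating the reg/hard reg/var
   terminology from the file header comment; the helper names are hypothetical and unused
   below. */
static inline int MIR_UNUSED illustration_hard_reg_p (MIR_reg_t var) {
  return var != MIR_NON_VAR && var <= MAX_HARD_REG; /* machine (hard) register */
}
static inline int MIR_UNUSED illustration_pseudo_reg_p (MIR_reg_t var) {
  return var != MIR_NON_VAR && var > MAX_HARD_REG; /* MIR pseudo-register */
}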
typedef struct bb_stub *bb_stub_t;
DEF_DLIST_LINK (bb_version_t);
struct bb_version {
bb_stub_t bb_stub;
DLIST_LINK (bb_version_t) bb_version_link;
int call_p;
void *addr; /* bb code address, or address of the generator that creates the bb code and returns its address */
void *machine_code;
struct target_bb_version target_data; /* data container for the target code */
uint32_t n_attrs;
spot_attr_t attrs[1];
};
/* Definition of double list of bb_version_t type elements */
DEF_DLIST (bb_version_t, bb_version_link);
struct bb_stub {
DLIST (bb_version_t) bb_versions;
MIR_item_t func_item;
MIR_insn_t first_insn, last_insn;
};
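/* Descriptive note: a bb_stub covers a contiguous insn range (first_insn..last_insn) of a
   func and owns the list of bb_versions generated for it; bb_version.addr is either the
   generated bb code or the address that jumps to the lazy generator (see bb_wrapper above). */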
static void MIR_NO_RETURN util_error (gen_ctx_t gen_ctx, const char *message) {
(*MIR_get_error_func (gen_ctx->ctx)) (MIR_alloc_error, message);
}
static MIR_alloc_t gen_alloc (gen_ctx_t gen_ctx) {
return MIR_get_alloc (gen_ctx->ctx);
}
static void *gen_malloc (gen_ctx_t gen_ctx, size_t size) {
MIR_alloc_t alloc = MIR_get_alloc (gen_ctx->ctx);
void *res = MIR_malloc (alloc, size);
if (res == NULL) util_error (gen_ctx, "no memory");
return res;
}
static void gen_free (gen_ctx_t gen_ctx, void *ptr) {
MIR_alloc_t alloc = gen_alloc (gen_ctx);
MIR_free (alloc, ptr);
}
static void *gen_malloc_and_mark_to_free (gen_ctx_t gen_ctx, size_t size) {
void *res = gen_malloc (gen_ctx, size);
VARR_PUSH (void_ptr_t, to_free, res);
return res;
}
#define DEFAULT_INIT_BITMAP_BITS_NUM 256
typedef struct bb *bb_t;
DEF_DLIST_LINK (bb_t);
typedef struct insn_data *insn_data_t;
DEF_DLIST_LINK (bb_insn_t);
typedef struct edge *edge_t;
typedef edge_t in_edge_t;
typedef edge_t out_edge_t;
DEF_DLIST_LINK (in_edge_t);
DEF_DLIST_LINK (out_edge_t);
struct edge {
bb_t src, dst;
DLIST_LINK (in_edge_t) in_link;
DLIST_LINK (out_edge_t) out_link;
unsigned char fall_through_p, back_edge_p, flag1, flag2;
};
DEF_DLIST (in_edge_t, in_link);
DEF_DLIST (out_edge_t, out_link);
struct insn_data { /* used only for calls/labels in -O0 mode */
bb_t bb;
union {
bitmap_t call_hard_reg_args; /* non-null for calls */
size_t label_disp; /* used for labels */
} u;
};
#define MAY_ALLOCA 0x1
#define MUST_ALLOCA 0x2
struct bb_insn {
MIR_insn_t insn;
unsigned char gvn_val_const_p; /* true if gvn_val is a known int constant, false otherwise */
unsigned char alloca_flag; /* MAY_ALLOCA/MUST_ALLOCA bits set when the value may and/or must come from an alloca */
uint32_t index, mem_index;
int64_t gvn_val; /* used for GVN; it is a negative index for non-GVN expr insns */
DLIST_LINK (bb_insn_t) bb_insn_link;
bb_t bb;
DLIST (dead_var_t) insn_dead_vars;
bitmap_t call_hard_reg_args; /* non-null for calls */
size_t label_disp; /* for label */
};
DEF_DLIST (bb_insn_t, bb_insn_link);
struct bb {
size_t index, pre, rpost, bfs; /* preorder, reverse post order, breadth first order */
DLIST_LINK (bb_t) bb_link;
DLIST (in_edge_t) in_edges;
/* The out edges order: optional fall through bb, optional label bb,
optional exit bb. There is always at least one edge. */
DLIST (out_edge_t) out_edges;
DLIST (bb_insn_t) bb_insns;
unsigned char call_p; /* used in mem avail calculation, true if there is a call in BB */
unsigned char flag; /* used in different calculations */
unsigned char reachable_p; /* reachable if its label is used as a value */
bitmap_t in, out, gen, kill; /* var bitmaps for different data flow problems */
bitmap_t dom_in, dom_out; /* additional var bitmaps */
loop_node_t loop_node;
int max_int_pressure, max_fp_pressure;
};
DEF_DLIST (bb_t, bb_link);
DEF_DLIST_LINK (loop_node_t);
DEF_DLIST_TYPE (loop_node_t);
struct loop_node {
uint32_t index; /* if BB != NULL, it is index of BB */
bb_t bb; /* NULL for internal tree node */
loop_node_t entry;
loop_node_t parent;
union { /* used in LICM */
loop_node_t preheader; /* for a non-bb loop node: the loop node of the preheader bb */
loop_node_t preheader_loop; /* for a preheader bb: the loop node it is the preheader of */
} u;
DLIST (loop_node_t) children;
DLIST_LINK (loop_node_t) children_link;
int max_int_pressure, max_fp_pressure;
};
DEF_DLIST_CODE (loop_node_t, children_link);
DEF_DLIST_LINK (func_cfg_t);
struct reg_info {
long freq;
/* The following members are defined and used only in RA */
size_t live_length; /* # of program points where reg lives */
};
typedef struct reg_info reg_info_t;
DEF_VARR (reg_info_t);
typedef struct {
int uns_p;
union {
int64_t i;
uint64_t u;
} u;
} const_t;
struct func_cfg {
MIR_reg_t max_var;
uint32_t curr_bb_insn_index;
VARR (reg_info_t) * reg_info; /* regs */
bitmap_t call_crossed_regs;
DLIST (bb_t) bbs;
loop_node_t root_loop_node;
};
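/* Dead variable notes are reused through the free_dead_vars free list to avoid repeated
   allocation; the functions below manage that list and the per-insn insn_dead_vars lists. */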
static void init_dead_vars (gen_ctx_t gen_ctx) { DLIST_INIT (dead_var_t, free_dead_vars); }
static void free_dead_var (gen_ctx_t gen_ctx, dead_var_t dv) {
DLIST_APPEND (dead_var_t, free_dead_vars, dv);
}
static dead_var_t get_dead_var (gen_ctx_t gen_ctx) {
dead_var_t dv;
if ((dv = DLIST_HEAD (dead_var_t, free_dead_vars)) == NULL)
return gen_malloc (gen_ctx, sizeof (struct dead_var));
DLIST_REMOVE (dead_var_t, free_dead_vars, dv);
return dv;
}
static void finish_dead_vars (gen_ctx_t gen_ctx) {
dead_var_t dv;
while ((dv = DLIST_HEAD (dead_var_t, free_dead_vars)) != NULL) {
DLIST_REMOVE (dead_var_t, free_dead_vars, dv);
gen_free (gen_ctx, dv);
}
}
static void add_bb_insn_dead_var (gen_ctx_t gen_ctx, bb_insn_t bb_insn, MIR_reg_t var) {
dead_var_t dv;
for (dv = DLIST_HEAD (dead_var_t, bb_insn->insn_dead_vars); dv != NULL;
dv = DLIST_NEXT (dead_var_t, dv))
if (dv->var == var) return;
dv = get_dead_var (gen_ctx);
dv->var = var;
DLIST_APPEND (dead_var_t, bb_insn->insn_dead_vars, dv);
}
static dead_var_t find_bb_insn_dead_var (bb_insn_t bb_insn, MIR_reg_t var) {
dead_var_t dv;
for (dv = DLIST_HEAD (dead_var_t, bb_insn->insn_dead_vars); dv != NULL;
dv = DLIST_NEXT (dead_var_t, dv))
if (dv->var == var) return dv;
return NULL;
}
static void clear_bb_insn_dead_vars (gen_ctx_t gen_ctx, bb_insn_t bb_insn) {
dead_var_t dv;
while ((dv = DLIST_HEAD (dead_var_t, bb_insn->insn_dead_vars)) != NULL) {
DLIST_REMOVE (dead_var_t, bb_insn->insn_dead_vars, dv);
free_dead_var (gen_ctx, dv);
}
}
static void remove_bb_insn_dead_var (gen_ctx_t gen_ctx, bb_insn_t bb_insn, MIR_reg_t var) {
dead_var_t dv, next_dv;
gen_assert (var != MIR_NON_VAR);
for (dv = DLIST_HEAD (dead_var_t, bb_insn->insn_dead_vars); dv != NULL; dv = next_dv) {
next_dv = DLIST_NEXT (dead_var_t, dv);
if (dv->var != var) continue;
DLIST_REMOVE (dead_var_t, bb_insn->insn_dead_vars, dv);
free_dead_var (gen_ctx, dv);
}
}
static void move_bb_insn_dead_vars (gen_ctx_t gen_ctx, bb_insn_t bb_insn, bb_insn_t from_bb_insn,
int (*filter_p) (gen_ctx_t, bb_insn_t, MIR_reg_t)) {
dead_var_t dv;
while ((dv = DLIST_HEAD (dead_var_t, from_bb_insn->insn_dead_vars)) != NULL) {
DLIST_REMOVE (dead_var_t, from_bb_insn->insn_dead_vars, dv);
if (filter_p (gen_ctx, bb_insn, dv->var)) {
DLIST_APPEND (dead_var_t, bb_insn->insn_dead_vars, dv);
} else {
free_dead_var (gen_ctx, dv);
}
}
}
static int insn_data_p (MIR_insn_t insn) {
return insn->code == MIR_LABEL || MIR_call_code_p (insn->code);
}
static void setup_insn_data (gen_ctx_t gen_ctx, MIR_insn_t insn, bb_t bb) {
insn_data_t insn_data;
if (!insn_data_p (insn)) {
insn->data = bb;
return;
}
insn_data = insn->data = gen_malloc (gen_ctx, sizeof (struct insn_data));
insn_data->bb = bb;
insn_data->u.call_hard_reg_args = NULL;
}
static bb_t get_insn_data_bb (MIR_insn_t insn) {
return insn_data_p (insn) ? ((insn_data_t) insn->data)->bb : (bb_t) insn->data;
}
static void delete_insn_data (gen_ctx_t gen_ctx, MIR_insn_t insn) {
insn_data_t insn_data = insn->data;
if (insn_data == NULL || !insn_data_p (insn)) return;
if (MIR_call_code_p (insn->code) && insn_data->u.call_hard_reg_args != NULL)
bitmap_destroy (insn_data->u.call_hard_reg_args);
gen_free (gen_ctx, insn_data);
}
static bb_insn_t create_bb_insn (gen_ctx_t gen_ctx, MIR_insn_t insn, bb_t bb) {
bb_insn_t bb_insn = gen_malloc (gen_ctx, sizeof (struct bb_insn));
MIR_alloc_t alloc = gen_alloc (gen_ctx);
insn->data = bb_insn;
bb_insn->bb = bb;
bb_insn->insn = insn;
bb_insn->gvn_val_const_p = FALSE;
bb_insn->alloca_flag = insn->code == MIR_ALLOCA ? MAY_ALLOCA | MUST_ALLOCA : 0;
bb_insn->call_hard_reg_args = NULL;
gen_assert (curr_cfg->curr_bb_insn_index != (uint32_t) ~0ull);
bb_insn->index = curr_cfg->curr_bb_insn_index++;
bb_insn->mem_index = 0;
bb_insn->gvn_val = bb_insn->index;
DLIST_INIT (dead_var_t, bb_insn->insn_dead_vars);
if (MIR_call_code_p (insn->code))
bb_insn->call_hard_reg_args = bitmap_create2 (alloc, MAX_HARD_REG + 1);
bb_insn->label_disp = 0;
return bb_insn;
}
static bb_insn_t add_new_bb_insn (gen_ctx_t gen_ctx, MIR_insn_t insn, bb_t bb, int append_p) {
bb_insn_t bb_insn = create_bb_insn (gen_ctx, insn, bb);
if (append_p)
DLIST_APPEND (bb_insn_t, bb->bb_insns, bb_insn);
else
DLIST_PREPEND (bb_insn_t, bb->bb_insns, bb_insn);
return bb_insn;
}
static void delete_bb_insn (gen_ctx_t gen_ctx, bb_insn_t bb_insn) {
DLIST_REMOVE (bb_insn_t, bb_insn->bb->bb_insns, bb_insn);
bb_insn->insn->data = NULL;
clear_bb_insn_dead_vars (gen_ctx, bb_insn);
if (bb_insn->call_hard_reg_args != NULL) bitmap_destroy (bb_insn->call_hard_reg_args);
gen_free (gen_ctx, bb_insn);
}
static bb_t get_insn_bb (gen_ctx_t gen_ctx, MIR_insn_t insn) {
return optimize_level == 0 ? get_insn_data_bb (insn) : ((bb_insn_t) insn->data)->bb;
}
static void create_new_bb_insns (gen_ctx_t gen_ctx, MIR_insn_t before, MIR_insn_t after,
MIR_insn_t insn_for_bb) {
MIR_insn_t insn;
bb_insn_t bb_insn, new_bb_insn;
bb_t bb;
/* Null insn_for_bb means it should be in the 1st block: skip entry and exit blocks: */
bb = insn_for_bb == NULL ? DLIST_EL (bb_t, curr_cfg->bbs, 2) : get_insn_bb (gen_ctx, insn_for_bb);
if (optimize_level == 0) {
for (insn = (before == NULL ? DLIST_HEAD (MIR_insn_t, curr_func_item->u.func->insns)
: DLIST_NEXT (MIR_insn_t, before));
insn != after; insn = DLIST_NEXT (MIR_insn_t, insn))
setup_insn_data (gen_ctx, insn, bb);
return;
}
if (before != NULL && (bb_insn = before->data)->bb == bb) {
for (insn = DLIST_NEXT (MIR_insn_t, before); insn != after;
insn = DLIST_NEXT (MIR_insn_t, insn), bb_insn = new_bb_insn) {
new_bb_insn = create_bb_insn (gen_ctx, insn, bb);
DLIST_INSERT_AFTER (bb_insn_t, bb->bb_insns, bb_insn, new_bb_insn);
}
} else {
gen_assert (after != NULL);
bb_insn = after->data;
insn = (before == NULL ? DLIST_HEAD (MIR_insn_t, curr_func_item->u.func->insns)
: DLIST_NEXT (MIR_insn_t, before));
for (; insn != after; insn = DLIST_NEXT (MIR_insn_t, insn)) {
new_bb_insn = create_bb_insn (gen_ctx, insn, bb);
if (bb == bb_insn->bb)
DLIST_INSERT_BEFORE (bb_insn_t, bb->bb_insns, bb_insn, new_bb_insn);
else
DLIST_APPEND (bb_insn_t, bb->bb_insns, new_bb_insn);
}
}
}
static void gen_delete_insn (gen_ctx_t gen_ctx, MIR_insn_t insn) {
if (optimize_level == 0)
delete_insn_data (gen_ctx, insn);
else
delete_bb_insn (gen_ctx, insn->data);
MIR_remove_insn (gen_ctx->ctx, curr_func_item, insn);
}
static void gen_add_insn_before (gen_ctx_t gen_ctx, MIR_insn_t before, MIR_insn_t insn) {
MIR_context_t ctx = gen_ctx->ctx;
MIR_insn_t insn_for_bb = before;
gen_assert (!MIR_any_branch_code_p (insn->code) && insn->code != MIR_LABEL);
if (before->code == MIR_LABEL) {
insn_for_bb = DLIST_PREV (MIR_insn_t, before);
gen_assert (insn_for_bb == NULL || !MIR_any_branch_code_p (insn_for_bb->code));
}
MIR_insert_insn_before (ctx, curr_func_item, before, insn);
create_new_bb_insns (gen_ctx, DLIST_PREV (MIR_insn_t, insn), before, insn_for_bb);
}
static void gen_add_insn_after (gen_ctx_t gen_ctx, MIR_insn_t after, MIR_insn_t insn) {
MIR_insn_t insn_for_bb = after;
gen_assert (insn->code != MIR_LABEL);
if (MIR_any_branch_code_p (insn_for_bb->code)) insn_for_bb = DLIST_NEXT (MIR_insn_t, insn_for_bb);
gen_assert (!MIR_any_branch_code_p (insn_for_bb->code));
MIR_insert_insn_after (gen_ctx->ctx, curr_func_item, after, insn);
create_new_bb_insns (gen_ctx, after, DLIST_NEXT (MIR_insn_t, insn), insn_for_bb);
}
static void gen_move_insn_before (gen_ctx_t gen_ctx, MIR_insn_t before, MIR_insn_t insn) {
bb_insn_t bb_insn = insn->data, before_bb_insn = before->data;
DLIST_REMOVE (MIR_insn_t, curr_func_item->u.func->insns, insn);
MIR_insert_insn_before (gen_ctx->ctx, curr_func_item, before, insn);
DLIST_REMOVE (bb_insn_t, bb_insn->bb->bb_insns, bb_insn);
DLIST_INSERT_BEFORE (bb_insn_t, before_bb_insn->bb->bb_insns, before_bb_insn, bb_insn);
bb_insn->bb = before_bb_insn->bb;
}
static void MIR_UNUSED setup_call_hard_reg_args (gen_ctx_t gen_ctx, MIR_insn_t call_insn,
MIR_reg_t hard_reg) {
MIR_alloc_t alloc = gen_alloc (gen_ctx);
insn_data_t insn_data;
gen_assert (MIR_call_code_p (call_insn->code) && hard_reg <= MAX_HARD_REG);
if (optimize_level != 0) {
bitmap_set_bit_p (((bb_insn_t) call_insn->data)->call_hard_reg_args, hard_reg);
return;
}
if ((insn_data = call_insn->data)->u.call_hard_reg_args == NULL)
insn_data->u.call_hard_reg_args = (void *) bitmap_create2 (alloc, MAX_HARD_REG + 1);
bitmap_set_bit_p (insn_data->u.call_hard_reg_args, hard_reg);
}
static int MIR_UNUSED gen_nested_loop_label_p (gen_ctx_t gen_ctx, MIR_insn_t insn) {
gen_assert (insn->code == MIR_LABEL);
if (optimize_level <= 1) return FALSE;
bb_t bb = get_insn_bb (gen_ctx, insn);
if (bb->loop_node == NULL) return FALSE;
loop_node_t node, parent = bb->loop_node->parent;
if (parent->entry == NULL || parent->entry->bb != bb) return FALSE;
for (node = DLIST_HEAD (loop_node_t, parent->children); node != NULL;
node = DLIST_NEXT (loop_node_t, node))
if (node->bb == NULL) return FALSE; /* subloop */
return TRUE;
}
static void set_label_disp (gen_ctx_t gen_ctx, MIR_insn_t insn, size_t disp) {
gen_assert (insn->code == MIR_LABEL);
if (optimize_level == 0)
((insn_data_t) insn->data)->u.label_disp = disp;
else
((bb_insn_t) insn->data)->label_disp = disp;
}
static size_t get_label_disp (gen_ctx_t gen_ctx, MIR_insn_t insn) {
gen_assert (insn->code == MIR_LABEL);
return (optimize_level == 0 ? ((insn_data_t) insn->data)->u.label_disp
: ((bb_insn_t) insn->data)->label_disp);
}
static uint64_t get_ref_value (gen_ctx_t gen_ctx, const MIR_op_t *ref_op) {
gen_assert (ref_op->mode == MIR_OP_REF);
if (ref_op->u.ref->item_type == MIR_data_item && ref_op->u.ref->u.data->name != NULL
&& _MIR_reserved_ref_name_p (gen_ctx->ctx, ref_op->u.ref->u.data->name))
return (uint64_t) ref_op->u.ref->u.data->u.els;
return (uint64_t) ref_op->u.ref->addr;
}
static void gen_setup_lrefs (gen_ctx_t gen_ctx, uint8_t *func_code) {
for (MIR_lref_data_t lref = curr_func_item->u.func->first_lref; lref != NULL;
lref = lref->next) { /* set up lrefs */
int64_t disp = (int64_t) get_label_disp (gen_ctx, lref->label) + lref->disp;
*(void **) lref->load_addr
= lref->label2 == NULL ? (void *) (func_code + disp)
: (void *) (disp - (int64_t) get_label_disp (gen_ctx, lref->label2));
}
}
static void setup_used_hard_regs (gen_ctx_t gen_ctx, MIR_type_t type, MIR_reg_t hard_reg) {
MIR_reg_t curr_hard_reg;
int i, slots_num = target_locs_num (hard_reg, type);
for (i = 0; i < slots_num; i++)
if ((curr_hard_reg = target_nth_loc (hard_reg, type, i)) <= MAX_HARD_REG)
bitmap_set_bit_p (func_used_hard_regs, curr_hard_reg);
}
static MIR_reg_t get_temp_hard_reg (MIR_type_t type, int first_p) {
if (type == MIR_T_F) return first_p ? TEMP_FLOAT_HARD_REG1 : TEMP_FLOAT_HARD_REG2;
if (type == MIR_T_D) return first_p ? TEMP_DOUBLE_HARD_REG1 : TEMP_DOUBLE_HARD_REG2;
if (type == MIR_T_LD) return first_p ? TEMP_LDOUBLE_HARD_REG1 : TEMP_LDOUBLE_HARD_REG2;
return first_p ? TEMP_INT_HARD_REG1 : TEMP_INT_HARD_REG2;
}
static bb_t create_bb (gen_ctx_t gen_ctx, MIR_insn_t insn) {
bb_t bb = gen_malloc (gen_ctx, sizeof (struct bb));
MIR_alloc_t alloc = gen_alloc (gen_ctx);
bb->pre = bb->rpost = bb->bfs = 0;
bb->loop_node = NULL;
DLIST_INIT (bb_insn_t, bb->bb_insns);
DLIST_INIT (in_edge_t, bb->in_edges);
DLIST_INIT (out_edge_t, bb->out_edges);
bb->call_p = bb->flag = bb->reachable_p = FALSE;
bb->in = bitmap_create2 (alloc, DEFAULT_INIT_BITMAP_BITS_NUM);
bb->out = bitmap_create2 (alloc, DEFAULT_INIT_BITMAP_BITS_NUM);
bb->gen = bitmap_create2 (alloc, DEFAULT_INIT_BITMAP_BITS_NUM);
bb->kill = bitmap_create2 (alloc, DEFAULT_INIT_BITMAP_BITS_NUM);
bb->dom_in = bitmap_create2 (alloc, DEFAULT_INIT_BITMAP_BITS_NUM);
bb->dom_out = bitmap_create2 (alloc, DEFAULT_INIT_BITMAP_BITS_NUM);
bb->max_int_pressure = bb->max_fp_pressure = 0;
if (insn != NULL) {
if (optimize_level == 0)
setup_insn_data (gen_ctx, insn, bb);
else
add_new_bb_insn (gen_ctx, insn, bb, TRUE);
}
return bb;
}
static void add_new_bb (gen_ctx_t gen_ctx, bb_t bb) {
DLIST_APPEND (bb_t, curr_cfg->bbs, bb);
bb->index = curr_bb_index++;
}
static void insert_new_bb_after (gen_ctx_t gen_ctx, bb_t after, bb_t bb) {
DLIST_INSERT_AFTER (bb_t, curr_cfg->bbs, after, bb);
bb->index = curr_bb_index++;
}
static void insert_new_bb_before (gen_ctx_t gen_ctx, bb_t before, bb_t bb) {
DLIST_INSERT_BEFORE (bb_t, curr_cfg->bbs, before, bb);
bb->index = curr_bb_index++;
}
static edge_t create_edge (gen_ctx_t gen_ctx, bb_t src, bb_t dst, int fall_through_p,
int append_p) {
edge_t e = gen_malloc (gen_ctx, sizeof (struct edge));
e->src = src;
e->dst = dst;
if (append_p) {
DLIST_APPEND (in_edge_t, dst->in_edges, e);
DLIST_APPEND (out_edge_t, src->out_edges, e);
} else {
DLIST_PREPEND (in_edge_t, dst->in_edges, e);
DLIST_PREPEND (out_edge_t, src->out_edges, e);
}
e->fall_through_p = fall_through_p;
e->back_edge_p = e->flag1 = e->flag2 = FALSE;
return e;
}
static void delete_edge (gen_ctx_t gen_ctx, edge_t e) {
DLIST_REMOVE (out_edge_t, e->src->out_edges, e);
DLIST_REMOVE (in_edge_t, e->dst->in_edges, e);
gen_free (gen_ctx, e);
}
static edge_t find_edge (bb_t src, bb_t dst) {
for (edge_t e = DLIST_HEAD (out_edge_t, src->out_edges); e != NULL;
e = DLIST_NEXT (out_edge_t, e))
if (e->dst == dst) return e;
return NULL;
}
static void delete_bb (gen_ctx_t gen_ctx, bb_t bb) {
MIR_alloc_t alloc = gen_alloc (gen_ctx);
edge_t e, next_e;
for (e = DLIST_HEAD (out_edge_t, bb->out_edges); e != NULL; e = next_e) {
next_e = DLIST_NEXT (out_edge_t, e);
delete_edge (gen_ctx, e);
}
for (e = DLIST_HEAD (in_edge_t, bb->in_edges); e != NULL; e = next_e) {
next_e = DLIST_NEXT (in_edge_t, e);
delete_edge (gen_ctx, e);
}
if (bb->loop_node != NULL) {
if (bb->loop_node->parent->entry == bb->loop_node) bb->loop_node->parent->entry = NULL;
DLIST_REMOVE (loop_node_t, bb->loop_node->parent->children, bb->loop_node);
if (bb->loop_node->u.preheader_loop != NULL)
bb->loop_node->u.preheader_loop->u.preheader = NULL;
gen_free (gen_ctx, bb->loop_node);
}
DLIST_REMOVE (bb_t, curr_cfg->bbs, bb);
bitmap_destroy (bb->in);
bitmap_destroy (bb->out);
bitmap_destroy (bb->gen);
bitmap_destroy (bb->kill);
bitmap_destroy (bb->dom_in);
bitmap_destroy (bb->dom_out);
gen_free (gen_ctx, bb);
}
static void print_bb_insn (gen_ctx_t gen_ctx, bb_insn_t bb_insn, int with_notes_p);
/* Return the BB in which to put insns from edge E. If necessary, split the edge by creating a
new bb; the bb enumeration and the new bb bitmaps can be invalid after that. Loop info is
undefined for the new bb. */
static bb_t split_edge_if_necessary (gen_ctx_t gen_ctx, edge_t e) {
MIR_context_t ctx = gen_ctx->ctx;
size_t i;
bb_t new_bb, src = e->src, dst = e->dst;
edge_t e2;
bb_insn_t last_bb_insn = DLIST_TAIL (bb_insn_t, src->bb_insns);
bb_insn_t first_bb_insn = DLIST_HEAD (bb_insn_t, dst->bb_insns);
MIR_insn_t insn, tail_insn, last_insn = last_bb_insn->insn, first_insn = first_bb_insn->insn;
DEBUG (4, {
fprintf (debug_file, " Splitting bb%lu->bb%lu:\n", (unsigned long) src->index,
(unsigned long) dst->index);
});
if (DLIST_HEAD (out_edge_t, src->out_edges) == DLIST_TAIL (out_edge_t, src->out_edges)
|| e->fall_through_p) { /* fall through or src with one dest */
if (e->fall_through_p) {
insn = MIR_new_insn_arr (ctx, MIR_USE, 0, NULL); /* just nop */
MIR_insert_insn_after (ctx, curr_func_item, last_insn, insn);
} else if (DLIST_HEAD (in_edge_t, src->in_edges) == DLIST_TAIL (in_edge_t, src->in_edges)) {
return src;
} else { /* jump with one dest only: move jmp to new fall-through block */
gen_assert (last_insn->code == MIR_JMP || last_insn->code == MIR_RET
|| last_insn->code == MIR_JRET);
delete_bb_insn (gen_ctx, last_bb_insn);
insn = last_insn;
}
new_bb = create_bb (gen_ctx, insn);
insert_new_bb_after (gen_ctx, src, new_bb);
DLIST_REMOVE (in_edge_t, dst->in_edges, e);
e->dst = new_bb;
DLIST_APPEND (in_edge_t, new_bb->in_edges, e);
create_edge (gen_ctx, new_bb, dst, e->fall_through_p, TRUE);
e->fall_through_p = TRUE;
DEBUG (4, {
fprintf (debug_file, " creating fall through bb%lu after bb%lu, redirect the edge to it",
(unsigned long) new_bb->index, (unsigned long) src->index);
fprintf (debug_file, ", and create edge bb%lu->bb%lu:\n", (unsigned long) new_bb->index,
(unsigned long) dst->index);
fprintf (debug_file, " new bb insn is ");
print_bb_insn (gen_ctx, (bb_insn_t) insn->data, FALSE);
});
} else if (DLIST_HEAD (in_edge_t, dst->in_edges) == DLIST_TAIL (in_edge_t, dst->in_edges)) {
gen_assert (first_insn->code == MIR_LABEL);
if (first_bb_insn == DLIST_TAIL (bb_insn_t, dst->bb_insns)) return dst;
/* non-fall through dest with one source only: move dest label to new block */
delete_bb_insn (gen_ctx, first_bb_insn);
new_bb = create_bb (gen_ctx, first_insn);
insert_new_bb_before (gen_ctx, dst, new_bb);
DLIST_REMOVE (in_edge_t, dst->in_edges, e);
e->dst = new_bb;
DLIST_APPEND (in_edge_t, new_bb->in_edges, e);
create_edge (gen_ctx, new_bb, dst, TRUE, TRUE);
DEBUG (4, {
fprintf (debug_file, " creating bb%lu before bb%lu, redirect the edge to it",
(unsigned long) new_bb->index, (unsigned long) dst->index);
fprintf (debug_file, ", and create fall-through edge bb%lu->bb%lu:\n",
(unsigned long) new_bb->index, (unsigned long) dst->index);
fprintf (debug_file, " new bb insn is ");
print_bb_insn (gen_ctx, (bb_insn_t) first_insn->data, FALSE);
});
} else { /* critical non-fall through edge: */
gen_assert (first_insn->code == MIR_LABEL);
for (e2 = DLIST_HEAD (in_edge_t, dst->in_edges); e2 != NULL; e2 = DLIST_NEXT (in_edge_t, e2)) {
if (e2->fall_through_p) break;
gen_assert (DLIST_TAIL (bb_insn_t, e2->src->bb_insns) != NULL
&& MIR_any_branch_code_p (DLIST_TAIL (bb_insn_t, e2->src->bb_insns)->insn->code));
}
if (e2 != NULL) { /* make fall through edge to dst a jump edge */
gen_assert (e2->dst == dst);
insn = MIR_new_insn (ctx, MIR_JMP, MIR_new_label_op (ctx, first_insn));
tail_insn = DLIST_TAIL (bb_insn_t, e2->src->bb_insns)->insn;
if (DLIST_NEXT (out_edge_t, e2) == NULL && DLIST_PREV (out_edge_t, e2) == NULL) {
/* e2->src with the only output edge: just put jump at the end of e2->src */
gen_assert (!MIR_any_branch_code_p (tail_insn->code));
gen_add_insn_after (gen_ctx, tail_insn, insn);
e2->fall_through_p = FALSE;
DEBUG (4, {
fprintf (debug_file,
" Make edge bb%lu->bb%lu a non-fall through, add new insn at the of bb%lu ",
(unsigned long) e2->src->index, (unsigned long) e2->dst->index,
(unsigned long) e2->src->index);
print_bb_insn (gen_ctx, (bb_insn_t) insn->data, FALSE);
});
} else {
MIR_insert_insn_after (ctx, curr_func_item, tail_insn, insn);
new_bb = create_bb (gen_ctx, insn);
insert_new_bb_after (gen_ctx, e2->src, new_bb);
DLIST_REMOVE (in_edge_t, e2->dst->in_edges, e2);
e2->dst = new_bb;
DLIST_APPEND (in_edge_t, new_bb->in_edges, e2);