!33 revert patches to fix build errors

From: @hht8
Reviewed-by: @jdkboy
Signed-off-by: @jdkboy
Committed: openeuler-ci-bot, 2020-09-15 09:26:18 +08:00 (via Gitee)
commit 5e17aba22c
6 changed files with 6 additions and 632 deletions


@@ -1,148 +0,0 @@
diff -uprN a/gcc/lra-constraints.c b/gcc/lra-constraints.c
--- a/gcc/lra-constraints.c 2020-03-12 19:07:21.000000000 +0800
+++ b/gcc/lra-constraints.c 2020-09-08 10:02:52.308147305 +0800
@@ -235,12 +235,17 @@ get_reg_class (int regno)
CL. Use elimination first if REG is a hard register. If REG is a
reload pseudo created by this constraints pass, assume that it will
be allocated a hard register from its allocno class, but allow that
- class to be narrowed to CL if it is currently a superset of CL.
+ class to be narrowed to CL if it is currently a superset of CL and
+ if either:
+
+ - ALLOW_ALL_RELOAD_CLASS_CHANGES_P is true or
+ - the instruction we're processing is not a reload move.
If NEW_CLASS is nonnull, set *NEW_CLASS to the new allocno class of
REGNO (reg), or NO_REGS if no change in its class was needed. */
static bool
-in_class_p (rtx reg, enum reg_class cl, enum reg_class *new_class)
+in_class_p (rtx reg, enum reg_class cl, enum reg_class *new_class,
+ bool allow_all_reload_class_changes_p = false)
{
enum reg_class rclass, common_class;
machine_mode reg_mode;
@@ -265,7 +270,8 @@ in_class_p (rtx reg, enum reg_class cl,
typically moves that have many alternatives, and restricting
reload pseudos for one alternative may lead to situations
where other reload pseudos are no longer allocatable. */
- || (INSN_UID (curr_insn) >= new_insn_uid_start
+ || (!allow_all_reload_class_changes_p
+ && INSN_UID (curr_insn) >= new_insn_uid_start
&& curr_insn_set != NULL
&& ((OBJECT_P (SET_SRC (curr_insn_set))
&& ! CONSTANT_P (SET_SRC (curr_insn_set)))
@@ -557,13 +563,12 @@ init_curr_insn_input_reloads (void)
curr_insn_input_reloads_num = 0;
}
-/* Create a new pseudo using MODE, RCLASS, ORIGINAL or reuse already
- created input reload pseudo (only if TYPE is not OP_OUT). Don't
- reuse pseudo if IN_SUBREG_P is true and the reused pseudo should be
- wrapped up in SUBREG. The result pseudo is returned through
- RESULT_REG. Return TRUE if we created a new pseudo, FALSE if we
- reused the already created input reload pseudo. Use TITLE to
- describe new registers for debug purposes. */
+/* Create a new pseudo using MODE, RCLASS, ORIGINAL or reuse an existing
+ reload pseudo. Don't reuse an existing reload pseudo if IN_SUBREG_P
+ is true and the reused pseudo should be wrapped up in a SUBREG.
+ The result pseudo is returned through RESULT_REG. Return TRUE if we
+ created a new pseudo, FALSE if we reused an existing reload pseudo.
+ Use TITLE to describe new registers for debug purposes. */
static bool
get_reload_reg (enum op_type type, machine_mode mode, rtx original,
enum reg_class rclass, bool in_subreg_p,
@@ -575,6 +580,35 @@ get_reload_reg (enum op_type type, machi
if (type == OP_OUT)
{
+ /* Output reload registers tend to start out with a conservative
+ choice of register class. Usually this is ALL_REGS, although
+ a target might narrow it (for performance reasons) through
+ targetm.preferred_reload_class. It's therefore quite common
+ for a reload instruction to require a more restrictive class
+ than the class that was originally assigned to the reload register.
+
+ In these situations, it's more efficient to refine the choice
+ of register class rather than create a second reload register.
+ This also helps to avoid cycling for registers that are only
+ used by reload instructions. */
+ if (REG_P (original)
+ && (int) REGNO (original) >= new_regno_start
+ && INSN_UID (curr_insn) >= new_insn_uid_start
+ && in_class_p (original, rclass, &new_class, true))
+ {
+ unsigned int regno = REGNO (original);
+ if (lra_dump_file != NULL)
+ {
+ fprintf (lra_dump_file, " Reuse r%d for output ", regno);
+ dump_value_slim (lra_dump_file, original, 1);
+ }
+ if (new_class != lra_get_allocno_class (regno))
+ lra_change_class (regno, new_class, ", change to", false);
+ if (lra_dump_file != NULL)
+ fprintf (lra_dump_file, "\n");
+ *result_reg = original;
+ return false;
+ }
*result_reg
= lra_create_new_reg_with_unique_value (mode, original, rclass, title);
return true;
diff -uprN a/gcc/testsuite/gcc.c-torture/compile/pr96796.c b/gcc/testsuite/gcc.c-torture/compile/pr96796.c
--- a/gcc/testsuite/gcc.c-torture/compile/pr96796.c 1970-01-01 08:00:00.000000000 +0800
+++ b/gcc/testsuite/gcc.c-torture/compile/pr96796.c 2020-09-08 09:59:40.077774393 +0800
@@ -0,0 +1,55 @@
+/* { dg-additional-options "-fcommon" } */
+
+struct S0 {
+ signed f0 : 8;
+ unsigned f1;
+ unsigned f4;
+};
+struct S1 {
+ long f3;
+ char f4;
+} g_3_4;
+
+int g_5, func_1_l_32, func_50___trans_tmp_31;
+static struct S0 g_144, g_834, g_1255, g_1261;
+
+int g_273[120] = {};
+int *g_555;
+char **g_979;
+static int g_1092_0;
+static int g_1193;
+int safe_mul_func_int16_t_s_s(int si1, int si2) { return si1 * si2; }
+static struct S0 *func_50();
+int func_1() { func_50(g_3_4, g_5, func_1_l_32, 8, 3); }
+void safe_div_func_int64_t_s_s(int *);
+void safe_mod_func_uint32_t_u_u(struct S0);
+struct S0 *func_50(int p_51, struct S0 p_52, struct S1 p_53, int p_54,
+ int p_55) {
+ int __trans_tmp_30;
+ char __trans_tmp_22;
+ short __trans_tmp_19;
+ long l_985_1;
+ long l_1191[8];
+ safe_div_func_int64_t_s_s(g_273);
+ __builtin_printf((char*)g_1261.f4);
+ safe_mod_func_uint32_t_u_u(g_834);
+ g_144.f0 += 1;
+ for (;;) {
+ struct S1 l_1350 = {&l_1350};
+ for (; p_53.f3; p_53.f3 -= 1)
+ for (; g_1193 <= 2; g_1193 += 1) {
+ __trans_tmp_19 = safe_mul_func_int16_t_s_s(l_1191[l_985_1 + p_53.f3],
+ p_55 % (**g_979 = 10));
+ __trans_tmp_22 = g_1255.f1 * p_53.f4;
+ __trans_tmp_30 = __trans_tmp_19 + __trans_tmp_22;
+ if (__trans_tmp_30)
+ g_1261.f0 = p_51;
+ else {
+ g_1255.f0 = p_53.f3;
+ int *l_1422 = g_834.f0 = g_144.f4 != (*l_1422)++ > 0 < 0 ^ 51;
+ g_555 = ~0;
+ g_1092_0 |= func_50___trans_tmp_31;
+ }
+ }
+ }
+}
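To make the reverted behaviour easier to follow, here is a minimal standalone model of the decision that the new allow_all_reload_class_changes_p parameter controls (plain C with register classes as bitmasks; all names are hypothetical, and this is a sketch of the logic, not GCC code):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int reg_class_mask;  /* each bit models one hard register */

/* Decide whether a reload pseudo whose current class is CURRENT may be
   treated as belonging to CL, possibly narrowing its class.  Narrowing
   is refused when the insn is itself a reload move, unless
   allow_all_reload_class_changes_p overrides that restriction.  */
static bool
in_class_p_model (reg_class_mask current, reg_class_mask cl,
                  bool insn_is_reload_move,
                  bool allow_all_reload_class_changes_p)
{
  reg_class_mask common = current & cl;  /* candidate narrowed class */
  if (common == 0)
    return false;                        /* no allocatable register in CL */
  if (common == current)
    return true;                         /* already inside CL: no change */
  return allow_all_reload_class_changes_p || !insn_is_reload_move;
}

int
main (void)
{
  /* A reload move may not narrow class 0xF down to 0x3 ...  */
  printf ("%d\n", in_class_p_model (0xF, 0x3, true, false));  /* 0 */
  /* ... unless the caller opts in, as get_reload_reg does above for
     output reloads, so the pseudo is reused instead of duplicated.  */
  printf ("%d\n", in_class_p_model (0xF, 0x3, true, true));   /* 1 */
  return 0;
}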


@@ -1,207 +0,0 @@
This backport contains 2 patches from the GCC mainstream tree.
The commit IDs of these patches are listed below in chronological order.
0001-AArch64-Improve-SVE-constant-moves.patch
4aeb1ba7f62c1d680c819ae3e137c3bad6f520ca
0002-aarch64-Add-vector-vector-vec_extract-patterns-PR928.patch
c15893df6eafc32efd6184379dd7f02c36da7d12
diff -Nurp a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
--- a/gcc/config/aarch64/aarch64.c 2020-09-03 19:50:00.484000000 +0800
+++ b/gcc/config/aarch64/aarch64.c 2020-09-03 19:50:19.336943210 +0800
@@ -3632,7 +3632,7 @@ aarch64_maybe_expand_sve_subreg_move (rt
attributes. Unlike gen_lowpart, this doesn't care whether the
mode change is valid. */
-static rtx
+rtx
aarch64_replace_reg_mode (rtx x, machine_mode mode)
{
if (GET_MODE (x) == mode)
@@ -15016,6 +15016,36 @@ aarch64_simd_check_vect_par_cnst_half (r
return true;
}
+/* Return a PARALLEL containing NELTS elements, with element I equal
+ to BASE + I * STEP. */
+
+rtx
+aarch64_gen_stepped_int_parallel (unsigned int nelts, int base, int step)
+{
+ rtvec vec = rtvec_alloc (nelts);
+ for (unsigned int i = 0; i < nelts; ++i)
+ RTVEC_ELT (vec, i) = gen_int_mode (base + i * step, DImode);
+ return gen_rtx_PARALLEL (VOIDmode, vec);
+}
+
+/* Return true if OP is a PARALLEL of CONST_INTs that form a linear
+ series with step STEP. */
+
+bool
+aarch64_stepped_int_parallel_p (rtx op, int step)
+{
+ if (GET_CODE (op) != PARALLEL || !CONST_INT_P (XVECEXP (op, 0, 0)))
+ return false;
+
+ unsigned HOST_WIDE_INT base = UINTVAL (XVECEXP (op, 0, 0));
+ for (int i = 1; i < XVECLEN (op, 0); ++i)
+ if (!CONST_INT_P (XVECEXP (op, 0, i))
+ || UINTVAL (XVECEXP (op, 0, i)) != base + i * step)
+ return false;
+
+ return true;
+}
+
/* Bounds-check lanes. Ensure OPERAND lies between LOW (inclusive) and
HIGH (exclusive). */
void
diff -Nurp a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
--- a/gcc/config/aarch64/aarch64-protos.h 2020-09-03 19:50:00.484000000 +0800
+++ b/gcc/config/aarch64/aarch64-protos.h 2020-09-03 19:50:29.137683100 +0800
@@ -501,6 +501,8 @@ bool aarch64_sve_ld1r_operand_p (rtx);
bool aarch64_sve_ldr_operand_p (rtx);
bool aarch64_sve_struct_memory_operand_p (rtx);
rtx aarch64_simd_vect_par_cnst_half (machine_mode, int, bool);
+rtx aarch64_gen_stepped_int_parallel (unsigned int, int, int);
+bool aarch64_stepped_int_parallel_p (rtx, int);
rtx aarch64_tls_get_addr (void);
tree aarch64_fold_builtin (tree, int, tree *, bool);
unsigned aarch64_dbx_register_number (unsigned);
@@ -516,6 +518,7 @@ void aarch64_expand_mov_immediate (rtx,
void aarch64_emit_sve_pred_move (rtx, rtx, rtx);
void aarch64_expand_sve_mem_move (rtx, rtx, machine_mode);
bool aarch64_maybe_expand_sve_subreg_move (rtx, rtx);
+rtx aarch64_replace_reg_mode (rtx, machine_mode);
void aarch64_split_sve_subreg_move (rtx, rtx, rtx);
void aarch64_expand_prologue (void);
void aarch64_expand_vector_init (rtx, rtx);
diff -Nurp a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
--- a/gcc/config/aarch64/aarch64-simd.md 2020-09-03 19:50:00.484000000 +0800
+++ b/gcc/config/aarch64/aarch64-simd.md 2020-09-03 19:50:44.100673150 +0800
@@ -282,37 +282,51 @@
rtx dst_high_part = gen_highpart (<VHALF>mode, dst);
rtx lo = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, false);
rtx hi = aarch64_simd_vect_par_cnst_half (<MODE>mode, <nunits>, true);
-
- emit_insn
- (gen_aarch64_simd_mov_from_<mode>low (dst_low_part, src, lo));
- emit_insn
- (gen_aarch64_simd_mov_from_<mode>high (dst_high_part, src, hi));
+ emit_insn (gen_aarch64_get_half<mode> (dst_low_part, src, lo));
+ emit_insn (gen_aarch64_get_half<mode> (dst_high_part, src, hi));
}
DONE;
}
)
-(define_insn "aarch64_simd_mov_from_<mode>low"
- [(set (match_operand:<VHALF> 0 "register_operand" "=r")
+(define_expand "aarch64_get_half<mode>"
+ [(set (match_operand:<VHALF> 0 "register_operand")
(vec_select:<VHALF>
- (match_operand:VQ 1 "register_operand" "w")
- (match_operand:VQ 2 "vect_par_cnst_lo_half" "")))]
- "TARGET_SIMD && reload_completed"
- "umov\t%0, %1.d[0]"
- [(set_attr "type" "neon_to_gp<q>")
- (set_attr "length" "4")
- ])
+ (match_operand:VQ 1 "register_operand")
+ (match_operand 2 "ascending_int_parallel")))]
+ "TARGET_SIMD"
+)
+
+(define_insn_and_split "aarch64_simd_mov_from_<mode>low"
+ [(set (match_operand:<VHALF> 0 "register_operand" "=w,?r")
+ (vec_select:<VHALF>
+ (match_operand:VQ_NO2E 1 "register_operand" "w,w")
+ (match_operand:VQ_NO2E 2 "vect_par_cnst_lo_half" "")))]
+ "TARGET_SIMD"
+ "@
+ #
+ umov\t%0, %1.d[0]"
+ "&& reload_completed && aarch64_simd_register (operands[0], <VHALF>mode)"
+ [(set (match_dup 0) (match_dup 1))]
+ {
+ operands[1] = aarch64_replace_reg_mode (operands[1], <VHALF>mode);
+ }
+ [(set_attr "type" "mov_reg,neon_to_gp<q>")
+ (set_attr "length" "4")]
+)
(define_insn "aarch64_simd_mov_from_<mode>high"
- [(set (match_operand:<VHALF> 0 "register_operand" "=r")
+ [(set (match_operand:<VHALF> 0 "register_operand" "=w,?r")
(vec_select:<VHALF>
- (match_operand:VQ 1 "register_operand" "w")
- (match_operand:VQ 2 "vect_par_cnst_hi_half" "")))]
- "TARGET_SIMD && reload_completed"
- "umov\t%0, %1.d[1]"
- [(set_attr "type" "neon_to_gp<q>")
- (set_attr "length" "4")
- ])
+ (match_operand:VQ_NO2E 1 "register_operand" "w,w")
+ (match_operand:VQ_NO2E 2 "vect_par_cnst_hi_half" "")))]
+ "TARGET_SIMD"
+ "@
+ dup\\t%d0, %1.d[1]
+ umov\t%0, %1.d[1]"
+ [(set_attr "type" "neon_dup<q>,neon_to_gp<q>")
+ (set_attr "length" "4")]
+)
(define_insn "orn<mode>3"
[(set (match_operand:VDQ_I 0 "register_operand" "=w")
@@ -6016,6 +6030,35 @@
DONE;
})
+;; Extract a 64-bit vector from one half of a 128-bit vector.
+(define_expand "vec_extract<mode><Vhalf>"
+ [(match_operand:<VHALF> 0 "register_operand")
+ (match_operand:VQ_NO2E 1 "register_operand")
+ (match_operand 2 "immediate_operand")]
+ "TARGET_SIMD"
+{
+ int start = INTVAL (operands[2]);
+ if (start != 0 && start != <nunits> / 2)
+ FAIL;
+ rtx sel = aarch64_gen_stepped_int_parallel (<nunits> / 2, start, 1);
+ emit_insn (gen_aarch64_get_half<mode> (operands[0], operands[1], sel));
+ DONE;
+})
+
+;; Extract a single-element 64-bit vector from one half of a 128-bit vector.
+(define_expand "vec_extractv2dfv1df"
+ [(match_operand:V1DF 0 "register_operand")
+ (match_operand:V2DF 1 "register_operand")
+ (match_operand 2 "immediate_operand")]
+ "TARGET_SIMD"
+{
+ /* V1DF is rarely used by other patterns, so it should be better to hide
+ it in a subreg destination of a normal DF op. */
+ rtx scalar0 = gen_lowpart (DFmode, operands[0]);
+ emit_insn (gen_vec_extractv2dfdf (scalar0, operands[1], operands[2]));
+ DONE;
+})
+
;; aes
(define_insn "aarch64_crypto_aes<aes_op>v16qi"
diff -Nurp a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
--- a/gcc/config/aarch64/predicates.md 2020-09-03 19:50:00.484000000 +0800
+++ b/gcc/config/aarch64/predicates.md 2020-09-03 19:50:49.315344350 +0800
@@ -438,6 +438,12 @@
return aarch64_simd_check_vect_par_cnst_half (op, mode, false);
})
+(define_predicate "ascending_int_parallel"
+ (match_code "parallel")
+{
+ return aarch64_stepped_int_parallel_p (op, 1);
+})
+
(define_special_predicate "aarch64_simd_lshift_imm"
(match_code "const,const_vector")
{
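As a reference point, the following standalone sketch models the check performed by aarch64_stepped_int_parallel_p, with a plain integer array standing in for the rtx PARALLEL (hypothetical names, not GCC code); the ascending_int_parallel predicate above is this check with STEP fixed at 1:

#include <stdbool.h>
#include <stdio.h>

/* Return true if VALS[0..N-1] form the linear series
   VALS[0], VALS[0] + STEP, VALS[0] + 2*STEP, ...  */
static bool
stepped_int_series_p (const long *vals, int n, int step)
{
  if (n == 0)
    return false;
  for (int i = 1; i < n; ++i)
    if (vals[i] != vals[0] + (long) i * step)
      return false;
  return true;
}

int
main (void)
{
  long lo_half[] = { 0, 1, 2, 3 };  /* lower-half lane selection */
  long hi_half[] = { 4, 5, 6, 7 };  /* upper-half lane selection */
  long bad[]     = { 0, 2, 4, 6 };  /* step 2: rejected for step 1 */
  printf ("%d %d %d\n",
          stepped_int_series_p (lo_half, 4, 1),
          stepped_int_series_p (hi_half, 4, 1),
          stepped_int_series_p (bad, 4, 1));  /* prints 1 1 0 */
  return 0;
}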


@@ -1,88 +0,0 @@
This backport contains 1 patch from the GCC mainstream tree.
The commit ID of the patch is listed below.
0001-arm-aarch64-Handle-no_insn-in-TARGET_SCHED_VARIABLE_.patch
d0bc0cb66bcb0e6a5a5a31a9e900e8ccc98e34e5
diff -Nurp a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
--- a/gcc/config/aarch64/aarch64.c 2020-09-03 15:54:20.136000000 +0800
+++ b/gcc/config/aarch64/aarch64.c 2020-09-03 15:55:22.736000000 +0800
@@ -11044,6 +11044,23 @@ aarch64_sched_issue_rate (void)
return aarch64_tune_params.issue_rate;
}
+/* Implement TARGET_SCHED_VARIABLE_ISSUE. */
+static int
+aarch64_sched_variable_issue (FILE *, int, rtx_insn *insn, int more)
+{
+ if (DEBUG_INSN_P (insn))
+ return more;
+
+ rtx_code code = GET_CODE (PATTERN (insn));
+ if (code == USE || code == CLOBBER)
+ return more;
+
+ if (get_attr_type (insn) == TYPE_NO_INSN)
+ return more;
+
+ return more - 1;
+}
+
static int
aarch64_sched_first_cycle_multipass_dfa_lookahead (void)
{
@@ -19428,6 +19445,9 @@ aarch64_libgcc_floating_mode_supported_p
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE aarch64_sched_issue_rate
+#undef TARGET_SCHED_VARIABLE_ISSUE
+#define TARGET_SCHED_VARIABLE_ISSUE aarch64_sched_variable_issue
+
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
aarch64_sched_first_cycle_multipass_dfa_lookahead
diff -Nurp a/gcc/config/arm/arm.c b/gcc/config/arm/arm.c
--- a/gcc/config/arm/arm.c 2020-09-03 15:54:20.100000000 +0800
+++ b/gcc/config/arm/arm.c 2020-09-03 15:55:22.740000000 +0800
@@ -258,6 +258,7 @@ static bool arm_sched_can_speculate_insn
static bool arm_macro_fusion_p (void);
static bool arm_cannot_copy_insn_p (rtx_insn *);
static int arm_issue_rate (void);
+static int arm_sched_variable_issue (FILE *, int, rtx_insn *, int);
static int arm_first_cycle_multipass_dfa_lookahead (void);
static int arm_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *, int);
static void arm_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
@@ -666,6 +667,9 @@ static const struct attribute_spec arm_a
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE arm_issue_rate
+#undef TARGET_SCHED_VARIABLE_ISSUE
+#define TARGET_SCHED_VARIABLE_ISSUE arm_sched_variable_issue
+
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
arm_first_cycle_multipass_dfa_lookahead
@@ -28316,6 +28320,23 @@ arm_issue_rate (void)
return current_tune->issue_rate;
}
+/* Implement TARGET_SCHED_VARIABLE_ISSUE. */
+static int
+arm_sched_variable_issue (FILE *, int, rtx_insn *insn, int more)
+{
+ if (DEBUG_INSN_P (insn))
+ return more;
+
+ rtx_code code = GET_CODE (PATTERN (insn));
+ if (code == USE || code == CLOBBER)
+ return more;
+
+ if (get_attr_type (insn) == TYPE_NO_INSN)
+ return more;
+
+ return more - 1;
+}
+
/* Return how many instructions should scheduler lookahead to choose the
best one. */
static int
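Both hook implementations above apply the same rule. A minimal standalone model of it (hypothetical enum and names, not GCC code):

#include <stdio.h>

enum insn_kind { DEBUG_INSN, USE_INSN, CLOBBER_INSN, NO_INSN, REAL_INSN };

/* MORE is the number of instructions that can still issue in the
   current cycle; consume a slot only for "real" instructions.  */
static int
variable_issue_model (enum insn_kind kind, int more)
{
  switch (kind)
    {
    case DEBUG_INSN:
    case USE_INSN:
    case CLOBBER_INSN:
    case NO_INSN:
      return more;      /* issues for free: slot count unchanged */
    default:
      return more - 1;  /* a real instruction consumes one slot */
    }
}

int
main (void)
{
  int slots = 2;  /* e.g. issue_rate == 2 */
  slots = variable_issue_model (REAL_INSN, slots);
  slots = variable_issue_model (NO_INSN, slots);  /* still one slot left */
  printf ("%d\n", slots);  /* prints 1 */
  return 0;
}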


@@ -1,78 +0,0 @@
This backport contains 1 patch from the GCC mainstream tree.
The commit ID of the patch is listed below.
0001-re-PR-tree-optimization-92537-ICE-in-vect_slp_analyz.patch
2439d584d5def75d705f33218bb3b97fca4c11a1
diff -Nurp a/gcc/testsuite/gfortran.dg/pr92537.f90 b/gcc/testsuite/gfortran.dg/pr92537.f90
--- a/gcc/testsuite/gfortran.dg/pr92537.f90 1970-01-01 08:00:00.000000000 +0800
+++ b/gcc/testsuite/gfortran.dg/pr92537.f90 2020-09-03 16:53:43.024000000 +0800
@@ -0,0 +1,32 @@
+! { dg-do compile }
+! { dg-options "-O2 -ftree-vectorize -fno-inline" }
+! { dg-additional-options "-march=skylake" { target x86_64-*-* i?86-*-* } }
+MODULE pr93527
+ implicit none
+ integer, parameter :: wp = kind (1.d0)
+ interface p_min
+ module procedure p_min_wp
+ end interface
+contains
+ subroutine foo (pr)
+ real(wp), pointer :: pr(:)
+ integer :: nzd
+ real(wp) :: pmin
+ real(wp) :: pmin_diag
+ integer :: i
+ nzd = 15
+ allocate (pr(nzd))
+ pmin_diag = 4000._wp
+ pmin = p_min(pmin_diag)
+ pmin = min (pmin,pmin_diag)
+ pr(1) = log(pmin)
+ do i=1,nzd-1
+ pr(i+1) = log(pmin) + i
+ end do
+ end subroutine foo
+ function p_min_wp (x) result (p_min)
+ real(wp), intent(in) :: x
+ real(wp) :: p_min
+ p_min = x
+ end function p_min_wp
+end module pr93527
diff -Nurp a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
--- a/gcc/tree-vect-slp.c 2020-09-03 16:53:22.668000000 +0800
+++ b/gcc/tree-vect-slp.c 2020-09-03 16:53:43.024000000 +0800
@@ -2176,18 +2176,6 @@ vect_analyze_slp_instance (vec_info *vin
matches[group_size / const_max_nunits * const_max_nunits] = false;
vect_free_slp_tree (node, false);
}
- else if (constructor
- && SLP_TREE_DEF_TYPE (node) != vect_internal_def)
- {
- /* CONSTRUCTOR vectorization relies on a vector stmt being
- generated, that doesn't work for fully external ones. */
- if (dump_enabled_p ())
- dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
- "Build SLP failed: CONSTRUCTOR of external "
- "or constant elements\n");
- vect_free_slp_tree (node, false);
- return false;
- }
else
{
/* Create a new SLP instance. */
@@ -2872,7 +2860,12 @@ vect_slp_analyze_operations (vec_info *v
if (!vect_slp_analyze_node_operations (vinfo,
SLP_INSTANCE_TREE (instance),
instance, visited, lvisited,
- &cost_vec))
+ &cost_vec)
+ /* Instances with a root stmt require vectorized defs for the
+ SLP tree root. */
+ || (SLP_INSTANCE_ROOT_STMT (instance)
+ && (SLP_TREE_DEF_TYPE (SLP_INSTANCE_TREE (instance))
+ != vect_internal_def)))
{
slp_tree node = SLP_INSTANCE_TREE (instance);
stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
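A standalone sketch of the relocated check (hypothetical names, not GCC code): an instance that carries a root statement is viable only when its tree root has an internal, vectorized definition, because the root statement needs a vector statement to attach to:

#include <stdbool.h>
#include <stdio.h>

enum def_type { VECT_INTERNAL_DEF, VECT_EXTERNAL_DEF, VECT_CONSTANT_DEF };

/* Model of the test added to vect_slp_analyze_operations: OPS_OK is
   the result of analyzing the node operations; an instance with a
   root stmt additionally needs an internal def at the tree root.  */
static bool
instance_viable_p (bool ops_ok, bool has_root_stmt, enum def_type root_def)
{
  if (!ops_ok)
    return false;
  if (has_root_stmt && root_def != VECT_INTERNAL_DEF)
    return false;  /* no vector stmt for the root stmt to attach to */
  return true;
}

int
main (void)
{
  /* A CONSTRUCTOR of all-constant elements: node analysis succeeds,
     but the instance is still discarded, as in the PR 92537 fix.  */
  printf ("%d\n", instance_viable_p (true, true, VECT_CONSTANT_DEF)); /* 0 */
  printf ("%d\n", instance_viable_p (true, true, VECT_INTERNAL_DEF)); /* 1 */
  return 0;
}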


@@ -2,7 +2,6 @@
%global gcc_version 9.3.1
%global gcc_major 9.3.1
-%global pkgversion 3.0.0.b010
%global _unpackaged_files_terminate_build 0
%global _performance_build 1
@@ -60,7 +59,7 @@
Summary: Various compilers (C, C++, Objective-C, ...)
Name: gcc
Version: %{gcc_version}
-Release: %{DATE}.7
+Release: %{DATE}.8
License: GPLv3+ and GPLv3+ with exceptions and GPLv2+ with exceptions and LGPLv2+ and BSD
Source0: gcc-9.3.0.tar.xz
%global isl_version 0.16.1
@@ -176,12 +175,7 @@ Patch60: fix-load-eliding-in-SM.patch
Patch61: fix-SSA-update-for-vectorizer-epilogue.patch
Patch62: fix-ICE-when-vectorizing-nested-cycles.patch
Patch63: fix-avoid-bogus-uninit-warning-with-store-motion.patch
-Patch64: ipa-const-prop-null-point-check-bugfix.patch
-Patch65: avoid-cycling-on-vertain-subreg-reloads.patch
-Patch66: fix-ICE-in-verify_target_availability.patch
-Patch67: fix-ICE-vect_slp_analyze_node_operations.patch
-Patch68: fix-ICE-in-extract_constrain_insn.patch
-Patch69: fix-ICE-during-GIMPLE-pass-dse.patch
+Patch64: fix-ICE-during-GIMPLE-pass-dse.patch

%global gcc_target_platform %{_arch}-linux-gnu

@@ -689,11 +683,6 @@ not stable, so plugins must be rebuilt any time GCC is updated.
%patch62 -p1
%patch63 -p1
%patch64 -p1
-%patch65 -p1
-%patch66 -p1
-%patch67 -p1
-%patch68 -p1
-%patch69 -p1

%build

@@ -761,7 +750,7 @@ CC="$CC" CFLAGS="$OPT_FLAGS" \
--enable-gnu-unique-object --enable-linker-build-id --with-linker-hash-style=gnu \
--enable-languages=c,c++,objc,obj-c++,fortran,lto --enable-plugin \
--enable-initfini-array --disable-libgcj --without-isl --without-cloog \
---enable-gnu-indirect-function --build=%{gcc_target_platform} --with-pkgversion=%{pkgversion} \
+--enable-gnu-indirect-function --build=%{gcc_target_platform} \
--with-stage1-ldflags="$OPT_LDFLAGS" \
--with-boot-ldflags="$OPT_LDFLAGS" \
%ifarch x86_64
@@ -2622,6 +2611,9 @@ end
%doc rpm.doc/changelogs/libcc1/ChangeLog*

%changelog
+* Thu Sep 15 2020 huanghaitao <huanghaitao@huawei.com> - 9.3.1-20200911.8
+- revert patches to fix build errors
+
* Fri Sep 11 2020 eastb233 <xiezhiheng@huawei.com> - 9.3.1-20200911.7
- avoid-cycling-on-vertain-subreg-reloads.patch: New file
- fix-ICE-in-verify_target_availability.patch: New file


@@ -1,97 +0,0 @@
commit 3c4fa8a8562d3788bb763ca5c8fb1563b8d4eb1a
Author: Martin Jambor <jamborm@gcc.gnu.org>
Date: Wed Nov 13 15:12:58 2019 +0100
Add a few missing checks that IPA_NODE_REF is not NULL (PR 92454)
2019-11-13 Jan Hubicka <hubicka@ucw.cz>
Martin Jambor <mjambor@suse.cz>
PR ipa/92454
* ipa-cp.c (spread_undeadness): Check that IPA_NODE_REF exists.
(identify_dead_nodes): Likewise.
testsuite/
* g++.dg/ipa/pr92454.C: New test.
From-SVN: r278142
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index 54b9724998a..207d7c88bbd 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -4979,7 +4979,7 @@ spread_undeadness (struct cgraph_node *node)
callee = cs->callee->function_symbol (NULL);
info = IPA_NODE_REF (callee);
- if (info->node_dead)
+ if (info && info->node_dead)
{
info->node_dead = 0;
spread_undeadness (callee);
@@ -5017,18 +5017,19 @@ identify_dead_nodes (struct cgraph_node *node)
struct cgraph_node *v;
for (v = node; v; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
if (v->local.local
+ && IPA_NODE_REF (v)
&& !v->call_for_symbol_thunks_and_aliases
(has_undead_caller_from_outside_scc_p, NULL, true))
IPA_NODE_REF (v)->node_dead = 1;
for (v = node; v; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
- if (!IPA_NODE_REF (v)->node_dead)
+ if (IPA_NODE_REF (v) && !IPA_NODE_REF (v)->node_dead)
spread_undeadness (v);
if (dump_file && (dump_flags & TDF_DETAILS))
{
for (v = node; v; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
- if (IPA_NODE_REF (v)->node_dead)
+ if (IPA_NODE_REF (v) && IPA_NODE_REF (v)->node_dead)
fprintf (dump_file, " Marking node as dead: %s.\n", v->dump_name ());
}
}
diff --git a/gcc/testsuite/g++.dg/ipa/pr92454.C b/gcc/testsuite/g++.dg/ipa/pr92454.C
new file mode 100644
index 00000000000..de67c66aed0
--- /dev/null
+++ b/gcc/testsuite/g++.dg/ipa/pr92454.C
@@ -0,0 +1,38 @@
+/* Originally PR ipa/91969, options adjusted for PR ipa/92454 */
+/* { dg-options "-O3 --param ipa-cp-eval-threshold=1" } */
+
+enum by
+{
+};
+class A
+{
+public:
+ class B
+ {
+ public:
+ virtual void m_fn2 (by) = 0;
+ };
+ virtual int m_fn1 ();
+ B *cf;
+};
+by a;
+class C : A, A::B
+{
+ void m_fn2 (by);
+};
+void C::m_fn2 (by) { cf->m_fn2 (a); }
+
+struct a
+{
+ virtual ~a ();
+};
+
+struct b
+{
+ virtual void d (...);
+};
+
+struct c : a, b
+{
+ void d (...) {}
+};