gcc/fix-an-ICE-in-vect_recog_mask_conversion_pattern.patch

This backport contains 1 patch from the GCC mainstream tree.
The commit ID of the patch is listed below:
0001-aarch64-Fix-an-ICE-in-vect_recog_mask_conversion_pattern.patch:
91d80cf4bd2827dd9c40fe6a7c719c909d79083d
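
Editor's note, inferred from the hunks and the new pr96757.c test below rather than from an upstream changelog: before this change, vect_recog_mask_conversion_pattern derived rhs1_type from operand 0 of the comparison used as the COND_EXPR condition only. When the two operands of that comparison carry boolean masks of different precisions, the mismatch leads to the ICE reported in PR target/96757. The patch queries the mask type of both operands and, when the precisions differ, settles on a single mask type for the whole condition. The sketch below illustrates that selection rule only, with plain integers standing in for TYPE_PRECISION values; choose_mask_precision is a made-up name, not a GCC function.

/* Editor's illustration, not GCC code: the precision-selection rule added
   in the second tree-vect-patterns.c hunk below.  Given the mask precisions
   of the two comparison operands and of the result (lhs), return the
   precision that would be picked for rhs1_type when both operand mask
   types are known.  */
#include <stdlib.h>

static unsigned
choose_mask_precision (unsigned op0_prec, unsigned op1_prec, unsigned lhs_prec)
{
  if (op0_prec == op1_prec)
    return op0_prec;

  int d0 = (int) op0_prec - (int) lhs_prec;
  int d1 = (int) op1_prec - (int) lhs_prec;

  /* Both operand precisions lie on the same side of the result precision:
     keep the operand mask type whose precision is closer to the result.  */
  if ((d0 > 0 && d1 > 0) || (d0 < 0 && d1 < 0))
    return abs (d0) > abs (d1) ? op1_prec : op0_prec;

  /* The operand precisions straddle (or one equals) the result precision:
     fall back to the result precision itself; the patch builds an unsigned
     integer type of that precision via build_nonstandard_integer_type.  */
  return lhs_prec;
}

The third hunk then takes the early exit only when neither operand has a recorded mask type, and the last hunk applies build_mask_conversion to whichever operand does not already match the chosen rhs1_type before rebuilding the comparison, instead of copying the original condition tree unchanged.
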
diff -Nurp a/gcc/testsuite/gcc.target/aarch64/pr96757.c b/gcc/testsuite/gcc.target/aarch64/pr96757.c
--- a/gcc/testsuite/gcc.target/aarch64/pr96757.c 1969-12-31 19:00:00.000000000 -0500
+++ b/gcc/testsuite/gcc.target/aarch64/pr96757.c 2020-10-12 08:32:12.192000000 -0400
@@ -0,0 +1,23 @@
+/* PR target/96757 */
+/* { dg-do compile } */
+/* { dg-options "-O3" } */
+
+short
+fun1(short i, short j)
+{
+ return i * j;
+}
+
+int
+fun(int a, int b, int c)
+{
+ int *v, z, k, m;
+ short f, d;
+ for (int i=0; i<c; i++)
+ {
+ f= 4 <= d;
+ k= a > m;
+ z = f > k;
+ *v += fun1(z,b);
+ }
+}
diff -Nurp a/gcc/tree-vect-patterns.c b/gcc/tree-vect-patterns.c
--- a/gcc/tree-vect-patterns.c 2020-10-12 08:05:18.924000000 -0400
+++ b/gcc/tree-vect-patterns.c 2020-10-12 08:50:56.996000000 -0400
@@ -3917,6 +3917,8 @@ vect_recog_mask_conversion_pattern (stmt
tree vectype1, vectype2;
stmt_vec_info pattern_stmt_info;
vec_info *vinfo = stmt_vinfo->vinfo;
+ tree rhs1_op0 = NULL_TREE, rhs1_op1 = NULL_TREE;
+ tree rhs1_op0_type = NULL_TREE, rhs1_op1_type = NULL_TREE;

/* Check for MASK_LOAD ans MASK_STORE calls requiring mask conversion. */
if (is_gimple_call (last_stmt)
@@ -4016,9 +4018,37 @@ vect_recog_mask_conversion_pattern (stmt

it is better for b1 and b2 to use the mask type associated
with int elements rather bool (byte) elements. */
- rhs1_type = search_type_for_mask (TREE_OPERAND (rhs1, 0), vinfo);
- if (!rhs1_type)
- rhs1_type = TREE_TYPE (TREE_OPERAND (rhs1, 0));
+ rhs1_op0 = TREE_OPERAND (rhs1, 0);
+ rhs1_op1 = TREE_OPERAND (rhs1, 1);
+ if (!rhs1_op0 || !rhs1_op1)
+ return NULL;
+ rhs1_op0_type = search_type_for_mask (rhs1_op0, vinfo);
+ rhs1_op1_type = search_type_for_mask (rhs1_op1, vinfo);
+
+ if (!rhs1_op0_type)
+ rhs1_type = TREE_TYPE (rhs1_op0);
+ else if (!rhs1_op1_type)
+ rhs1_type = TREE_TYPE (rhs1_op1);
+ else if (TYPE_PRECISION (rhs1_op0_type)
+ != TYPE_PRECISION (rhs1_op1_type))
+ {
+ int tmp0 = (int) TYPE_PRECISION (rhs1_op0_type)
+ - (int) TYPE_PRECISION (TREE_TYPE (lhs));
+ int tmp1 = (int) TYPE_PRECISION (rhs1_op1_type)
+ - (int) TYPE_PRECISION (TREE_TYPE (lhs));
+ if ((tmp0 > 0 && tmp1 > 0) || (tmp0 < 0 && tmp1 < 0))
+ {
+ if (abs (tmp0) > abs (tmp1))
+ rhs1_type = rhs1_op1_type;
+ else
+ rhs1_type = rhs1_op0_type;
+ }
+ else
+ rhs1_type = build_nonstandard_integer_type
+ (TYPE_PRECISION (TREE_TYPE (lhs)), 1);
+ }
+ else
+ rhs1_type = rhs1_op0_type;
}
else
return NULL;
@@ -4036,8 +4066,8 @@ vect_recog_mask_conversion_pattern (stmt
name from the outset. */
if (known_eq (TYPE_VECTOR_SUBPARTS (vectype1),
TYPE_VECTOR_SUBPARTS (vectype2))
- && (TREE_CODE (rhs1) == SSA_NAME
- || rhs1_type == TREE_TYPE (TREE_OPERAND (rhs1, 0))))
+ && !rhs1_op0_type
+ && !rhs1_op1_type)
return NULL;

/* If rhs1 is invariant and we can promote it leave the COND_EXPR
@@ -4069,7 +4099,16 @@ vect_recog_mask_conversion_pattern (stmt
if (TREE_CODE (rhs1) != SSA_NAME)
{
tmp = vect_recog_temp_ssa_var (TREE_TYPE (rhs1), NULL);
- pattern_stmt = gimple_build_assign (tmp, rhs1);
+ if (rhs1_op0_type
+ && TYPE_PRECISION (rhs1_op0_type) != TYPE_PRECISION (rhs1_type))
+ rhs1_op0 = build_mask_conversion (rhs1_op0,
+ vectype2, stmt_vinfo);
+ if (rhs1_op1_type
+ && TYPE_PRECISION (rhs1_op1_type) != TYPE_PRECISION (rhs1_type))
+ rhs1_op1 = build_mask_conversion (rhs1_op1,
+ vectype2, stmt_vinfo);
+ pattern_stmt = gimple_build_assign (tmp, TREE_CODE (rhs1),
+ rhs1_op0, rhs1_op1);
rhs1 = tmp;
append_pattern_def_seq (stmt_vinfo, pattern_stmt, vectype2);
}
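
For reference, the editor's condensed reading of how the pr96757.c test above sets up the problematic condition; mixed_masks is a made-up name and this snippet is illustrative only, not part of the patch.

/* Condensed from gcc.target/aarch64/pr96757.c above.  The two booleans
   feeding the final comparison are produced by comparisons of differently
   sized operands, so the vectorizer can associate them with masks of
   different precisions.  */
short d;  /* 16-bit operand */
int a, m; /* 32-bit operands */

int
mixed_masks (void)
{
  short f = 4 <= d; /* boolean set from a comparison involving a short */
  int k = a > m;    /* boolean set from a comparison of ints */
  return f > k;     /* comparison mixing the two boolean sources */
}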