Skip to content

Commit

Permalink
[DAGCombiner] Remove a hasOneUse check in visitAND (llvm#115142)
Browse files Browse the repository at this point in the history
For some reason there was a hasOneUse check on the splat for the
second operand and it's not obvious to me why. The check blocks
optimisations for lowering of nodes like AVGFLOORU and AVGCEILU.

In a follow-on patch I also plan to improve the generated code
for AVGCEILU further by teaching computeKnownBits about
zero-extending masked loads.
  • Loading branch information
david-arm authored Nov 8, 2024
1 parent ff07df6 commit b9dd602
Show file tree
Hide file tree
Showing 2 changed files with 64 additions and 2 deletions.
3 changes: 1 addition & 2 deletions llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7096,8 +7096,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// fold (and (masked_load) (splat_vec (x, ...))) to zext_masked_load
auto *MLoad = dyn_cast<MaskedLoadSDNode>(N0);
ConstantSDNode *Splat = isConstOrConstSplat(N1, true, true);
-  if (MLoad && MLoad->getExtensionType() == ISD::EXTLOAD && Splat &&
-      N1.hasOneUse()) {
+  if (MLoad && MLoad->getExtensionType() == ISD::EXTLOAD && Splat) {
EVT LoadVT = MLoad->getMemoryVT();
EVT ExtVT = VT;
if (TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT, LoadVT)) {
Expand Down
63 changes: 63 additions & 0 deletions llvm/test/CodeGen/AArch64/sve-hadd.ll
Original file line number Diff line number Diff line change
Expand Up @@ -1341,3 +1341,66 @@ entry:
%avg = ashr <vscale x 2 x i64> %add, splat (i64 1)
ret <vscale x 2 x i64> %avg
}

define void @zext_mload_avgflooru(ptr %p1, ptr %p2, <vscale x 8 x i1> %mask) {
; SVE-LABEL: zext_mload_avgflooru:
; SVE: // %bb.0:
; SVE-NEXT: ld1b { z0.h }, p0/z, [x0]
; SVE-NEXT: ld1b { z1.h }, p0/z, [x1]
; SVE-NEXT: eor z2.d, z0.d, z1.d
; SVE-NEXT: and z0.d, z0.d, z1.d
; SVE-NEXT: lsr z1.h, z2.h, #1
; SVE-NEXT: add z0.h, z0.h, z1.h
; SVE-NEXT: st1h { z0.h }, p0, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: zext_mload_avgflooru:
; SVE2: // %bb.0:
; SVE2-NEXT: ld1b { z0.h }, p0/z, [x0]
; SVE2-NEXT: ld1b { z1.h }, p0/z, [x1]
; SVE2-NEXT: ptrue p1.h
; SVE2-NEXT: uhadd z0.h, p1/m, z0.h, z1.h
; SVE2-NEXT: st1h { z0.h }, p0, [x0]
; SVE2-NEXT: ret
%ld1 = call <vscale x 8 x i8> @llvm.masked.load(ptr %p1, i32 16, <vscale x 8 x i1> %mask, <vscale x 8 x i8> zeroinitializer)
%ld2 = call <vscale x 8 x i8> @llvm.masked.load(ptr %p2, i32 16, <vscale x 8 x i1> %mask, <vscale x 8 x i8> zeroinitializer)
%and = and <vscale x 8 x i8> %ld1, %ld2
%xor = xor <vscale x 8 x i8> %ld1, %ld2
%shift = lshr <vscale x 8 x i8> %xor, splat(i8 1)
%avg = add <vscale x 8 x i8> %and, %shift
%avgext = zext <vscale x 8 x i8> %avg to <vscale x 8 x i16>
call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %avgext, ptr %p1, i32 16, <vscale x 8 x i1> %mask)
ret void
}

define void @zext_mload_avgceilu(ptr %p1, ptr %p2, <vscale x 8 x i1> %mask) {
; SVE-LABEL: zext_mload_avgceilu:
; SVE: // %bb.0:
; SVE-NEXT: ld1b { z0.h }, p0/z, [x0]
; SVE-NEXT: ld1b { z1.h }, p0/z, [x1]
; SVE-NEXT: eor z2.d, z0.d, z1.d
; SVE-NEXT: orr z0.d, z0.d, z1.d
; SVE-NEXT: lsr z1.h, z2.h, #1
; SVE-NEXT: sub z0.h, z0.h, z1.h
; SVE-NEXT: st1b { z0.h }, p0, [x0]
; SVE-NEXT: ret
;
; SVE2-LABEL: zext_mload_avgceilu:
; SVE2: // %bb.0:
; SVE2-NEXT: ld1b { z0.h }, p0/z, [x0]
; SVE2-NEXT: ld1b { z1.h }, p0/z, [x1]
; SVE2-NEXT: ptrue p1.h
; SVE2-NEXT: urhadd z0.h, p1/m, z0.h, z1.h
; SVE2-NEXT: st1b { z0.h }, p0, [x0]
; SVE2-NEXT: ret
%ld1 = call <vscale x 8 x i8> @llvm.masked.load(ptr %p1, i32 16, <vscale x 8 x i1> %mask, <vscale x 8 x i8> zeroinitializer)
%ld2 = call <vscale x 8 x i8> @llvm.masked.load(ptr %p2, i32 16, <vscale x 8 x i1> %mask, <vscale x 8 x i8> zeroinitializer)
%zext1 = zext <vscale x 8 x i8> %ld1 to <vscale x 8 x i16>
%zext2 = zext <vscale x 8 x i8> %ld2 to <vscale x 8 x i16>
%add1 = add nuw nsw <vscale x 8 x i16> %zext1, splat(i16 1)
%add2 = add nuw nsw <vscale x 8 x i16> %add1, %zext2
%shift = lshr <vscale x 8 x i16> %add2, splat(i16 1)
%trunc = trunc <vscale x 8 x i16> %shift to <vscale x 8 x i8>
call void @llvm.masked.store.nxv8i8(<vscale x 8 x i8> %trunc, ptr %p1, i32 16, <vscale x 8 x i1> %mask)
ret void
}

0 comments on commit b9dd602

Please sign in to comment.