[FPEnv] Fix chain handling for fpexcept.strict nodes

We need to ensure that fpexcept.strict nodes are not optimized away even if
the result is unused. To do that, we need to chain them into the block's
terminator nodes, as is already done for PendingExports.

This patch adds two new lists of pending chains, PendingConstrainedFP and
PendingConstrainedFPStrict, to hold constrained FP intrinsic nodes without
and with fpexcept.strict markers, respectively. This not only solves the
problem above, but also relaxes chains a bit further: FP nodes no longer
need to be flushed before a store or other memory access. (They are still
flushed before nodes with other side effects.)
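
To make the new chaining scheme concrete, here is a minimal standalone C++
sketch of the flushing policy. This is illustrative only: the list and
function names mirror the patch, but nothing below is the actual
SelectionDAGBuilder code.

    #include <string>
    #include <utility>
    #include <vector>

    // "Chain" stands in for SDValue; all names are for illustration only.
    using Chain = std::string;

    struct PendingChains {
      std::vector<Chain> PendingLoads;
      std::vector<Chain> PendingExports;
      std::vector<Chain> PendingConstrainedFP;       // fpexcept.ignore / maytrap
      std::vector<Chain> PendingConstrainedFPStrict; // fpexcept.strict

      // Before a plain memory access: only prior loads must be ordered.
      // Constrained FP nodes may stay pending -- the relaxation this adds.
      std::vector<Chain> getMemoryRoot() {
        return std::exchange(PendingLoads, {});
      }

      // Before a call or any node with unspecified side effects: flush the
      // FP lists too, since such nodes may change rounding modes or
      // floating-point exception masks.
      std::vector<Chain> getRoot() {
        PendingLoads.insert(PendingLoads.end(), PendingConstrainedFP.begin(),
                            PendingConstrainedFP.end());
        PendingLoads.insert(PendingLoads.end(),
                            PendingConstrainedFPStrict.begin(),
                            PendingConstrainedFPStrict.end());
        PendingConstrainedFP.clear();
        PendingConstrainedFPStrict.clear();
        return getMemoryRoot();
      }

      // Before a terminator: chain strict nodes in alongside the exports,
      // so an fpexcept.strict node can never be dead-code-eliminated even
      // if its value is unused.
      std::vector<Chain> getControlRoot() {
        PendingExports.insert(PendingExports.end(),
                              PendingConstrainedFPStrict.begin(),
                              PendingConstrainedFPStrict.end());
        PendingConstrainedFPStrict.clear();
        return std::exchange(PendingExports, {});
      }
    };

Note that getMemoryRoot() leaves both constrained-FP lists pending; that is
exactly what lets non-volatile stores, memcpy, memset, and memmove stop
serializing against FP nodes, as the visitStore and memory-intrinsic changes
below show.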

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D72341
Ulrich Weigand, 2020-01-13 14:37:07 +01:00
commit 04a86966fb (parent d7d88b9d8b)
10 changed files with 549 additions and 478 deletions


@ -1025,6 +1025,8 @@ void SelectionDAGBuilder::clear() {
UnusedArgNodeMap.clear();
PendingLoads.clear();
PendingExports.clear();
PendingConstrainedFP.clear();
PendingConstrainedFPStrict.clear();
CurInst = nullptr;
HasTailCall = false;
SDNodeOrder = LowestSDNodeOrder;
@ -1035,7 +1037,7 @@ void SelectionDAGBuilder::clearDanglingDebugInfo() {
DanglingDebugInfoMap.clear();
}
SDValue SelectionDAGBuilder::getRoot() {
SDValue SelectionDAGBuilder::getMemoryRoot() {
if (PendingLoads.empty())
return DAG.getRoot();
@ -1053,9 +1055,31 @@ SDValue SelectionDAGBuilder::getRoot() {
return Root;
}
SDValue SelectionDAGBuilder::getRoot() {
// Chain up all pending constrained intrinsics together with all
// pending loads, by simply appending them to PendingLoads and
// then calling getMemoryRoot().
PendingLoads.reserve(PendingLoads.size() +
PendingConstrainedFP.size() +
PendingConstrainedFPStrict.size());
PendingLoads.append(PendingConstrainedFP.begin(),
PendingConstrainedFP.end());
PendingLoads.append(PendingConstrainedFPStrict.begin(),
PendingConstrainedFPStrict.end());
PendingConstrainedFP.clear();
PendingConstrainedFPStrict.clear();
return getMemoryRoot();
}
SDValue SelectionDAGBuilder::getControlRoot() {
SDValue Root = DAG.getRoot();
// We need to emit pending fpexcept.strict constrained intrinsics,
// so append them to the PendingExports list.
PendingExports.append(PendingConstrainedFPStrict.begin(),
PendingConstrainedFPStrict.end());
PendingConstrainedFPStrict.clear();
if (PendingExports.empty())
return Root;
@ -4060,9 +4084,11 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
SDValue Root;
bool ConstantMemory = false;
if (isVolatile || NumValues > MaxParallelChains)
if (isVolatile)
// Serialize volatile loads with other side effects.
Root = getRoot();
else if (NumValues > MaxParallelChains)
Root = getMemoryRoot();
else if (AA &&
AA->pointsToConstantMemory(MemoryLocation(
SV,
@ -4237,7 +4263,7 @@ void SelectionDAGBuilder::visitStore(const StoreInst &I) {
SDValue Src = getValue(SrcV);
SDValue Ptr = getValue(PtrV);
SDValue Root = getRoot();
SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
SDLoc dl = getCurSDLoc();
unsigned Alignment = I.getAlignment();
@ -4329,7 +4355,7 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
VT.getStoreSize().getKnownMinSize(),
Alignment, AAInfo);
SDValue StoreNode =
DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
ISD::UNINDEXED, false /* Truncating */, IsCompressing);
DAG.setRoot(StoreNode);
setValue(&I, StoreNode);
@ -4463,7 +4489,7 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
IndexType = ISD::SIGNED_SCALED;
Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
}
SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index, Scale };
SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
Ops, MMO, IndexType);
DAG.setRoot(Scatter);
@ -5850,7 +5876,8 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
// FIXME: Support passing different dest/src alignments to the memcpy DAG
// node.
SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
SDValue Root = isVol ? getRoot() : getMemoryRoot();
SDValue MC = DAG.getMemcpy(Root, sdl, Op1, Op2, Op3, Align, isVol,
false, isTC,
MachinePointerInfo(I.getArgOperand(0)),
MachinePointerInfo(I.getArgOperand(1)));
@ -5866,7 +5893,8 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
unsigned Align = std::max<unsigned>(MSI.getDestAlignment(), 1);
bool isVol = MSI.isVolatile();
bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
SDValue Root = isVol ? getRoot() : getMemoryRoot();
SDValue MS = DAG.getMemset(Root, sdl, Op1, Op2, Op3, Align, isVol,
isTC, MachinePointerInfo(I.getArgOperand(0)));
updateDAGForMaybeTailCall(MS);
return;
@ -5884,7 +5912,8 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
// FIXME: Support passing different dest/src alignments to the memmove DAG
// node.
SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
SDValue Root = isVol ? getRoot() : getMemoryRoot();
SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Align, isVol,
isTC, MachinePointerInfo(I.getArgOperand(0)),
MachinePointerInfo(I.getArgOperand(1)));
updateDAGForMaybeTailCall(MM);
@ -7039,9 +7068,29 @@ void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers);
assert(Result.getNode()->getNumValues() == 2);
// See above -- chain is handled like for loads here.
// Push node to the appropriate list so that future instructions can be
// chained up correctly.
SDValue OutChain = Result.getValue(1);
PendingLoads.push_back(OutChain);
switch (FPI.getExceptionBehavior().getValue()) {
case fp::ExceptionBehavior::ebIgnore:
// The only reason why ebIgnore nodes still need to be chained is that
// they might depend on the current rounding mode, and therefore must
// not be moved across instructions that may change that mode.
LLVM_FALLTHROUGH;
case fp::ExceptionBehavior::ebMayTrap:
// These must not be moved across calls or instructions that may change
// floating-point exception masks.
PendingConstrainedFP.push_back(OutChain);
break;
case fp::ExceptionBehavior::ebStrict:
// These must not be moved across calls or instructions that may change
// floating-point exception masks or read floating-point exception flags.
// In addition, they cannot be optimized out even if unused.
PendingConstrainedFPStrict.push_back(OutChain);
break;
}
SDValue FPResult = Result.getValue(0);
setValue(&FPI, FPResult);
}
@ -7424,7 +7473,8 @@ bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
// In the mempcpy context we need to pass in a false value for isTailCall
// because the return pointer needs to be adjusted by the size of
// the copied memory.
SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Align, isVol,
SDValue Root = isVol ? getRoot() : getMemoryRoot();
SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Align, isVol,
false, /*isTailCall=*/false,
MachinePointerInfo(I.getArgOperand(0)),
MachinePointerInfo(I.getArgOperand(1)));
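
The new switch in visitConstrainedFPIntrinsic classifies each node's output
chain by its exception behavior. Restated in terms of the PendingChains
sketch above (again illustrative, not LLVM API):

    enum class ExceptionBehavior { ebIgnore, ebMayTrap, ebStrict };

    void chainConstrainedFPNode(PendingChains &Builder, ExceptionBehavior EB,
                                const Chain &OutChain) {
      switch (EB) {
      case ExceptionBehavior::ebIgnore:  // may still depend on rounding mode
      case ExceptionBehavior::ebMayTrap: // must not cross mask-changing nodes
        Builder.PendingConstrainedFP.push_back(OutChain);
        break;
      case ExceptionBehavior::ebStrict:  // additionally must never be deleted
        Builder.PendingConstrainedFPStrict.push_back(OutChain);
        break;
      }
    }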


@ -143,6 +143,17 @@ private:
/// tokenfactor for them just before terminator instructions.
SmallVector<SDValue, 8> PendingExports;
/// Similar to loads, nodes corresponding to constrained FP intrinsics are
/// bunched up and emitted when necessary. These can be moved across each
/// other and any (normal) memory operation (load or store), but not across
/// calls or instructions having unspecified side effects. As a special
/// case, constrained FP intrinsics using fpexcept.strict may not be deleted
/// even if otherwise unused, so they need to be chained before any
/// terminator instruction (like PendingExports). We track the latter
/// set of nodes in a separate list.
SmallVector<SDValue, 8> PendingConstrainedFP;
SmallVector<SDValue, 8> PendingConstrainedFPStrict;
/// A unique monotonically increasing number used to order the SDNodes we
/// create.
unsigned SDNodeOrder;
@ -447,12 +458,18 @@ public:
/// Return the current virtual root of the Selection DAG, flushing any
/// PendingLoad items. This must be done before emitting a store or any other
/// node that may need to be ordered after any prior load instructions.
/// memory node that may need to be ordered after any prior load instructions.
SDValue getMemoryRoot();
/// Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict)
/// items. This must be done before emitting any call or any other node
/// that may need to be ordered after FP instructions due to other side
/// effects.
SDValue getRoot();
/// Similar to getRoot, but instead of flushing all the PendingLoad items,
/// flush all the PendingExports items. It is necessary to do this before
/// emitting a terminator instruction.
/// flush all the PendingExports (and PendingConstrainedFPStrict) items.
/// It is necessary to do this before emitting a terminator instruction.
SDValue getControlRoot();
SDLoc getCurSDLoc() const {
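
Summarizing the comments above, each kind of node now requests a different
root. A hypothetical helper (not LLVM API) makes the mapping explicit:

    enum class NodeKind { MemoryOp, CallOrSideEffect, Terminator };

    const char *rootToUse(NodeKind K) {
      switch (K) {
      case NodeKind::MemoryOp:         // load/store: order after prior loads
        return "getMemoryRoot()";
      case NodeKind::CallOrSideEffect: // may change FP state: flush FP lists
        return "getRoot()";
      case NodeKind::Terminator:       // keeps exports and strict nodes alive
        return "getControlRoot()";
      }
      return "getRoot()"; // unreachable; silences -Wreturn-type
    }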


@ -1358,21 +1358,21 @@ define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %r
; PC64LE-NEXT: fmr 4, 2
; PC64LE-NEXT: fmr 30, 1
; PC64LE-NEXT: fmr 29, 2
; PC64LE-NEXT: stfd 1, 16(30)
; PC64LE-NEXT: stfd 2, 24(30)
; PC64LE-NEXT: stfd 1, 16(30)
; PC64LE-NEXT: bl __gcc_qmul
; PC64LE-NEXT: nop
; PC64LE-NEXT: fmr 1, 31
; PC64LE-NEXT: xxlxor 2, 2, 2
; PC64LE-NEXT: li 5, 2
; PC64LE-NEXT: stfd 30, 32(30)
; PC64LE-NEXT: stfd 29, 40(30)
; PC64LE-NEXT: stfd 30, 32(30)
; PC64LE-NEXT: bl __powitf2
; PC64LE-NEXT: nop
; PC64LE-NEXT: frsp 0, 1
; PC64LE-NEXT: stfsx 0, 0, 29
; PC64LE-NEXT: stfd 2, -8(30)
; PC64LE-NEXT: stfd 1, -16(30)
; PC64LE-NEXT: stfd 2, -8(30)
; PC64LE-NEXT: addi 1, 1, 80
; PC64LE-NEXT: ld 0, 16(1)
; PC64LE-NEXT: lfd 31, -8(1) # 8-byte Folded Reload
@ -1409,21 +1409,21 @@ define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %r
; PC64LE9-NEXT: fmr 4, 2
; PC64LE9-NEXT: fmr 30, 2
; PC64LE9-NEXT: fmr 29, 1
; PC64LE9-NEXT: stfd 1, 16(30)
; PC64LE9-NEXT: stfd 2, 24(30)
; PC64LE9-NEXT: stfd 1, 16(30)
; PC64LE9-NEXT: bl __gcc_qmul
; PC64LE9-NEXT: nop
; PC64LE9-NEXT: fmr 1, 31
; PC64LE9-NEXT: xxlxor 2, 2, 2
; PC64LE9-NEXT: li 5, 2
; PC64LE9-NEXT: stfd 29, 32(30)
; PC64LE9-NEXT: stfd 30, 40(30)
; PC64LE9-NEXT: stfd 29, 32(30)
; PC64LE9-NEXT: bl __powitf2
; PC64LE9-NEXT: nop
; PC64LE9-NEXT: frsp 0, 1
; PC64LE9-NEXT: stfs 0, 0(29)
; PC64LE9-NEXT: stfd 2, -8(30)
; PC64LE9-NEXT: stfd 1, -16(30)
; PC64LE9-NEXT: stfd 2, -8(30)
; PC64LE9-NEXT: addi 1, 1, 80
; PC64LE9-NEXT: ld 0, 16(1)
; PC64LE9-NEXT: lfd 31, -8(1) # 8-byte Folded Reload
@ -1463,15 +1463,15 @@ define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %r
; PC64-NEXT: fmr 4, 2
; PC64-NEXT: fmr 29, 1
; PC64-NEXT: fmr 28, 2
; PC64-NEXT: stfd 1, 16(30)
; PC64-NEXT: stfd 2, 24(30)
; PC64-NEXT: stfd 1, 16(30)
; PC64-NEXT: bl __gcc_qmul
; PC64-NEXT: nop
; PC64-NEXT: fmr 1, 31
; PC64-NEXT: fmr 2, 30
; PC64-NEXT: li 5, 2
; PC64-NEXT: stfd 29, 32(30)
; PC64-NEXT: stfd 28, 40(30)
; PC64-NEXT: stfd 29, 32(30)
; PC64-NEXT: bl __powitf2
; PC64-NEXT: nop
; PC64-NEXT: frsp 0, 1
@ -1481,8 +1481,8 @@ define void @test_constrained_libcall_multichain(float* %firstptr, ppc_fp128* %r
; PC64-NEXT: lfd 29, 152(1) # 8-byte Folded Reload
; PC64-NEXT: lfd 28, 144(1) # 8-byte Folded Reload
; PC64-NEXT: ld 29, 120(1) # 8-byte Folded Reload
; PC64-NEXT: stfd 2, -8(30)
; PC64-NEXT: stfd 1, -16(30)
; PC64-NEXT: stfd 2, -8(30)
; PC64-NEXT: ld 30, 128(1) # 8-byte Folded Reload
; PC64-NEXT: addi 1, 1, 176
; PC64-NEXT: ld 0, 16(1)


@ -287,4 +287,56 @@ define void @f12(float %f1, float %f2, float *%ptr1, float *%ptr2) #0 {
ret void
}
; If the result of any FP operation is unused, it can be removed
; -- except for fpexcept.strict operations.
define void @f13(float %f1) {
; CHECK-LABEL: f13:
; CHECK-NOT: sqeb
; CHECK: br %r14
%sqrt = call float @llvm.sqrt.f32(float %f1)
ret void
}
define void @f14(float %f1) {
; CHECK-LABEL: f14:
; CHECK-NOT: sqeb
; CHECK: br %r14
%sqrt = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
metadata !"fpexcept.ignore") #0
ret void
}
define void @f15(float %f1) {
; CHECK-LABEL: f15:
; CHECK-NOT: sqeb
; CHECK: br %r14
%sqrt = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
metadata !"fpexcept.maytrap") #0
ret void
}
define void @f16(float %f1) {
; CHECK-LABEL: f16:
; CHECK: sqebr
; CHECK: br %r14
%sqrt = call float @llvm.experimental.constrained.sqrt.f32(
float %f1,
metadata !"round.dynamic",
metadata !"fpexcept.strict") #0
ret void
}
attributes #0 = { strictfp }

[File diff suppressed because it is too large]


@ -1104,10 +1104,10 @@ define i128 @f20s128(double %x) nounwind strictfp {
; X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X87-NEXT: movl %edi, 12(%esi)
; X87-NEXT: movl %edx, 8(%esi)
; X87-NEXT: movl %ecx, 4(%esi)
; X87-NEXT: movl %edi, 8(%esi)
; X87-NEXT: movl %edx, 12(%esi)
; X87-NEXT: movl %eax, (%esi)
; X87-NEXT: movl %ecx, 4(%esi)
; X87-NEXT: movl %esi, %eax
; X87-NEXT: addl $36, %esp
; X87-NEXT: popl %esi
@ -1130,10 +1130,10 @@ define i128 @f20s128(double %x) nounwind strictfp {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: movl %edi, 12(%esi)
; X86-SSE-NEXT: movl %edx, 8(%esi)
; X86-SSE-NEXT: movl %ecx, 4(%esi)
; X86-SSE-NEXT: movl %edi, 8(%esi)
; X86-SSE-NEXT: movl %edx, 12(%esi)
; X86-SSE-NEXT: movl %eax, (%esi)
; X86-SSE-NEXT: movl %ecx, 4(%esi)
; X86-SSE-NEXT: movl %esi, %eax
; X86-SSE-NEXT: addl $36, %esp
; X86-SSE-NEXT: popl %esi
@ -1443,10 +1443,10 @@ define i128 @f20u128(double %x) nounwind strictfp {
; X87-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X87-NEXT: movl {{[0-9]+}}(%esp), %edx
; X87-NEXT: movl {{[0-9]+}}(%esp), %edi
; X87-NEXT: movl %edi, 12(%esi)
; X87-NEXT: movl %edx, 8(%esi)
; X87-NEXT: movl %ecx, 4(%esi)
; X87-NEXT: movl %edi, 8(%esi)
; X87-NEXT: movl %edx, 12(%esi)
; X87-NEXT: movl %eax, (%esi)
; X87-NEXT: movl %ecx, 4(%esi)
; X87-NEXT: movl %esi, %eax
; X87-NEXT: addl $36, %esp
; X87-NEXT: popl %esi
@ -1469,10 +1469,10 @@ define i128 @f20u128(double %x) nounwind strictfp {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-SSE-NEXT: movl %edi, 12(%esi)
; X86-SSE-NEXT: movl %edx, 8(%esi)
; X86-SSE-NEXT: movl %ecx, 4(%esi)
; X86-SSE-NEXT: movl %edi, 8(%esi)
; X86-SSE-NEXT: movl %edx, 12(%esi)
; X86-SSE-NEXT: movl %eax, (%esi)
; X86-SSE-NEXT: movl %ecx, 4(%esi)
; X86-SSE-NEXT: movl %esi, %eax
; X86-SSE-NEXT: addl $36, %esp
; X86-SSE-NEXT: popl %esi


@ -47,10 +47,10 @@ define void @TestFPExtF32_F128() nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, vf128+8
; X86-NEXT: movl %edx, vf128+12
; X86-NEXT: movl %eax, vf128
; X86-NEXT: movl %esi, vf128+12
; X86-NEXT: movl %edx, vf128+8
; X86-NEXT: movl %ecx, vf128+4
; X86-NEXT: movl %eax, vf128
; X86-NEXT: addl $24, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
@ -94,10 +94,10 @@ define void @TestFPExtF64_F128() nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, vf128+8
; X86-NEXT: movl %edx, vf128+12
; X86-NEXT: movl %eax, vf128
; X86-NEXT: movl %esi, vf128+12
; X86-NEXT: movl %edx, vf128+8
; X86-NEXT: movl %ecx, vf128+4
; X86-NEXT: movl %eax, vf128
; X86-NEXT: addl $40, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
@ -143,10 +143,10 @@ define void @TestFPExtF80_F128() nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %esi, vf128+8
; X86-NEXT: movl %edx, vf128+12
; X86-NEXT: movl %eax, vf128
; X86-NEXT: movl %esi, vf128+12
; X86-NEXT: movl %edx, vf128+8
; X86-NEXT: movl %ecx, vf128+4
; X86-NEXT: movl %eax, vf128
; X86-NEXT: addl $40, %esp
; X86-NEXT: popl %esi
; X86-NEXT: retl
@ -396,10 +396,10 @@ define i128 @fptosi_i128(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -535,10 +535,10 @@ define i128 @fptoui_i128(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi


@ -42,10 +42,10 @@ define fp128 @add(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -87,10 +87,10 @@ define fp128 @sub(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -132,10 +132,10 @@ define fp128 @mul(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -177,10 +177,10 @@ define fp128 @div(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -226,10 +226,10 @@ define fp128 @fma(fp128 %x, fp128 %y, fp128 %z) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -271,10 +271,10 @@ define fp128 @frem(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -312,10 +312,10 @@ define fp128 @ceil(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -353,10 +353,10 @@ define fp128 @cos(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -394,10 +394,10 @@ define fp128 @exp(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -435,10 +435,10 @@ define fp128 @exp2(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -476,10 +476,10 @@ define fp128 @floor(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -517,10 +517,10 @@ define fp128 @log(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -558,10 +558,10 @@ define fp128 @log10(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -599,10 +599,10 @@ define fp128 @log2(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -644,10 +644,10 @@ define fp128 @maxnum(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -689,10 +689,10 @@ define fp128 @minnum(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -730,10 +730,10 @@ define fp128 @nearbyint(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -775,10 +775,10 @@ define fp128 @pow(fp128 %x, fp128 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -817,10 +817,10 @@ define fp128 @powi(fp128 %x, i32 %y) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -858,10 +858,10 @@ define fp128 @rint(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -899,10 +899,10 @@ define fp128 @round(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -940,10 +940,10 @@ define fp128 @sin(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -981,10 +981,10 @@ define fp128 @sqrt(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi
@ -1022,10 +1022,10 @@ define fp128 @trunc(fp128 %x) nounwind strictfp {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, 12(%esi)
; X86-NEXT: movl %edx, 8(%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %edi, 8(%esi)
; X86-NEXT: movl %edx, 12(%esi)
; X86-NEXT: movl %eax, (%esi)
; X86-NEXT: movl %ecx, 4(%esi)
; X86-NEXT: movl %esi, %eax
; X86-NEXT: addl $20, %esp
; X86-NEXT: popl %esi


@ -40,8 +40,8 @@ define <4 x double> @constrained_vector_fadd_v4f64() #0 {
; CHECK: [[MOVAPDrm:%[0-9]+]]:vr128 = MOVAPDrm $rip, 1, $noreg, %const.0, $noreg :: (load 16 from constant-pool)
; CHECK: [[ADDPDrm:%[0-9]+]]:vr128 = ADDPDrm [[MOVAPDrm]], $rip, 1, $noreg, %const.1, $noreg, implicit $mxcsr :: (load 16 from constant-pool)
; CHECK: [[ADDPDrm1:%[0-9]+]]:vr128 = ADDPDrm [[MOVAPDrm]], $rip, 1, $noreg, %const.2, $noreg, implicit $mxcsr :: (load 16 from constant-pool)
; CHECK: $xmm0 = COPY [[ADDPDrm]]
; CHECK: $xmm1 = COPY [[ADDPDrm1]]
; CHECK: $xmm0 = COPY [[ADDPDrm1]]
; CHECK: $xmm1 = COPY [[ADDPDrm]]
; CHECK: RET 0, $xmm0, $xmm1
entry:
%add = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(


@ -115,10 +115,10 @@ define <4 x double> @constrained_vector_fdiv_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fdiv_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm2 = [1.0E+1,1.0E+1]
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT: divpd %xmm2, %xmm0
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [3.0E+0,4.0E+0]
; CHECK-NEXT: divpd %xmm2, %xmm1
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT: divpd %xmm2, %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fdiv_v4f64:
@ -292,9 +292,9 @@ define <3 x double> @constrained_vector_frem_v3f64() #0 {
; CHECK-NEXT: callq fmod
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -507,10 +507,10 @@ entry:
define <4 x double> @constrained_vector_fmul_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fmul_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [2.0E+0,3.0E+0]
; CHECK-NEXT: mulpd %xmm1, %xmm0
; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [4.0E+0,5.0E+0]
; CHECK-NEXT: mulpd %xmm0, %xmm1
; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fmul_v4f64:
@ -644,10 +644,10 @@ entry:
define <4 x double> @constrained_vector_fadd_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fadd_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,1.0000000000000001E-1]
; CHECK-NEXT: addpd %xmm1, %xmm0
; CHECK-NEXT: addpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [2.0E+0,2.0000000000000001E-1]
; CHECK-NEXT: addpd %xmm0, %xmm1
; CHECK-NEXT: addpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fadd_v4f64:
@ -784,10 +784,10 @@ entry:
define <4 x double> @constrained_vector_fsub_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fsub_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: subpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: subpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: subpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fsub_v4f64:
@ -912,8 +912,8 @@ entry:
define <4 x double> @constrained_vector_sqrt_v4f64() #0 {
; CHECK-LABEL: constrained_vector_sqrt_v4f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_sqrt_v4f64:
@ -1077,9 +1077,9 @@ define <3 x double> @constrained_vector_pow_v3f64() #0 {
; CHECK-NEXT: callq pow
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -1333,9 +1333,9 @@ define <3 x double> @constrained_vector_powi_v3f64() #0 {
; CHECK-NEXT: callq __powidf2
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -1570,9 +1570,9 @@ define <3 x double> @constrained_vector_sin_v3f64() #0 {
; CHECK-NEXT: callq sin
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -1794,9 +1794,9 @@ define <3 x double> @constrained_vector_cos_v3f64() #0 {
; CHECK-NEXT: callq cos
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -2018,9 +2018,9 @@ define <3 x double> @constrained_vector_exp_v3f64() #0 {
; CHECK-NEXT: callq exp
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -2242,9 +2242,9 @@ define <3 x double> @constrained_vector_exp2_v3f64() #0 {
; CHECK-NEXT: callq exp2
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -2466,9 +2466,9 @@ define <3 x double> @constrained_vector_log_v3f64() #0 {
; CHECK-NEXT: callq log
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -2690,9 +2690,9 @@ define <3 x double> @constrained_vector_log10_v3f64() #0 {
; CHECK-NEXT: callq log10
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -2914,9 +2914,9 @@ define <3 x double> @constrained_vector_log2_v3f64() #0 {
; CHECK-NEXT: callq log2
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -3116,9 +3116,9 @@ define <3 x double> @constrained_vector_rint_v3f64() #0 {
; CHECK-NEXT: callq rint
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -3286,9 +3286,9 @@ define <3 x double> @constrained_vector_nearby_v3f64() #0 {
; CHECK-NEXT: callq nearbyint
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -3492,9 +3492,9 @@ define <3 x double> @constrained_vector_max_v3f64() #0 {
; CHECK-NEXT: callq fmax
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -3742,9 +3742,9 @@ define <3 x double> @constrained_vector_min_v3f64() #0 {
; CHECK-NEXT: callq fmin
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -3975,9 +3975,9 @@ entry:
define <3 x i64> @constrained_vector_fptosi_v3i64_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v3i64_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rdx
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rcx
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rdx
; CHECK-NEXT: cvttss2si {{.*}}(%rip), %rax
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fptosi_v3i64_v3f32:
@ -4217,9 +4217,9 @@ entry:
define <3 x i64> @constrained_vector_fptosi_v3i64_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fptosi_v3i64_v3f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rdx
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rcx
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rdx
; CHECK-NEXT: cvttsd2si {{.*}}(%rip), %rax
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_fptosi_v3i64_v3f64:
@ -5542,9 +5542,9 @@ define <3 x double> @constrained_vector_fpext_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fpext_v3f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm0, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm0, %xmm0
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm1, %xmm1
; CHECK-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: cvtss2sd %xmm2, %xmm2
; CHECK-NEXT: movsd %xmm2, -{{[0-9]+}}(%rsp)
@ -5573,8 +5573,8 @@ entry:
define <4 x double> @constrained_vector_fpext_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fpext_v4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvtps2pd {{.*}}(%rip), %xmm0
; CHECK-NEXT: cvtps2pd {{.*}}(%rip), %xmm1
; CHECK-NEXT: cvtps2pd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
;
; AVX-LABEL: constrained_vector_fpext_v4f32:
@ -5694,9 +5694,9 @@ define <3 x double> @constrained_vector_ceil_v3f64() #0 {
; CHECK-NEXT: callq ceil
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -5822,9 +5822,9 @@ define <3 x double> @constrained_vector_floor_v3f64() #0 {
; CHECK-NEXT: callq floor
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -5972,9 +5972,9 @@ define <3 x double> @constrained_vector_round_v3f64() #0 {
; CHECK-NEXT: callq round
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -6112,9 +6112,9 @@ define <3 x double> @constrained_vector_trunc_v3f64() #0 {
; CHECK-NEXT: callq trunc
; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Reload
; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
; CHECK-NEXT: # xmm0 = mem[0],zero
; CHECK-NEXT: movsd (%rsp), %xmm1 # 8-byte Reload
; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
; CHECK-NEXT: # xmm1 = mem[0],zero
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
@ -6396,8 +6396,8 @@ entry:
define <3 x double> @constrained_vector_sitofp_v3f64_v3i64(<3 x i64> %x) #0 {
; CHECK-LABEL: constrained_vector_sitofp_v3f64_v3i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: cvtsi2sd %rdi, %xmm0
; CHECK-NEXT: cvtsi2sd %rsi, %xmm1
; CHECK-NEXT: cvtsi2sd %rdi, %xmm0
; CHECK-NEXT: cvtsi2sd %rdx, %xmm2
; CHECK-NEXT: movsd %xmm2, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
@ -7255,15 +7255,15 @@ entry:
define <4 x double> @constrained_vector_uitofp_v4f64_v4i32(<4 x i32> %x) #0 {
; CHECK-LABEL: constrained_vector_uitofp_v4f64_v4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: xorpd %xmm2, %xmm2
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movapd {{.*#+}} xmm3 = [4.503599627370496E+15,4.503599627370496E+15]
; CHECK-NEXT: orpd %xmm3, %xmm0
; CHECK-NEXT: subpd %xmm3, %xmm0
; CHECK-NEXT: movapd %xmm0, %xmm1
; CHECK-NEXT: unpckhps {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; CHECK-NEXT: movapd {{.*#+}} xmm3 = [4.503599627370496E+15,4.503599627370496E+15]
; CHECK-NEXT: orpd %xmm3, %xmm1
; CHECK-NEXT: subpd %xmm3, %xmm1
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: orpd %xmm3, %xmm0
; CHECK-NEXT: subpd %xmm3, %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_uitofp_v4f64_v4i32:
@ -7331,22 +7331,22 @@ define <4 x double> @constrained_vector_uitofp_v4f64_v4i64(<4 x i64> %x) #0 {
; CHECK-LABEL: constrained_vector_uitofp_v4f64_v4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: movdqa {{.*#+}} xmm2 = [4294967295,4294967295]
; CHECK-NEXT: movdqa %xmm0, %xmm3
; CHECK-NEXT: movdqa %xmm1, %xmm3
; CHECK-NEXT: pand %xmm2, %xmm3
; CHECK-NEXT: movdqa {{.*#+}} xmm4 = [4841369599423283200,4841369599423283200]
; CHECK-NEXT: por %xmm4, %xmm3
; CHECK-NEXT: psrlq $32, %xmm0
; CHECK-NEXT: movdqa {{.*#+}} xmm5 = [4985484787499139072,4985484787499139072]
; CHECK-NEXT: por %xmm5, %xmm0
; CHECK-NEXT: movapd {{.*#+}} xmm6 = [1.9342813118337666E+25,1.9342813118337666E+25]
; CHECK-NEXT: subpd %xmm6, %xmm0
; CHECK-NEXT: addpd %xmm3, %xmm0
; CHECK-NEXT: pand %xmm1, %xmm2
; CHECK-NEXT: por %xmm4, %xmm2
; CHECK-NEXT: psrlq $32, %xmm1
; CHECK-NEXT: movdqa {{.*#+}} xmm5 = [4985484787499139072,4985484787499139072]
; CHECK-NEXT: por %xmm5, %xmm1
; CHECK-NEXT: movapd {{.*#+}} xmm6 = [1.9342813118337666E+25,1.9342813118337666E+25]
; CHECK-NEXT: subpd %xmm6, %xmm1
; CHECK-NEXT: addpd %xmm2, %xmm1
; CHECK-NEXT: addpd %xmm3, %xmm1
; CHECK-NEXT: pand %xmm0, %xmm2
; CHECK-NEXT: por %xmm4, %xmm2
; CHECK-NEXT: psrlq $32, %xmm0
; CHECK-NEXT: por %xmm5, %xmm0
; CHECK-NEXT: subpd %xmm6, %xmm0
; CHECK-NEXT: addpd %xmm2, %xmm0
; CHECK-NEXT: retq
;
; AVX1-LABEL: constrained_vector_uitofp_v4f64_v4i64: