//===- OptimizeForNVVM.cpp - Optimize LLVM IR for NVVM ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/LLVMIR/Transforms/OptimizeForNVVM.h"

#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

namespace mlir {
namespace NVVM {
#define GEN_PASS_DEF_NVVMOPTIMIZEFORTARGET
#include "mlir/Dialect/LLVMIR/Transforms/Passes.h.inc"
} // namespace NVVM
} // namespace mlir

using namespace mlir;

namespace {
// Replaces fp16 fdiv with an fp32 multiplication by the reciprocal, plus one
// (conditional) Newton iteration.
//
// This is as accurate as promoting the division to fp32 in the NVPTX backend,
// but faster because it performs fewer Newton iterations, avoids the slow path
// for e.g. denormals, and allows the reciprocal to be reused across multiple
// divisions by the same divisor.
struct ExpandDivF16 : public OpRewritePattern<LLVM::FDivOp> {
  using OpRewritePattern<LLVM::FDivOp>::OpRewritePattern;

private:
  LogicalResult matchAndRewrite(LLVM::FDivOp op,
                                PatternRewriter &rewriter) const override;
};

struct NVVMOptimizeForTarget
    : public NVVM::impl::NVVMOptimizeForTargetBase<NVVMOptimizeForTarget> {
  void runOnOperation() override;

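  // The expansion pattern creates NVVM ops (NVVM::RcpApproxFtzF32Op), so the
  // NVVM dialect must be registered as a dependency of this pass.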
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<NVVM::NVVMDialect>();
  }
};
} // namespace

LogicalResult ExpandDivF16::matchAndRewrite(LLVM::FDivOp op,
                                            PatternRewriter &rewriter) const {
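  // Only scalar f16 divisions are rewritten; all other types are left to the
  // backend.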
  if (!op.getType().isF16())
    return rewriter.notifyMatchFailure(op, "not f16");
  Location loc = op.getLoc();

  Type f32Type = rewriter.getF32Type();
  Type i32Type = rewriter.getI32Type();

  // Extend lhs and rhs to fp32.
  Value lhs = rewriter.create<LLVM::FPExtOp>(loc, f32Type, op.getLhs());
  Value rhs = rewriter.create<LLVM::FPExtOp>(loc, f32Type, op.getRhs());

  // float rcp = rcp.approx.ftz.f32(rhs), approx = lhs * rcp.
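  // rcp.approx.ftz.f32 yields a fast approximate reciprocal and flushes
  // denormals to zero, hence the conditional fallback below.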
  Value rcp = rewriter.create<NVVM::RcpApproxFtzF32Op>(loc, f32Type, rhs);
  Value approx = rewriter.create<LLVM::FMulOp>(loc, lhs, rcp);

  // Refine the approximation with one Newton iteration:
  // float refined = approx + (lhs - approx * rhs) * rcp;
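  // err = fma(approx, -rhs, lhs) is the residual lhs - approx * rhs, and
  // refined = fma(err, rcp, approx) applies the correction.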
  Value err = rewriter.create<LLVM::FMAOp>(
      loc, approx, rewriter.create<LLVM::FNegOp>(loc, rhs), lhs);
  Value refined = rewriter.create<LLVM::FMAOp>(loc, err, rcp, approx);

  // Use the refined value only if approx is normal (exponent bits neither all
  // zeros nor all ones).
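  // 0x7f800000 masks the fp32 exponent bits: an all-zero exponent means approx
  // is zero or denormal, an all-one exponent means inf or NaN. In those cases
  // the initial approximation is kept instead of the refined value.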
  Value mask = rewriter.create<LLVM::ConstantOp>(
      loc, i32Type, rewriter.getUI32IntegerAttr(0x7f800000));
  Value cast = rewriter.create<LLVM::BitcastOp>(loc, i32Type, approx);
  Value exp = rewriter.create<LLVM::AndOp>(loc, i32Type, cast, mask);
  Value zero = rewriter.create<LLVM::ConstantOp>(
      loc, i32Type, rewriter.getUI32IntegerAttr(0));
  Value pred = rewriter.create<LLVM::OrOp>(
      loc,
      rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::eq, exp, zero),
      rewriter.create<LLVM::ICmpOp>(loc, LLVM::ICmpPredicate::eq, exp, mask));
  Value result =
      rewriter.create<LLVM::SelectOp>(loc, f32Type, pred, approx, refined);

  // Replace the op with a truncation of the result back to fp16.
  rewriter.replaceOpWithNewOp<LLVM::FPTruncOp>(op, op.getType(), result);

  return success();
}

void NVVMOptimizeForTarget::runOnOperation() {
  MLIRContext *ctx = getOperation()->getContext();
  RewritePatternSet patterns(ctx);
  patterns.add<ExpandDivF16>(ctx);
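  // Greedily apply the expansion (and folding) until a fixpoint is reached.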
  if (failed(applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
    return signalPassFailure();
}

std::unique_ptr<Pass> NVVM::createOptimizeForTargetPass() {
  return std::make_unique<NVVMOptimizeForTarget>();
}