Coverage Report

Created: 2020-06-26 05:44

/home/arjun/llvm-project/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
Line
Count
Source (jump to first uncovered line)
1
//===- AffineOps.h - MLIR Affine Operations -------------------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This file defines convenience types for working with Affine operations
10
// in the MLIR operation set.
11
//
12
//===----------------------------------------------------------------------===//
13
14
#ifndef MLIR_DIALECT_AFFINE_IR_AFFINEOPS_H
15
#define MLIR_DIALECT_AFFINE_IR_AFFINEOPS_H
16
17
#include "mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h"
18
#include "mlir/IR/AffineMap.h"
19
#include "mlir/IR/Builders.h"
20
#include "mlir/IR/Dialect.h"
21
#include "mlir/IR/OpDefinition.h"
22
#include "mlir/IR/StandardTypes.h"
23
#include "mlir/Interfaces/LoopLikeInterface.h"
24
#include "mlir/Interfaces/SideEffectInterfaces.h"
25
26
namespace mlir {
27
class AffineApplyOp;
28
class AffineBound;
29
class AffineDimExpr;
30
class AffineValueMap;
31
class AffineTerminatorOp;
32
class FlatAffineConstraints;
33
class OpBuilder;
34
35
/// A utility function to check if a value is defined at the top level of an
36
/// op with trait `AffineScope` or is a region argument for such an op. A value
37
/// of index type defined at the top level is always a valid symbol for all its
38
/// uses.
39
bool isTopLevelValue(Value value);
40
41
/// AffineDmaStartOp starts a non-blocking DMA operation that transfers data
42
/// from a source memref to a destination memref. The source and destination
43
/// memref need not be of the same dimensionality, but need to have the same
44
/// elemental type. The operands include the source and destination memref's
45
/// each followed by its indices, size of the data transfer in terms of the
46
/// number of elements (of the elemental type of the memref), a tag memref with
47
/// its indices, and optionally at the end, a stride and a
48
/// number_of_elements_per_stride argument. The tag location is used by an
49
/// AffineDmaWaitOp to check for completion. The indices of the source memref,
50
/// destination memref, and the tag memref have the same restrictions as any
51
/// affine.load/store. In particular, index for each memref dimension must be an
52
/// affine expression of loop induction variables and symbols.
53
/// The optional stride arguments should be of 'index' type, and specify a
54
/// stride for the slower memory space (memory space with a lower memory space
55
/// id), transferring chunks of number_of_elements_per_stride every stride until
56
/// %num_elements are transferred. Either both or no stride arguments should be
57
/// specified. The value of 'num_elements' must be a multiple of
58
/// 'number_of_elements_per_stride'.
59
//
60
// For example, a DmaStartOp operation that transfers 256 elements of a memref
61
// '%src' in memory space 0 at indices [%i + 3, %j] to memref '%dst' in memory
62
// space 1 at indices [%k + 7, %l], would be specified as follows:
63
//
64
//   %num_elements = constant 256
65
//   %idx = constant 0 : index
66
//   %tag = alloc() : memref<1xi32, 2>
67
//   affine.dma_start %src[%i + 3, %j], %dst[%k + 7, %l], %tag[%idx],
68
//     %num_elements :
69
//       memref<40x128xf32, 0>, memref<2x1024xf32, 1>, memref<1xi32, 2>
70
//
71
//   If %stride and %num_elt_per_stride are specified, the DMA is expected to
72
//   transfer %num_elt_per_stride elements every %stride elements apart from
73
//   memory space 0 until %num_elements are transferred.
74
//
75
//   affine.dma_start %src[%i, %j], %dst[%k, %l], %tag[%idx], %num_elements,
76
//     %stride, %num_elt_per_stride : ...
77
//
78
// TODO(mlir-team): add additional operands to allow source and destination
79
// striding, and multiple stride levels (possibly using AffineMaps to specify
80
// multiple levels of striding).
81
// TODO(andydavis) Consider replacing src/dst memref indices with view memrefs.
82
class AffineDmaStartOp : public Op<AffineDmaStartOp, OpTrait::VariadicOperands,
83
                                   OpTrait::ZeroResult> {
84
public:
85
  using Op::Op;
86
87
  static void build(OpBuilder &builder, OperationState &result, Value srcMemRef,
88
                    AffineMap srcMap, ValueRange srcIndices, Value destMemRef,
89
                    AffineMap dstMap, ValueRange destIndices, Value tagMemRef,
90
                    AffineMap tagMap, ValueRange tagIndices, Value numElements,
91
                    Value stride = nullptr, Value elementsPerStride = nullptr);
92
93
  /// Returns the operand index of the src memref.
94
0
  unsigned getSrcMemRefOperandIndex() { return 0; }
95
96
  /// Returns the source MemRefType for this DMA operation.
97
0
  Value getSrcMemRef() { return getOperand(getSrcMemRefOperandIndex()); }
98
0
  MemRefType getSrcMemRefType() {
99
0
    return getSrcMemRef().getType().cast<MemRefType>();
100
0
  }
101
102
  /// Returns the rank (number of indices) of the source MemRefType.
103
0
  unsigned getSrcMemRefRank() { return getSrcMemRefType().getRank(); }
104
105
  /// Returns the affine map used to access the src memref.
106
0
  AffineMap getSrcMap() { return getSrcMapAttr().getValue(); }
107
0
  AffineMapAttr getSrcMapAttr() {
108
0
    return getAttr(getSrcMapAttrName()).cast<AffineMapAttr>();
109
0
  }
110
111
  /// Returns the source memref affine map indices for this DMA operation.
112
0
  operand_range getSrcIndices() {
113
0
    return {operand_begin() + getSrcMemRefOperandIndex() + 1,
114
0
            operand_begin() + getSrcMemRefOperandIndex() + 1 +
115
0
                getSrcMap().getNumInputs()};
116
0
  }
117
118
  /// Returns the memory space of the src memref.
119
0
  unsigned getSrcMemorySpace() {
120
0
    return getSrcMemRef().getType().cast<MemRefType>().getMemorySpace();
121
0
  }
122
123
  /// Returns the operand index of the dst memref.
124
0
  unsigned getDstMemRefOperandIndex() {
125
0
    return getSrcMemRefOperandIndex() + 1 + getSrcMap().getNumInputs();
126
0
  }
127
128
  /// Returns the destination MemRefType for this DMA operations.
129
0
  Value getDstMemRef() { return getOperand(getDstMemRefOperandIndex()); }
130
0
  MemRefType getDstMemRefType() {
131
0
    return getDstMemRef().getType().cast<MemRefType>();
132
0
  }
133
134
  /// Returns the rank (number of indices) of the destination MemRefType.
135
0
  unsigned getDstMemRefRank() {
136
0
    return getDstMemRef().getType().cast<MemRefType>().getRank();
137
0
  }
138
139
  /// Returns the memory space of the src memref.
140
0
  unsigned getDstMemorySpace() {
141
0
    return getDstMemRef().getType().cast<MemRefType>().getMemorySpace();
142
0
  }
143
144
  /// Returns the affine map used to access the dst memref.
145
0
  AffineMap getDstMap() { return getDstMapAttr().getValue(); }
146
0
  AffineMapAttr getDstMapAttr() {
147
0
    return getAttr(getDstMapAttrName()).cast<AffineMapAttr>();
148
0
  }
149
150
  /// Returns the destination memref indices for this DMA operation.
151
0
  operand_range getDstIndices() {
152
0
    return {operand_begin() + getDstMemRefOperandIndex() + 1,
153
0
            operand_begin() + getDstMemRefOperandIndex() + 1 +
154
0
                getDstMap().getNumInputs()};
155
0
  }
156
157
  /// Returns the operand index of the tag memref.
158
0
  unsigned getTagMemRefOperandIndex() {
159
0
    return getDstMemRefOperandIndex() + 1 + getDstMap().getNumInputs();
160
0
  }
161
162
  /// Returns the Tag MemRef for this DMA operation.
163
0
  Value getTagMemRef() { return getOperand(getTagMemRefOperandIndex()); }
164
0
  MemRefType getTagMemRefType() {
165
0
    return getTagMemRef().getType().cast<MemRefType>();
166
0
  }
167
168
  /// Returns the rank (number of indices) of the tag MemRefType.
169
0
  unsigned getTagMemRefRank() {
170
0
    return getTagMemRef().getType().cast<MemRefType>().getRank();
171
0
  }
172
173
  /// Returns the affine map used to access the tag memref.
174
0
  AffineMap getTagMap() { return getTagMapAttr().getValue(); }
175
0
  AffineMapAttr getTagMapAttr() {
176
0
    return getAttr(getTagMapAttrName()).cast<AffineMapAttr>();
177
0
  }
178
179
  /// Returns the tag memref indices for this DMA operation.
180
0
  operand_range getTagIndices() {
181
0
    return {operand_begin() + getTagMemRefOperandIndex() + 1,
182
0
            operand_begin() + getTagMemRefOperandIndex() + 1 +
183
0
                getTagMap().getNumInputs()};
184
0
  }
185
186
  /// Returns the number of elements being transferred by this DMA operation.
187
0
  Value getNumElements() {
188
0
    return getOperand(getTagMemRefOperandIndex() + 1 +
189
0
                      getTagMap().getNumInputs());
190
0
  }
191
192
  /// Returns the AffineMapAttr associated with 'memref'.
193
0
  NamedAttribute getAffineMapAttrForMemRef(Value memref) {
194
0
    if (memref == getSrcMemRef())
195
0
      return {Identifier::get(getSrcMapAttrName(), getContext()),
196
0
              getSrcMapAttr()};
197
0
    else if (memref == getDstMemRef())
198
0
      return {Identifier::get(getDstMapAttrName(), getContext()),
199
0
              getDstMapAttr()};
200
0
    assert(memref == getTagMemRef() &&
201
0
           "DmaStartOp expected source, destination or tag memref");
202
0
    return {Identifier::get(getTagMapAttrName(), getContext()),
203
0
            getTagMapAttr()};
204
0
  }
205
206
  /// Returns true if this is a DMA from a faster memory space to a slower one.
207
0
  bool isDestMemorySpaceFaster() {
208
0
    return (getSrcMemorySpace() < getDstMemorySpace());
209
0
  }
210
211
  /// Returns true if this is a DMA from a slower memory space to a faster one.
212
0
  bool isSrcMemorySpaceFaster() {
213
0
    // Assumes that a lower number is for a slower memory space.
214
0
    return (getDstMemorySpace() < getSrcMemorySpace());
215
0
  }
216
217
  /// Given a DMA start operation, returns the operand position of either the
218
  /// source or destination memref depending on the one that is at the higher
219
  /// level of the memory hierarchy. Asserts failure if neither is true.
220
0
  unsigned getFasterMemPos() {
221
0
    assert(isSrcMemorySpaceFaster() || isDestMemorySpaceFaster());
222
0
    return isSrcMemorySpaceFaster() ? 0 : getDstMemRefOperandIndex();
223
0
  }
224
225
0
  static StringRef getSrcMapAttrName() { return "src_map"; }
226
0
  static StringRef getDstMapAttrName() { return "dst_map"; }
227
0
  static StringRef getTagMapAttrName() { return "tag_map"; }
228
229
0
  static StringRef getOperationName() { return "affine.dma_start"; }
230
  static ParseResult parse(OpAsmParser &parser, OperationState &result);
231
  void print(OpAsmPrinter &p);
232
  LogicalResult verify();
233
  LogicalResult fold(ArrayRef<Attribute> cstOperands,
234
                     SmallVectorImpl<OpFoldResult> &results);
235
236
  /// Returns true if this DMA operation is strided, returns false otherwise.
237
0
  bool isStrided() {
238
0
    return getNumOperands() !=
239
0
           getTagMemRefOperandIndex() + 1 + getTagMap().getNumInputs() + 1;
240
0
  }
241
242
  /// Returns the stride value for this DMA operation.
243
0
  Value getStride() {
244
0
    if (!isStrided())
245
0
      return nullptr;
246
0
    return getOperand(getNumOperands() - 1 - 1);
247
0
  }
248
249
  /// Returns the number of elements to transfer per stride for this DMA op.
250
0
  Value getNumElementsPerStride() {
251
0
    if (!isStrided())
252
0
      return nullptr;
253
0
    return getOperand(getNumOperands() - 1);
254
0
  }
255
};
256
257
/// AffineDmaWaitOp blocks until the completion of a DMA operation associated
258
/// with the tag element '%tag[%index]'. %tag is a memref, and %index has to be
259
/// an index with the same restrictions as any load/store index. In particular,
260
/// index for each memref dimension must be an affine expression of loop
261
/// induction variables and symbols. %num_elements is the number of elements
262
/// associated with the DMA operation. For example:
263
//
264
//   affine.dma_start %src[%i, %j], %dst[%k, %l], %tag[%index], %num_elements :
265
//     memref<2048xf32, 0>, memref<256xf32, 1>, memref<1xi32, 2>
266
//   ...
267
//   ...
268
//   affine.dma_wait %tag[%index], %num_elements : memref<1xi32, 2>
269
//
270
class AffineDmaWaitOp : public Op<AffineDmaWaitOp, OpTrait::VariadicOperands,
271
                                  OpTrait::ZeroResult> {
272
public:
273
  using Op::Op;
274
275
  static void build(OpBuilder &builder, OperationState &result, Value tagMemRef,
276
                    AffineMap tagMap, ValueRange tagIndices, Value numElements);
277
278
0
  static StringRef getOperationName() { return "affine.dma_wait"; }
279
280
  // Returns the Tag MemRef associated with the DMA operation being waited on.
281
0
  Value getTagMemRef() { return getOperand(0); }
282
0
  MemRefType getTagMemRefType() {
283
0
    return getTagMemRef().getType().cast<MemRefType>();
284
0
  }
285
286
  /// Returns the affine map used to access the tag memref.
287
0
  AffineMap getTagMap() { return getTagMapAttr().getValue(); }
288
0
  AffineMapAttr getTagMapAttr() {
289
0
    return getAttr(getTagMapAttrName()).cast<AffineMapAttr>();
290
0
  }
291
292
  // Returns the tag memref index for this DMA operation.
293
0
  operand_range getTagIndices() {
294
0
    return {operand_begin() + 1,
295
0
            operand_begin() + 1 + getTagMap().getNumInputs()};
296
0
  }
297
298
  // Returns the rank (number of indices) of the tag memref.
299
0
  unsigned getTagMemRefRank() {
300
0
    return getTagMemRef().getType().cast<MemRefType>().getRank();
301
0
  }
302
303
  /// Returns the AffineMapAttr associated with 'memref'.
304
0
  NamedAttribute getAffineMapAttrForMemRef(Value memref) {
305
0
    assert(memref == getTagMemRef());
306
0
    return {Identifier::get(getTagMapAttrName(), getContext()),
307
0
            getTagMapAttr()};
308
0
  }
309
310
  /// Returns the number of elements transferred in the associated DMA op.
311
0
  Value getNumElements() { return getOperand(1 + getTagMap().getNumInputs()); }
312
313
0
  static StringRef getTagMapAttrName() { return "tag_map"; }
314
  static ParseResult parse(OpAsmParser &parser, OperationState &result);
315
  void print(OpAsmPrinter &p);
316
  LogicalResult verify();
317
  LogicalResult fold(ArrayRef<Attribute> cstOperands,
318
                     SmallVectorImpl<OpFoldResult> &results);
319
};
320
321
/// Returns true if the given Value can be used as a dimension id in the region
322
/// of the closest surrounding op that has the trait `AffineScope`.
323
bool isValidDim(Value value);
324
325
/// Returns true if the given Value can be used as a dimension id in `region`,
326
/// i.e., for all its uses in `region`.
327
bool isValidDim(Value value, Region *region);
328
329
/// Returns true if the given value can be used as a symbol in the region of the
330
/// closest surrounding op that has the trait `AffineScope`.
331
bool isValidSymbol(Value value);
332
333
/// Returns true if the given Value can be used as a symbol for `region`, i.e.,
334
/// for all its uses in `region`.
335
bool isValidSymbol(Value value, Region *region);
336
337
/// Modifies both `map` and `operands` in-place so as to:
338
/// 1. drop duplicate operands
339
/// 2. drop unused dims and symbols from map
340
/// 3. promote valid symbols to symbolic operands in case they appeared as
341
///    dimensional operands
342
/// 4. propagate constant operands and drop them
343
void canonicalizeMapAndOperands(AffineMap *map,
344
                                SmallVectorImpl<Value> *operands);
345
346
/// Canonicalizes an integer set the same way canonicalizeMapAndOperands does
347
/// for affine maps.
348
void canonicalizeSetAndOperands(IntegerSet *set,
349
                                SmallVectorImpl<Value> *operands);
350
351
/// Returns a composed AffineApplyOp by composing `map` and `operands` with
352
/// other AffineApplyOps supplying those operands. The operands of the resulting
353
/// AffineApplyOp do not change the length of AffineApplyOp chains.
354
AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map,
355
                                      ArrayRef<Value> operands);
356
357
/// Given an affine map `map` and its input `operands`, this method composes
358
/// into `map`, maps of AffineApplyOps whose results are the values in
359
/// `operands`, iteratively until no more of `operands` are the result of an
360
/// AffineApplyOp. When this function returns, `map` becomes the composed affine
361
/// map, and each Value in `operands` is guaranteed to be either a loop IV or a
362
/// terminal symbol, i.e., a symbol defined at the top level or a block/function
363
/// argument.
364
void fullyComposeAffineMapAndOperands(AffineMap *map,
365
                                      SmallVectorImpl<Value> *operands);
366
367
#include "mlir/Dialect/Affine/IR/AffineOpsDialect.h.inc"
368
369
#define GET_OP_CLASSES
370
#include "mlir/Dialect/Affine/IR/AffineOps.h.inc"
371
372
/// Returns if the provided value is the induction variable of a AffineForOp.
373
bool isForInductionVar(Value val);
374
375
/// Returns the loop parent of an induction variable. If the provided value is
376
/// not an induction variable, then return nullptr.
377
AffineForOp getForInductionVarOwner(Value val);
378
379
/// Extracts the induction variables from a list of AffineForOps and places them
380
/// in the output argument `ivs`.
381
void extractForInductionVars(ArrayRef<AffineForOp> forInsts,
382
                             SmallVectorImpl<Value> *ivs);
383
384
/// AffineBound represents a lower or upper bound in the for operation.
385
/// This class does not own the underlying operands. Instead, it refers
386
/// to the operands stored in the AffineForOp. Its life span should not exceed
387
/// that of the for operation it refers to.
388
class AffineBound {
389
public:
390
0
  AffineForOp getAffineForOp() { return op; }
391
0
  AffineMap getMap() { return map; }
392
393
0
  unsigned getNumOperands() { return opEnd - opStart; }
394
0
  Value getOperand(unsigned idx) { return op.getOperand(opStart + idx); }
395
396
  using operand_iterator = AffineForOp::operand_iterator;
397
  using operand_range = AffineForOp::operand_range;
398
399
0
  operand_iterator operand_begin() { return op.operand_begin() + opStart; }
400
0
  operand_iterator operand_end() { return op.operand_begin() + opEnd; }
401
0
  operand_range getOperands() { return {operand_begin(), operand_end()}; }
402
403
private:
404
  // 'affine.for' operation that contains this bound.
405
  AffineForOp op;
406
  // Start and end positions of this affine bound operands in the list of
407
  // the containing 'affine.for' operation operands.
408
  unsigned opStart, opEnd;
409
  // Affine map for this bound.
410
  AffineMap map;
411
412
  AffineBound(AffineForOp op, unsigned opStart, unsigned opEnd, AffineMap map)
413
0
      : op(op), opStart(opStart), opEnd(opEnd), map(map) {}
414
415
  friend class AffineForOp;
416
};
417
418
/// An `AffineApplyNormalizer` is a helper class that supports renumbering
419
/// operands of AffineApplyOp. This acts as a reindexing map of Value to
420
/// positional dims or symbols and allows simplifications such as:
421
///
422
/// ```mlir
423
///    %1 = affine.apply (d0, d1) -> (d0 - d1) (%0, %0)
424
/// ```
425
///
426
/// into:
427
///
428
/// ```mlir
429
///    %1 = affine.apply () -> (0)
430
/// ```
431
struct AffineApplyNormalizer {
432
  AffineApplyNormalizer(AffineMap map, ArrayRef<Value> operands);
433
434
  /// Returns the AffineMap resulting from normalization.
435
0
  AffineMap getAffineMap() { return affineMap; }
436
437
0
  SmallVector<Value, 8> getOperands() {
438
0
    SmallVector<Value, 8> res(reorderedDims);
439
0
    res.append(concatenatedSymbols.begin(), concatenatedSymbols.end());
440
0
    return res;
441
0
  }
442
443
0
  unsigned getNumSymbols() { return concatenatedSymbols.size(); }
444
0
  unsigned getNumDims() { return reorderedDims.size(); }
445
446
  /// Normalizes 'otherMap' and its operands 'otherOperands' to map to this
447
  /// normalizer's coordinate space.
448
  void normalize(AffineMap *otherMap, SmallVectorImpl<Value> *otherOperands);
449
450
private:
451
  /// Helper function to insert `v` into the coordinate system of the current
452
  /// AffineApplyNormalizer. Returns the AffineDimExpr with the corresponding
453
  /// renumbered position.
454
  AffineDimExpr renumberOneDim(Value v);
455
456
  /// Given an `other` normalizer, this rewrites `other.affineMap` in the
457
  /// coordinate system of the current AffineApplyNormalizer.
458
  /// Returns the rewritten AffineMap and updates the dims and symbols of
459
  /// `this`.
460
  AffineMap renumber(const AffineApplyNormalizer &other);
461
462
  /// Maps of Value to position in `affineMap`.
463
  DenseMap<Value, unsigned> dimValueToPosition;
464
465
  /// Ordered dims and symbols matching positional dims and symbols in
466
  /// `affineMap`.
467
  SmallVector<Value, 8> reorderedDims;
468
  SmallVector<Value, 8> concatenatedSymbols;
469
470
  /// The number of symbols in concatenated symbols that belong to the original
471
/// map as opposed to those concatenated during map composition.
472
  unsigned numProperSymbols;
473
474
  AffineMap affineMap;
475
476
  /// Used with RAII to control the depth at which AffineApply are composed
477
  /// recursively. Only accepts depth 1 for now to allow a behavior where a
478
  /// newly composed AffineApplyOp does not increase the length of the chain of
479
  /// AffineApplyOps. Full composition is implemented iteratively on top of
480
  /// this behavior.
481
0
  static unsigned &affineApplyDepth() {
482
0
    static thread_local unsigned depth = 0;
483
0
    return depth;
484
0
  }
485
  static constexpr unsigned kMaxAffineApplyDepth = 1;
486
487
0
  AffineApplyNormalizer() : numProperSymbols(0) { affineApplyDepth()++; }
488
489
public:
490
0
  ~AffineApplyNormalizer() { affineApplyDepth()--; }
491
};
492
493
} // end namespace mlir
494
495
#endif