Skip to content

Commit 91d2ecf

Browse files
authored
[NFC] Fix some typos in libc and mlir comments (#133374)
1 parent 45b9e24 commit 91d2ecf

File tree

10 files changed

+20
-20
lines changed

10 files changed

+20
-20
lines changed

libc/src/__support/CPP/atomic.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ template <typename T> struct Atomic {
9797

9898
LIBC_INLINE constexpr Atomic() = default;
9999

100-
// Intializes the value without using atomic operations.
100+
// Initializes the value without using atomic operations.
101101
LIBC_INLINE constexpr Atomic(value_type v) : val(v) {}
102102

103103
LIBC_INLINE Atomic(const Atomic &) = delete;

mlir/include/mlir/Analysis/Presburger/Simplex.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -344,7 +344,7 @@ class SimplexBase {
344344
SmallVector<UndoLogEntry, 8> undoLog;
345345

346346
/// Holds a vector of bases. The ith saved basis is the basis that should be
347-
/// restored when processing the ith occurrance of UndoLogEntry::RestoreBasis
347+
/// restored when processing the ith occurrence of UndoLogEntry::RestoreBasis
348348
/// in undoLog. This is used by getSnapshotBasis.
349349
SmallVector<SmallVector<int, 8>, 8> savedBases;
350350

@@ -367,7 +367,7 @@ class SimplexBase {
367367
///
368368
/// This does not directly support negative-valued variables, so it uses the big
369369
/// M parameter trick to make all the variables non-negative. Basically we
370-
/// introduce an artifical variable M that is considered to have a value of
370+
/// introduce an artificial variable M that is considered to have a value of
371371
/// +infinity and instead of the variables x, y, z, we internally use variables
372372
/// M + x, M + y, M + z, which are now guaranteed to be non-negative. See the
373373
/// documentation for SimplexBase for more details. M is also considered to be
@@ -561,7 +561,7 @@ struct SymbolicLexOpt {
561561
/// negative for all values in the symbol domain, the row needs to be pivoted
562562
/// irrespective of the precise value of the symbols. To answer queries like
563563
/// "Is this symbolic sample always negative in the symbol domain?", we maintain
564-
/// a `LexSimplex domainSimplex` correponding to the symbol domain.
564+
/// a `LexSimplex domainSimplex` corresponding to the symbol domain.
565565
///
566566
/// In other cases, it may be that the symbolic sample is violated at some
567567
/// values in the symbol domain and not violated at others. In this case,

mlir/include/mlir/AsmParser/AsmParser.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ parseAsmSourceFile(const llvm::SourceMgr &sourceMgr, Block *block,
4747
/// not, an error diagnostic is emitted to the context and a null value is
4848
/// returned.
4949
/// If `numRead` is provided, it is set to the number of consumed characters on
50-
/// succesful parse. Otherwise, parsing fails if the entire string is not
50+
/// successful parse. Otherwise, parsing fails if the entire string is not
5151
/// consumed.
5252
/// Some internal copying can be skipped if the source string is known to be
5353
/// null terminated.
@@ -58,7 +58,7 @@ Attribute parseAttribute(llvm::StringRef attrStr, MLIRContext *context,
5858
/// This parses a single MLIR type to an MLIR context if it was valid. If not,
5959
/// an error diagnostic is emitted to the context.
6060
/// If `numRead` is provided, it is set to the number of consumed characters on
61-
/// succesful parse. Otherwise, parsing fails if the entire string is not
61+
/// successful parse. Otherwise, parsing fails if the entire string is not
6262
/// consumed.
6363
/// Some internal copying can be skipped if the source string is known to be
6464
/// null terminated.

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_loose.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ module {
4141

4242
//
4343
// Note: position for loose_compressed level can vary in the end,
44-
// therefore we loosly check it with {{.*}}.
44+
// therefore we loosely check it with {{.*}}.
4545
//
4646
// CHECK: ---- Sparse Tensor ----
4747
// CHECK-NEXT: nse = 17

mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -96,21 +96,21 @@ func.func @main() {
9696
%f0 = arith.constant 0.0e+00 : f16
9797
%c32 = arith.constant 32 : index
9898

99-
// Intialize the lhs matrix with a linspace function.
99+
// Initialize the lhs matrix with a linspace function.
100100
scf.for %r = %c0 to %M step %c1 {
101101
scf.for %c = %c0 to %K step %c1 {
102102
%idx = func.call @compute_linspace_val(%r, %c, %K) : (index, index, index) -> f16
103103
memref.store %idx, %lhs[%r, %c] : !lhs_memref_type
104104
}
105105
}
106-
// Intialize the rhs matrix with a linspace function.
106+
// Initialize the rhs matrix with a linspace function.
107107
scf.for %r = %c0 to %K step %c1 {
108108
scf.for %c = %c0 to %N step %c1 {
109109
%idx = func.call @compute_linspace_val(%r, %c, %N) : (index, index, index) -> f16
110110
memref.store %idx, %rhs[%r, %c] : !rhs_memref_type
111111
}
112112
}
113-
// Intialize the rhs matrix with a linspace function.
113+
// Initialize the rhs matrix with a linspace function.
114114
scf.for %r = %c0 to %M step %c1 {
115115
scf.for %c = %c0 to %N step %c1 {
116116
%idx = func.call @compute_linspace_val(%r, %c, %N) : (index, index, index) -> f16

mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -47,21 +47,21 @@ func.func @main() {
4747
%f0 = arith.constant 0.0e+00 : f32
4848
%c32 = arith.constant 32 : index
4949

50-
// Intialize the lhs matrix with a linspace function.
50+
// Initialize the lhs matrix with a linspace function.
5151
scf.for %r = %c0 to %M step %c1 {
5252
scf.for %c = %c0 to %K step %c1 {
5353
%idx = func.call @compute_linspace_val(%r, %c, %K) : (index, index, index) -> f32
5454
memref.store %idx, %lhs[%r, %c] : !lhs_memref_type
5555
}
5656
}
57-
// Intialize the rhs matrix with a linspace function.
57+
// Initialize the rhs matrix with a linspace function.
5858
scf.for %r = %c0 to %K step %c1 {
5959
scf.for %c = %c0 to %N step %c1 {
6060
%idx = func.call @compute_linspace_val(%r, %c, %N) : (index, index, index) -> f32
6161
memref.store %idx, %rhs[%r, %c] : !rhs_memref_type
6262
}
6363
}
64-
// Intialize the rhs matrix with a linspace function.
64+
// Initialize the rhs matrix with a linspace function.
6565
scf.for %r = %c0 to %M step %c1 {
6666
scf.for %c = %c0 to %N step %c1 {
6767
%idx = func.call @compute_linspace_val(%r, %c, %N) : (index, index, index) -> f32

mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,15 +20,15 @@ func.func @main() {
2020
%c32 = arith.constant 32 : index
2121
%c1 = arith.constant 1 : index
2222

23-
// Intialize the Input matrix with the column index in each row.
23+
// Initialize the Input matrix with the column index in each row.
2424
scf.for %arg0 = %c0 to %c16 step %c1 {
2525
scf.for %arg1 = %c0 to %c16 step %c1 {
2626
%2 = arith.index_cast %arg1 : index to i16
2727
%3 = arith.sitofp %2 : i16 to f16
2828
memref.store %3, %0[%arg0, %arg1] : memref<16x16xf16>
2929
}
3030
}
31-
// Intialize the accumulator matrix with zeros.
31+
// Initialize the accumulator matrix with zeros.
3232
scf.for %arg0 = %c0 to %c16 step %c1 {
3333
scf.for %arg1 = %c0 to %c16 step %c1 {
3434
memref.store %f0, %22[%arg0, %arg1] : memref<16x16xf16>

mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32-bare-ptr.mlir

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,13 +20,13 @@ func.func @main() {
2020
%c32 = arith.constant 32 : index
2121
%c1 = arith.constant 1 : index
2222

23-
// Intialize the Input matrix with ones.
23+
// Initialize the Input matrix with ones.
2424
scf.for %arg0 = %c0 to %c16 step %c1 {
2525
scf.for %arg1 = %c0 to %c16 step %c1 {
2626
memref.store %f1, %h0[%arg0, %arg1] : memref<16x16xf16>
2727
}
2828
}
29-
// Intialize the accumulator matrix with zeros.
29+
// Initialize the accumulator matrix with zeros.
3030
scf.for %arg0 = %c0 to %c16 step %c1 {
3131
scf.for %arg1 = %c0 to %c16 step %c1 {
3232
memref.store %f0, %h_out[%arg0, %arg1] : memref<16x16xf32>

mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,13 +18,13 @@ func.func @main() {
1818
%c32 = arith.constant 32 : index
1919
%c1 = arith.constant 1 : index
2020

21-
// Intialize the Input matrix with ones.
21+
// Initialize the Input matrix with ones.
2222
scf.for %arg0 = %c0 to %c16 step %c1 {
2323
scf.for %arg1 = %c0 to %c16 step %c1 {
2424
memref.store %f1, %0[%arg0, %arg1] : memref<16x16xf16>
2525
}
2626
}
27-
// Intialize the accumulator matrix with zeros.
27+
// Initialize the accumulator matrix with zeros.
2828
scf.for %arg0 = %c0 to %c16 step %c1 {
2929
scf.for %arg1 = %c0 to %c16 step %c1 {
3030
memref.store %f0, %22[%arg0, %arg1] : memref<16x16xf32>

mlir/test/mlir-tblgen/op-properties-predicates.td

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ def OpWithPredicates : NS_Op<"op_with_predicates"> {
3636
}
3737

3838
// CHECK-LABEL: OpWithPredicates::verifyInvariantsImpl()
39-
// Note: for test readibility, we capture [[maybe_unused]] into the variable maybe_unused
39+
// Note: for test readability, we capture [[maybe_unused]] into the variable maybe_unused
4040
// CHECK: [[maybe_unused:\[\[maybe_unused\]\]]] int64_t tblgen_scalar = this->getScalar();
4141
// CHECK: if (!((tblgen_scalar >= 0)))
4242
// CHECK-NEXT: return emitOpError("property 'scalar' failed to satisfy constraint: non-negative int64_t");

0 commit comments

Comments (0)