@@ -1043,8 +1043,7 @@ void reduCGFuncForRangeFastAtomics(handler &CGH, KernelType KernelFunc,
   });
 }

-template <class KernelName>
-struct MainRangeFastReduce;
+template <class KernelName> struct MainRangeFastReduce;
 template <typename KernelName, typename KernelType, int Dims, class Reduction>
 void reduCGFuncForRangeFastReduce(handler &CGH, KernelType KernelFunc,
                                   const range<Dims> &Range,
@@ -1123,8 +1122,7 @@ void reduCGFuncForRangeFastReduce(handler &CGH, KernelType KernelFunc,
   });
 }

-template <class KernelName>
-struct MainRangeBasic;
+template <class KernelName> struct MainRangeBasic;
 template <typename KernelName, typename KernelType, int Dims, class Reduction>
 void reduCGFuncForRangeBasic(handler &CGH, KernelType KernelFunc,
                              const range<Dims> &Range,
@@ -1259,8 +1257,7 @@ void reduCGFuncForRange(handler &CGH, KernelType KernelFunc,
   }
 }

-template <class KernelName>
-struct MainNDRangeBothFastReduceAndAtomics;
+template <class KernelName> struct MainNDRangeBothFastReduceAndAtomics;
 /// Implements a command group function that enqueues a kernel that calls
 /// user's lambda function KernelFunc and also does one iteration of reduction
 /// of elements computed in user's lambda function.
@@ -1292,8 +1289,7 @@ void reduCGFuncForNDRangeBothFastReduceAndAtomics(
   });
 }

-template <class KernelName>
-struct MainNDRangeFastAtomicsOnly;
+template <class KernelName> struct MainNDRangeFastAtomicsOnly;
 /// Implements a command group function that enqueues a kernel that calls
 /// user's lambda function KernelFunc and also does one iteration of reduction
 /// of elements computed in user's lambda function.
@@ -1368,8 +1364,7 @@ void reduCGFuncForNDRangeFastAtomicsOnly(
   });
 }

-template <class KernelName>
-struct MainNDRangeBothFastReduceOnly;
+template <class KernelName> struct MainNDRangeBothFastReduceOnly;
 /// Implements a command group function that enqueues a kernel that
 /// calls user's lambda function and does one iteration of reduction
 /// of elements in each of work-groups.
@@ -1410,8 +1405,7 @@ void reduCGFuncForNDRangeFastReduceOnly(
   });
 }

-template <class KernelName>
-struct MainNDRangeBasic;
+template <class KernelName> struct MainNDRangeBasic;
 /// Implements a command group function that enqueues a kernel that calls
 /// user's lambda function \param KernelFunc and does one iteration of
 /// reduction of elements in each of work-groups.
@@ -1490,8 +1484,7 @@ void reduCGFuncForNDRangeBasic(handler &CGH, bool IsPow2WG,
   });
 }

-template <class KernelName>
-struct AuxFastReduce;
+template <class KernelName> struct AuxFastReduce;
 /// Implements a command group function that enqueues a kernel that does one
 /// iteration of reduction of elements in each of work-groups.
 /// This version uses ext::oneapi::reduce() algorithm to reduce elements in each
@@ -1532,8 +1525,7 @@ void reduAuxCGFuncFastReduceImpl(handler &CGH, bool UniformWG,
   });
 }

-template <class KernelName>
-struct AuxNoFastReduceNorAtomic;
+template <class KernelName> struct AuxNoFastReduceNorAtomic;
 /// Implements a command group function that enqueues a kernel that does one
 /// iteration of reduction of elements in each of work-groups.
 /// This version uses tree-reduction algorithm to reduce elements in each
@@ -2050,8 +2042,7 @@ void reduCGFuncImplArray(
       ...);
 }

-template <class KernelName, class Accessor>
-struct MainNDRangeMulti;
+template <class KernelName, class Accessor> struct MainNDRangeMulti;
 template <typename KernelName, typename KernelType, int Dims,
           typename... Reductions, size_t... Is>
 void reduCGFuncMulti(handler &CGH, KernelType KernelFunc,
@@ -2126,8 +2117,7 @@ void reduCGFuncMulti(handler &CGH, KernelType KernelFunc,
       Rest(createReduOutAccs<false>(NWorkGroups, CGH, ReduTuple, ReduIndices));
 }

-template <class KernelName>
-struct MainNDRangeAtomic64;
+template <class KernelName> struct MainNDRangeAtomic64;
 // Specialization for devices with the atomic64 aspect, which guarantees 64 (and
 // temporarily 32) bit floating point support for atomic add.
 // TODO 32 bit floating point atomics are eventually expected to be supported by
@@ -2384,8 +2374,7 @@ void reduCGFunc(handler &CGH, KernelType KernelFunc,
   }
 }

-template <class KernelName, class Accessor>
-struct AuxMulti;
+template <class KernelName, class Accessor> struct AuxMulti;
 template <typename KernelName, typename KernelType, typename... Reductions,
           size_t... Is>
 size_t reduAuxCGFunc(handler &CGH, size_t NWorkItems, size_t MaxWGSize,
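The declarations touched throughout this diff (MainRangeFastReduce, MainNDRangeBasic, AuxFastReduce, and the rest) are declaration-only class templates parameterized on the user's kernel name; collapsing each of them from two lines to one is purely a formatting change. As the KernelName parameter suggests, they appear to act as wrappers that give each internally generated reduction kernel its own distinct name. Below is a minimal, self-contained sketch of that naming pattern, not the library's actual implementation: MainKernelWrapper, submitWrapped, and MyKernel are invented for illustration, and it assumes a SYCL 2020 compiler with the <sycl/sycl.hpp> header.

#include <sycl/sycl.hpp>

// Forward declaration only; the type is never defined. It exists purely to
// name a kernel, analogously to the Main*/Aux* templates in the diff above.
// (Hypothetical helper name, for illustration only.)
template <class KernelName> struct MainKernelWrapper;

template <typename KernelName, typename KernelType>
void submitWrapped(sycl::handler &CGH, KernelType KernelFunc,
                   sycl::range<1> Range) {
  // The generated kernel is named MainKernelWrapper<KernelName>, so it cannot
  // collide with a kernel the caller launches under KernelName itself.
  CGH.parallel_for<MainKernelWrapper<KernelName>>(Range, KernelFunc);
}

class MyKernel; // user-provided kernel name; may remain incomplete

int main() {
  sycl::queue Q;
  Q.submit([&](sycl::handler &CGH) {
     submitWrapped<MyKernel>(
         CGH, [=](sycl::id<1>) { /* user's lambda body */ },
         sycl::range<1>{64});
   }).wait();
  return 0;
}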