
Commit cb1c033

skip old cccl.
1 parent 01089a4 commit cb1c033

File tree

3 files changed: +22 −2 lines changed

python-package/xgboost/testing/multi_target.py
src/tree/gpu_hist/leaf_sum.cu
src/tree/updater_gpu_hist.cuh

python-package/xgboost/testing/multi_target.py

Lines changed: 5 additions & 1 deletion
@@ -14,7 +14,7 @@
 
 from .._typing import ArrayLike
 from ..compat import import_cupy
-from ..core import Booster, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix
+from ..core import Booster, DMatrix, ExtMemQuantileDMatrix, QuantileDMatrix, build_info
 from ..objective import Objective, TreeObjective
 from ..sklearn import XGBClassifier
 from ..training import train

@@ -218,6 +218,10 @@ def run_with_iter(device: Device) -> None:  # pylint: disable=too-many-locals
     X, _, _ = it.as_arrays()
     assert_allclose(device, booster_0.inplace_predict(X), booster_1.inplace_predict(X))
 
+    v = build_info()["THRUST_VERSION"]
+    if v[0] < 3:
+        pytest.xfail("CCCL version too old.")
+
     it = IteratorForTest(
         Xs,
         ys,

src/tree/gpu_hist/leaf_sum.cu

Lines changed: 11 additions & 0 deletions
@@ -3,6 +3,7 @@
  */
 #include <thrust/iterator/tabulate_output_iterator.h>  // for make_tabulate_output_iterator
 #include <thrust/scan.h>                                // for inclusive_scan
+#include <thrust/version.h>                             // for THRUST_MAJOR_VERSION
 
 #include <cstddef>  // for size_t
 #include <cstdint>  // for int32_t

@@ -18,6 +19,12 @@
 #include "xgboost/linalg.h"  // for MatrixView
 #include "xgboost/span.h"    // for Span
 
+#if THRUST_MAJOR_VERSION >= 3
+// do nothing
+#else
+#include "../../common/linalg_op.cuh"  // for tbegin
+#endif
+
 namespace xgboost::tree::cuda_impl {
 void LeafGradSum(Context const* ctx, std::vector<LeafInfo> const& h_leaves,
                  common::Span<GradientQuantiser const> roundings,

@@ -55,8 +62,12 @@ void LeafGradSum(Context const* ctx, std::vector<LeafInfo> const& h_leaves,
     return roundings[t].ToFixedPoint(g);
   });
   // Use an output iterator to implement running sum.
+#if THRUST_MAJOR_VERSION >= 3
   auto out_it = thrust::make_tabulate_output_iterator(
       [=] XGBOOST_DEVICE(std::int32_t idx, GradientPairInt64 v) mutable { out_t(idx) += v; });
+#else
+  auto out_it = linalg::tbegin(out_t);
+#endif
 
   std::size_t n_bytes = 0;
   dh::safe_cuda(cub::DeviceSegmentedReduce::Sum(nullptr, n_bytes, it, out_it, h_leaves.size(),
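For context on the guard above: thrust::make_tabulate_output_iterator hands every value written through the iterator to a callable together with its destination index, so the plain assignment performed by cub::DeviceSegmentedReduce::Sum turns into an in-place accumulation (out_t(idx) += v), while the pre-3.0 fallback writes through linalg::tbegin and simply overwrites. Below is a minimal standalone sketch of the same pattern, not XGBoost code; names such as d_out are illustrative, and it assumes a Thrust/CCCL new enough to ship <thrust/iterator/tabulate_output_iterator.h> plus nvcc's --extended-lambda flag.

// sketch_tabulate_sum.cu -- accumulate segmented sums into a pre-filled buffer.
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/tabulate_output_iterator.h>

#include <cub/device/device_segmented_reduce.cuh>

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  // Two segments: [1, 2, 3] and [4, 5].
  thrust::device_vector<int> values(std::vector<int>{1, 2, 3, 4, 5});
  thrust::device_vector<int> offsets(std::vector<int>{0, 3, 5});
  // The output already holds partial sums from an earlier batch.
  thrust::device_vector<int> out(std::vector<int>{100, 200});

  int const* d_in = thrust::raw_pointer_cast(values.data());
  int const* d_offsets = thrust::raw_pointer_cast(offsets.data());
  int* d_out = thrust::raw_pointer_cast(out.data());

  // Every write through out_it becomes "out[idx] += v" instead of "out[idx] = v".
  auto out_it = thrust::make_tabulate_output_iterator(
      [=] __device__(int idx, int v) { d_out[idx] += v; });

  // Usual CUB two-phase call: query temp storage size, then run the reduction.
  std::size_t n_bytes = 0;
  cub::DeviceSegmentedReduce::Sum(nullptr, n_bytes, d_in, out_it, 2, d_offsets, d_offsets + 1);
  thrust::device_vector<char> tmp(n_bytes);
  cub::DeviceSegmentedReduce::Sum(thrust::raw_pointer_cast(tmp.data()), n_bytes, d_in, out_it, 2,
                                  d_offsets, d_offsets + 1);

  thrust::host_vector<int> h_out = out;        // synchronizing copy back to host
  std::printf("%d %d\n", h_out[0], h_out[1]);  // expected: 106 209
  return 0;
}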

src/tree/updater_gpu_hist.cuh

Lines changed: 6 additions & 1 deletion
@@ -200,7 +200,12 @@ class MultiTargetHistMaker {
     auto d_roundings = this->value_quantizer_->Quantizers();
     // Node indices for all leaves
     std::vector<bst_node_t> leaves_idx(n_leaves);
-
+#if THRUST_MAJOR_VERSION >= 3
+    // do nothing
+#else
+    CHECK_EQ(this->partitioners_.Size(), 1)
+        << "External memory not implemented for old CCCL versions. (thrust < 3.0)";
+#endif
     std::int32_t batch_idx = 0;
     for (auto const& p_part : this->partitioners_) {
       auto leaves = p_part->GetLeaves();
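The version macros behind this guard come from <thrust/version.h> (included in leaf_sum.cu above). The single-batch CHECK_EQ presumably exists because the pre-3.0 path writes the segmented sums through linalg::tbegin, which overwrites rather than accumulates, so summing one external-memory batch after another would discard earlier results. A small standalone sketch of the same compile-time gate follows; the printed strings are illustrative, not XGBoost's.

// sketch_thrust_gate.cu -- report which code path the bundled Thrust selects.
#include <thrust/version.h>

#include <cstdio>

int main() {
  std::printf("Thrust %d.%d.%d (THRUST_VERSION=%d)\n", THRUST_MAJOR_VERSION, THRUST_MINOR_VERSION,
              THRUST_SUBMINOR_VERSION, THRUST_VERSION);
#if THRUST_MAJOR_VERSION >= 3
  std::printf("Thrust >= 3: tabulate output iterator path, multiple batches supported.\n");
#else
  std::printf("Thrust < 3: fallback path, restricted to a single batch.\n");
#endif
  return 0;
}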
