Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion ci/run_ctests.sh
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0

set -euo pipefail
Expand All @@ -26,3 +26,7 @@ for gt in "${GTEST_DIR}"/*_TEST; do
echo "Running gtest ${test_name}"
"${gt}" "$@"
done

# Run C_API_TEST with CPU memory for local solves (excluding time limit tests)
echo "Running gtest C_API_TEST with CUOPT_USE_CPU_MEM_FOR_LOCAL"
CUOPT_USE_CPU_MEM_FOR_LOCAL=1 "${GTEST_DIR}/C_API_TEST" --gtest_filter=-c_api/TimeLimitTestFixture.* "$@"
Comment on lines +30 to +32
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Guard the extra C_API_TEST run when the binary is absent.

The new unconditional invocation will fail in configurations that skip building C_API_TEST. Add an existence check to keep CI/dev flows resilient.

🛡️ Proposed guard
-echo "Running gtest C_API_TEST with CUOPT_USE_CPU_MEM_FOR_LOCAL"
-CUOPT_USE_CPU_MEM_FOR_LOCAL=1 "${GTEST_DIR}/C_API_TEST" --gtest_filter=-c_api/TimeLimitTestFixture.* "$@"
+if [[ -x "${GTEST_DIR}/C_API_TEST" ]]; then
+  echo "Running gtest C_API_TEST with CUOPT_USE_CPU_MEM_FOR_LOCAL"
+  CUOPT_USE_CPU_MEM_FOR_LOCAL=1 "${GTEST_DIR}/C_API_TEST" --gtest_filter=-c_api/TimeLimitTestFixture.* "$@"
+else
+  echo "Skipping C_API_TEST (binary not found)"
+fi
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
# Run C_API_TEST with CPU memory for local solves (excluding time limit tests)
echo "Running gtest C_API_TEST with CUOPT_USE_CPU_MEM_FOR_LOCAL"
CUOPT_USE_CPU_MEM_FOR_LOCAL=1 "${GTEST_DIR}/C_API_TEST" --gtest_filter=-c_api/TimeLimitTestFixture.* "$@"
# Run C_API_TEST with CPU memory for local solves (excluding time limit tests)
if [[ -x "${GTEST_DIR}/C_API_TEST" ]]; then
echo "Running gtest C_API_TEST with CUOPT_USE_CPU_MEM_FOR_LOCAL"
CUOPT_USE_CPU_MEM_FOR_LOCAL=1 "${GTEST_DIR}/C_API_TEST" --gtest_filter=-c_api/TimeLimitTestFixture.* "$@"
else
echo "Skipping C_API_TEST (binary not found)"
fi
🤖 Prompt for AI Agents
In `@ci/run_ctests.sh` around lines 30-32: the added unconditional invocation of
C_API_TEST using CUOPT_USE_CPU_MEM_FOR_LOCAL can fail when the C_API_TEST binary
isn't built. Update the script to check for the executable (e.g., `test -x
"${GTEST_DIR}/C_API_TEST"`) before setting CUOPT_USE_CPU_MEM_FOR_LOCAL
and running the test, and either emit a clear message or silently skip if it is missing.
Ensure you still pass through "$@" and preserve the existing --gtest_filter
argument inside the guarded branch.

63 changes: 43 additions & 20 deletions cpp/cuopt_cli.cpp
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
/* clang-format off */
/*
* SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-FileCopyrightText: Copyright (c) 2025-2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*/
/* clang-format on */

#include <cuopt/linear_programming/mip/solver_settings.hpp>
#include <cuopt/linear_programming/optimization_problem.hpp>
#include <cuopt/linear_programming/optimization_problem_interface.hpp>
#include <cuopt/linear_programming/optimization_problem_utils.hpp>
#include <cuopt/linear_programming/solve.hpp>
#include <mps_parser/parser.hpp>
#include <utilities/logger.hpp>
Expand Down Expand Up @@ -89,7 +91,6 @@ int run_single_file(const std::string& file_path,
bool solve_relaxation,
const std::map<std::string, std::string>& settings_strings)
{
const raft::handle_t handle_{};
cuopt::linear_programming::solver_settings_t<int, double> settings;

try {
Expand Down Expand Up @@ -122,13 +123,31 @@ int run_single_file(const std::string& file_path,
return -1;
}

auto op_problem =
cuopt::linear_programming::mps_data_model_to_optimization_problem(&handle_, mps_data_model);
// Determine memory backend and create problem using interface
// Create handle only for GPU memory backend (avoid CUDA init on CPU-only hosts)
auto memory_backend = cuopt::linear_programming::get_memory_backend_type();
std::unique_ptr<raft::handle_t> handle_ptr;
std::unique_ptr<cuopt::linear_programming::optimization_problem_interface_t<int, double>>
problem_interface;

if (memory_backend == cuopt::linear_programming::memory_backend_t::GPU) {
handle_ptr = std::make_unique<raft::handle_t>();
problem_interface =
std::make_unique<cuopt::linear_programming::gpu_optimization_problem_t<int, double>>(
handle_ptr.get());
} else {
problem_interface =
std::make_unique<cuopt::linear_programming::cpu_optimization_problem_t<int, double>>(nullptr);
}

// Populate the problem from MPS data model
cuopt::linear_programming::populate_from_mps_data_model(problem_interface.get(), mps_data_model);

const bool is_mip =
(op_problem.get_problem_category() == cuopt::linear_programming::problem_category_t::MIP ||
op_problem.get_problem_category() == cuopt::linear_programming::problem_category_t::IP) &&
!solve_relaxation;
const bool is_mip = (problem_interface->get_problem_category() ==
cuopt::linear_programming::problem_category_t::MIP ||
problem_interface->get_problem_category() ==
cuopt::linear_programming::problem_category_t::IP) &&
!solve_relaxation;

try {
auto initial_solution =
Expand Down Expand Up @@ -157,10 +176,10 @@ int run_single_file(const std::string& file_path,
try {
if (is_mip) {
auto& mip_settings = settings.get_mip_settings();
auto solution = cuopt::linear_programming::solve_mip(op_problem, mip_settings);
auto solution = cuopt::linear_programming::solve_mip(problem_interface.get(), mip_settings);
} else {
auto& lp_settings = settings.get_pdlp_settings();
auto solution = cuopt::linear_programming::solve_lp(op_problem, lp_settings);
auto solution = cuopt::linear_programming::solve_lp(problem_interface.get(), lp_settings);
}
} catch (const std::exception& e) {
CUOPT_LOG_ERROR("Error: %s", e.what());
Expand Down Expand Up @@ -334,19 +353,23 @@ int main(int argc, char* argv[])
const auto initial_solution_file = program.get<std::string>("--initial-solution");
const auto solve_relaxation = program.get<bool>("--relaxation");

// All arguments are parsed as string, default values are parsed as int if unused.
const auto num_gpus = program.is_used("--num-gpus")
? std::stoi(program.get<std::string>("--num-gpus"))
: program.get<int>("--num-gpus");

// Only initialize CUDA resources if using GPU memory backend (not remote execution)
auto memory_backend = cuopt::linear_programming::get_memory_backend_type();
std::vector<std::shared_ptr<rmm::mr::device_memory_resource>> memory_resources;

for (int i = 0; i < std::min(raft::device_setter::get_device_count(), num_gpus); ++i) {
cudaSetDevice(i);
memory_resources.push_back(make_async());
rmm::mr::set_per_device_resource(rmm::cuda_device_id{i}, memory_resources.back().get());
if (memory_backend == cuopt::linear_programming::memory_backend_t::GPU) {
// All arguments are parsed as string, default values are parsed as int if unused.
const auto num_gpus = program.is_used("--num-gpus")
? std::stoi(program.get<std::string>("--num-gpus"))
: program.get<int>("--num-gpus");

for (int i = 0; i < std::min(raft::device_setter::get_device_count(), num_gpus); ++i) {
cudaSetDevice(i);
memory_resources.push_back(make_async());
rmm::mr::set_per_device_resource(rmm::cuda_device_id{i}, memory_resources.back().get());
}
cudaSetDevice(0);
}
cudaSetDevice(0);

return run_single_file(file_name, initial_solution_file, solve_relaxation, settings_strings);
}
Loading