libmambapy: use `Context` explicitly (#3309)
* All Python singletons' lifetimes are bound to the lifetime of a unique `Context` instance. In `libmambapy`:
  - singleton lifetimes are bound to the first `Context` instance created;
  - an error is thrown if more than one `Context` instance exists at any time (this only applies to Python and will be changed at another time);
  - therefore the Python user must create a `Context` before using any other function.
  This specific change does not (yet) change the other functions' APIs to take the `Context` object explicitly.
* Simplify Python expressions.
* Python API change: added the requirement of a `Context` instance. Affected functions:
  - `MultiPackageCache` (constructor)
  - `SubdirData.create_repo`
  - `SubdirIndex.create`
  - `SubdirIndex.download`
  - `clean`
  - `transmute`
  - `get_virtual_packages`
  - `cancel_json_output`
* libmamba loggers are now unregistered when the `Context` instance is destroyed.
* Fixed: don't use `default` as a name.
* Remove quick-test file.
* Linter fixes.
* Updated documentation.
* Tentative fix for a potential data race in libmamba tests.
* libmamba tests: added an assertion to be sure.
* Linter pass.
* Fixup libmambatests.
* Added log to help understand a CI failure.
* Review fixes.
parent d12f3711e6
commit ab088e5d8a
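In practice, the new contract reads as follows; a minimal sketch, assuming only the signatures visible in the diff below (`Context()` taking an optional `ContextOptions`, and `get_virtual_packages(context)`):

    import libmambapy

    # A Context must be created before any other libmambapy call; it drives
    # the lifetime of the library's internal singletons.
    ctx = libmambapy.Context()

    # Functions listed in the commit message now take the context explicitly.
    virtual_pkgs = libmambapy.get_virtual_packages(ctx)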
@@ -55,11 +55,23 @@ The Python bindings to the C++ ``libmamba`` library remain available through ``i
 They are now considered the first class citizen to using Mamba in Python.
 Changes include:
 
-- The global ``Context``, previously available through ``Context()``, must now be accessed through
-  ``Context.instance()``.
-  What's more, it is required to be passed explicitly in a few more functions.
-  Future version of ``libmambapy`` will continue in this direction until there are no global context.
+- The global ``Context``, previously available through ``Context()``, must now be instantiated at least
+  once before using ``libmambapy`` functions and types. Only one instance can exist at any time,
+  but that instance can be destroyed and another recreated later, for example if you use it in
+  specific function scopes.
+
+  What's more, it is required to be passed explicitly in a few more functions. Notably:
+  - ``MultiPackageCache`` (constructor)
+  - ``SubdirData.create_repo``
+  - ``SubdirIndex.create``
+  - ``SubdirIndex.download``
+  - ``clean``
+  - ``transmute``
+  - ``get_virtual_packages``
+  - ``cancel_json_output``
+
+  In version 2, ``Context()`` will throw an exception to allow catching errors more smoothly.
+
 - ``ChannelContext`` is no longer an implicit global variable.
   It must be constructed with one of ``ChannelContext.make_simple`` or
   ``ChannelContext.make_conda_compatible`` (with ``Context.instance`` as argument in most cases)
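As a usage sketch of the changelog entries above (the channel name and expected URL mirror the updated test at the bottom of this diff; assumes a fresh interpreter with no live `Context`):

    import libmambapy

    ctx = libmambapy.Context()

    # ChannelContext is no longer an implicit global: build it explicitly
    # from the context instance.
    cc = libmambapy.ChannelContext.make_conda_compatible(ctx)
    url = cc.make_channel("pkgs/main")[0].url.str()
    assert url == "https://repo.anaconda.com/pkgs/main"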
@@ -272,7 +272,11 @@ namespace mamba
         specs::AuthenticationDataBase m_authentication_info;
         bool m_authentication_infos_loaded = false;
 
-        std::shared_ptr<Logger> logger;
+        class ScopedLogger;
+        std::vector<ScopedLogger> loggers;
+
+        std::shared_ptr<Logger> main_logger();
+        void add_logger(std::shared_ptr<Logger>);
 
         TaskSynchronizer tasksync;
     };
@@ -444,7 +444,9 @@ namespace mamba
     {
         using Request = solver::Request;
 
-        request.jobs.reserve(request.jobs.size() + (!no_pin) * ctx.pinned_packages.size() + !no_py_pin);
+        const auto estimated_jobs_count = request.jobs.size()
+                                          + (!no_pin) * ctx.pinned_packages.size() + !no_py_pin;
+        request.jobs.reserve(estimated_jobs_count);
         if (!no_pin)
         {
             for (const auto& pin : file_pins(prefix_data.path() / "conda-meta" / "pinned"))
@@ -26,6 +26,7 @@
 
 namespace mamba
 {
+
     class Logger : public spdlog::logger
     {
     public:
@@ -63,6 +64,57 @@ namespace mamba
         }
     }
 
+    enum class logger_kind
+    {
+        normal_logger,
+        default_logger,
+    };
+
+    // Associate the registration of a logger to the lifetime of this object.
+    // This is used to help with making sure loggers are unregistered once
+    // their logical owner is destroyed.
+    class Context::ScopedLogger
+    {
+        std::shared_ptr<Logger> m_logger;
+
+    public:
+
+        explicit ScopedLogger(std::shared_ptr<Logger> new_logger, logger_kind kind = logger_kind::normal_logger)
+            : m_logger(std::move(new_logger))
+        {
+            assert(m_logger);
+            if (kind == logger_kind::default_logger)
+            {
+                spdlog::set_default_logger(m_logger);
+            }
+            else
+            {
+                spdlog::register_logger(m_logger);
+            }
+        }
+
+        ~ScopedLogger()
+        {
+            if (m_logger)
+            {
+                spdlog::drop(m_logger->name());
+            }
+        }
+
+        std::shared_ptr<Logger> logger() const
+        {
+            assert(m_logger);
+            return m_logger;
+        }
+
+        ScopedLogger(ScopedLogger&&) = default;
+        ScopedLogger& operator=(ScopedLogger&&) = default;
+
+        ScopedLogger(const ScopedLogger&) = delete;
+        ScopedLogger& operator=(const ScopedLogger&) = delete;
+    };
+
     spdlog::level::level_enum convert_log_level(log_level l)
     {
         return static_cast<spdlog::level::level_enum>(l);
@@ -86,6 +138,16 @@ namespace mamba
         }
     }
 
+    std::shared_ptr<Logger> Context::main_logger()
+    {
+        if (loggers.empty())
+        {
+            return {};
+        }
+
+        return loggers.front().logger();
+    }
+
     void Context::enable_logging_and_signal_handling(Context& context)
     {
         if (use_default_signal_handler_val)
@@ -93,26 +155,25 @@ namespace mamba
             set_default_signal_handler();
         }
 
-        context.logger = std::make_shared<Logger>("libmamba", context.output_params.log_pattern, "\n");
+        context.loggers.clear();  // Make sure we work with a known set of loggers, first one is
+                                  // always the default one.
+
+        context.loggers.emplace_back(
+            std::make_shared<Logger>("libmamba", context.output_params.log_pattern, "\n"),
+            logger_kind::default_logger
+        );
         MainExecutor::instance().on_close(
-            context.tasksync.synchronized([&context] { context.logger->flush(); })
+            context.tasksync.synchronized([&context] { context.main_logger()->flush(); })
         );
 
-        std::shared_ptr<spdlog::logger> libcurl_logger = std::make_shared<Logger>(
-            "libcurl",
-            context.output_params.log_pattern,
-            ""
-        );
-        std::shared_ptr<spdlog::logger> libsolv_logger = std::make_shared<Logger>(
-            "libsolv",
-            context.output_params.log_pattern,
-            ""
+        context.loggers.emplace_back(
+            std::make_shared<Logger>("libcurl", context.output_params.log_pattern, "")
         );
 
-        spdlog::register_logger(libcurl_logger);
-        spdlog::register_logger(libsolv_logger);
+        context.loggers.emplace_back(
+            std::make_shared<Logger>("libsolv", context.output_params.log_pattern, "")
+        );
 
-        spdlog::set_default_logger(context.logger);
         spdlog::set_level(convert_log_level(context.output_params.logging_level));
     }
@@ -384,9 +445,9 @@ namespace mamba
 
     void Context::dump_backtrace_no_guards()
     {
-        if (logger) // REVIEW: is this correct?
+        if (main_logger()) // REVIEW: is this correct?
         {
-            logger->dump_backtrace_no_guards();
+            main_logger()->dump_backtrace_no_guards();
         }
     }
 
@@ -18,26 +18,34 @@ namespace mamba
    void
    execute_tasks_from_concurrent_threads(std::size_t task_count, std::size_t tasks_per_thread, Func work)
    {
-        std::vector<std::thread> producers;
+        const auto estimated_thread_count = (task_count / tasks_per_thread) * 2;
+        std::vector<std::thread> producers(estimated_thread_count);
 
         std::size_t tasks_left_to_launch = task_count;
+        std::size_t thread_idx = 0;
         while (tasks_left_to_launch > 0)
         {
             const std::size_t tasks_to_generate = std::min(tasks_per_thread, tasks_left_to_launch);
-            producers.emplace_back(
-                [=]
-                {
-                    for (std::size_t i = 0; i < tasks_to_generate; ++i)
-                    {
-                        work();
-                    }
-                }
-            );
+            producers[thread_idx] = std::thread{ [=]
+                                                 {
+                                                     for (std::size_t i = 0; i < tasks_to_generate;
+                                                          ++i)
+                                                     {
+                                                         work();
+                                                     }
+                                                 } };
             tasks_left_to_launch -= tasks_to_generate;
+            ++thread_idx;
+            assert(thread_idx < producers.size());
         }
 
+        // Make sure all the producers are finished before continuing.
         for (auto&& t : producers)
         {
-            t.join(); // Make sure all the producers are finished before continuing.
+            if (t.joinable())
+            {
+                t.join();
+            }
         }
     }
 
@@ -6,6 +6,7 @@ __all__ = [
     "Channel",
     "ChannelPriority",
     "CompressedProblemsGraph",
+    "ContextOptions",
     "Context",
     "ExtraPkgInfo",
     "History",
@@ -363,6 +364,26 @@ class CompressedProblemsGraph:
     def tree_message(self) -> str: ...
     pass
 
+class ContextOptions:
+    def __init__(self) -> None: ...
+    @property
+    def json(self) -> bool:
+        """
+        :type: bool
+        """
+    @json.setter
+    def json(self, arg0: bool) -> None:
+        pass
+    @property
+    def enable_logging_and_signal_handling(self) -> bool:
+        """
+        :type: bool
+        """
+    @enable_logging_and_signal_handling.setter
+    def enable_logging_and_signal_handling(self, arg0: bool) -> None:
+        pass
+    pass
+
 class Context:
     class OutputParams:
         def __init__(self) -> None: ...
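A sketch of how the new `ContextOptions` stub above combines with the `Context` constructor change in the next hunk (the field values are only illustrative):

    import libmambapy

    opts = libmambapy.ContextOptions()
    opts.enable_logging_and_signal_handling = True
    opts.json = True  # both properties come from the stub above

    ctx = libmambapy.Context(opts)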
@@ -500,7 +521,7 @@ class Context:
             pass
         pass
 
-    def __init__(self) -> None: ...
+    def __init__(self, options: ContextOptions = ContextOptions()) -> None: ...
     def set_log_level(self, arg0: LogLevel) -> None: ...
     def set_verbosity(self, arg0: int) -> None: ...
     @property
@@ -966,7 +987,7 @@ class MatchSpec:
     pass
 
 class MultiPackageCache:
-    def __init__(self, arg0: typing.List[Path]) -> None: ...
+    def __init__(self, context: Context, arg0: typing.List[Path]) -> None: ...
     def get_tarball_path(self, arg0: PackageInfo, arg1: bool) -> Path: ...
     @property
    def first_writable_path(self) -> Path:
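With the constructor change above, creating a package cache could look like this sketch (the cache directory is hypothetical, and string-to-`Path` conversion is assumed):

    import libmambapy

    ctx = libmambapy.Context()
    cache = libmambapy.MultiPackageCache(ctx, ["/tmp/pkgs"])
    print(cache.first_writable_path)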
@@ -1579,7 +1600,7 @@ class SpecImpl(SpecBase):
 
 class SubdirData:
     def cache_path(self) -> str: ...
-    def create_repo(self, arg0: Pool) -> Repo: ...
+    def create_repo(self, context: Context, arg0: Pool) -> Repo: ...
     def loaded(self) -> bool: ...
     pass
 
@@ -1590,6 +1611,7 @@ class SubdirIndex:
     def __len__(self) -> int: ...
     def create(
         self,
+        context: Context,
         arg0: Channel,
         arg1: str,
         arg2: str,
@@ -1597,7 +1619,7 @@ class SubdirIndex:
         arg4: str,
         arg5: str,
     ) -> None: ...
-    def download(self) -> bool: ...
+    def download(self, context: Context) -> bool: ...
     pass
 
 class SubdirIndexEntry:
@@ -1669,10 +1691,10 @@ class ostream_redirect:
 def cache_fn_url(arg0: str) -> str:
     pass
 
-def cancel_json_output() -> None:
+def cancel_json_output(context: Context) -> None:
     pass
 
-def clean(arg0: int) -> None:
+def clean(context: Context, arg0: int) -> None:
     pass
 
 def create_cache_dir(arg0: Path) -> str:
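A hedged example of the two signature changes above (the flag constant `MAMBA_CLEAN_ALL` is a module attribute, visible later in the bindings diff):

    import libmambapy

    ctx = libmambapy.Context()

    # clean() now requires the context as first argument.
    libmambapy.clean(ctx, libmambapy.MAMBA_CLEAN_ALL)

    # cancel_json_output() likewise takes the context explicitly.
    libmambapy.cancel_json_output(ctx)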
@@ -1684,7 +1706,7 @@ def generate_ed25519_keypair() -> typing.Tuple[str, str]:
 def get_channels(arg0: typing.List[str]) -> typing.List[Channel]:
     pass
 
-def get_virtual_packages() -> typing.List[PackageInfo]:
+def get_virtual_packages(context: Context) -> typing.List[PackageInfo]:
     pass
 
 def init_console() -> None:
@@ -1697,6 +1719,7 @@ def simplify_conflicts(arg0: ProblemsGraph) -> ProblemsGraph:
     pass
 
 def transmute(
+    context: Context,
     source_package: Path,
     destination_package: Path,
     compression_level: int,
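The new `transmute` call shape above, as a sketch (the package file names are hypothetical, paths are assumed convertible from strings, and `compression_threads` is taken from the binding lambda shown later in this diff):

    import libmambapy

    ctx = libmambapy.Context()
    libmambapy.transmute(
        context=ctx,
        source_package="pkg-1.0-0.tar.bz2",
        destination_package="pkg-1.0-0.conda",
        compression_level=3,
        compression_threads=1,
    )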
@@ -54,10 +54,29 @@ deprecated(std::string_view message, std::string_view since_version = "1.5")
 
 namespace mambapy
 {
+    // When using this library we for now still need to have a few singletons available
+    // to avoid the python code to have to create 3-4 objects before starting to work with
+    // mamba functions. Instead, here, we associate the lifetime of all the necessary
+    // singletons to the lifetime of the Context. This is to provide to the user explicit
+    // control over the lifetime and construction options of the Context and library
+    // resources, preventing issues related to default configuration/options.
+    // In the future, we might remove all singletons and provide a simple way to start
+    // working with mamba, but the C++ side needs to be made 100% singleton-less first.
+    //
+    // In the code below we provide a mechanism to associate the lifetime of the
+    // necessary singletons to the lifetime of one Context instance and forbid more
+    // instances in the case of Python (it is theoretically allowed by the C++ api).
+
     class Singletons
     {
     public:
+
+        explicit Singletons(mamba::ContextOptions options)
+            : m_context(std::move(options))
+        {
+        }
+
         mamba::MainExecutor& main_executor()
         {
             return m_main_executor;
@@ -80,34 +99,32 @@ namespace mambapy
 
    private:
 
-        template <class T, class D, class Factory>
-        T& init_once(std::unique_ptr<T, D>& ptr, Factory&& factory)
-        {
-            static std::once_flag init_flag;
-            std::call_once(init_flag, [&] { ptr = std::make_unique<T>(factory()); });
-            if (!ptr)
-            {
-                throw mamba::mamba_error(
-                    fmt::format(
-                        "attempt to use {} singleton instance after destruction",
-                        typeid(T).name()
-                    ),
-                    mamba::mamba_error_code::internal_failure
-                );
-            }
-            return *ptr;
-        }
-
        mamba::MainExecutor m_main_executor;
-        mamba::Context m_context{ { /* .enable_logging_and_signal_handling = */ true } };
+        mamba::Context m_context;
        mamba::Console m_console{ m_context };
        // ChannelContext needs to be lazy initialized, to ensure the Context has been initialized
        // before
        std::unique_ptr<mamba::ChannelContext> p_channel_context = nullptr;
        mamba::Configuration m_config{ m_context };
    };
 
-    Singletons singletons;
+    std::unique_ptr<Singletons> current_singletons;
+
+    Singletons& singletons()
+    {
+        if (current_singletons == nullptr)
+        {
+            throw std::runtime_error("Context instance must be created first");
+        }
+        return *current_singletons;
+    }
+
+    struct destroy_singleton
+    {
+        template <class... Args>
+        void operator()(Args&&...) noexcept
+        {
+            current_singletons.reset();
+        }
+    };
+
    // MSubdirData objects are movable only, and they need to be moved into
    // a std::vector before we call MSudbirData::download. Since we cannot
@@ -152,12 +169,11 @@ namespace mambapy
         }
     }
 
-    bool download()
+    bool download(mamba::Context& ctx)
     {
         using namespace mamba;
         // TODO: expose SubdirDataMonitor to libmambapy and remove this
         // logic
-        Context& ctx = mambapy::singletons.context();
         expected_t<void> download_res;
         if (SubdirDataMonitor::can_monitor(ctx))
         {
@@ -415,15 +431,19 @@ bind_submodule_impl(pybind11::module_ m)
     );
 
     py::class_<MultiPackageCache>(m, "MultiPackageCache")
-        .def(py::init<>(
-            [](const std::vector<fs::u8path>& pkgs_dirs)
-            {
-                return MultiPackageCache{
-                    pkgs_dirs,
-                    mambapy::singletons.context().validation_params,
-                };
-            }
-        ))
+        .def(
+            py::init<>(
+                [](Context& context, const std::vector<fs::u8path>& pkgs_dirs)
+                {
+                    return MultiPackageCache{
+                        pkgs_dirs,
+                        context.validation_params,
+                    };
+                }
+            ),
+            py::arg("context"),
+            py::arg("pkgs_dirs")
+        )
         .def("get_tarball_path", &MultiPackageCache::get_tarball_path)
         .def_property_readonly("first_writable_path", &MultiPackageCache::first_writable_path);
@@ -490,11 +510,14 @@ bind_submodule_impl(pybind11::module_ m)
     py::class_<SubdirData>(m, "SubdirData")
         .def(
             "create_repo",
-            [](SubdirData& subdir, solver::libsolv::Database& db) -> solver::libsolv::RepoInfo
+            [](SubdirData& self, Context& context, solver::libsolv::Database& db
+            ) -> solver::libsolv::RepoInfo
             {
                 deprecated("Use libmambapy.load_subdir_in_database instead", "2.0");
-                return extract(load_subdir_in_database(mambapy::singletons.context(), db, subdir));
-            }
+                return extract(load_subdir_in_database(context, db, self));
+            },
+            py::arg("context"),
+            py::arg("db")
         )
         .def("loaded", &SubdirData::is_loaded)
         .def(
@@ -547,25 +570,17 @@ bind_submodule_impl(pybind11::module_ m)
         .def(
             "create",
             [](SubdirIndex& self,
+               Context& context,
                ChannelContext& channel_context,
                const specs::Channel& channel,
                const std::string& platform,
                const std::string& full_url,
                MultiPackageCache& caches,
                const std::string& repodata_fn,
-               const std::string& url)
-            {
-                self.create(
-                    mambapy::singletons.context(),
-                    channel_context,
-                    channel,
-                    platform,
-                    full_url,
-                    caches,
-                    repodata_fn,
-                    url
-                );
+               const std::string& url) {
+                self.create(context, channel_context, channel, platform, full_url, caches, repodata_fn, url);
             },
+            py::arg("context"),
             py::arg("channel_context"),
             py::arg("channel"),
             py::arg("platform"),
@@ -640,24 +655,35 @@ bind_submodule_impl(pybind11::module_ m)
         .def_readwrite("no_progress_bars", &Context::GraphicsParams::no_progress_bars)
         .def_readwrite("palette", &Context::GraphicsParams::palette);
 
-    py::class_<Context, std::unique_ptr<Context, py::nodelete>> ctx(m, "Context");
-    ctx //
-        .def_static(
-            // Still need a singleton as long as mambatest::singleton::context is used
-            "instance",
-            []() -> auto& { return mambapy::singletons.context(); },
-            py::return_value_policy::reference
+    py::class_<ContextOptions>(m, "ContextOptions")
+        .def(
+            py::init([](bool enable_logging_and_signal_handling = true)
+                     { return ContextOptions{ enable_logging_and_signal_handling }; }),
+            py::arg("enable_logging_and_signal_handling") = true
         )
-        .def(py::init(
-            // Deprecating would lead to confusing error. Better to make sure people stop using it.
-            []() -> std::unique_ptr<Context, py::nodelete>
-            {
-                throw std::invalid_argument( //
-                    "Context() will create a new Context object in the future.\n"
-                    "Use Context.instance() to access the global singleton."
-                );
-            }
-        ))
+        .def_readwrite(
+            "enable_logging_and_signal_handling",
+            &ContextOptions::enable_logging_and_signal_handling
+        );
+
+    // The lifetime of the unique Context instance will determine the lifetime of the other
+    // singletons.
+    using context_ptr = std::unique_ptr<Context, mambapy::destroy_singleton>;
+    auto context_constructor = [](ContextOptions options = {}) -> context_ptr
+    {
+        if (mambapy::current_singletons)
+        {
+            throw std::runtime_error("Only one Context instance can exist at any time");
+        }
+
+        mambapy::current_singletons = std::make_unique<mambapy::Singletons>(options);
+        assert(&mambapy::singletons() == mambapy::current_singletons.get());
+        return context_ptr(&mambapy::singletons().context());
+    };
+
+    py::class_<Context, context_ptr> ctx(m, "Context");
+
+    ctx.def(py::init(context_constructor), py::arg("options") = ContextOptions{ true })
         .def_static("use_default_signal_handler", &Context::use_default_signal_handler)
         .def_readwrite("graphics_params", &Context::graphics_params)
         .def_readwrite("offline", &Context::offline)
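The constructor lambda above implies the following observable behavior from Python; a sketch, with the error text taken from the `throw` above:

    import libmambapy

    ctx = libmambapy.Context()
    try:
        libmambapy.Context()  # a second live instance is rejected
    except RuntimeError as err:
        assert "Only one Context instance can exist at any time" in str(err)

    # destroy_singleton runs when the instance is destroyed, after which
    # a new Context may be created.
    del ctx
    ctx = libmambapy.Context()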
@@ -1144,18 +1170,25 @@ bind_submodule_impl(pybind11::module_ m)
         py::arg("json_str")
     );
 
-    m.def("clean", [](int flags) { return clean(mambapy::singletons.config(), flags); });
+    m.def(
+        "clean",
+        [](Context&, int flags) { return clean(mambapy::singletons().config(), flags); },
+        py::arg("context"),
+        py::arg("flags")
+    );
 
     m.def(
         "transmute",
-        +[](const fs::u8path& pkg_file, const fs::u8path& target, int compression_level, int compression_threads
-        )
+        +[](Context& context,
+            const fs::u8path& pkg_file,
+            const fs::u8path& target,
+            int compression_level,
+            int compression_threads)
         {
-            const auto extract_options = mamba::ExtractOptions::from_context(
-                mambapy::singletons.context()
-            );
+            const auto extract_options = mamba::ExtractOptions::from_context(context);
             return transmute(pkg_file, target, compression_level, compression_threads, extract_options);
         },
+        py::arg("context"),
         py::arg("source_package"),
         py::arg("destination_package"),
         py::arg("compression_level"),
@@ -1171,9 +1204,9 @@ bind_submodule_impl(pybind11::module_ m)
     // py::arg("out_package"), py::arg("compression_level"), py::arg("compression_threads") = 1);
 
 
-    m.def("get_virtual_packages", [] { return get_virtual_packages(mambapy::singletons.context()); });
+    m.def("get_virtual_packages", [](Context& context) { return get_virtual_packages(context); });
 
-    m.def("cancel_json_output", [] { Console::instance().cancel_json_print(); });
+    m.def("cancel_json_output", [](Context&) { mambapy::singletons().console().cancel_json_print(); });
 
     // CLEAN FLAGS
     m.attr("MAMBA_CLEAN_ALL") = MAMBA_CLEAN_ALL;
@@ -1,14 +1,13 @@
 import libmambapy
 
 
-def test_context_singleton():
-    libmambapy.Context.instance().platform = "mambaos-64"
-    ctx = libmambapy.Context.instance()
-    assert ctx.platform == "mambaos-64"
+def test_context_instance_scoped():
+    ctx = libmambapy.Context()  # Initialize and then terminate libmamba internals
+    return ctx
 
 
 def test_channel_context():
-    ctx = libmambapy.Context.instance()
+    ctx = libmambapy.Context()
+
     cc = libmambapy.ChannelContext.make_conda_compatible(ctx)
     assert cc.make_channel("pkgs/main")[0].url.str() == "https://repo.anaconda.com/pkgs/main"
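Building on the updated tests above, the scoped pattern mentioned in the changelog ("specific function scopes") can be sketched as follows; the function body is illustrative:

    import libmambapy

    def list_virtual_packages():
        # The Context lives only for the duration of this function scope;
        # library internals are torn down when it is destroyed.
        ctx = libmambapy.Context()
        return [pkg.name for pkg in libmambapy.get_virtual_packages(ctx)]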