Stores channel into MSubdirData and libsolv Repo appdata pointer (#1544)

Johan Mabille 2022-03-01 10:49:24 +01:00 committed by GitHub
parent 0f09d34c7c
commit 304bdb51b6
18 changed files with 126 additions and 125 deletions
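
The change follows one pattern throughout: every MRepo constructor now writes a back-pointer into libsolv's Repo::appdata (m_repo->appdata = this;), and consumers later in the diff — channel_match in the solver, the URL lookup in MTransaction — recover the owning MRepo, and through it the stored Channel, with a single cast from any Solvable's repo. Below is a minimal standalone sketch of that round-trip using libsolv directly; the Payload struct and the URL string are illustrative stand-ins, not code from this diff.

extern "C"
{
#include <solv/pool.h>
#include <solv/repo.h>
}

#include <cassert>
#include <string>

// Stand-in for mamba's MRepo; in the diff the payload is the MRepo itself,
// which exposes the stored channel via MRepo::channel().
struct Payload
{
    std::string channel_name;
};

int main()
{
    Pool* pool = pool_create();
    Repo* repo = repo_create(pool, "https://conda.anaconda.org/conda-forge/linux-64");

    Payload payload{ "conda-forge" };
    repo->appdata = &payload;  // what every MRepo constructor now does

    // Given any Solvable* s belonging to this repo, the owner is one cast away:
    //     Payload* p = reinterpret_cast<Payload*>(s->repo->appdata);
    assert(reinterpret_cast<Payload*>(repo->appdata)->channel_name == "conda-forge");

    pool_free(pool);
    return 0;
}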

View File

@@ -62,6 +62,9 @@ namespace mamba
friend class ChannelBuilder;
};
bool operator==(const Channel& lhs, const Channel& rhs);
bool operator!=(const Channel& lhs, const Channel& rhs);
const Channel& make_channel(const std::string& value);
std::vector<const Channel*> get_channels(const std::vector<std::string>& channel_names);

View File

@@ -10,6 +10,7 @@
#include <string>
#include <tuple>
#include "channel.hpp"
#include "prefix_data.hpp"
extern "C"
@@ -82,6 +83,12 @@ namespace mamba
const fs::path& filename,
const RepoMetadata& meta);
MRepo(MPool& pool,
const std::string& name,
const fs::path& filename,
const RepoMetadata& meta,
const Channel& channel);
/**
* Constructor.
* @param pool ``libsolv`` pool wrapper
@@ -92,6 +99,12 @@ namespace mamba
~MRepo();
MRepo(const MRepo&) = delete;
MRepo& operator=(const MRepo&) = delete;
MRepo(MRepo&&);
MRepo& operator=(MRepo&&);
void set_installed();
void set_priority(int priority, int subpriority);
void add_package_info(Repodata*, const PackageInfo& pkg_info);
@@ -103,6 +116,7 @@ namespace mamba
bool write() const;
const std::string& url() const;
Repo* repo() const;
const Channel* channel() const;
std::tuple<int, int> priority() const;
std::size_t size() const;
@@ -117,6 +131,7 @@ namespace mamba
RepoMetadata m_metadata;
Repo* m_repo;
const Channel* p_channel = nullptr;
};
} // namespace mamba

View File

@@ -13,6 +13,7 @@
#include "nlohmann/json.hpp"
#include "mamba/core/channel.hpp"
#include "mamba/core/context.hpp"
#include "mamba/core/fetch.hpp"
#include "mamba/core/mamba_fs.hpp"
@@ -43,19 +44,11 @@ namespace mamba
class MSubdirData
{
public:
/**
* Constructor.
* @param name Name of the subdirectory (<channel>/<subdir>)
* @param repodata_url URL of the repodata file
* @param repodata_fn Local path of the repodata file
* @param possible packages caches
* @param is_noarch Local path of the repodata file
*/
MSubdirData(const std::string& name,
const std::string& repodata_url,
const std::string& repodata_fn,
MSubdirData(const Channel& channel,
const std::string& platform,
const std::string& url,
MultiPackageCache& caches,
bool is_noarch);
const std::string& repodata_fn = "repodata.json");
// TODO return seconds as double
fs::file_time_type::duration check_cache(const fs::path& cache_file,
@@ -100,6 +93,7 @@ namespace mamba
bool m_is_noarch;
nlohmann::json m_mod_etag;
std::unique_ptr<TemporaryFile> m_temp_file;
const Channel* p_channel = nullptr;
};
// Contrary to conda original function, this one expects a full url

View File

@@ -121,9 +121,8 @@ namespace mamba
MTransaction(MPool& pool,
const std::vector<MatchSpec>& specs_to_remove,
const std::vector<MatchSpec>& specs_to_install,
MultiPackageCache& caches,
std::vector<MRepo*> repos);
MTransaction(MSolver& solver, MultiPackageCache& caches, std::vector<MRepo*> repos);
MultiPackageCache& caches);
MTransaction(MSolver& solver, MultiPackageCache& caches);
~MTransaction();
@@ -157,7 +156,6 @@ namespace mamba
MultiPackageCache m_multi_cache;
const fs::path m_cache_path;
std::vector<Solvable*> m_to_install, m_to_remove;
std::vector<MRepo*> m_repos;
History::UserRequest m_history_entry;
Transaction* m_transaction;
@@ -169,8 +167,7 @@ namespace mamba
MTransaction create_explicit_transaction_from_urls(MPool& pool,
const std::vector<std::string>& urls,
MultiPackageCache& package_caches,
std::vector<MRepo*>& repos);
MultiPackageCache& package_caches);
} // namespace mamba
#endif // MAMBA_TRANSACTION_HPP

View File

@@ -54,14 +54,7 @@ namespace mamba
{
for (auto& [platform, url] : channel->platform_urls(true))
{
std::string repodata_full_url = concat(url, "/repodata.json");
auto sdir = std::make_shared<MSubdirData>(
concat(channel->canonical_name(), "/", platform),
repodata_full_url,
cache_fn_url(repodata_full_url),
package_caches,
platform == "noarch");
auto sdir = std::make_shared<MSubdirData>(*channel, platform, url, package_caches);
sdir->load();
multi_dl.add(sdir->target());
@@ -118,7 +111,7 @@ namespace mamba
{
MRepo repo = subdir->create_repo(pool);
repo.set_priority(prio.first, prio.second);
repos.push_back(repo);
repos.push_back(std::move(repo));
}
catch (std::runtime_error& e)
{

View File

@@ -368,8 +368,7 @@ namespace mamba
prefix_data.add_virtual_packages(get_virtual_packages());
auto repo = MRepo(pool, prefix_data);
repos.push_back(repo);
repos.push_back(MRepo(pool, prefix_data));
MSolver solver(pool,
{ { SOLVER_FLAG_ALLOW_UNINSTALL, ctx.allow_uninstall },
@@ -436,11 +435,7 @@ namespace mamba
throw std::runtime_error("UnsatisfiableError");
}
std::vector<MRepo*> repo_ptrs;
for (auto& r : repos)
repo_ptrs.push_back(&r);
MTransaction trans(solver, package_caches, repo_ptrs);
MTransaction trans(solver, package_caches);
if (ctx.json)
{
@@ -473,8 +468,7 @@ namespace mamba
fs::path pkgs_dirs(Context::instance().root_prefix / "pkgs");
MultiPackageCache pkg_caches({ pkgs_dirs });
std::vector<MRepo*> repos = {};
auto transaction = create_explicit_transaction_from_urls(pool, specs, pkg_caches, repos);
auto transaction = create_explicit_transaction_from_urls(pool, specs, pkg_caches);
prefix_data.load();
prefix_data.add_virtual_packages(get_virtual_packages());

View File

@@ -72,12 +72,7 @@ namespace mamba
MPool pool;
PrefixData prefix_data(ctx.target_prefix);
prefix_data.load();
auto repo = MRepo(pool, prefix_data);
repos.push_back(repo);
std::vector<MRepo*> repo_ptrs;
for (auto& r : repos)
repo_ptrs.push_back(&r);
repos.push_back(MRepo(pool, prefix_data));
const fs::path pkgs_dirs(ctx.root_prefix / "pkgs");
MultiPackageCache package_caches({ pkgs_dirs });
@@ -94,7 +89,7 @@ namespace mamba
if (force)
{
std::vector<MatchSpec> mspecs(specs.begin(), specs.end());
auto transaction = MTransaction(pool, mspecs, {}, package_caches, repo_ptrs);
auto transaction = MTransaction(pool, mspecs, {}, package_caches);
execute_transaction(transaction);
}
else
@@ -121,7 +116,7 @@ namespace mamba
solver.add_jobs(specs, solver_flag);
solver.solve();
MTransaction transaction(solver, package_caches, repo_ptrs);
MTransaction transaction(solver, package_caches);
execute_transaction(transaction);
}
}

View File

@@ -46,8 +46,7 @@ namespace mamba
prefix_data.add_virtual_packages(get_virtual_packages());
auto repo = MRepo(pool, prefix_data);
repos.push_back(repo);
repos.push_back(MRepo(pool, prefix_data));
MSolver solver(pool,
{ { SOLVER_FLAG_ALLOW_DOWNGRADE, ctx.allow_downgrade },
@@ -106,13 +105,7 @@ namespace mamba
solver.solve();
// TODO this is not so great
std::vector<MRepo*> repo_ptrs;
for (auto& r : repos)
{
repo_ptrs.push_back(&r);
}
MTransaction transaction(solver, package_caches, repo_ptrs);
MTransaction transaction(solver, package_caches);
auto execute_transaction = [&](MTransaction& transaction)
{

View File

@@ -244,6 +244,16 @@ namespace mamba
return build_url(*this, join_url(base, name(), platform), with_credential);
}
bool operator==(const Channel& lhs, const Channel& rhs)
{
return lhs.location() == rhs.location() && lhs.name() == rhs.name();
}
bool operator!=(const Channel& lhs, const Channel& rhs)
{
return !(lhs == rhs);
}
/************************************
* utility functions implementation *
************************************/

View File

@@ -37,9 +37,20 @@ namespace mamba
{
m_url = rsplit(metadata.url, "/", 1)[0];
m_repo = repo_create(pool, m_url.c_str());
m_repo->appdata = this;
read_file(index);
}
MRepo::MRepo(MPool& pool,
const std::string& name,
const fs::path& index,
const RepoMetadata& metadata,
const Channel& channel)
: MRepo(pool, name, index, metadata)
{
p_channel = &channel;
}
MRepo::MRepo(MPool& pool,
const std::string& name,
const std::string& index,
@@ -47,6 +58,7 @@ namespace mamba
: m_url(url)
{
m_repo = repo_create(pool, name.c_str());
m_repo->appdata = this;
read_file(index);
}
@@ -119,6 +131,7 @@ namespace mamba
const std::vector<PackageInfo>& package_infos)
{
m_repo = repo_create(pool, name.c_str());
m_repo->appdata = this;
int flags = 0;
Repodata* data;
data = repo_add_repodata(m_repo, flags);
@@ -132,6 +145,7 @@ namespace mamba
MRepo::MRepo(MPool& pool, const PrefixData& prefix_data)
{
m_repo = repo_create(pool, "installed");
m_repo->appdata = this;
int flags = 0;
Repodata* data;
data = repo_add_repodata(m_repo, flags);
@@ -160,6 +174,32 @@ namespace mamba
// /*reuse_ids*/1);
}
MRepo::MRepo(MRepo&& rhs)
: m_json_file(std::move(rhs.m_json_file))
, m_solv_file(std::move(rhs.m_solv_file))
, m_url(std::move(rhs.m_url))
, m_metadata(std::move(rhs.m_metadata))
, m_repo(rhs.m_repo)
, p_channel(rhs.p_channel)
{
rhs.m_repo = nullptr;
rhs.p_channel = nullptr;
m_repo->appdata = this;
}
MRepo& MRepo::operator=(MRepo&& rhs)
{
using std::swap;
swap(m_json_file, rhs.m_json_file);
swap(m_solv_file, rhs.m_solv_file);
swap(m_url, rhs.m_url);
swap(m_metadata, rhs.m_metadata);
swap(m_repo, rhs.m_repo);
swap(p_channel, rhs.p_channel);
m_repo->appdata = this;
return *this;
}
void MRepo::set_installed()
{
pool_set_installed(m_repo->pool, m_repo);
@@ -186,6 +226,11 @@ namespace mamba
return m_repo;
}
const Channel* MRepo::channel() const
{
return p_channel;
}
std::tuple<int, int> MRepo::priority() const
{
return std::make_tuple(m_repo->priority, m_repo->subpriority);
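
The new move constructor and move assignment above exist because MRepo objects are now stored and moved by value (for example repos.push_back(std::move(repo)) in the fetch code earlier in this diff), and the raw back-pointer parked in Repo::appdata must be re-pointed at the object's new address after every move or it would dangle. Below is a stripped-down sketch of that invariant; FakeRepo and Holder are illustrative stand-ins, not types from the diff.

#include <cassert>
#include <vector>

struct FakeRepo
{
    void* appdata = nullptr;  // mimics libsolv's Repo::appdata
};

class Holder  // mimics MRepo's ownership of a Repo*
{
public:
    explicit Holder(FakeRepo* r)
        : m_repo(r)
    {
        m_repo->appdata = this;
    }

    Holder(Holder&& rhs) noexcept
        : m_repo(rhs.m_repo)
    {
        rhs.m_repo = nullptr;
        m_repo->appdata = this;  // same re-pointing as MRepo::MRepo(MRepo&&)
    }

    FakeRepo* repo() const
    {
        return m_repo;
    }

private:
    FakeRepo* m_repo;
};

int main()
{
    FakeRepo raw;
    std::vector<Holder> repos;
    repos.push_back(Holder(&raw));          // moves the temporary into the vector
    assert(raw.appdata == &repos.front());  // the back-pointer followed the move
    return 0;
}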

View File

@@ -8,6 +8,7 @@
#include "mamba/core/channel.hpp"
#include "mamba/core/output.hpp"
#include "mamba/core/package_info.hpp"
#include "mamba/core/repo.hpp"
#include "mamba/core/util.hpp"
namespace mamba
@@ -36,20 +37,9 @@ namespace mamba
inline bool channel_match(Solvable* s, const std::string& channel)
{
// TODO this could should be a lot better.
// TODO this might match too much (e.g. bioconda would also match
// bioconda-experimental etc) Note: s->repo->name is the URL of the repo
// TODO maybe better to check all repos, select pointers, and compare the
// pointer (s->repo == ptr?)
const Channel& chan = make_channel(s->repo->name);
for (const auto& url : chan.urls(false))
{
if (url.find(channel) != std::string::npos)
{
return true;
}
}
return false;
MRepo* mrepo = reinterpret_cast<MRepo*>(s->repo->appdata);
const Channel* chan = mrepo->channel();
return chan && chan->name() == channel;
}
void MSolver::add_global_job(int job_flag)
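
With the back-pointer available, channel_match no longer builds a Channel from the repo URL and substring-matches the requested name against its URLs — an approach whose removed TODO already noted that "bioconda" would also match "bioconda-experimental" — but compares the stored Channel's name for equality. A toy comparison of the two strategies on plain strings (the string values are made up for illustration, not taken from the diff):

#include <cassert>
#include <string>

int main()
{
    const std::string stored    = "bioconda-experimental";  // channel recorded for the repo
    const std::string requested = "bioconda";                // channel asked for in the spec

    // Old behaviour: substring search over the repo's channel URLs.
    const bool old_match = stored.find(requested) != std::string::npos;  // true: over-matches

    // New behaviour: exact comparison against the Channel name stored via appdata.
    const bool new_match = stored == requested;                          // false

    assert(old_match && !new_match);
    return 0;
}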

View File

@@ -160,22 +160,22 @@ namespace mamba
}
MSubdirData::MSubdirData(const std::string& name,
const std::string& repodata_url,
const std::string& repodata_fn,
MSubdirData::MSubdirData(const Channel& channel,
const std::string& platform,
const std::string& url,
MultiPackageCache& caches,
bool is_noarch)
const std::string& repodata_fn)
: m_progress_bar(ProgressProxy())
, m_loaded(false)
, m_download_complete(false)
, m_repodata_url(repodata_url)
, m_name(name)
, m_json_fn(repodata_fn)
, m_solv_fn(repodata_fn.substr(0, repodata_fn.size() - 4) + "solv")
, m_repodata_url(concat(url, "/", repodata_fn))
, m_name(concat(channel.canonical_name(), "/", platform))
, m_caches(caches)
, m_is_noarch(is_noarch)
, m_is_noarch(platform == "noarch")
, p_channel(&channel)
{
m_json_fn = cache_fn_url(m_repodata_url);
m_solv_fn = m_json_fn.substr(0, m_json_fn.size() - 4) + "solv";
}
fs::file_time_type::duration MSubdirData::check_cache(
@@ -593,7 +593,7 @@ namespace mamba
m_mod_etag.value("_etag", ""),
m_mod_etag.value("_mod", "") };
return MRepo(pool, m_name, cache_path(), meta);
return MRepo(pool, m_name, cache_path(), meta, *p_channel);
}
void MSubdirData::clear_cache()
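
After this change, MSubdirData derives its display name, the full repodata URL and the cache file names from the Channel, platform and base URL it is given, so call sites collapse to the form seen in the fetch code above and in the updated tests later in this diff. A hedged usage sketch follows; the header paths and the /tmp cache directory are assumptions, not taken from the diff.

#include "mamba/core/channel.hpp"
#include "mamba/core/package_cache.hpp"
#include "mamba/core/subdirdata.hpp"

void load_conda_forge_subdirs()
{
    const mamba::Channel& chan = mamba::make_channel("conda-forge");
    mamba::MultiPackageCache caches({ "/tmp/" });

    for (auto& [platform, url] : chan.platform_urls(true))
    {
        // repodata_fn defaults to "repodata.json"; the name, repodata URL and
        // cache file names are now computed inside the constructor.
        mamba::MSubdirData sd(chan, platform, url, caches);
        sd.load();
    }
}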

View File

@@ -420,10 +420,8 @@ namespace mamba
MTransaction::MTransaction(MPool& pool,
const std::vector<MatchSpec>& specs_to_remove,
const std::vector<MatchSpec>& specs_to_install,
MultiPackageCache& caches,
std::vector<MRepo*> repos)
MultiPackageCache& caches)
: m_multi_cache(caches)
, m_repos(repos)
{
// auto& ctx = Context::instance();
std::vector<PackageInfo> pi_result;
@@ -525,11 +523,8 @@ namespace mamba
}
MTransaction::MTransaction(MSolver& solver,
MultiPackageCache& caches,
std::vector<MRepo*> repos)
MTransaction::MTransaction(MSolver& solver, MultiPackageCache& caches)
: m_multi_cache(caches)
, m_repos(repos)
{
if (!solver.is_solved())
{
@@ -1062,17 +1057,9 @@ namespace mamba
for (auto& s : m_to_install)
{
std::string url;
MRepo* mamba_repo = nullptr;
for (auto& r : m_repos)
{
if (r->repo() == s->repo)
{
mamba_repo = r;
break;
}
}
MRepo* mamba_repo = reinterpret_cast<MRepo*>(s->repo->appdata);
std::string url;
if (mamba_repo == nullptr || mamba_repo->url() == "")
{
// use fallback mediadir / mediafile
@@ -1491,8 +1478,7 @@ namespace mamba
MTransaction create_explicit_transaction_from_urls(MPool& pool,
const std::vector<std::string>& urls,
MultiPackageCache& package_caches,
std::vector<MRepo*>& repos)
MultiPackageCache& package_caches)
{
std::vector<MatchSpec> specs_to_install;
for (auto& u : urls)
@@ -1518,6 +1504,6 @@ namespace mamba
}
specs_to_install.push_back(ms);
}
return MTransaction(pool, {}, specs_to_install, package_caches, repos);
return MTransaction(pool, {}, specs_to_install, package_caches);
}
} // namespace mamba

View File

@@ -10,13 +10,10 @@ namespace mamba
#ifdef __linux__
Context::instance().quiet = true;
{
const mamba::Channel& c = mamba::make_channel("conda-forge");
mamba::MultiDownloadTarget multi_dl;
mamba::MultiPackageCache pkg_cache({ "/tmp/" });
mamba::MSubdirData cf("conda-forge/linux-64",
"file:///nonexistent/repodata.json",
"zyx.json",
pkg_cache,
false);
mamba::MSubdirData cf(c, "linux-64", "file:///nonexistent/repodata.json", pkg_cache);
cf.load();
multi_dl.add(cf.target());
@@ -29,13 +26,10 @@ namespace mamba
EXPECT_EQ(cf.target()->result, 37);
}
{
const mamba::Channel& c = mamba::make_channel("conda-forge");
mamba::MultiDownloadTarget multi_dl;
mamba::MultiPackageCache pkg_cache({ "/tmp/" });
mamba::MSubdirData cf("conda-forge/noarch",
"file:///nonexistent/repodata.json",
"zyx.json",
pkg_cache,
true);
mamba::MSubdirData cf(c, "noarch", "file:///nonexistent/repodata.json", pkg_cache);
cf.load();
multi_dl.add(cf.target());
EXPECT_THROW(multi_dl.download(MAMBA_DOWNLOAD_FAILFAST), std::runtime_error);

View File

@@ -104,7 +104,7 @@ PYBIND11_MODULE(bindings, m)
.def("clear", &MRepo::clear);
py::class_<MTransaction>(m, "Transaction")
.def(py::init<MSolver&, MultiPackageCache&, std::vector<MRepo*>&>())
.def(py::init<MSolver&, MultiPackageCache&>())
.def("to_conda", &MTransaction::to_conda)
.def("log_json", &MTransaction::log_json)
.def("print", &MTransaction::print)
@@ -218,11 +218,11 @@ PYBIND11_MODULE(bindings, m)
});
py::class_<MSubdirData>(m, "SubdirData")
.def(py::init<const std::string&,
.def(py::init<const Channel&,
const std::string&,
const std::string&,
MultiPackageCache&,
bool>())
const std::string&>())
.def("create_repo", &MSubdirData::create_repo)
.def("load", &MSubdirData::load)
.def("loaded", &MSubdirData::loaded)

View File

@@ -231,7 +231,7 @@ def remove(args, parser):
return exit_code
package_cache = api.MultiPackageCache(context.pkgs_dirs)
transaction = api.Transaction(solver, package_cache, repos)
transaction = api.Transaction(solver, package_cache)
if not transaction.prompt():
exit(0)
@@ -573,7 +573,7 @@ def install(args, parser, command="install"):
return exit_code
package_cache = api.MultiPackageCache(context.pkgs_dirs)
transaction = api.Transaction(solver, package_cache, repos)
transaction = api.Transaction(solver, package_cache)
mmb_specs, to_link, to_unlink = transaction.to_conda()
specs_to_add = [MatchSpec(m) for m in mmb_specs[0]]

View File

@@ -136,7 +136,7 @@ def mamba_install(prefix, specs, args, env, dry_run=False, *_, **kwargs):
exit(1)
package_cache = api.MultiPackageCache(context.pkgs_dirs)
transaction = api.Transaction(solver, package_cache, repos)
transaction = api.Transaction(solver, package_cache)
mmb_specs, to_link, to_unlink = transaction.to_conda()
specs_to_add = [MatchSpec(m) for m in mmb_specs[0]]

View File

@@ -79,20 +79,12 @@ def get_index(
for channel in api.get_channels(all_channels):
for channel_platform, url in channel.platform_urls(with_credentials=True):
full_url = CondaHttpAuth.add_binstar_token(url + "/" + repodata_fn)
name = None
if channel.name:
name = channel.name + "/" + channel_platform
else:
name = channel.platform_url(channel_platform, with_credentials=False)
full_url = CondaHttpAuth.add_binstar_token(url)
sd = api.SubdirData(
name,
full_url,
api.cache_fn_url(full_url),
pkgs_dirs,
channel_platform == "noarch",
channel, channel_platform, full_url, pkgs_dirs, repodata_fn
)
sd.load()
index.append(