cargo/ops/resolve.rs
1//! High-level APIs for executing the resolver.
2//!
3//! This module provides functions for running the resolver given a workspace, including loading
4//! the `Cargo.lock` file and checking if it needs updating.
5//!
6//! There are roughly 3 main functions:
7//!
8//! - [`resolve_ws`]: A simple, high-level function with no options.
9//! - [`resolve_ws_with_opts`]: A medium-level function with options like
10//! user-provided features. This is the most appropriate function to use in
11//! most cases.
12//! - [`resolve_with_previous`]: A low-level function for running the resolver,
13//! providing the most power and flexibility.
14//!
15//! ### Data Structures
16//!
17//! - [`Workspace`]:
18//! Usually created by [`crate::util::command_prelude::ArgMatchesExt::workspace`] which discovers the root of the
19//! workspace, and loads all the workspace members as a [`Package`] object
20//! - [`Package`]
21//! Corresponds with `Cargo.toml` manifest (deserialized as [`Manifest`]) and its associated files.
22//! - [`Target`]s are crates such as the library, binaries, integration test, or examples.
23//! They are what is actually compiled by `rustc`.
24//! Each `Target` defines a crate root, like `src/lib.rs` or `examples/foo.rs`.
25//! - [`PackageId`] --- A unique identifier for a package.
26//! - [`PackageRegistry`]:
27//! The primary interface for how the dependency
28//! resolver finds packages. It contains the `SourceMap`, and handles things
29//! like the `[patch]` table. The dependency resolver
30//! sends a query to the `PackageRegistry` to "get me all packages that match
31//! this dependency declaration". The `Registry` trait provides a generic interface
32//! to the `PackageRegistry`, but this is only used for providing an alternate
33//! implementation of the `PackageRegistry` for testing.
34//! - [`SourceMap`]: Map of all available sources.
35//! - [`Source`]: An abstraction for something that can fetch packages (a remote
36//! registry, a git repo, the local filesystem, etc.). Check out the [source
37//! implementations] for all the details about registries, indexes, git
38//! dependencies, etc.
39//! * [`SourceId`]: A unique identifier for a source.
//! - [`Summary`]: A subset of a [`Manifest`], and is essentially
41//! the information that can be found in a registry index. Queries against the
//! `PackageRegistry` yield a `Summary`. The resolver uses the summary
43//! information to build the dependency graph.
44//! - [`PackageSet`] --- Contains all the `Package` objects. This works with the
45//! [`Downloads`] struct to coordinate downloading packages. It has a reference
46//! to the `SourceMap` to get the `Source` objects which tell the `Downloads`
47//! struct which URLs to fetch.
48//!
49//! [`Package`]: crate::core::package
50//! [`Target`]: crate::core::Target
51//! [`Manifest`]: crate::core::Manifest
52//! [`Source`]: crate::sources::source::Source
53//! [`SourceMap`]: crate::sources::source::SourceMap
54//! [`PackageRegistry`]: crate::core::registry::PackageRegistry
55//! [source implementations]: crate::sources
56//! [`Downloads`]: crate::core::package::Downloads
57
58use crate::core::Dependency;
59use crate::core::GitReference;
60use crate::core::PackageId;
61use crate::core::PackageIdSpec;
62use crate::core::PackageIdSpecQuery;
63use crate::core::PackageSet;
64use crate::core::SourceId;
65use crate::core::Workspace;
66use crate::core::compiler::{CompileKind, RustcTargetData};
67use crate::core::registry::{LockedPatchDependency, PackageRegistry};
68use crate::core::resolver::features::{
69 CliFeatures, FeatureOpts, FeatureResolver, ForceAllTargets, RequestedFeatures, ResolvedFeatures,
70};
71use crate::core::resolver::{
72 self, HasDevUnits, Resolve, ResolveOpts, ResolveVersion, VersionOrdering, VersionPreferences,
73};
74use crate::core::summary::Summary;
75use crate::ops;
76use crate::sources::RecursivePathSource;
77use crate::util::CanonicalUrl;
78use crate::util::cache_lock::CacheLockMode;
79use crate::util::context::FeatureUnification;
80use crate::util::errors::CargoResult;
81use anyhow::Context as _;
82use cargo_util::paths;
83use cargo_util_schemas::core::PartialVersion;
84use std::borrow::Cow;
85use std::collections::{HashMap, HashSet};
86use std::rc::Rc;
87use tracing::{debug, trace};
88
/// Filter deciding whether a [`PackageId`] from the previous lockfile should
/// remain locked to its previous version (`true` = keep it locked).
type Keep<'a> = &'a dyn Fn(&PackageId) -> bool;
91
/// Result for [`resolve_ws_with_opts`].
pub struct WorkspaceResolve<'gctx> {
    /// Packages to be downloaded.
    pub pkg_set: PackageSet<'gctx>,
    /// The resolve for the entire workspace.
    ///
    /// This may be `None` for things like `cargo install` and `-Zavoid-dev-deps`.
    /// This does not include `paths` overrides.
    pub workspace_resolve: Option<Resolve>,
    /// The narrowed resolve, with the specific features enabled.
    pub targeted_resolve: Resolve,
    /// Package specs requested for compilation along with specific features enabled. This usually
    /// has the length of one but there may be more specs with different features when using the
    /// `package` feature resolver.
    pub specs_and_features: Vec<SpecsAndResolvedFeatures>,
}
108
/// Pair of package specs requested for compilation along with enabled features.
///
/// One of these exists per feature-unification group computed by
/// [`resolve_ws_with_opts`].
pub struct SpecsAndResolvedFeatures {
    /// Packages that are supposed to be built.
    pub specs: Vec<PackageIdSpec>,
    /// The features activated per package.
    pub resolved_features: ResolvedFeatures,
}
116
/// Generic help text appended to "unused `[patch]`" warnings when no more
/// specific diagnosis is available (see `emit_warnings_of_unused_patches`).
const UNUSED_PATCH_WARNING: &str = "\
Check that the patched package version and available features are compatible
with the dependency requirements. If the patch has a different version from
what is locked in the Cargo.lock file, run `cargo update` to use the new
version. This may also occur with an optional dependency that is not enabled.";
122
123/// Resolves all dependencies for the workspace using the previous
124/// lock file as a guide if present.
125///
126/// This function will also write the result of resolution as a new lock file
127/// (unless it is an ephemeral workspace such as `cargo install` or `cargo
128/// package`).
129///
130/// This is a simple interface used by commands like `clean`, `fetch`, and
131/// `package`, which don't specify any options or features.
132pub fn resolve_ws<'a>(ws: &Workspace<'a>, dry_run: bool) -> CargoResult<(PackageSet<'a>, Resolve)> {
133 let mut registry = ws.package_registry()?;
134 let resolve = resolve_with_registry(ws, &mut registry, dry_run)?;
135 let packages = get_resolved_packages(&resolve, registry)?;
136 Ok((packages, resolve))
137}
138
/// Resolves dependencies for some packages of the workspace,
/// taking into account `paths` overrides and activated features.
///
/// This function will also write the result of resolution as a new lock file
/// (unless `Workspace::require_optional_deps` is false, such as `cargo
/// install` or `-Z avoid-dev-deps`, or it is an ephemeral workspace such as
/// `cargo install` or `cargo package`).
///
/// `specs` may be empty, which indicates it should resolve all workspace
/// members. In this case, `opts.all_features` must be `true`.
pub fn resolve_ws_with_opts<'gctx>(
    ws: &Workspace<'gctx>,
    target_data: &mut RustcTargetData<'gctx>,
    requested_targets: &[CompileKind],
    cli_features: &CliFeatures,
    specs: &[PackageIdSpec],
    has_dev_units: HasDevUnits,
    force_all_targets: ForceAllTargets,
    dry_run: bool,
) -> CargoResult<WorkspaceResolve<'gctx>> {
    // Group the requested specs according to the feature-unification mode:
    // a single group of the selected specs, a single group covering the whole
    // workspace, or one group per package.
    let feature_unification = ws.resolve_feature_unification();
    let individual_specs = match feature_unification {
        FeatureUnification::Selected => vec![specs.to_owned()],
        FeatureUnification::Workspace => {
            vec![ops::Packages::All(Vec::new()).to_package_id_specs(ws)?]
        }
        FeatureUnification::Package => specs.iter().map(|spec| vec![spec.clone()]).collect(),
    };
    // Flattened view of every spec across all groups; the dependency resolve
    // below always covers the union.
    let specs: Vec<_> = individual_specs
        .iter()
        .map(|specs| specs.iter())
        .flatten()
        .cloned()
        .collect();
    let specs = &specs[..];
    let mut registry = ws.package_registry()?;
    let (resolve, resolved_with_overrides) = if ws.ignore_lock() {
        // Lock file is ignored: resolve from scratch, without a previous
        // resolve to guide us.
        let add_patches = true;
        let resolve = None;
        let resolved_with_overrides = resolve_with_previous(
            &mut registry,
            ws,
            cli_features,
            has_dev_units,
            resolve.as_ref(),
            None,
            specs,
            add_patches,
        )?;
        ops::print_lockfile_changes(ws, None, &resolved_with_overrides, &mut registry)?;
        (resolve, resolved_with_overrides)
    } else if ws.require_optional_deps() {
        // First, resolve the root_package's *listed* dependencies, as well as
        // downloading and updating all remotes and such.
        let resolve = resolve_with_registry(ws, &mut registry, dry_run)?;
        // No need to add patches again, `resolve_with_registry` has done it.
        let add_patches = false;

        // Second, resolve with precisely what we're doing. Filter out
        // transitive dependencies if necessary, specify features, handle
        // overrides, etc.
        add_overrides(&mut registry, ws)?;

        // Warn about `[replace]` entries that either don't apply or carry
        // fields (`features`, `default-features`) the replacement ignores.
        for (replace_spec, dep) in ws.root_replace() {
            if !resolve
                .iter()
                .any(|r| replace_spec.matches(r) && !dep.matches_id(r))
            {
                ws.gctx()
                    .shell()
                    .warn(format!("package replacement is not used: {}", replace_spec))?
            }

            let mut unused_fields = Vec::new();
            if dep.features().len() != 0 {
                unused_fields.push("`features`");
            }
            if !dep.uses_default_features() {
                unused_fields.push("`default-features`")
            }
            if !unused_fields.is_empty() {
                let mut shell = ws.gctx().shell();
                shell.warn(format!(
                    "unused field in replacement for `{}`: {}",
                    dep.package_name(),
                    unused_fields.join(", ")
                ))?;
                shell.note(format!(
                    "configure {} in the `dependencies` entry",
                    unused_fields.join(", ")
                ))?;
            }
        }

        let resolved_with_overrides = resolve_with_previous(
            &mut registry,
            ws,
            cli_features,
            has_dev_units,
            Some(&resolve),
            None,
            specs,
            add_patches,
        )?;
        (Some(resolve), resolved_with_overrides)
    } else {
        // Optional deps not required (e.g. `cargo install`, `-Zavoid-dev-deps`):
        // use the lock file as the previous resolve, but don't write one back.
        let add_patches = true;
        let resolve = ops::load_pkg_lockfile(ws)?;
        let resolved_with_overrides = resolve_with_previous(
            &mut registry,
            ws,
            cli_features,
            has_dev_units,
            resolve.as_ref(),
            None,
            specs,
            add_patches,
        )?;
        // Skipping `print_lockfile_changes` as there are cases where this prints irrelevant
        // information
        (resolve, resolved_with_overrides)
    };

    let pkg_set = get_resolved_packages(&resolved_with_overrides, registry)?;

    let members_with_features = ws.members_with_features(specs, cli_features)?;
    let member_ids = members_with_features
        .iter()
        .map(|(p, _fts)| p.package_id())
        .collect::<Vec<_>>();
    pkg_set.download_accessible(
        &resolved_with_overrides,
        &member_ids,
        has_dev_units,
        requested_targets,
        target_data,
        force_all_targets,
    )?;

    // Run the feature resolver once per spec group (see `individual_specs`
    // above), so each group can get its own narrowed feature set.
    let mut specs_and_features = Vec::new();

    for specs in individual_specs {
        let feature_opts = FeatureOpts::new(ws, has_dev_units, force_all_targets)?;

        // We want to narrow the features to the current specs so that stuff like `cargo check -p a
        // -p b -F a/a,b/b` works and the resolver does not contain that `a` does not have feature
        // `b` and vice-versa. However, resolver v1 needs to see even features of unselected
        // packages turned on if it was because of working directory being inside the unselected
        // package, because they might turn on a feature of a selected package.
        let narrowed_features = match feature_unification {
            FeatureUnification::Package => {
                let mut narrowed_features = cli_features.clone();
                let enabled_features = members_with_features
                    .iter()
                    .filter_map(|(package, cli_features)| {
                        specs
                            .iter()
                            .any(|spec| spec.matches(package.package_id()))
                            .then_some(cli_features.features.iter())
                    })
                    .flatten()
                    .cloned()
                    .collect();
                narrowed_features.features = Rc::new(enabled_features);
                Cow::Owned(narrowed_features)
            }
            FeatureUnification::Selected | FeatureUnification::Workspace => {
                Cow::Borrowed(cli_features)
            }
        };

        let resolved_features = FeatureResolver::resolve(
            ws,
            target_data,
            &resolved_with_overrides,
            &pkg_set,
            &*narrowed_features,
            &specs,
            requested_targets,
            feature_opts,
        )?;

        pkg_set.warn_no_lib_packages_and_artifact_libs_overlapping_deps(
            ws,
            &resolved_with_overrides,
            &member_ids,
            has_dev_units,
            requested_targets,
            target_data,
            force_all_targets,
        )?;

        specs_and_features.push(SpecsAndResolvedFeatures {
            specs,
            resolved_features,
        });
    }

    Ok(WorkspaceResolve {
        pkg_set,
        workspace_resolve: resolve,
        targeted_resolve: resolved_with_overrides,
        specs_and_features,
    })
}
344
345#[tracing::instrument(skip_all)]
346fn resolve_with_registry<'gctx>(
347 ws: &Workspace<'gctx>,
348 registry: &mut PackageRegistry<'gctx>,
349 dry_run: bool,
350) -> CargoResult<Resolve> {
351 let prev = ops::load_pkg_lockfile(ws)?;
352 let mut resolve = resolve_with_previous(
353 registry,
354 ws,
355 &CliFeatures::new_all(true),
356 HasDevUnits::Yes,
357 prev.as_ref(),
358 None,
359 &[],
360 true,
361 )?;
362
363 let print = if !ws.is_ephemeral() && ws.require_optional_deps() {
364 if !dry_run {
365 ops::write_pkg_lockfile(ws, &mut resolve)?
366 } else {
367 true
368 }
369 } else {
370 // This mostly represents
371 // - `cargo install --locked` and the only change is the package is no longer local but
372 // from the registry which is noise
373 // - publish of libraries
374 false
375 };
376 if print {
377 ops::print_lockfile_changes(ws, prev.as_ref(), &resolve, registry)?;
378 }
379 Ok(resolve)
380}
381
/// Resolves all dependencies for a package using an optional previous instance
/// of resolve to guide the resolution process.
///
/// This also takes an optional filter `keep_previous`, which informs the `registry`
/// which package ID should be locked to the previous instance of resolve
/// (often used in pairings with updates). See comments in [`register_previous_locks`]
/// for scenarios that might override this.
///
/// The previous resolve normally comes from a lock file. This function does not
/// read or write lock files from the filesystem.
///
/// `specs` may be empty, which indicates it should resolve all workspace
/// members. In this case, `opts.all_features` must be `true`.
///
/// If `register_patches` is true, then entries from the `[patch]` table in
/// the manifest will be added to the given `PackageRegistry`.
#[tracing::instrument(skip_all)]
pub fn resolve_with_previous<'gctx>(
    registry: &mut PackageRegistry<'gctx>,
    ws: &Workspace<'gctx>,
    cli_features: &CliFeatures,
    has_dev_units: HasDevUnits,
    previous: Option<&Resolve>,
    keep_previous: Option<Keep<'_>>,
    specs: &[PackageIdSpec],
    register_patches: bool,
) -> CargoResult<Resolve> {
    // We only want one Cargo at a time resolving a crate graph since this can
    // involve a lot of frobbing of the global caches.
    let _lock = ws
        .gctx()
        .acquire_package_cache_lock(CacheLockMode::DownloadExclusive)?;

    // Some packages are already loaded when setting up a workspace. This
    // makes it so anything that was already loaded will not be loaded again.
    // Without this there were cases where members would be parsed multiple times
    ws.preload(registry);

    // In case any members were not already loaded or the Workspace is_ephemeral.
    for member in ws.members() {
        registry.add_sources(Some(member.package_id().source_id()))?;
    }

    // Try to keep all from previous resolve if no instruction given.
    let keep_previous = keep_previous.unwrap_or(&|_| true);

    // While registering patches, we will record preferences for particular versions
    // of various packages.
    let mut version_prefs = VersionPreferences::default();
    if ws.gctx().cli_unstable().minimal_versions {
        version_prefs.version_ordering(VersionOrdering::MinimumVersionsFirst)
    }
    // When resolution honors `rust-version`, prefer versions compatible with
    // the workspace members' declared Rust versions, falling back to the
    // current `rustc`'s version when no member declares one.
    if ws.resolve_honors_rust_version() {
        let mut rust_versions: Vec<_> = ws
            .members()
            .filter_map(|p| p.rust_version().map(|rv| rv.as_partial().clone()))
            .collect();
        if rust_versions.is_empty() {
            let rustc = ws.gctx().load_global_rustc(Some(ws))?;
            let rust_version: PartialVersion = rustc.version.clone().into();
            rust_versions.push(rust_version);
        }
        version_prefs.rust_versions(rust_versions);
    }

    let avoid_patch_ids = if register_patches {
        register_patch_entries(registry, ws, previous, &mut version_prefs, keep_previous)?
    } else {
        HashSet::new()
    };

    // Refine `keep` with patches that should avoid locking.
    let keep = |p: &PackageId| keep_previous(p) && !avoid_patch_ids.contains(p);

    let dev_deps = ws.require_optional_deps() || has_dev_units == HasDevUnits::Yes;

    if let Some(r) = previous {
        trace!("previous: {:?}", r);

        // In the case where a previous instance of resolve is available, we
        // want to lock as many packages as possible to the previous version
        // without disturbing the graph structure.
        register_previous_locks(ws, registry, r, &keep, dev_deps);

        // Prefer to use anything in the previous lock file, aka we want to have conservative updates.
        let _span = tracing::span!(tracing::Level::TRACE, "prefer_package_id").entered();
        for id in r.iter().filter(keep) {
            debug!("attempting to prefer {}", id);
            version_prefs.prefer_package_id(id);
        }
    }

    if register_patches {
        registry.lock_patches();
    }

    // Build the root summaries (one per selected workspace member) paired
    // with the per-member options the resolver starts from.
    let summaries: Vec<(Summary, ResolveOpts)> = {
        let _span = tracing::span!(tracing::Level::TRACE, "registry.lock").entered();
        ws.members_with_features(specs, cli_features)?
            .into_iter()
            .map(|(member, features)| {
                let summary = registry.lock(member.summary().clone());
                (
                    summary,
                    ResolveOpts {
                        dev_deps,
                        features: RequestedFeatures::CliFeatures(features),
                    },
                )
            })
            .collect()
    };

    let replace = lock_replacements(ws, previous, &keep);

    let mut resolved = resolver::resolve(
        &summaries,
        &replace,
        registry,
        &version_prefs,
        ResolveVersion::with_rust_version(ws.lowest_rust_version()),
        Some(ws.gctx()),
    )?;

    // Record which `[patch]` entries ended up being used, so leftover ones
    // can be warned about.
    let patches = registry.patches().values().flat_map(|v| v.iter());
    resolved.register_used_patches(patches);

    if register_patches && !resolved.unused_patches().is_empty() {
        emit_warnings_of_unused_patches(ws, &resolved, registry)?;
    }

    if let Some(previous) = previous {
        resolved.merge_from(previous)?;
    }
    let gctx = ws.gctx();
    let mut deferred = gctx.deferred_global_last_use()?;
    deferred.save_no_error(gctx);
    Ok(resolved)
}
521
522/// Read the `paths` configuration variable to discover all path overrides that
523/// have been configured.
524#[tracing::instrument(skip_all)]
525pub fn add_overrides<'a>(
526 registry: &mut PackageRegistry<'a>,
527 ws: &Workspace<'a>,
528) -> CargoResult<()> {
529 let gctx = ws.gctx();
530 let Some(paths) = gctx.paths_overrides()? else {
531 return Ok(());
532 };
533
534 let paths = paths.val.iter().map(|(s, def)| {
535 // The path listed next to the string is the config file in which the
536 // key was located, so we want to pop off the `.cargo/config` component
537 // to get the directory containing the `.cargo` folder.
538 (paths::normalize_path(&def.root(gctx).join(s)), def)
539 });
540
541 for (path, definition) in paths {
542 let id = SourceId::for_path(&path)?;
543 let mut source = RecursivePathSource::new(&path, id, ws.gctx());
544 source.load().with_context(|| {
545 format!(
546 "failed to update path override `{}` \
547 (defined in `{}`)",
548 path.display(),
549 definition
550 )
551 })?;
552 registry.add_override(Box::new(source));
553 }
554 Ok(())
555}
556
557pub fn get_resolved_packages<'gctx>(
558 resolve: &Resolve,
559 registry: PackageRegistry<'gctx>,
560) -> CargoResult<PackageSet<'gctx>> {
561 let ids: Vec<PackageId> = resolve.iter().collect();
562 registry.get(&ids)
563}
564
/// In this function we're responsible for informing the `registry` of all
/// locked dependencies from the previous lock file we had, `resolve`.
///
/// This gets particularly tricky for a couple of reasons. The first is that we
/// want all updates to be conservative, so we actually want to take the
/// `resolve` into account (and avoid unnecessary registry updates and such).
/// The second, however, is that we want to be resilient to updates of
/// manifests. For example if a dependency is added or a version is changed we
/// want to make sure that we properly re-resolve (conservatively) instead of
/// providing an opaque error.
///
/// The logic here is somewhat subtle, but there should be more comments below to
/// clarify things.
///
/// Note that this function, at the time of this writing, is basically the
/// entire fix for issue #4127.
#[tracing::instrument(skip_all)]
fn register_previous_locks(
    ws: &Workspace<'_>,
    registry: &mut PackageRegistry<'_>,
    resolve: &Resolve,
    keep: Keep<'_>,
    dev_deps: bool,
) {
    // Load the `Cargo.toml` behind a path source, if any; returns `None` for
    // non-path sources or when the manifest cannot be loaded.
    let path_pkg = |id: SourceId| {
        if !id.is_path() {
            return None;
        }
        if let Ok(path) = id.url().to_file_path() {
            if let Ok(pkg) = ws.load(&path.join("Cargo.toml")) {
                return Some(pkg);
            }
        }
        None
    };

    // Ok so we've been passed in a `keep` function which basically says "if I
    // return `true` then this package wasn't listed for an update on the command
    // line". That is, if we run `cargo update foo` then `keep(bar)` will return
    // `true`, whereas `keep(foo)` will return `false` (roughly speaking).
    //
    // This isn't actually quite what we want, however. Instead we want to
    // further refine this `keep` function with *all transitive dependencies* of
    // the packages we're not keeping. For example, consider a case like this:
    //
    // * There's a crate `log`.
    // * There's a crate `serde` which depends on `log`.
    //
    // Let's say we then run `cargo update serde`. This may *also* want to
    // update the `log` dependency as our newer version of `serde` may have a
    // new minimum version required for `log`. Now this isn't always guaranteed
    // to work. What'll happen here is we *won't* lock the `log` dependency nor
    // the `log` crate itself, but we will inform the registry "please prefer
    // this version of `log`". That way if our newer version of serde works with
    // the older version of `log`, we conservatively won't update `log`. If,
    // however, nothing else in the dependency graph depends on `log` and the
    // newer version of `serde` requires a new version of `log` it'll get pulled
    // in (as we didn't accidentally lock it to an old version).
    let mut avoid_locking = HashSet::new();
    registry.add_to_yanked_whitelist(resolve.iter().filter(keep));
    for node in resolve.iter() {
        if !keep(&node) {
            add_deps(resolve, node, &mut avoid_locking);
        }
    }

    // Ok, but the above loop isn't the entire story! Updates to the dependency
    // graph can come from two locations, the `cargo update` command or
    // manifests themselves. For example a manifest on the filesystem may
    // have been updated to have an updated version requirement on `serde`. In
    // this case both `keep(serde)` and `keep(log)` return `true` (the `keep`
    // that's an argument to this function). We, however, don't want to keep
    // either of those! Otherwise we'll get obscure resolve errors about locked
    // versions.
    //
    // To solve this problem we iterate over all packages with path sources
    // (aka ones with manifests that are changing) and take a look at all of
    // their dependencies. If any dependency does not match something in the
    // previous lock file, then we're guaranteed that the main resolver will
    // update the source of this dependency no matter what. Knowing this we
    // poison all packages from the same source, forcing them all to get
    // updated.
    //
    // This may seem like a heavy hammer, and it is! It means that if you change
    // anything from crates.io then all of crates.io becomes unlocked. Note,
    // however, that we still want conservative updates. This currently happens
    // because the first candidate the resolver picks is the previously locked
    // version, and only if that fails to activate do we move on and try
    // a different version. (giving the guise of conservative updates)
    //
    // For example let's say we had `serde = "0.1"` written in our lock file.
    // When we later edit this to `serde = "0.1.3"` we don't want to lock serde
    // at its old version, 0.1.1. Instead we want to allow it to update to
    // `0.1.3` and update its own dependencies (like above). To do this *all
    // crates from crates.io* are not locked (aka added to `avoid_locking`).
    // For dependencies like `log` their previous version in the lock file will
    // come up first before newer versions, if newer versions are available.
    {
        let _span = tracing::span!(tracing::Level::TRACE, "poison").entered();
        let mut path_deps = ws.members().cloned().collect::<Vec<_>>();
        let mut visited = HashSet::new();
        while let Some(member) = path_deps.pop() {
            if !visited.insert(member.package_id()) {
                continue;
            }
            let is_ws_member = ws.is_member(&member);
            for dep in member.dependencies() {
                // If this dependency didn't match anything special then we may want
                // to poison the source as it may have been added. If this path
                // dependencies is **not** a workspace member, however, and it's an
                // optional/non-transitive dependency then it won't necessarily
                // be in our lock file. If this shows up then we avoid poisoning
                // this source as otherwise we'd repeatedly update the registry.
                //
                // TODO: this breaks adding an optional dependency in a
                // non-workspace member and then simultaneously editing the
                // dependency on that crate to enable the feature. For now,
                // this bug is better than the always-updating registry though.
                if !is_ws_member && (dep.is_optional() || !dep.is_transitive()) {
                    continue;
                }

                // If dev-dependencies aren't being resolved, skip them.
                if !dep.is_transitive() && !dev_deps {
                    continue;
                }

                // If this is a path dependency, then try to push it onto our
                // worklist.
                if let Some(pkg) = path_pkg(dep.source_id()) {
                    path_deps.push(pkg);
                    continue;
                }

                // If we match *anything* in the dependency graph then we consider
                // ourselves all ok, and assume that we'll resolve to that.
                if resolve.iter().any(|id| dep.matches_ignoring_source(id)) {
                    continue;
                }

                // Ok if nothing matches, then we poison the source of these
                // dependencies and the previous lock file.
                debug!(
                    "poisoning {} because {} looks like it changed {}",
                    dep.source_id(),
                    member.package_id(),
                    dep.package_name()
                );
                for id in resolve
                    .iter()
                    .filter(|id| id.source_id() == dep.source_id())
                {
                    add_deps(resolve, id, &mut avoid_locking);
                }
            }
        }
    }

    // Additionally, here we process all path dependencies listed in the previous
    // resolve. They can not only have their dependencies change but also
    // the versions of the package change as well. If this ends up happening
    // then we want to make sure we don't lock a package ID node that doesn't
    // actually exist. Note that we don't do transitive visits of all the
    // package's dependencies here as that'll be covered below to poison those
    // if they changed.
    //
    // This must come after all other `add_deps` calls to ensure it recursively walks the tree when
    // called.
    for node in resolve.iter() {
        if let Some(pkg) = path_pkg(node.source_id()) {
            if pkg.package_id() != node {
                avoid_locking.insert(node);
            }
        }
    }

    // Alright now that we've got our new, fresh, shiny, and refined `keep`
    // function let's put it to action. Take a look at the previous lock file,
    // filter everything by this callback, and then shove everything else into
    // the registry as a locked dependency.
    let keep = |id: &PackageId| keep(id) && !avoid_locking.contains(id);

    registry.clear_lock();
    {
        let _span = tracing::span!(tracing::Level::TRACE, "register_lock").entered();
        for node in resolve.iter().filter(keep) {
            let deps = resolve
                .deps_not_replaced(node)
                .map(|p| p.0)
                .filter(keep)
                .collect::<Vec<_>>();

            // In the v2 lockfile format and prior the `branch=master` dependency
            // directive was serialized the same way as the no-branch-listed
            // directive. Nowadays in Cargo, however, these two directives are
            // considered distinct and are no longer represented the same way. To
            // maintain compatibility with older lock files we register locked nodes
            // for *both* the master branch and the default branch.
            //
            // Note that this is only applicable for loading older resolves now at
            // this point. All new lock files are encoded as v3-or-later, so this is
            // just compat for loading an old lock file successfully.
            if let Some(node) = master_branch_git_source(node, resolve) {
                registry.register_lock(node, deps.clone());
            }

            registry.register_lock(node, deps);
        }
    }

    /// Recursively add `node` and all its transitive dependencies to `set`.
    fn add_deps(resolve: &Resolve, node: PackageId, set: &mut HashSet<PackageId>) {
        if !set.insert(node) {
            return;
        }
        debug!("ignoring any lock pointing directly at {}", node);
        for (dep, _) in resolve.deps_not_replaced(node) {
            add_deps(resolve, dep, set);
        }
    }
}
786
787fn master_branch_git_source(id: PackageId, resolve: &Resolve) -> Option<PackageId> {
788 if resolve.version() <= ResolveVersion::V2 {
789 let source = id.source_id();
790 if let Some(GitReference::DefaultBranch) = source.git_reference() {
791 let new_source =
792 SourceId::for_git(source.url(), GitReference::Branch("master".to_string()))
793 .unwrap()
794 .with_precise_from(source);
795 return Some(id.with_source_id(new_source));
796 }
797 }
798 None
799}
800
801/// Emits warnings of unused patches case by case.
802///
803/// This function does its best to provide more targeted and helpful
804/// (such as showing close candidates that failed to match). However, that's
805/// not terribly easy to do, so just show a general help message if we cannot.
806fn emit_warnings_of_unused_patches(
807 ws: &Workspace<'_>,
808 resolve: &Resolve,
809 registry: &PackageRegistry<'_>,
810) -> CargoResult<()> {
811 const MESSAGE: &str = "was not used in the crate graph.";
812
813 // Patch package with the source URLs being patch
814 let mut patch_pkgid_to_urls = HashMap::new();
815 for (url, summaries) in registry.patches().iter() {
816 for summary in summaries.iter() {
817 patch_pkgid_to_urls
818 .entry(summary.package_id())
819 .or_insert_with(HashSet::new)
820 .insert(url);
821 }
822 }
823
824 // pkg name -> all source IDs of under the same pkg name
825 let mut source_ids_grouped_by_pkg_name = HashMap::new();
826 for pkgid in resolve.iter() {
827 source_ids_grouped_by_pkg_name
828 .entry(pkgid.name())
829 .or_insert_with(HashSet::new)
830 .insert(pkgid.source_id());
831 }
832
833 let mut unemitted_unused_patches = Vec::new();
834 for unused in resolve.unused_patches().iter() {
835 // Show alternative source URLs if the source URLs being patch
836 // cannot not be found in the crate graph.
837 match (
838 source_ids_grouped_by_pkg_name.get(&unused.name()),
839 patch_pkgid_to_urls.get(unused),
840 ) {
841 (Some(ids), Some(patched_urls))
842 if ids
843 .iter()
844 .all(|id| !patched_urls.contains(id.canonical_url())) =>
845 {
846 use std::fmt::Write;
847 let mut msg = String::new();
848 writeln!(msg, "Patch `{}` {}", unused, MESSAGE)?;
849 write!(
850 msg,
851 "Perhaps you misspelled the source URL being patched.\n\
852 Possible URLs for `[patch.<URL>]`:",
853 )?;
854 for id in ids.iter() {
855 write!(msg, "\n {}", id.display_registry_name())?;
856 }
857 ws.gctx().shell().warn(msg)?;
858 }
859 _ => unemitted_unused_patches.push(unused),
860 }
861 }
862
863 // Show general help message.
864 if !unemitted_unused_patches.is_empty() {
865 let warnings: Vec<_> = unemitted_unused_patches
866 .iter()
867 .map(|pkgid| format!("Patch `{}` {}", pkgid, MESSAGE))
868 .collect();
869 ws.gctx()
870 .shell()
871 .warn(format!("{}\n{}", warnings.join("\n"), UNUSED_PATCH_WARNING))?;
872 }
873
874 return Ok(());
875}
876
877/// Informs `registry` and `version_pref` that `[patch]` entries are available
878/// and preferable for the dependency resolution.
879///
880/// This returns a set of PackageIds of `[patch]` entries, and some related
881/// locked PackageIds, for which locking should be avoided (but which will be
882/// preferred when searching dependencies, via [`VersionPreferences::prefer_patch_deps`]).
883#[tracing::instrument(level = "debug", skip_all, ret)]
884fn register_patch_entries(
885 registry: &mut PackageRegistry<'_>,
886 ws: &Workspace<'_>,
887 previous: Option<&Resolve>,
888 version_prefs: &mut VersionPreferences,
889 keep_previous: Keep<'_>,
890) -> CargoResult<HashSet<PackageId>> {
891 let mut avoid_patch_ids = HashSet::new();
892 for (url, patches) in ws.root_patch()?.iter() {
893 for patch in patches {
894 version_prefs.prefer_dependency(patch.clone());
895 }
896 let Some(previous) = previous else {
897 let patches: Vec<_> = patches.iter().map(|p| (p, None)).collect();
898 let unlock_ids = registry.patch(url, &patches)?;
899 // Since nothing is locked, this shouldn't possibly return anything.
900 assert!(unlock_ids.is_empty());
901 continue;
902 };
903
904 // This is a list of pairs where the first element of the pair is
905 // the raw `Dependency` which matches what's listed in `Cargo.toml`.
906 // The second element is, if present, the "locked" version of
907 // the `Dependency` as well as the `PackageId` that it previously
908 // resolved to. This second element is calculated by looking at the
909 // previous resolve graph, which is primarily what's done here to
910 // build the `registrations` list.
911 let mut registrations = Vec::new();
912 for dep in patches {
913 let candidates = || {
914 previous
915 .iter()
916 .chain(previous.unused_patches().iter().cloned())
917 .filter(&keep_previous)
918 };
919
920 let lock = match candidates().find(|id| dep.matches_id(*id)) {
921 // If we found an exactly matching candidate in our list of
922 // candidates, then that's the one to use.
923 Some(package_id) => {
924 let mut locked_dep = dep.clone();
925 locked_dep.lock_to(package_id);
926 Some(LockedPatchDependency {
927 dependency: locked_dep,
928 package_id,
929 alt_package_id: None,
930 })
931 }
932 None => {
933 // If the candidate does not have a matching source id
934 // then we may still have a lock candidate. If we're
935 // loading a v2-encoded resolve graph and `dep` is a
936 // git dep with `branch = 'master'`, then this should
937 // also match candidates without `branch = 'master'`
938 // (which is now treated separately in Cargo).
939 //
940 // In this scenario we try to convert candidates located
941 // in the resolve graph to explicitly having the
942 // `master` branch (if they otherwise point to
943 // `DefaultBranch`). If this works and our `dep`
944 // matches that then this is something we'll lock to.
945 match candidates().find(|&id| match master_branch_git_source(id, previous) {
946 Some(id) => dep.matches_id(id),
947 None => false,
948 }) {
949 Some(id_using_default) => {
950 let id_using_master = id_using_default.with_source_id(
951 dep.source_id()
952 .with_precise_from(id_using_default.source_id()),
953 );
954
955 let mut locked_dep = dep.clone();
956 locked_dep.lock_to(id_using_master);
957 Some(LockedPatchDependency {
958 dependency: locked_dep,
959 package_id: id_using_master,
960 // Note that this is where the magic
961 // happens, where the resolve graph
962 // probably has locks pointing to
963 // DefaultBranch sources, and by including
964 // this here those will get transparently
965 // rewritten to Branch("master") which we
966 // have a lock entry for.
967 alt_package_id: Some(id_using_default),
968 })
969 }
970
971 // No locked candidate was found
972 None => None,
973 }
974 }
975 };
976
977 registrations.push((dep, lock));
978 }
979
980 let canonical = CanonicalUrl::new(url)?;
981 for (orig_patch, unlock_id) in registry.patch(url, ®istrations)? {
982 // Avoid the locked patch ID.
983 avoid_patch_ids.insert(unlock_id);
984 // Also avoid the thing it is patching.
985 avoid_patch_ids.extend(previous.iter().filter(|id| {
986 orig_patch.matches_ignoring_source(*id)
987 && *id.source_id().canonical_url() == canonical
988 }));
989 }
990 }
991
992 Ok(avoid_patch_ids)
993}
994
995/// Locks each `[replace]` entry to a specific Package ID
996/// if the lockfile contains any corresponding previous replacement.
997fn lock_replacements(
998 ws: &Workspace<'_>,
999 previous: Option<&Resolve>,
1000 keep: Keep<'_>,
1001) -> Vec<(PackageIdSpec, Dependency)> {
1002 let root_replace = ws.root_replace();
1003 let replace = match previous {
1004 Some(r) => root_replace
1005 .iter()
1006 .map(|(spec, dep)| {
1007 for (&key, &val) in r.replacements().iter() {
1008 if spec.matches(key) && dep.matches_id(val) && keep(&val) {
1009 let mut dep = dep.clone();
1010 dep.lock_to(val);
1011 return (spec.clone(), dep);
1012 }
1013 }
1014 (spec.clone(), dep.clone())
1015 })
1016 .collect::<Vec<_>>(),
1017 None => root_replace.to_vec(),
1018 };
1019 replace
1020}