Codeforces Gym 103446H. Life is a Game

It is easy to see that, for each query, the set of vertices visited forms a connected block, and the edges actually used can be restricted to the edges of the minimum spanning tree of the original graph that lie inside that block (if a non-tree edge can be crossed, every edge on the tree path between its endpoints has weight no larger, so the tree path can be crossed as well).

My Solution

For a single query, the simplest solution is to enumerate the edges in increasing order of weight; if an edge touches the connected block that the query currently occupies and the query can afford to cross it (its initial value plus the accumulated point weights is at least the edge weight), replace the query's connected block by the union of the two blocks joined by that edge. The answer to the query is its initial value plus the sum of the point weights inside its final connected block. The total time complexity is \(\mathcal O(n^2)\).
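A minimal sketch of this brute force for a single query might look as follows, assuming the edges have already been sorted by weight; the helper names answerQuery, findRoot and BruteEdge are illustrative and not part of the reference code below.

#include <bits/stdc++.h>
using namespace std;

struct BruteEdge { int u, v; long long w; };

int findRoot(vector<int> &p, int x) {
    return p[x] == x ? x : p[x] = findRoot(p, p[x]);
}

// a[1..n] are the point weights, edges are sorted by increasing weight,
// the query starts at vertex s with initial value val.
long long answerQuery(int n, const vector<long long> &a,
                      const vector<BruteEdge> &edges, int s, long long val) {
    vector<int> p(n + 1);
    vector<long long> sum(n + 1, 0);
    iota(p.begin(), p.end(), 0);
    for (int i = 1; i <= n; ++i) sum[i] = a[i];
    for (const BruteEdge &e : edges) {
        int fu = findRoot(p, e.u), fv = findRoot(p, e.v);
        if (fu == fv) continue;
        int fs = findRoot(p, s);
        // The first time an edge touching the query's block is too expensive,
        // the block can never grow again, so the answer is fixed.
        if ((fu == fs || fv == fs) && val + sum[fs] < e.w)
            return val + sum[fs];
        // Otherwise union the two blocks (Kruskal order) and accumulate sums.
        p[fu] = fv;
        sum[fv] += sum[fu];
    }
    return val + sum[findRoot(p, s)];
}

Running this once per query gives the quadratic bound mentioned above.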

Consider optimization. For every query the common part is enumerating the edges in increasing order of weight; the queries differ only in whether they can pass through the currently enumerated edge. Also notice that, for all queries sitting in the same connected block, the answer depends only on the query's initial value, so consider small-to-large (heuristic) merging. For each connected block, keep a heap of the queries currently inside it, ordered by initial value. When an edge \(e=(u,v)\) of weight \(w\) is processed, first pop from the heaps of the blocks containing \(u\) and \(v\) every query whose initial value plus the block's point-weight sum is smaller than \(w\): such a query can never leave its block, so its answer is already determined. Then heuristically merge the heaps of the two blocks and union the blocks themselves.

Reference code (by qyw)
#include<bits/stdc++.h>
#define pii pair<int,int>
#define fi first
#define se second
using namespace std;

const int N=100005;
long long INF=0x3f3f3f3f3f3f3f3f;
int n,m,q,a[N];
int fa[N],sz[N];
long long rr[N],sum[N];
struct edge{int u,v,w;}e[N];
priority_queue<pii> pq[N]; // per-block heap of (-initial value, query id): the top is the query with the smallest initial value

bool cmp(edge a,edge b){return a.w<b.w;}

int findfa(int x){return x==fa[x]?x:fa[x]=findfa(fa[x]);}

// Settle every query in block x that cannot afford an edge of weight w:
// its answer is its initial value plus the block's point-weight sum.
void dlpq(int x,long long w)
{
	while(!pq[x].empty())
	{
		pii u=pq[x].top();
		if(-u.fi+sum[x]>=w) return;
		rr[u.se]=-u.fi+sum[x],pq[x].pop();
	}
}

// Move all queries of heap x into heap y (small-to-large step; w is unused).
void mgpq(int x,int y,int w)
{
	while(!pq[x].empty())
	{
		pii u=pq[x].top();
		pq[x].pop();
		pq[y].push(u);
	}
}

// Process edge (x,y) of weight w: settle blocked queries in both blocks,
// then merge the smaller heap into the larger one and union the blocks.
void merge(int x,int y,int w)
{
	int fx=findfa(x),fy=findfa(y);
	if(fx==fy) return;
	dlpq(fx,w),dlpq(fy,w);
	if(pq[fx].size()>pq[fy].size()) swap(fx,fy);
	mgpq(fx,fy,w);
	fa[fx]=fy,sum[fy]+=sum[fx];
}

void solve()
{
	sort(e+1,e+m+1,cmp);
	for(int i=1;i<=n;i++) fa[i]=i,sum[i]=a[i];
	for(int i=1;i<=m;i++) merge(e[i].u,e[i].v,e[i].w);
	int ff=findfa(1);
	dlpq(ff,INF); // settle all queries that survived to the final connected block
	for(int i=1;i<=q;i++) printf("%lld\n",rr[i]);
}

int main()
{
	scanf("%d%d%d",&n,&m,&q);
	for(int i=1;i<=n;i++) scanf("%d",&a[i]);
	for(int i=1;i<=m;i++) scanf("%d%d%d",&e[i].u,&e[i].v,&e[i].w);
	for(int i=1;i<=q;i++)
	{
		int x,y;
		scanf("%d%d",&x,&y);
		pq[x].push(pii{-y,i});
	}
	solve();
}
Solution 2

Consider constructing the \(\text{Kruskal}\) reconstruction tree of the original graph, inserting the edges in increasing order of weight.

For each query, start from its leaf and repeatedly jump to the father in the reconstruction tree, until the first edge that cannot be crossed is found (i.e., an internal node whose stored edge weight exceeds the initial value plus the point-weight sum of the subtree below it). The answer is the initial value plus the point-weight sum of the subtree at which the climb stops. Correctness is evident; the time complexity is \(\mathcal O(n^2)\).
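A minimal sketch of this naive climb, assuming the reconstruction tree has already been built with the same meaning of the arrays as in the reference code below (father links, subtree point-weight sums, and the edge weight stored at each internal node); the function name climbNaive is illustrative only.

#include <cstdint>

// father[u] is the father of u in the Kruskal reconstruction tree (0 at the root),
// sa[u] is the sum of the original point weights inside u's subtree,
// b[u] is the edge weight stored at internal node u.
int64_t climbNaive(int leaf, int64_t w,
                   const int *father, const int64_t *sa, const int64_t *b) {
    int u = leaf;
    // Jump to the father while the edge above u can be afforded,
    // i.e. the initial value plus the subtree sum reaches the edge weight.
    while (father[u] != 0 && w + sa[u] >= b[father[u]])
        u = father[u];
    return w + sa[u]; // the query absorbs exactly the subtree of u
}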

Notice that the father-jumping process above can be optimized with binary lifting on the tree. The time complexity becomes \(\mathcal O(n\log n)\).

Reference Code
#include <bits/stdc++.h>
using namespace std;

static constexpr int Maxn = 2e5 + 5, LOG = 19;
static constexpr int64_t inf = 0x3f3f3f3f3f3f3f3f;

int n, m, q, nc;
int64_t a[Maxn];
struct Edge {
  int u, v;
  int64_t w;
  Edge() = default;
  Edge(int u, int v, int64_t w) : u(u), v(v), w(w) { }
  friend bool operator < (const Edge &lhs, const Edge &rhs) {
    return lhs.w < rhs.w;
  }
} e[Maxn];
int fa[Maxn];
int fnd(int x) {
  return fa[x] == x ? x : fa[x] = fnd(fa[x]);
} // fnd
vector<int> g[Maxn];
int64_t b[Maxn], sa[Maxn]; // b: edge weight stored at an internal node, sa: subtree sum of point weights
int64_t c[LOG][Maxn];      // c[j][u]: minimum slack sa[v] - b[father(v)] over the first 2^j nodes v on the upward path from u
int par[LOG][Maxn];        // binary-lifting father table
// DFS over the reconstruction tree: fills par, the subtree sums sa and the slacks c[0][*].
void dfs(int u, int fa) {
  sa[u] = (u > n ? 0 : a[u]); par[0][u] = fa;
  for (int j = 1; j < LOG; ++j) par[j][u] = par[j - 1][par[j - 1][u]];
  for (const int &v: g[u]) dfs(v, u), sa[u] += sa[v];
  c[0][u] = (u == nc ? -inf : sa[u] - b[fa]); // slack for crossing the edge above u; the root gets -inf so the climb never leaves it
} // dfs

int main(void) {

  scanf("%d%d%d", &n, &m, &q);
  for (int i = 1; i <= n; ++i)
    scanf("%lld", &a[i]);
  for (int i = 1; i <= m; ++i)
    scanf("%d%d%lld", &e[i].u, &e[i].v, &e[i].w);
  sort(e + 1, e + m + 1);
  for (int i = 1; i <= n; ++i) fa[i] = i;
  nc = n;
  for (int i = 1; i <= m; ++i) {
    int u = e[i].u, v = e[i].v;
    int64_t w = e[i].w;
    int fu = fnd(u), fv = fnd(v);
    if (fu != fv) {
      ++nc, fa[nc] = nc;
      b[nc] = w;
      fa[fu] = nc, fa[fv] = nc;
      g[nc].push_back(fu);
      g[nc].push_back(fv);
    }
  }
  memset(c, inf, sizeof(c)); // byte-fill with 0x3f: every entry becomes a large positive value
  dfs(nc, 0);
  for (int j = 1; j < LOG; ++j) for (int i = 1; i <= nc; ++i)
    c[j][i] = min(c[j - 1][i], c[j - 1][par[j - 1][i]]);
  while (q--) {
    int x;
    int64_t w;
    scanf("%d%lld", &x, &w);
    // If even the first edge above the starting leaf is too expensive,
    // the query never leaves its starting vertex.
    if (b[par[0][x]] > w + a[x]) {
      printf("%lld\n", w + a[x]);
    } else {
      int u = x;
      // Binary lifting: jump 2^j fathers at a time while every edge in that
      // block can still be afforded (minimum slack plus w stays non-negative).
      for (int j = LOG - 1; j >= 0; --j)
        if (w + c[j][u] >= 0) u = par[j][u];
      printf("%lld\n", sa[u] + w); // the query absorbs the whole subtree of u
    }
  }

  exit(EXIT_SUCCESS);
} // main
