P6222 「P6156 简单题」加强版

Given $K$ and multiple queries $n$ (all sharing the same $K$), the task is to compute $\sum_{i = 1}^n \sum_{j = 1}^n (i + j)^K \gcd(i, j)\, \mu^2(\gcd(i, j))$ for each query, modulo $2^{32}$.

$$
\begin{aligned}
ANS & = \sum_{i = 1}^n \sum_{j = 1}^n (i + j)^K (i, j) \mu^2((i, j)) \\
& = \sum_{i = 1}^n \sum_{j = 1}^n (i + j)^K \sum_{d = 1}^n d \mu^2(d) [\gcd(i, j) = d] \\
& = \sum_{d = 1}^n d \mu^2(d) \sum_{i = 1}^{\lfloor \frac{n}{d} \rfloor} \sum_{j = 1}^{\lfloor \frac{n}{d} \rfloor} (id + jd)^K [\gcd(i, j) = 1] \\
& = \sum_{d = 1}^n d^{K + 1} \mu^2(d) \sum_{i = 1}^{\lfloor \frac{n}{d} \rfloor} \sum_{j = 1}^{\lfloor \frac{n}{d} \rfloor} (i + j)^K \sum_{e \mid (i, j)} \mu(e) \\
& = \sum_{d = 1}^n d^{K + 1} \mu^2(d) \sum_{e = 1}^n \mu(e) \sum_{i = 1}^{\lfloor \frac{n}{de} \rfloor} \sum_{j = 1}^{\lfloor \frac{n}{de} \rfloor} (ie + je)^K \\
& = \sum_{d = 1}^n d^{K + 1} \mu^2(d) \sum_{e = 1}^n \mu(e) e^K \sum_{i = 1}^{\lfloor \frac{n}{de} \rfloor} \sum_{j = 1}^{\lfloor \frac{n}{de} \rfloor} (i + j)^K \\
& = \sum_{T = 1}^n \sum_{i = 1}^{\lfloor \frac{n}{T} \rfloor} \sum_{j = 1}^{\lfloor \frac{n}{T} \rfloor} (i + j)^K \sum_{d \mid T} d^{K + 1} \mu^2(d) \mu\left(\frac{T}{d}\right) \left(\frac{T}{d}\right)^K \\
& = \sum_{T = 1}^n T^K \sum_{i = 1}^{\lfloor \frac{n}{T} \rfloor} \sum_{j = 1}^{\lfloor \frac{n}{T} \rfloor} (i + j)^K \sum_{d \mid T} d \mu^2(d) \mu\left(\frac{T}{d}\right)
\end{aligned}
$$
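The second-to-last step groups the two outer sums by $T = de$. As a quick sanity check, the final form can be compared against the naive double sum on a tiny instance. The following standalone brute force is only an illustration (the helpers `gcd`, `pw`, `mu` and the inline `h` are my own names, not part of the solution code); it should print the same value twice.

```cpp
#include <cstdio>
typedef long long LL;

int gcd(int a, int b) { return b ? gcd(b, a % b) : a; }

// naive power, fine for tiny n and K
LL pw(LL b, int k) { LL r = 1; while (k--) r *= b; return r; }

// Moebius function by trial division
int mu(LL x)
{
    int res = 1;
    for (LL p = 2; p * p <= x; ++p)
        if (x % p == 0)
        {
            x /= p;
            if (x % p == 0) return 0; // squared prime factor
            res = -res;
        }
    if (x > 1) res = -res;
    return res;
}

int main()
{
    const int n = 8, K = 3; // tiny instance
    // left-hand side: the original double sum
    LL lhs = 0;
    for (int i = 1; i <= n; ++i)
        for (int j = 1; j <= n; ++j)
        {
            int g = gcd(i, j);
            lhs += pw(i + j, K) * g * mu(g) * mu(g);
        }
    // right-hand side: sum over T of T^K * h(T) * S(floor(n / T))
    LL rhs = 0;
    for (int T = 1; T <= n; ++T)
    {
        LL h = 0; // h(T) = sum over d | T of d * mu(d)^2 * mu(T / d)
        for (int d = 1; d <= T; ++d)
            if (T % d == 0) h += (LL)d * mu(d) * mu(d) * mu(T / d);
        LL S = 0; // S(m) = sum over i, j <= m of (i + j)^K
        int m = n / T;
        for (int i = 1; i <= m; ++i)
            for (int j = 1; j <= m; ++j)
                S += pw(i + j, K);
        rhs += pw(T, K) * h * S;
    }
    printf("%lld %lld\n", lhs, rhs); // both numbers should agree
    return 0;
}
```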
Now consider how to compute $S(n) = \sum_{i = 1}^n \sum_{j = 1}^n (i + j)^K$. Let $f(n) = \sum_{i = 1}^n i^K$ and $g(n) = \sum_{i = 1}^n f(i)$.
$$
\begin {aligned}
S(n) - S(n - 1) & = \sum _ {j = 1} ^ n (n + j) ^ K + \sum _ {i = 1} ^ {n - 1} (i + n) ^ K \\
& = f(2n) - f(n) + f(2n - 1) - f(n) \\
& = f(2n) + f(2n - 1) - 2f(n)
\end {aligned}
$$

$$
\begin {cases}
S(n) - S(n - 1) = f(2n) + f(2n - 1) -2f(n) \\
S(n - 1) - S(n - 2) = f(2n - 2) + f(2n - 3) -2f(n - 1) \\
\dots \\
S(1) - S(0) = f(2) + f(1) - 2f(1)
\end {cases}
$$

Adding all of these equations, the left-hand sides telescope to $S(n)$, the first two terms on the right add up to $\sum_{m = 1}^{2n} f(m) = g(2n)$, and the last terms add up to $2g(n)$, so $S(n) = g(2n) - 2g(n)$.
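A quick way to convince oneself of $S(n) = g(2n) - 2g(n)$ is to compare it with the direct double sum for a few small values. A minimal check follows (the helper `pw` and the lambda `g` are ad-hoc names, not taken from the solution code):

```cpp
#include <cstdio>
typedef long long LL;

LL pw(LL b, int k) { LL r = 1; while (k--) r *= b; return r; }

int main()
{
    const int K = 2;
    for (int n = 1; n <= 6; ++n)
    {
        // direct S(n) = sum over i, j <= n of (i + j)^K
        LL direct = 0;
        for (int i = 1; i <= n; ++i)
            for (int j = 1; j <= n; ++j)
                direct += pw(i + j, K);
        // g(m) = sum_{t <= m} f(t) with f(t) = sum_{i <= t} i^K
        auto g = [&](int m) {
            LL f = 0, acc = 0;
            for (int t = 1; t <= m; ++t)
            {
                f += pw(t, K);
                acc += f;
            }
            return acc;
        };
        printf("n = %d: %lld %lld\n", n, direct, g(2 * n) - 2 * g(n));
    }
    return 0;
}
```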

Now consider computing $h(n) = \sum_{d \mid n} d \mu^2(d) \mu(\frac{n}{d})$. As a Dirichlet convolution of multiplicative functions it is clearly multiplicative, so it suffices to evaluate $h(p^k)$.
$$
h(p^k) = \begin{cases}
\mu^2(1) \mu(1) = 1, & k = 0 \\
\mu^2(1) \mu(p) + p \mu^2(p) \mu(1) = p - 1, & k = 1 \\
\mu^2(1) \mu(p^2) + p \mu^2(p) \mu(p) + p^2 \mu^2(p^2) \mu(1) = -p, & k = 2 \\
0, & k \ge 3
\end{cases}
$$
For $k \ge 3$, every term has at least one of $\mu^2(d)$ and $\mu(\frac{p^k}{d})$ evaluated at an argument divisible by the square of a prime, so every term is $0$.
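Since $h$ is multiplicative and these prime-power values are all that is needed, $h$ can be filled in with a linear sieve, which is what the array `f` does in the full program below. Here is a stripped-down sketch of just that sieve (using plain `long long` instead of the modulo-$2^{32}$ arithmetic of the real program; `sieve_h` is an illustrative name):

```cpp
#include <cstdio>
#include <vector>

// Sieve h(1..MAXN), where h is multiplicative with
// h(p) = p - 1, h(p^2) = -p, h(p^k) = 0 for k >= 3.
std::vector<long long> sieve_h(int MAXN)
{
    std::vector<long long> h(MAXN + 1);
    std::vector<int> primes;
    std::vector<bool> vis(MAXN + 1, false);
    h[1] = 1;
    for (int i = 2; i <= MAXN; ++i)
    {
        if (!vis[i]) { primes.push_back(i); h[i] = i - 1; }
        for (int p : primes)
        {
            if ((long long)i * p > MAXN) break;
            vis[i * p] = true;
            if (i % p == 0)
            {
                int t = i / p;                                // i * p = t * p^2
                h[i * p] = t % p ? -(long long)p * h[t] : 0;  // h(p^2) = -p, h(p^k>=3) = 0
                break;
            }
            h[i * p] = h[i] * h[p];                           // coprime parts multiply
        }
    }
    return h;
}

int main()
{
    std::vector<long long> h = sieve_h(30);
    for (int i = 1; i <= 12; ++i) printf("h(%d) = %lld\n", i, h[i]);
    return 0;
}
```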

This final expression can be evaluated with divisor-block enumeration: precompute the prefix sums of $T^K h(T)$ and the values $S(\cdot)$, and each query then costs $O(\sqrt n)$. The overall complexity is $O(n + \frac{n \log K}{\ln n} + q\sqrt n)$, where the middle term comes from the fast exponentiations performed only at the primes during the sieve.
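Concretely, each query only visits the $O(\sqrt n)$ blocks on which $\lfloor n / T \rfloor$ is constant. The fragment below sketches just that per-query loop; the names `pre` and `Sval` are placeholders for the prefix-sum arrays, which the full program below calls `f` and `F`:

```cpp
// Answer one query with divisor blocks (mod 2^32 via unsigned overflow).
// pre[T]  = prefix sum of T^K * h(T);  Sval[m] = S(m) = g(2m) - 2g(m).
// Both are assumed to be precomputed, as init() does in the full program.
unsigned query(int n, const unsigned pre[], const unsigned Sval[])
{
    unsigned res = 0;
    for (int l = 1, r; l <= n; l = r + 1)
    {
        r = n / (n / l); // largest r with n / r == n / l
        res += (unsigned long long)(pre[r] - pre[l - 1]) * Sval[n / l];
    }
    return res;
}
```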

Code:
#include <cstdio>
using namespace std;
// Fast unsigned/signed integer input.
template <class Type>
void read(Type &x)
{
    char c;
    bool flag = false;
    while ((c = getchar()) < '0' || c > '9')
        c == '-' && (flag = true);
    x = c - '0';
    while ((c = getchar()) >= '0' && c <= '9')
        x = (x << 3) + (x << 1) + c - '0';
    if (flag) x = ~x + 1;
}
template <class Type, class ...rest>
void read(Type &x, rest &...y) { read(x), read(y...); }
// Fast integer output.
template <class Type>
void write(Type x)
{
    if (x < 0) putchar('-'), x = ~x + 1;
    if (x > 9) write(x / 10);
    putchar(x % 10 + '0');
}
typedef unsigned int UI;
typedef unsigned long long ULL;
// All arithmetic is carried out in unsigned int, i.e. modulo 2^32.
const int N = 2e7 + 10;
int n, m, MAXN; // m is the exponent K; MAXN is doubled so that S(i) = g(2i) - 2g(i) is available
bool vis[N];
int cnt, p[N];
UI f[N], F[N];  // f: h(i), later prefix sums of i^K * h(i);  F: i^K, later S(i)
// b^k modulo 2^32; k defaults to the exponent K.
UI binpow (UI b, int k = m)
{
    UI res = 1;
    for (; k; k >>= 1, b = (ULL)b * b)
        if (k & 1) res = (ULL)res * b;
    return res;
}
void init ()
{
    vis[1] = true;
    f[1] = 1, F[1] = 1;
    // Linear sieve: f[i] = h(i) with h(p) = p - 1, h(p^2) = -p, h(p^k) = 0 for k >= 3;
    // F[i] = i^K is completely multiplicative, so binpow is needed only at the primes.
    for (int i = 2; i <= MAXN; ++i)
    {
        if (!vis[i])
        {
            p[++cnt] = i;
            f[i] = i - 1, F[i] = binpow(i);
        }
        for (int j = 1; j <= cnt && i * p[j] <= MAXN; ++j)
        {
            vis[i * p[j]] = true;
            F[i * p[j]] = (ULL)F[i] * F[p[j]];
            if (i % p[j] == 0)
            {
                int t = i / p[j]; // i * p[j] = t * p[j]^2
                f[i * p[j]] = t % p[j] ? (ULL)f[t] * -p[j] : 0;
                break;
            }
            f[i * p[j]] = (ULL)f[i] * f[p[j]]; // coprime parts multiply
        }
    }
    // f[i] <- prefix sum of T^K * h(T) for T <= i
    for (int i = 2; i <= MAXN; ++i)
        f[i] = (ULL)f[i] * F[i] + f[i - 1];
    // F[i] <- sum of j^K for j <= i (the f(i) of the derivation)
    for (int i = 2; i <= MAXN; ++i) F[i] += F[i - 1];
    // F[i] <- g(i), the prefix sum of the previous prefix sums
    for (int i = 2; i <= MAXN; ++i) F[i] += F[i - 1];
    // F[i] <- S(i) = g(2i) - 2g(i); F[2i] is read before it is overwritten
    for (int i = 1; i << 1 <= MAXN; ++i)
        F[i] = F[i << 1] - (F[i] << 1);
}
int main ()
{
    int T; read(T, MAXN, m); // number of queries, the largest n, the exponent K
    MAXN <<= 1;              // prefix sums are needed up to 2 * max n
    init();
    while (T--)
    {
        read(n);
        UI res = 0;
        // Divisor blocks: n / l is constant on [l, r]
        for (int l = 1, r; l <= n; l = r + 1)
        {
            r = n / (n / l);
            res += (ULL)(f[r] - f[l - 1]) * F[n / l];
        }
        write(res), puts("");
    }
    return 0;
}