Compare commits
776 commits: new-releas...codex/add-
| Author | SHA1 | Date |
|---|---|---|
| | 5100dd28be | |
| | e731596315 | |
| | 641526af81 | |
| | 82a25c037a | |
| | c6fc5c0518 | |
| | b5c2732f88 | |
| | 09fd3e152a | |
| | 3f9424e884 | |
| | 3048cc1ff9 | |
| | 3b766e1aac | |
| | c3b7b7e918 | |
| | 7d0b447e1c | |
| | 33b0e222ca | |
| | 1fc45ffac8 | |
| | 9c2cc7f73c | |
| | 1c5e76d51a | |
| | 7665a6832f | |
| | a06710ff03 | |
| | ad078c3f18 | |
| | 400a6621ee | |
| | bf56787874 | |
| | 08ad7ef257 | |
| | 1c0ce41328 | |
| | 85ac6fa523 | |
| | becc4624bb | |
| | 754ba731fa | |
| | ac9981a1f5 | |
| | 83ef15fd47 | |
| | a3cb938675 | |
| | 9b60988232 | |
| | 98e951f611 | |
| | baca2df8df | |
| | 8a5e23d374 | |
| | 897e017361 | |
| | a3e9ef91ad | |
| | 76dd86d1b3 | |
| | 206a9dfabd | |
| | aaf05910eb | |
| | a0555d5fa6 | |
| | 38ebcbb304 | |
| | 9b5ccac76e | |
| | 87d4b0fff4 | |
| | bd5a9ac632 | |
| | 6650b2f34a | |
| | 5cc58f9bb3 | |
| | baf7f6a6f5 | |
| | 94e9959fe0 | |
| | 7c2fd5202e | |
| | ee01b81f3e | |
| | 0e5d672763 | |
| | cd2b490b40 | |
| | 50f0b83fcd | |
| | 9499164d3c | |
| | 2140d9aca4 | |
| | ccec40ed17 | |
| | ad4dfb21e1 | |
| | 7784b2468e | |
| | 146f9d415f | |
| | 37fd80e4b9 | |
| | 949a93982e | |
| | c4f5651199 | |
| | b0aa8bc9f7 | |
| | c98ffe2130 | |
| | 4812f08a73 | |
| | f3ebb38edf | |
| | 0007aea204 | |
| | b5c25731e6 | |
| | 5297e362f3 | |
| | a58c8000aa | |
| | b27bb367e8 | |
| | d2648eaa39 | |
| | c2902fd200 | |
| | 16b2318242 | |
| | 907cba194f | |
| | 3bf78ff47a | |
| | 921e0c46b6 | |
| | fd899f66aa | |
| | 30ec4f571f | |
| | 7db6b468d9 | |
| | eed7f88f29 | |
| | 94d486579c | |
| | 5206c6f2d6 | |
| | 230f22da86 | |
| | 793668a413 | |
| | 82aa53aa59 | |
| | cd7ff6f9c1 | |
| | c56974cf59 | |
| | dcc265458c | |
| | ecec53a8c1 | |
| | 7d8e81fb2e | |
| | 9fc5d315af | |
| | d84508b4d5 | |
| | 022f5c9e25 | |
| | 3179d6ad0c | |
| | b2f3cb0dfa | |
| | 18e8227dfb | |
| | 7c358a1aee | |
| | 108b2a8bfb | |
| | 66ac07b4f3 | |
| | a2061bf31e | |
| | 6f7ab9c927 | |
| | 9038e9acbd | |
| | 02e627e0bd | |
| | 5b66208a7e | |
| | 591f55edc7 | |
| | e1d9e2489c | |
| | b1693b1c21 | |
| | 49d904ca0a | |
| | ca9351252a | |
| | 935d9d39f8 | |
| | f8213c32b9 | |
| | 14894b4d70 | |
| | 7155778eac | |
| | 4133e5460d | |
| | 73fda8a6ec | |
| | 86df20234b | |
| | 179921a131 | |
| | 9e16a4bb26 | |
| | c5cac2b459 | |
| | 555455d710 | |
| | 765f856ed4 | |
| | 757e3177ed | |
| | d8357e80d2 | |
| | ef1f0c4102 | |
| | 1119f2f5b5 | |
| | bb02398086 | |
| | 3ff7eec8f3 | |
| | d8cbeff386 | |
| | 64f20ab44a | |
| | 57e0423b3a | |
| | c635f6b9a2 | |
| | 7be5427283 | |
| | 7f93e88379 | |
| | 40d4dd36c9 | |
| | d8f38f2298 | |
| | 5c88d1310d | |
| | 4a20d7f7c2 | |
| | 585e5e5973 | |
| | e3111d0a32 | |
| | 2f0e217751 | |
| | 6405cf0a6f | |
| | 6eed4adc65 | |
| | bdd9db579a | |
| | 1107fa1d62 | |
| | efa73257c5 | |
| | 8c08521301 | |
| | 462d5765e2 | |
| | 6eeb2e4076 | |
| | 0094cac675 | |
| | 4ab0893ffb | |
| | e01d1e73e1 | |
| | 471d110c5e | |
| | f89113377a | |
| | 6740e87b4d | |
| | 8b761f232b | |
| | e0c2a7c284 | |
| | ac2f9ae533 | |
| | eedda1ae5c | |
| | 8cecbec7a7 | |
| | 6432ff1257 | |
| | 4359b12003 | |
| | 5358ac0fc2 | |
| | 529a79725e | |
| | 9109ecd8fc | |
| | 84883be513 | |
| | 79328e4292 | |
| | a24799918c | |
| | a31d7b86be | |
| | 7884a98be7 | |
| | c190ba816d | |
| | a3954dd4c6 | |
| | 6e3c048328 | |
| | b750542e6d | |
| | cbb8755972 | |
| | dc36997a08 | |
| | 1630fbdafe | |
| | 341b7a5f2a | |
| | 9547bada3a | |
| | 9d69fce834 | |
| | c6a605ccce | |
| | 4aeb7ef9ad | |
| | a68cbb232b | |
| | e1b3bfe6fb | |
| | f78c46446b | |
| | 1b72880007 | |
| | 29f7915b79 | |
| | 2327db6fdc | |
| | fd02dc782d | |
| | 3a234ec950 | |
| | 9e89d27fcd | |
| | b3ec7ce960 | |
| | baee4949d3 | |
| | 14fe5ef873 | |
| | fc425023f5 | |
| | 9c58e4ce2e | |
| | df6a6d5f4f | |
| | e896c08f9c | |
| | 56bc3c6e45 | |
| | cbef406f9b | |
| | 8a76563018 | |
| | 415c1c5bee | |
| | f334daa979 | |
| | 504207faa6 | |
| | d024749633 | |
| | f14e4a4b67 | |
| | 1e819cdb26 | |
| | 5edfea279d | |
| | c612f9a852 | |
| | 95175cb394 | |
| | cba4a466e5 | |
| | 7c1705712d | |
| | a9e24307cc | |
| | 3a87b4e43b | |
| | 4bcd4cbda1 | |
| | 71ce01c9e1 | |
| | c6d48080a4 | |
| | 46d2f12851 | |
| | 367cd71db9 | |
| | 2af958e12c | |
| | 3cb28875c3 | |
| | dad592c801 | |
| | c171891999 | |
| | 3b1025abbb | |
| | f00dcc276f | |
| | 392c923980 | |
| | 2864015469 | |
| | 8bb799068e | |
| | 063df572b0 | |
| | 966fb47e64 | |
| | 43e09da694 | |
| | 69705df0b3 | |
| | 91a5fea11f | |
| | 467be9ac76 | |
| | 19df96ed56 | |
| | b957ff2ecd | |
| | 91073c1244 | |
| | 926beee832 | |
| | a9415aaaf6 | |
| | c308a794e8 | |
| | bc7559586f | |
| | 04bc643cec | |
| | 33a21d6a7a | |
| | 7b1ef07c41 | |
| | 2f15976b34 | |
| | 20920fa17b | |
| | 53ac3ec0b4 | |
| | ce4f04dad2 | |
| | f81712eb91 | |
| | 31938fb922 | |
| | f8fd9d9eff | |
| | dde14eba7d | |
| | 54c84079c4 | |
| | d0586f09a9 | |
| | 09ac7ed008 | |
| | 97796f39d2 | |
| | 4d7f91b378 | |
| | 69a77222ef | |
| | 0afc3e9e5e | |
| | 65d33bcc0f | |
| | 6a01008a2b | |
| | 6dc01eae3a | |
| | 7b7fe84e0d | |
| | 5c36f4308f | |
| | 45809d1c91 | |
| | 357414c345 | |
| | 260b9120c3 | |
| | 976ea52167 | |
| | 2d69bf2366 | |
| | dee5fe9851 | |
| | 88697c4630 | |
| | 16b8d4945b | |
| | d09c611d15 | |
| | 9247877037 | |
| | 2cec527a22 | |
| | 4b1309cbf2 | |
| | 8b6fe6a98f | |
| | 91463e34f1 | |
| | 1221be30a3 | |
| | 6dfa9cb703 | |
| | e363234172 | |
| | 3d09b6a221 | |
| | 2d6b19e1a2 | |
| | ece9202b61 | |
| | 9d694da939 | |
| | 20c027b79c | |
| | 8878b3d032 | |
| | 1ab9d115cf | |
| | 8ec12d7d68 | |
| | c3370ec5da | |
| | f3ae5a657c | |
| | 825c78a048 | |
| | 3865342c93 | |
| | ac5f461d40 | |
| | f9c601eb7e | |
| | e8b4ac6046 | |
| | 051a6cf974 | |
| | 1c9464b988 | |
| | 6838901788 | |
| | ad5e5d21ca | |
| | 26d821c0de | |
| | 010677cbee | |
| | c110d459fb | |
| | 4d1975e0a7 | |
| | 82734a750c | |
| | 56fa4e1e42 | |
| | ca3e33122e | |
| | fe52311bf4 | |
| | 01b73950ee | |
| | 12880f1ffa | |
| | 53be88b677 | |
| | 3427ead8b8 | |
| | 32652189b0 | |
| | ae376f15fb | |
| | 72fbdac467 | |
| | 0857c7b448 | |
| | 07b4c1c0ed | |
| | 196dc79ec7 | |
| | 24b3da717a | |
| | 98acc4254d | |
| | eac78c7993 | |
| | da1bc0f7bf | |
| | aa4f92f458 | |
| | a96e05d4ae | |
| | 5c95fd92b4 | |
| | 4cb2a62551 | |
| | 5b4fad9e25 | |
| | ea0ac25f38 | |
| | 7688aca7d6 | |
| | a7215ad972 | |
| | 8e2403a7da | |
| | 318554e6bf | |
| | c64979b8dd | |
| | bfe21b29d4 | |
| | e9d9a6ffe8 | |
| | 5313c71a0d | |
| | d36ef3d424 | |
| | 4a4f613238 | |
| | dc6a24618e | |
| | 74a7c6dbb6 | |
| | 67f65f958b | |
| | 78b6ba5cef | |
| | 3f019d34cc | |
| | 304260e484 | |
| | 704bd66b63 | |
| | 1acc162c18 | |
| | 553c97a0c1 | |
| | bd66befcf0 | |
| | 3e769a9c6c | |
| | 19b0a5ae82 | |
| | bd71f7f4ea | |
| | 171ce25ba6 | |
| | 6c5a44f774 | |
| | 5c3c05bf93 | |
| | 67d0999bc3 | |
| | 553a4622bf | |
| | 6f81ef006d | |
| | a04870a662 | |
| | f7d26390c5 | |
| | 141783fb2d | |
| | 2fedd4876e | |
| | e187b0aaf0 | |
| | e95374d7c6 | |
| | 8f2d0cda2f | |
| | 9d261d2b9c | |
| | 7792fe0e4c | |
| | 86259244e4 | |
| | 0ec593fa90 | |
| | 7391d6be73 | |
| | e4e23065f1 | |
| | fb33a24891 | |
| | 78768fd714 | |
| | f2d9912697 | |
| | 9a4ed6bbd7 | |
| | d5ed451299 | |
| | bacbeb3ed4 | |
| | 84b311760f | |
| | 8fbc2e0463 | |
| | 849765712f | |
| | 393bb911c0 | |
| | 4a5f1aebee | |
| | a11d9646e3 | |
| | ed7bc1909c | |
| | e9e5b5642d | |
| | 7524aa7b5e | |
| | 7af1d32ef6 | |
| | 399af801a1 | |
| | 4a72c5ea6e | |
| | 20d6f5fdf4 | |
| | 3d69715dba | |
| | de1766d565 | |
| | 0982c639ae | |
| | 5188b7a6a0 | |
| | 759164831d | |
| | 5431fa2d0c | |
| | e130fd8db9 | |
| | ded554d334 | |
| | 2d31915f0a | |
| | ba3e808802 | |
| | e3488da194 | |
| | 740214e021 | |
| | c51e901f68 | |
| | 8c611dcb4b | |
| | a45b8b1eb1 | |
| | 56f82f3e7f | |
| | 486db3a771 | |
| | b02544bc0b | |
| | e9639ad189 | |
| | 95a4f74d2a | |
| | 293f299c08 | |
| | 80d58ad24c | |
| | 3e83893b3f | |
| | 8c76a8c7dc | |
| | 0780db55e1 | |
| | 1ed7c15118 | |
| | 569bdb6073 | |
| | 1def53b7fe | |
| | f9c98a377d | |
| | 93bf3e8a1f | |
| | d202f3539b | |
| | 12e73d4898 | |
| | 449dd7cc0b | |
| | b0419edda6 | |
| | c0e87abaee | |
| | c8485776fe | |
| | aa3e2d0fe6 | |
| | 98c64f9d5f | |
| | 7d81c17cca | |
| | 652d396a81 | |
| | 1d83c493af | |
| | cf35cbe59e | |
| | 9221c08418 | |
| | 48d43c14b1 | |
| | 776efa74a4 | |
| | b14e83f499 | |
| | a9b6b65238 | |
| | a036b7f122 | |
| | 0bccf23db3 | |
| | 0cbd594512 | |
| | efe93a5f57 | |
| | 3fda66b85b | |
| | ddfb6707b4 | |
| | a69f7a9531 | |
| | d583aa43ca | |
| | 3abb573142 | |
| | d556dada9f | |
| | ce7d49484f | |
| | e4acd18429 | |
| | c2d4784810 | |
| | 76bea6c577 | |
| | 3ff0b0b2c4 | |
| | a1c7dc17ce | |
| | 24723b2f10 | |
| | f998e9e949 | |
| | 73661f7d1f | |
| | b5d4db07d1 | |
| | c6a022132b | |
| | 195c0ccf8a | |
| | b09a86c0c1 | |
| | de43505ae4 | |
| | d7c5b900b8 | |
| | edad7b6a74 | |
| | 829a1f7992 | |
| | d729aa7d5e | |
| | 0d0cef3438 | |
| | d7a112fefe | |
| | a5decaa7cf | |
| | 8dea3f470f | |
| | e02935dc5b | |
| | 24ad2fe2dd | |
| | 571dda6549 | |
| | 006bee4a5a | |
| | dbb751c8f0 | |
| | 3439f7886d | |
| | d418a04602 | |
| | 7047422e48 | |
| | 2bdec1fa5a | |
| | b654c49e55 | |
| | f2cb7d506d | |
| | a6dad3fc6d | |
| | fbcff85ecb | |
| | 788c67c29a | |
| | 2f19d38693 | |
| | 3aae30ed2a | |
| | 593c7ad307 | |
| | 73658c758a | |
| | b6af94cbbb | |
| | 852729ff38 | |
| | 152ac35bc2 | |
| | df63a40606 | |
| | a59c107b23 | |
| | f9fe6f89fe | |
| | 2a82455b3d | |
| | 3a524a3bdd | |
| | 3a66aa8a60 | |
| | 4b45b28f25 | |
| | 9139ef3125 | |
| | 6360d0545a | |
| | 1961adb530 | |
| | 79feab89c4 | |
| | 5d0b13294c | |
| | 67edc2d641 | |
| | 6b569cceb5 | |
| | 6f2fe5954f | |
| | fca1319b7d | |
| | f77f06a3bd | |
| | e62c807295 | |
| | 90df6921b7 | |
| | 5098442086 | |
| | d0014c6793 | |
| | ae7ebc0bd8 | |
| | 1f269f9834 | |
| | 7f1ae5adcf | |
| | 3d00fee6c2 | |
| | 17913f5acf | |
| | c38ac29edb | |
| | 38044d4afe | |
| | 61b93ebf36 | |
| | bf91adf3f8 | |
| | 00026b5f8b | |
| | 8c22396d8b | |
| | b6d6631b12 | |
| | a098483cbb | |
| | f9a297e08d | |
| | bcdd80911f | |
| | b120965b6a | |
| | 16f918621f | |
| | f7574230a1 | |
| | 2879344d9c | |
| | 9f5eef1f38 | |
| | c5aa1bec18 | |
| | b51263664e | |
| | 1e7db0d293 | |
| | 2a54f3c048 | |
| | 1c20b815b3 | |
| | 43a2b26f63 | |
| | 3cf19a1bc2 | |
| | 67a23c3182 | |
| | 796dbaf08c | |
| | 3a3c88a2d0 | |
| | 870296fa7e | |
| | a28046c233 | |
| | 0bba0e074f | |
| | c4c6227962 | |
| | e6c914d2fa | |
| | be8f4fc59a | |
| | fbdf870fbf | |
| | 7b0cca41b4 | |
| | 33d0e9ec8c | |
| | 42f1c67ca8 | |
| | e28c49a8fe | |
| | 54d5a3a259 | |
| | de6b43f334 | |
| | 07f508bd0c | |
| | 62a86dbe8d | |
| | 492ada0ed4 | |
| | d8eef02867 | |
| | 6c7235d6a7 | |
| | 0a09d78fa5 | |
| | 19c3f3efb2 | |
| | e97e8df6ba | |
| | cb6f5323ae | |
| | 47464cedec | |
| | 982d203d91 | |
| | 9307c19f35 | |
| | 605a82793b | |
| | df9ee44d42 | |
| | e9f7d5e73a | |
| | 3529c2e732 | |
| | d9e0b7abab | |
| | b2800fefc6 | |
| | d913e20edc | |
| | c2a71a5abe | |
| | d61615e0b0 | |
| | ac9d83c72f | |
| | ff9149b5c9 | |
| | 4239654722 | |
| | 38474bd66a | |
| | bcfe83f702 | |
| | 32f57c49d6 | |
| | 60ba131ac8 | |
| | a5f627ba1a | |
| | 04d16e6d2b | |
| | 1dd36f9035 | |
| | 6ec4cb33ca | |
| | e7cd8a1c2d | |
| | 4e2852d5ff | |
| | b309bc34e1 | |
| | b8147b64e0 | |
| | aab6ea022e | |
| | dd17ed0e63 | |
| | dbb587d681 | |
| | 768aa06ceb | |
| | 9ffa34b697 | |
| | 740802c491 | |
| | b9ac96c332 | |
| | d06535388a | |
| | 2b73bdf6b0 | |
| | 6aa803d712 | |
| | 320afdea64 | |
| | ccbe72cfc1 | |
| | b9bbd42373 | |
| | 68e9144ce3 | |
| | 9b2b267820 | |
| | ff3524d9b1 | |
| | b99d20b725 | |
| | 768b93140f | |
| | 4750810a67 | |
| | e0e0db4247 | |
| | bccadec887 | |
| | 0759503e50 | |
| | 7f1c020746 | |
| | 5d4e92db7d | |
| | 8b6e88c85c | |
| | 64190dd0c4 | |
| | 7100bcdf04 | |
| | 10cdad039d | |
| | f1eee09cf4 | |
| | 4d48bd31ca | |
| | d628bc4034 | |
| | b179aa9b6f | |
| | 30807f5535 | |
| | 396f430022 | |
| | eb131bebdf | |
| | 5c15837677 | |
| | 2fada16abb | |
| | c37614cbc8 | |
| | 3116f95c1a | |
| | b0e8b66666 | |
| | 3caf48c9be | |
| | 3c6ebb73ae | |
| | 0d9b638636 | |
| | 2ba70b9501 | |
| | 16f98cebc0 | |
| | fe9ff498ce | |
| | eba831ca30 | |
| | dec3d44224 | |
| | 9ed1551125 | |
| | e5e6a34e80 | |
| | 897e766728 | |
| | 9200a6731d | |
| | 61c166ab19 | |
| | 659c8cd953 | |
| | 9ee988753d | |
| | 8ae6c43ca4 | |
| | b6713870ef | |
| | 40477493d3 | |
| | efcf3ac6eb | |
| | 9e43f7beda | |
| | aa9412e1b4 | |
| | cf6c835e18 | |
| | e5ecf291f3 | |
| | 9d0cafcfa6 | |
| | 7715623430 | |
| | f5a4e80e2c | |
| | 8463aabedf | |
| | 7f30144ef2 | |
| | fa5516aad6 | |
| | ca0336af9e | |
| | 65ed1aeade | |
| | 4d283ab386 | |
| | 3ff2a0d0e7 | |
| | 3cd1b3719f | |
| | 9926eb9f95 | |
| | 3abaa82501 | |
| | 88d8cd8650 | |
| | a08f21d66c | |
| | d58286989c | |
| | b58af3349c | |
| | 940df4631f | |
| | 685706e0aa | |
| | 7b0979e134 | |
| | 61ae2de841 | |
| | 5b28eed2c0 | |
| | f8a11779fe | |
| | d11a83c232 | |
| | 3255c7a3fa | |
| | 4756d0a532 | |
| | 7ba2142363 | |
| | 96d1eb0d0d | |
| | 144cfa0eda | |
| | a0dff192ae | |
| | 1fffeeedd2 | |
| | f51b078042 | |
| | b6023a51fb | |
| | 78cfad8b2f | |
| | 68b3dff74a | |
| | bfc4abd6e8 | |
| | 8c77a760fc | |
| | b9bf8ac9d7 | |
| | d6182bedd7 | |
| | 2217904876 | |
| | 2c2362b4d3 | |
| | 612ed3fef2 | |
| | fb2a6d0d04 | |
| | 19d3d39115 | |
| | c1413e6916 | |
| | e7705e661a | |
| | 21b110bfd7 | |
| | 1fcb573909 | |
| | 0f6c5f5453 | |
| | 350ca1511b | |
| | 539263a8ba | |
| | 3f0e265baf | |
| | 21e2538e57 | |
| | 480902bd66 | |
| | 853b9d59d8 | |
| | 6d04284c44 | |
| | 4a50781453 | |
| | 18561c55ce | |
| | 77da48050d | |
| | 9a97aacd85 | |
| | 52daf3936a | |
| | 2f246d19f4 | |
| | 413595542a | |
| | 42a5da854d | |
| | d1d83a6ef7 | |
| | 194050705d | |
| | 989f8c91c8 | |
| | edba5fb5e9 | |
| | faa1defa5c | |
| | f7e0cee1b0 | |
| | b3a0edaa6d | |
| | 9c34b30723 | |
| | 36a5847df5 | |
| | a19379aa58 | |
| | 768d048e1c | |
| | 94c11a0262 | |
| | 649b0bfd02 | |
| | 57a00ec677 | |
| | aeb2114170 | |
| | b8d405fddd | |
| | b32013cb97 | |
| | 226a62a3c0 | |
| | 8e73a482a2 | |
| | 0533aeb814 | |
| | aead6de888 | |
| | 8d82fd4cfe | |
| | 8f44db6499 | |
| | c7553b1280 | |
| | 8b8683f22e | |
| | 774ace6e3b | |
| | 4a8f91a0fc | |
| | 18c9784b61 | |
| | e5d401c67c | |
| | ae77589a98 | |
| | ad373c0e19 | |
| | 51f26d12fe | |
| | f1b60b2016 | |
| | 8c2dc2b1e4 | |
| | dc9a44c12a | |
| | d9753b6349 | |
| | a554c0b143 | |
| | 7381fa95e6 | |
| | 53d1176d53 | |
| | 52c4be0696 | |
| | 13a3b21d19 | |
| | 5cee084340 | |
| | bf00c26a83 | |
| | 3846648c12 | |
| | eb6423875f | |
| | e3524a10a7 | |
| | 468dad6169 | |
| | bc27982992 | |
| | 57e5decb55 | |
| | b6319c6f6e | |
| | 0a902f562f | |
| | 454135856e | |
| | 33fddc27ad | |
| | ce052a4eb5 | |
| | b43d77a56b | |
| | 1635a92218 | |
| | 2a8a1b27e1 | |
| | f5f3cce2c8 | |
| | a085e6315b | |
| | a8d600a3b4 | |
| | 4a2e17447b | |
.claude/settings.local.json (new file, +3)

{
  "enableAllProjectMcpServers": false
}
.gitattributes (vendored, new file, +12)

# Documentation
*.html linguist-documentation
docs/* linguist-documentation
docs/examples/* linguist-documentation
docs/md_v2/* linguist-documentation

# Explicitly mark Python as the main language
*.py linguist-detectable=true
*.py linguist-language=Python

# Exclude HTML from language statistics
*.html linguist-detectable=false
.github/DISCUSSION_TEMPLATE/feature-requests.yml (vendored, new file, +59)

title: "[Feature Request]: "
labels: ["⚙️ New"]
body:
  - type: markdown
    attributes:
      value: |
        Thank you for your interest in suggesting a new feature! Before you submit, please take a moment to check if it
        already exists in this discussions category to avoid duplicates. 😊

  - type: textarea
    id: needs_to_be_done
    attributes:
      label: What needs to be done?
      description: Please describe the feature or functionality you'd like to see.
      placeholder: "e.g., Return alt text along with images scraped from a webpage in Result"
    validations:
      required: true

  - type: textarea
    id: problem_to_solve
    attributes:
      label: What problem does this solve?
      description: Explain the pain point or issue this feature will help address.
      placeholder: "e.g., Bypass Captchas added by Cloudflare"
    validations:
      required: true

  - type: textarea
    id: target_users
    attributes:
      label: Target users/beneficiaries
      description: Who would benefit from this feature? (e.g., specific teams, developers, users, etc.)
      placeholder: "e.g., Marketing teams, developers"
    validations:
      required: false

  - type: textarea
    id: current_workarounds
    attributes:
      label: Current alternatives/workarounds
      description: Are there any existing solutions or workarounds? How does this feature improve upon them?
      placeholder: "e.g., Users manually select the CSS classes mapped to data fields to extract them"
    validations:
      required: false

  - type: markdown
    attributes:
      value: |
        ### 💡 Implementation Ideas

  - type: textarea
    id: proposed_approach
    attributes:
      label: Proposed approach
      description: Share any ideas you have for how this feature could be implemented. Point out any challenges you foresee and the success metrics for this feature.
      placeholder: "e.g., Implement a breadth-first traversal algorithm for the scraper"
    validations:
      required: false
.github/ISSUE_TEMPLATE/bug_report.yml (vendored, new file, +127)

name: Bug Report
description: Report a bug with Crawl4AI.
title: "[Bug]: "
labels: ["🐞 Bug", "🩺 Needs Triage"]
body:
  - type: input
    id: crawl4ai_version
    attributes:
      label: crawl4ai version
      description: Specify the version of crawl4ai you are using.
      placeholder: "e.g., 2.0.0"
    validations:
      required: true

  - type: textarea
    id: expected_behavior
    attributes:
      label: Expected Behavior
      description: Describe what you expected to happen.
      placeholder: "Provide a detailed explanation of the expected outcome."
    validations:
      required: true

  - type: textarea
    id: current_behavior
    attributes:
      label: Current Behavior
      description: Describe what is happening instead of the expected behavior.
      placeholder: "Describe the actual result or issue you encountered."
    validations:
      required: true

  - type: dropdown
    id: reproducible
    attributes:
      label: Is this reproducible?
      description: Indicate whether this bug can be reproduced consistently.
      options:
        - "Yes"
        - "No"
    validations:
      required: true

  - type: textarea
    id: inputs
    attributes:
      label: Inputs Causing the Bug
      description: Provide details about the inputs causing the issue.
      placeholder: |
        - URL(s):
        - Settings used:
        - Input data (if applicable):
      render: bash

  - type: textarea
    id: steps_to_reproduce
    attributes:
      label: Steps to Reproduce
      description: Provide step-by-step instructions to reproduce the issue.
      placeholder: |
        1. Go to...
        2. Click on...
        3. Observe the issue...
      render: bash

  - type: textarea
    id: code_snippets
    attributes:
      label: Code snippets
      description: Provide code snippets (if any). Add comments as necessary.
      placeholder: print("Hello world")
      render: python

  # Header Section with Title
  - type: markdown
    attributes:
      value: |
        ## Supporting Information
        Please provide the following details to help us understand and resolve your issue. This will assist us in reproducing and diagnosing the problem.

  - type: input
    id: os
    attributes:
      label: OS
      description: Please provide the operating system & distro where the issue occurs.
      placeholder: "e.g., Windows, macOS, Linux"
    validations:
      required: true

  - type: input
    id: python_version
    attributes:
      label: Python version
      description: Specify the Python version being used.
      placeholder: "e.g., 3.8.5"
    validations:
      required: true

  # Browser Field
  - type: input
    id: browser
    attributes:
      label: Browser
      description: Provide the name of the browser you are using.
      placeholder: "e.g., Chrome, Firefox, Safari"
    validations:
      required: false

  # Browser Version Field
  - type: input
    id: browser_version
    attributes:
      label: Browser version
      description: Provide the version of the browser you are using.
      placeholder: "e.g., 91.0.4472.124"
    validations:
      required: false

  # Error Logs Field (Text Area)
  - type: textarea
    id: error_logs
    attributes:
      label: Error logs & Screenshots (if applicable)
      description: If you encountered any errors, please provide the error logs. Attach any relevant screenshots to help us understand the issue.
      placeholder: "Paste error logs here and attach your screenshots"
    validations:
      required: false
.github/ISSUE_TEMPLATE/config.yml (vendored, new file, +8)

blank_issues_enabled: false
contact_links:
  - name: Feature Requests
    url: https://github.com/unclecode/crawl4ai/discussions/categories/feature-requests
    about: "Suggest new features or enhancements for Crawl4AI"
  - name: Forums - Q&A
    url: https://github.com/unclecode/crawl4ai/discussions/categories/forums-q-a
    about: "Ask questions or engage in general discussions about Crawl4AI"
.github/pull_request_template.md (vendored, new file, +19)

## Summary
Please include a summary of the change and/or which issues are fixed.

e.g. `Fixes #123` (tag GitHub issue numbers in this format so the issues are automatically linked to your PR)

## List of files changed and why
e.g. quickstart.py - to update the example as per the new changes

## How Has This Been Tested?
Please describe the tests that you ran to verify your changes.

## Checklist:

- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] I have added/updated unit tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
.github/workflows/main.yml (vendored, new file, +35)

name: Discord GitHub Notifications

on:
  issues:
    types: [opened]
  issue_comment:
    types: [created]
  pull_request:
    types: [opened]
  discussion:
    types: [created]

jobs:
  notify-discord:
    runs-on: ubuntu-latest
    steps:
      - name: Set webhook based on event type
        id: set-webhook
        run: |
          if [ "${{ github.event_name }}" == "discussion" ]; then
            echo "webhook=${{ secrets.DISCORD_DISCUSSIONS_WEBHOOK }}" >> $GITHUB_OUTPUT
          else
            echo "webhook=${{ secrets.DISCORD_WEBHOOK }}" >> $GITHUB_OUTPUT
          fi

      - name: Discord Notification
        uses: Ilshidur/action-discord@master
        env:
          DISCORD_WEBHOOK: ${{ steps.set-webhook.outputs.webhook }}
        with:
          args: |
            ${{ github.event_name == 'issues' && format('📣 New issue created: **{0}** by {1} - {2}', github.event.issue.title, github.event.issue.user.login, github.event.issue.html_url) ||
            github.event_name == 'issue_comment' && format('💬 New comment on issue **{0}** by {1} - {2}', github.event.issue.title, github.event.comment.user.login, github.event.comment.html_url) ||
            github.event_name == 'pull_request' && format('🔄 New PR opened: **{0}** by {1} - {2}', github.event.pull_request.title, github.event.pull_request.user.login, github.event.pull_request.html_url) ||
            format('💬 New discussion started: **{0}** by {1} - {2}', github.event.discussion.title, github.event.discussion.user.login, github.event.discussion.html_url) }}
.gitignore (vendored, 96 lines changed)

@@ -165,6 +165,8 @@ Crawl4AI.egg-info/
Crawl4AI.egg-info/*
crawler_data.db
.vscode/
.tests/
.test_pads/
test_pad.py
test_pad*.py
.data/

@@ -172,3 +174,97 @@ Crawl4AI.egg-info/
requirements0.txt
a.txt

*.sh
.idea
docs/examples/.chainlit/
docs/examples/.chainlit/*
.chainlit/config.toml
.chainlit/translations/en-US.json

local/
.files/

a.txt
.lambda_function.py
ec2*

update_changelog.sh

.DS_Store
docs/.DS_Store
tmp/
test_env/
**/.DS_Store
**/.DS_Store

todo.md
todo_executor.md
git_changes.py
git_changes.md
pypi_build.sh
git_issues.py
git_issues.md

.next/
.tests/
# .issues/
.docs/
.issues/
.gitboss/
todo_executor.md
protect-all-except-feature.sh
manage-collab.sh
publish.sh
combine.sh
combined_output.txt
.local
.scripts
tree.md
tree.md
.scripts
.local
.do
/plans
plans/

# Codeium
.codeiumignore
todo/

# Continue development files
.continue/
.continuerc.json
continue.lock
continue_core.log
contextProviders/
continue_workspace/
.continue-cache/
continue_config.json

# Continue temporary files
.continue-temp/
.continue-logs/
.continue-downloads/

# Continue VS Code specific
.vscode-continue/
.vscode-continue-cache/

.prompts/

.llm.env
.private/

CLAUDE_MONITOR.md
CLAUDE.md

tests/**/test_site
tests/**/reports
tests/**/benchmark_reports

docs/**/data
.codecat/

docs/apps/linkdin/debug*/
docs/apps/linkdin/samples/insights/*
CHANGELOG.md (1490 lines changed): file diff suppressed because it is too large.
CODE_OF_CONDUCT.md (new file, +131)

# Crawl4AI Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at unclecode@crawl4ai.com. All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC].

For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations].

[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
CONTRIBUTORS.md (new file, +55)

# Contributors to Crawl4AI

We would like to thank the following people for their contributions to Crawl4AI:

## Core Team

- [Unclecode](https://github.com/unclecode) - Project Creator and Main Developer
- [Nasrin](https://github.com/ntohidi) - Project Manager and Developer
- [Aravind Karnam](https://github.com/aravindkarnam) - Head of Community and Product

## Community Contributors

- [aadityakanjolia4](https://github.com/aadityakanjolia4) - Fix for `CustomHTML2Text` is not defined.
- [FractalMind](https://github.com/FractalMind) - Created the first official Docker Hub image and fixed Dockerfile errors
- [ketonkss4](https://github.com/ketonkss4) - Identified Selenium's new capabilities, helping reduce dependencies
- [jonymusky](https://github.com/jonymusky) - JavaScript execution documentation, and wait_for
- [datehoer](https://github.com/datehoer) - Added browser proxy support

## Pull Requests

- [dvschuyl](https://github.com/dvschuyl) - AsyncPlaywrightCrawlerStrategy page-evaluate context destroyed by navigation [#304](https://github.com/unclecode/crawl4ai/pull/304)
- [nelzomal](https://github.com/nelzomal) - Enhance development installation instructions [#286](https://github.com/unclecode/crawl4ai/pull/286)
- [HamzaFarhan](https://github.com/HamzaFarhan) - Handled the cases where markdown_with_citations, references_markdown, and filtered_html might not be defined [#293](https://github.com/unclecode/crawl4ai/pull/293)
- [NanmiCoder](https://github.com/NanmiCoder) - fix: crawler strategy exception handling and fixes [#271](https://github.com/unclecode/crawl4ai/pull/271)
- [paulokuong](https://github.com/paulokuong) - fix: RAWL4_AI_BASE_DIRECTORY should be Path object instead of string [#298](https://github.com/unclecode/crawl4ai/pull/298)

#### Feb-Alpha-1
- [sufianuddin](https://github.com/sufianuddin) - fix: [Documentation for JsonCssExtractionStrategy](https://github.com/unclecode/crawl4ai/issues/651)
- [tautikAg](https://github.com/tautikAg) - fix: [Markdown output has incorrect spacing](https://github.com/unclecode/crawl4ai/issues/599)
- [cardit1](https://github.com/cardit1) - fix: ['AsyncPlaywrightCrawlerStrategy' object has no attribute 'downloads_path'](https://github.com/unclecode/crawl4ai/issues/585)
- [dmurat](https://github.com/dmurat) - fix: [Incorrect rendering of inline code inside of links](https://github.com/unclecode/crawl4ai/issues/583)
- [Sparshsing](https://github.com/Sparshsing) - fix: [Relative URLs in the webpage not extracted properly](https://github.com/unclecode/crawl4ai/issues/570)

## Other Contributors

- [Gokhan](https://github.com/gkhngyk)
- [Shiv Kumar](https://github.com/shivkumar0757)
- [QIN2DIM](https://github.com/QIN2DIM)

#### Typo fixes
- [ssoydan](https://github.com/ssoydan)
- [Darshan](https://github.com/Darshan2104)
- [tuhinmallick](https://github.com/tuhinmallick)

## Acknowledgements

We also want to thank all the users who have reported bugs, suggested features, or helped in any other way to make Crawl4AI better.

---

If you've contributed to Crawl4AI and your name isn't on this list, please [open a pull request](https://github.com/unclecode/crawl4ai/pulls) with your name, link, and contribution, and we'll review it promptly.

Thank you all for your contributions!
Dockerfile (218 lines changed)

@@ -1,40 +1,200 @@
# Use an official Python runtime as a parent image
FROM python:3.10-slim
FROM python:3.12-slim-bookworm AS build

# Set the working directory in the container
WORKDIR /usr/src/app
# C4ai version
ARG C4AI_VER=0.6.0
ENV C4AI_VERSION=$C4AI_VER
LABEL c4ai.version=$C4AI_VER

# Copy the current directory contents into the container at /usr/src/app
COPY . .
# Set build arguments
ARG APP_HOME=/app
ARG GITHUB_REPO=https://github.com/unclecode/crawl4ai.git
ARG GITHUB_BRANCH=main
ARG USE_LOCAL=true

# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt
ENV PYTHONFAULTHANDLER=1 \
    PYTHONHASHSEED=random \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    PIP_DEFAULT_TIMEOUT=100 \
    DEBIAN_FRONTEND=noninteractive \
    REDIS_HOST=localhost \
    REDIS_PORT=6379

ARG PYTHON_VERSION=3.12
ARG INSTALL_TYPE=default
ARG ENABLE_GPU=false
ARG TARGETARCH

LABEL maintainer="unclecode"
LABEL description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & scraper"
LABEL version="1.0"

# Install dependencies for Chrome and ChromeDriver
RUN apt-get update && apt-get install -y --no-install-recommends \
    wget \
    xvfb \
    unzip \
    build-essential \
    curl \
    gnupg2 \
    ca-certificates \
    apt-transport-https \
    software-properties-common \
    && wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
    && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \
    && apt-get update \
    && apt-get install -y google-chrome-stable \
    wget \
    gnupg \
    git \
    cmake \
    pkg-config \
    python3-dev \
    libjpeg-dev \
    redis-server \
    supervisor \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Set display port and dbus env to avoid hanging
ENV DISPLAY=:99
ENV DBUS_SESSION_BUS_ADDRESS=/dev/null
RUN apt-get update && apt-get install -y --no-install-recommends \
    libglib2.0-0 \
    libnss3 \
    libnspr4 \
    libatk1.0-0 \
    libatk-bridge2.0-0 \
    libcups2 \
    libdrm2 \
    libdbus-1-3 \
    libxcb1 \
    libxkbcommon0 \
    libx11-6 \
    libxcomposite1 \
    libxdamage1 \
    libxext6 \
    libxfixes3 \
    libxrandr2 \
    libgbm1 \
    libpango-1.0-0 \
    libcairo2 \
    libasound2 \
    libatspi2.0-0 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Make port 80 available to the world outside this container
EXPOSE 80
RUN apt-get update && apt-get dist-upgrade -y \
    && rm -rf /var/lib/apt/lists/*

# Define environment variable
ENV PYTHONUNBUFFERED 1
RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETARCH" = "amd64" ] ; then \
    apt-get update && apt-get install -y --no-install-recommends \
    nvidia-cuda-toolkit \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* ; \
else \
    echo "Skipping NVIDIA CUDA Toolkit installation (unsupported platform or GPU disabled)"; \
fi

# Run uvicorn
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "80", "--workers", "4"]
RUN if [ "$TARGETARCH" = "arm64" ]; then \
    echo "🦾 Installing ARM-specific optimizations"; \
    apt-get update && apt-get install -y --no-install-recommends \
    libopenblas-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*; \
elif [ "$TARGETARCH" = "amd64" ]; then \
    echo "🖥️ Installing AMD64-specific optimizations"; \
    apt-get update && apt-get install -y --no-install-recommends \
    libomp-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*; \
else \
    echo "Skipping platform-specific optimizations (unsupported platform)"; \
fi

# Create a non-root user and group
RUN groupadd -r appuser && useradd --no-log-init -r -g appuser appuser

# Create and set permissions for appuser home directory
RUN mkdir -p /home/appuser && chown -R appuser:appuser /home/appuser

WORKDIR ${APP_HOME}

RUN echo '#!/bin/bash\n\
if [ "$USE_LOCAL" = "true" ]; then\n\
    echo "📦 Installing from local source..."\n\
    pip install --no-cache-dir /tmp/project/\n\
else\n\
    echo "🌐 Installing from GitHub..."\n\
    for i in {1..3}; do \n\
        git clone --branch ${GITHUB_BRANCH} ${GITHUB_REPO} /tmp/crawl4ai && break || \n\
        { echo "Attempt $i/3 failed! Taking a short break... ☕"; sleep 5; }; \n\
    done\n\
    pip install --no-cache-dir /tmp/crawl4ai\n\
fi' > /tmp/install.sh && chmod +x /tmp/install.sh

COPY . /tmp/project/

# Copy supervisor config first (might need root later, but okay for now)
COPY deploy/docker/supervisord.conf .

COPY deploy/docker/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
    pip install --no-cache-dir \
    torch \
    torchvision \
    torchaudio \
    scikit-learn \
    nltk \
    transformers \
    tokenizers && \
    python -m nltk.downloader punkt stopwords ; \
fi

RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
    pip install "/tmp/project/[all]" && \
    python -m crawl4ai.model_loader ; \
elif [ "$INSTALL_TYPE" = "torch" ] ; then \
    pip install "/tmp/project/[torch]" ; \
elif [ "$INSTALL_TYPE" = "transformer" ] ; then \
    pip install "/tmp/project/[transformer]" && \
    python -m crawl4ai.model_loader ; \
else \
    pip install "/tmp/project" ; \
fi

RUN pip install --no-cache-dir --upgrade pip && \
    /tmp/install.sh && \
    python -c "import crawl4ai; print('✅ crawl4ai is ready to rock!')" && \
    python -c "from playwright.sync_api import sync_playwright; print('✅ Playwright is feeling dramatic!')"

RUN crawl4ai-setup

RUN playwright install --with-deps

RUN mkdir -p /home/appuser/.cache/ms-playwright \
    && cp -r /root/.cache/ms-playwright/chromium-* /home/appuser/.cache/ms-playwright/ \
    && chown -R appuser:appuser /home/appuser/.cache/ms-playwright

RUN crawl4ai-doctor

# Copy application code
COPY deploy/docker/* ${APP_HOME}/

# Copy the playground + any future static assets
COPY deploy/docker/static ${APP_HOME}/static

# Change ownership of the application directory to the non-root user
RUN chown -R appuser:appuser ${APP_HOME}

# Give permissions to redis persistence dirs if used
RUN mkdir -p /var/lib/redis /var/log/redis && chown -R appuser:appuser /var/lib/redis /var/log/redis

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD bash -c '\
    MEM=$(free -m | awk "/^Mem:/{print \$2}"); \
    if [ $MEM -lt 2048 ]; then \
        echo "⚠️ Warning: Less than 2GB RAM available! Your container might need a memory boost! 🚀"; \
        exit 1; \
    fi && \
    redis-cli ping > /dev/null && \
    curl -f http://localhost:11235/health || exit 1'

EXPOSE 6379
# Switch to the non-root user before starting the application
USER appuser

# Set environment variables to production
ENV PYTHON_ENV=production

# Start the application using supervisord
CMD ["supervisord", "-c", "supervisord.conf"]
JOURNAL.md (new file, +339)

# Development Journal

This journal tracks significant feature additions, bug fixes, and architectural decisions in the crawl4ai project. It serves as both documentation and a historical record of the project's evolution.

## [2025-04-17] Added Content Source Selection for Markdown Generation

**Feature:** Configurable content source for markdown generation

**Changes Made:**
1. Added a `content_source: str = "cleaned_html"` parameter to the `MarkdownGenerationStrategy` class
2. Updated `DefaultMarkdownGenerator` to accept and pass the content source parameter
3. Renamed the `cleaned_html` parameter to `input_html` in the `generate_markdown` method
4. Modified `AsyncWebCrawler.aprocess_html` to select the appropriate HTML source based on the generator's config
5. Added a `preprocess_html_for_schema` import in `async_webcrawler.py`

**Implementation Details:**
- Added a new `content_source` parameter to specify which HTML input to use for markdown generation
- Options include: "cleaned_html" (default), "raw_html", and "fit_html"
- Used a dictionary dispatch pattern in `aprocess_html` to select the appropriate HTML source (sketched below)
- Added proper error handling with a fallback to cleaned_html if content source selection fails
- Ensured backward compatibility by defaulting to the "cleaned_html" option
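A minimal sketch of that dictionary-dispatch selection; the function name and signature are illustrative, not the exact internals of `aprocess_html`:

```python
def select_input_html(content_source: str, raw_html: str, cleaned_html: str, fit_html: str) -> str:
    """Pick the HTML that feeds markdown generation, falling back to cleaned_html."""
    sources = {
        "cleaned_html": cleaned_html,  # post-scraping-strategy HTML (default)
        "raw_html": raw_html,          # original HTML straight from the page
        "fit_html": fit_html,          # preprocessed, schema-optimized HTML
    }
    return sources.get(content_source, cleaned_html)
```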
**Files Modified:**
- `crawl4ai/markdown_generation_strategy.py`: Added the content_source parameter and updated the method signature
- `crawl4ai/async_webcrawler.py`: Added HTML source selection logic and updated imports

**Examples:**
- Created `docs/examples/content_source_example.py` demonstrating how to use the new parameter

**Challenges:**
- Maintaining backward compatibility while reorganizing the parameter flow
- Ensuring proper error handling for all content source options
- Making the change with minimal code modifications

**Why This Feature:**
The content source selection feature allows users to choose which HTML content to use as input for markdown generation:
1. "cleaned_html" - Uses the post-processed HTML after the scraping strategy (original behavior)
2. "raw_html" - Uses the original raw HTML directly from the web page
3. "fit_html" - Uses the preprocessed HTML optimized for schema extraction

This feature provides greater flexibility in how users generate markdown, enabling them to:
- Capture more detailed content from the original HTML when needed
- Use schema-optimized HTML when working with structured data
- Choose the approach that best suits their specific use case
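A minimal usage sketch; the classes are the ones named in this entry, but treat the exact import paths as assumptions:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

async def main():
    # Generate markdown from the raw page HTML instead of the cleaned default.
    config = CrawlerRunConfig(
        markdown_generator=DefaultMarkdownGenerator(content_source="raw_html")
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        print(result.markdown[:300])

asyncio.run(main())
```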
## [2025-04-17] Implemented High Volume Stress Testing Solution for SDK

**Feature:** Comprehensive stress testing framework using `arun_many` and the dispatcher system to evaluate performance and concurrency handling, and to identify potential issues under high-volume crawling scenarios.

**Changes Made:**
1. Created a dedicated stress testing framework in the `benchmarking/` (or similar) directory.
2. Implemented local test site generation (`SiteGenerator`) with configurable heavy HTML pages.
3. Added basic memory usage tracking (`SimpleMemoryTracker`) using platform-specific commands, avoiding a `psutil` dependency for this specific test (a sketch of the idea follows this list).
4. Utilized `CrawlerMonitor` from `crawl4ai` for a rich terminal UI and real-time monitoring of test progress and dispatcher activity.
5. Implemented detailed result summary saving (JSON) and memory sample logging (CSV).
6. Developed `run_benchmark.py` to orchestrate tests with predefined configurations.
7. Created `run_all.sh` as a simple wrapper for `run_benchmark.py`.
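A hedged sketch of a psutil-free sampler in the spirit of `SimpleMemoryTracker`; the class name comes from this entry, while the `ps`-based implementation below is an assumption and is Unix-only:

```python
import csv
import os
import subprocess
import time

def rss_mb(pid: int = os.getpid()) -> float:
    """Resident set size of a process in MB, read via the platform `ps` command."""
    out = subprocess.check_output(["ps", "-o", "rss=", "-p", str(pid)])
    return int(out.strip()) / 1024.0  # `ps` reports RSS in KB on Linux/macOS

def sample_memory(csv_path: str, seconds: int = 10, interval: float = 1.0) -> None:
    """Append (timestamp, rss_mb) rows to a CSV for later trend analysis."""
    with open(csv_path, "a", newline="") as f:
        writer = csv.writer(f)
        end = time.time() + seconds
        while time.time() < end:
            writer.writerow([time.time(), rss_mb()])
            time.sleep(interval)
```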
**Implementation Details:**
- Generates a local test site with configurable pages containing heavy text and image content.
- Uses Python's built-in `http.server` for local serving, minimizing network variance.
- Leverages `crawl4ai`'s `arun_many` method for processing URLs (see the sketch after this list).
- Utilizes `MemoryAdaptiveDispatcher` to manage concurrency via the `max_sessions` parameter (note: the memory-adaptation features require `psutil`, which is not used by `SimpleMemoryTracker`).
- Tracks memory usage via `SimpleMemoryTracker`, recording samples throughout test execution to a CSV file.
- Uses `CrawlerMonitor` (which uses the `rich` library) for clear terminal visualization and progress reporting directly from the dispatcher.
- Stores detailed final metrics in a JSON summary file.
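The core pattern under test, sketched with assumed import paths and constructor arguments; `MemoryAdaptiveDispatcher`, `CrawlerMonitor`, and `max_session_permit` (the knob behind `--max-sessions`) are the names used in this entry:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai import CrawlerMonitor, MemoryAdaptiveDispatcher

async def crawl_batch(urls: list[str], max_sessions: int = 16):
    dispatcher = MemoryAdaptiveDispatcher(
        max_session_permit=max_sessions,  # caps concurrent crawl sessions
        monitor=CrawlerMonitor(),         # rich-based real-time terminal monitoring
    )
    async with AsyncWebCrawler() as crawler:
        # Batch mode: results come back once the whole set has been processed.
        return await crawler.arun_many(
            urls, config=CrawlerRunConfig(stream=False), dispatcher=dispatcher
        )

# Example: crawl 100 pages of the locally generated test site (URL pattern assumed).
results = asyncio.run(crawl_batch([f"http://localhost:8000/page_{i}.html" for i in range(100)]))
```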
**Files Created/Updated:**
- `stress_test_sdk.py`: Main stress testing implementation using `arun_many`.
- `benchmark_report.py`: (Assumed) Report generator for comparing test results.
- `run_benchmark.py`: Test runner script with predefined configurations.
- `run_all.sh`: Simple bash script wrapper for `run_benchmark.py`.
- `USAGE.md`: Comprehensive documentation on usage and interpretation (updated).

**Testing Approach:**
- Creates a controlled, reproducible test environment with a local HTTP server.
- Processes URLs using `arun_many`, allowing the dispatcher to manage concurrency up to `max_sessions`.
- Optionally logs per-batch summaries (when not in streaming mode) after processing chunks.
- Supports different test sizes via `run_benchmark.py` configurations.
- Records memory samples via platform commands for basic trend analysis.
- Includes cleanup functionality for the test environment.

**Challenges:**
- Ensuring proper cleanup of HTTP server processes.
- Getting reliable memory tracking across platforms without adding heavy dependencies (`psutil`) to this specific test script.
- Designing `run_benchmark.py` to correctly pass arguments to `stress_test_sdk.py`.

**Why This Feature:**
The high-volume stress testing solution addresses critical needs for ensuring the reliability of Crawl4AI's `arun_many`:
1. Provides a reproducible way to evaluate performance under concurrent load.
2. Allows testing the dispatcher's concurrency control (`max_session_permit`) and queue management.
3. Enables performance tuning by observing throughput (URLs/sec) under different `max_sessions` settings.
4. Creates a controlled environment for testing `arun_many` behavior.
5. Supports continuous integration by providing deterministic test conditions for `arun_many`.

**Design Decisions:**
- Chose local site generation for reproducibility and isolation from network issues.
- Utilized the built-in `CrawlerMonitor` for real-time feedback, leveraging its `rich` integration.
- Implemented optional per-batch logging in `stress_test_sdk.py` (when not streaming) to provide chunk-level summaries alongside the continuous monitor.
- Adopted `arun_many` with a `MemoryAdaptiveDispatcher` as the core mechanism for parallel execution, reflecting the intended SDK usage.
- Created `run_benchmark.py` to simplify running standard test configurations.
- Used `SimpleMemoryTracker` to provide basic memory insights without requiring `psutil` for this particular test runner.

**Future Enhancements to Consider:**
- Create a separate test variant that *does* use `psutil` to specifically stress the memory-adaptive features of the dispatcher.
- Add support for generated JavaScript content.
- Add support for Docker-based testing with explicit memory limits.
- Enhance `benchmark_report.py` to provide more sophisticated analysis of performance and memory trends from the generated JSON/CSV files.

---
## [2025-04-17] Refined Stress Testing System Parameters and Execution
|
||||
|
||||
**Changes Made:**
|
||||
1. Corrected `run_benchmark.py` and `stress_test_sdk.py` to use `--max-sessions` instead of the incorrect `--workers` parameter, accurately reflecting dispatcher configuration.
|
||||
2. Updated `run_benchmark.py` argument handling to correctly pass all relevant custom parameters (including `--stream`, `--monitor-mode`, etc.) to `stress_test_sdk.py`.
|
||||
3. (Assuming changes in `benchmark_report.py`) Applied dark theme to benchmark reports for better readability.
|
||||
4. (Assuming changes in `benchmark_report.py`) Improved visualization code to eliminate matplotlib warnings.
|
||||
5. Updated `run_benchmark.py` to provide clickable `file://` links to generated reports in the terminal output.
|
||||
6. Updated `USAGE.md` with comprehensive parameter descriptions reflecting the final script arguments.
|
||||
7. Updated `run_all.sh` wrapper to correctly invoke `run_benchmark.py` with flexible arguments.
|
||||
|
||||
**Details of Changes:**
|
||||
|
||||
1. **Parameter Correction (`--max-sessions`)**:
|
||||
* Identified the fundamental misunderstanding where `--workers` was used incorrectly.
|
||||
* Refactored `stress_test_sdk.py` to accept `--max-sessions` and configure the `MemoryAdaptiveDispatcher`'s `max_session_permit` accordingly.
|
||||
* Updated `run_benchmark.py` argument parsing and command construction to use `--max-sessions`.
|
||||
* Updated `TEST_CONFIGS` in `run_benchmark.py` to use `max_sessions`.
|
||||
|
||||
2. **Argument Handling (`run_benchmark.py`)**:
|
||||
* Improved logic to collect all command-line arguments provided to `run_benchmark.py`.
|
||||
* Ensured all relevant arguments (like `--stream`, `--monitor-mode`, `--port`, `--use-rate-limiter`, etc.) are correctly forwarded when calling `stress_test_sdk.py` as a subprocess.
|
||||
|
||||
3. **Dark Theme & Visualization Fixes (assumed in `benchmark_report.py`)**:
   * Describes changes assumed to have been made in the separate reporting script.

4. **Clickable Links (`run_benchmark.py`)**:
   * Added logic to find the latest HTML report and PNG chart in the `benchmark_reports` directory after `benchmark_report.py` runs.
   * Used `pathlib` to generate correct `file://` URLs for terminal output, as in the sketch below.
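A sketch of the `file://` link generation, assuming the `benchmark_reports` directory name used above; `Path.as_uri()` is the standard-library way to build a well-formed `file://` URL:

```python
from pathlib import Path

def latest_report_links(report_dir: str = "benchmark_reports") -> list:
    """Return file:// URLs for the newest HTML report and PNG chart, if any."""
    links = []
    for pattern in ("*.html", "*.png"):
        candidates = sorted(Path(report_dir).glob(pattern),
                            key=lambda p: p.stat().st_mtime)
        if candidates:
            # resolve() makes the path absolute, which file:// URLs require
            links.append(candidates[-1].resolve().as_uri())
    return links

for url in latest_report_links():
    print(f"Report: {url}")  # most terminals render this as a clickable link
```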
5. **Documentation Improvements (`USAGE.md`)**:
   * Rewrote sections to explain `arun_many`, dispatchers, and `--max-sessions`.
   * Updated parameter tables for all scripts (`stress_test_sdk.py`, `run_benchmark.py`).
   * Clarified the difference between batch and streaming modes and their effect on logging.
   * Updated examples to use correct arguments.
**Files Modified:**

- `stress_test_sdk.py`: Changed `--workers` to `--max-sessions`, added new arguments, switched to `arun_many`.
- `run_benchmark.py`: Changed argument handling, updated configs, and now calls `stress_test_sdk.py`.
- `run_all.sh`: Updated to call `run_benchmark.py` correctly.
- `USAGE.md`: Updated documentation extensively.
- `benchmark_report.py`: Assumed modifications for the dark theme and visualization fixes.
**Testing:**

- Verified that `--max-sessions` correctly limits concurrency via the `CrawlerMonitor` output.
- Confirmed that custom arguments passed to `run_benchmark.py` are forwarded to `stress_test_sdk.py`.
- Validated clickable links work in supporting terminals.
- Ensured documentation matches the final script parameters and behavior.
**Why These Changes:**

These refinements correct the fundamental approach of the stress test to align with `crawl4ai`'s actual architecture and intended usage:

1. Ensures the test evaluates the correct components (`arun_many`, `MemoryAdaptiveDispatcher`).
2. Makes test configurations more accurate and flexible.
3. Improves the usability of the testing framework through better argument handling and documentation.
**Future Enhancements to Consider:**

- Add support for generated JavaScript content to test JS rendering performance.
- Implement more sophisticated memory analysis, like generational garbage-collection tracking.
- Add support for Docker-based testing with memory limits to force OOM conditions.
- Create visualization tools for analyzing memory usage patterns across test runs.
- Add benchmark comparisons between different crawler versions or configurations.
## [2025-04-17] Fixed Issues in Stress Testing System

**Changes Made:**

1. Fixed custom parameter handling in `run_benchmark.py`.
2. Applied a dark theme to benchmark reports for better readability.
3. Improved visualization code to eliminate matplotlib warnings.
4. Added clickable links to generated reports in terminal output.
5. Enhanced documentation with comprehensive parameter descriptions.
**Details of Changes:**

1. **Custom Parameter Handling Fix**
   - Identified a bug where the custom URL count was being ignored in `run_benchmark.py`.
   - Rewrote argument handling to use a custom args dictionary.
   - Properly passed parameters to the `test_simple_stress.py` command.
   - Added better UI indication of the custom parameters in use.

2. **Dark Theme Implementation**
   - Added a complete dark theme to the HTML benchmark reports.
   - Applied dark styling to all visualization components.
   - Used a Nord-inspired color palette for charts and graphs.
   - Improved contrast and readability for data visualization.
   - Updated text colors and backgrounds for better eye comfort.
3. **Matplotlib Warning Fixes**
   - Resolved warnings related to improper use of `set_xticklabels()` (typically emitted when labels are set without first fixing the tick positions).
   - Implemented correct x-axis positioning for bar charts.
   - Ensured proper alignment of bar labels and data points.
   - Updated plotting code to use modern matplotlib practices, as sketched below.
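A sketch of the modern pattern, assuming a simple bar chart; setting the tick locations together with the labels (supported since matplotlib 3.5) avoids the `FixedFormatter`/`FixedLocator` warning that a bare `set_xticklabels()` call triggers:

```python
import matplotlib
matplotlib.use("Agg")  # headless backend, suitable for report generation
import matplotlib.pyplot as plt
import numpy as np

labels = ["small", "medium", "large"]   # illustrative test configs
pages_per_sec = [12.4, 9.1, 6.7]        # illustrative metrics

fig, ax = plt.subplots()
x = np.arange(len(labels))              # explicit bar positions
ax.bar(x, pages_per_sec)

# Fix the tick locations and attach labels in one call;
# calling ax.set_xticklabels() alone emits a FixedFormatter warning.
ax.set_xticks(x, labels=labels)

ax.set_ylabel("pages / sec")
fig.savefig("throughput.png")
```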
4. **Documentation Improvements**
   - Created a comprehensive `USAGE.md` with detailed instructions.
   - Added parameter documentation for all scripts.
   - Included examples for all common use cases.
   - Provided detailed explanations for interpreting results.
   - Added a troubleshooting guide for common issues.

**Files Modified:**

- `tests/memory/run_benchmark.py`: Fixed custom parameter handling.
- `tests/memory/benchmark_report.py`: Added dark theme and fixed visualization warnings.
- `tests/memory/run_all.sh`: Added clickable links to reports.
- `tests/memory/USAGE.md`: Created comprehensive documentation.
**Testing:**

- Verified that custom URL counts are now correctly used.
- Confirmed the dark theme is properly applied to all report elements.
- Checked that matplotlib warnings no longer appear.
- Validated that clickable links to reports work in terminals that support them.
**Why These Changes:**

These improvements address several usability issues with the stress testing system:

1. Better parameter handling ensures test configurations work as expected.
2. The dark theme reduces eye strain during extended test review sessions.
3. Fixing visualization warnings improves code quality and output clarity.
4. Enhanced documentation makes the system more accessible for future use.
**Future Enhancements:**

- Add additional visualization options for different types of analysis.
- Implement a theme toggle to support both light and dark preferences.
- Add export options for embedding reports in other documentation.
- Create dedicated CI/CD integration templates for automated testing.
## [2025-04-09] Added MHTML Capture Feature

**Feature:** MHTML snapshot capture of crawled pages

**Changes Made:**

1. Added a `capture_mhtml: bool = False` parameter to the `CrawlerRunConfig` class.
2. Added an `mhtml: Optional[str] = None` field to the `CrawlResult` model.
3. Added an `mhtml_data: Optional[str] = None` field to the `AsyncCrawlResponse` class.
4. Implemented a `capture_mhtml()` method in the `AsyncPlaywrightCrawlerStrategy` class to capture MHTML via CDP.
5. Modified the crawler to capture MHTML when enabled and pass it through to the result (a usage sketch follows).
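A hedged usage sketch based on the fields above; the URL is a placeholder, and the surrounding calls follow the `AsyncWebCrawler` API used elsewhere in this log:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    config = CrawlerRunConfig(capture_mhtml=True)  # off by default

    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", config=config)

    if result.mhtml:  # None unless capture_mhtml was enabled
        with open("snapshot.mhtml", "w", encoding="utf-8") as f:
            f.write(result.mhtml)

asyncio.run(main())
```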
**Implementation Details:**

- MHTML capture uses the Chrome DevTools Protocol (CDP) via Playwright's CDP session API.
- The implementation waits for the page to fully load before capturing the MHTML content.
- Enhanced waiting for JavaScript content with `requestAnimationFrame` for better JS content capture.
- All browser resources are properly cleaned up after capture.
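A minimal sketch of the CDP call itself, assuming async Playwright with a Chromium browser; `Page.captureSnapshot` is the DevTools method that serializes the page plus its sub-resources into a single MHTML string:

```python
from playwright.async_api import async_playwright

async def capture_mhtml(url: str) -> str:
    async with async_playwright() as p:
        browser = await p.chromium.launch()
        page = await browser.new_page()
        await page.goto(url, wait_until="networkidle")

        # A CDP session is scoped to one page; captureSnapshot returns the
        # whole page, including CSS and images, as one MHTML document.
        cdp = await page.context.new_cdp_session(page)
        snapshot = await cdp.send("Page.captureSnapshot", {"format": "mhtml"})

        await cdp.detach()   # release CDP resources before closing
        await browser.close()
        return snapshot["data"]
```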
**Files Modified:**

- `crawl4ai/models.py`: Added the `mhtml` field to `CrawlResult`.
- `crawl4ai/async_configs.py`: Added the `capture_mhtml` parameter to `CrawlerRunConfig`.
- `crawl4ai/async_crawler_strategy.py`: Implemented the MHTML capture logic.
- `crawl4ai/async_webcrawler.py`: Added the mapping from `AsyncCrawlResponse.mhtml_data` to `CrawlResult.mhtml`.
**Testing:**

- Created comprehensive tests in `tests/20241401/test_mhtml.py` covering:
  - Capturing MHTML when enabled
  - Ensuring `mhtml` is `None` when disabled explicitly
  - Ensuring `mhtml` is `None` by default
  - Capturing MHTML on JavaScript-enabled pages

**Challenges:**

- Had to improve page-load detection to ensure JavaScript content was fully rendered.
- Tests needed to run independently because of Playwright browser-instance management.
- Adjusted the tests' expected content to match actual MHTML output.
**Why This Feature:**

MHTML capture lets users save complete web pages, including all resources (CSS, images, etc.), in a single file. This is valuable for:

1. Offline viewing of captured pages
2. Creating permanent snapshots of web content for archival
3. Ensuring consistent content for later analysis, even if the original site changes

**Future Enhancements to Consider:**

- Add an option to save MHTML directly to a file.
- Support filtering which resources get included in the MHTML.
- Add support for specifying MHTML capture options.
## [2025-04-10] Added Network Request and Console Message Capturing

**Feature:** Comprehensive capturing of network requests/responses and browser console messages during crawling

**Changes Made:**

1. Added `capture_network_requests: bool = False` and `capture_console_messages: bool = False` parameters to the `CrawlerRunConfig` class.
2. Added `network_requests: Optional[List[Dict[str, Any]]] = None` and `console_messages: Optional[List[Dict[str, Any]]] = None` fields to both the `AsyncCrawlResponse` and `CrawlResult` models.
3. Implemented event listeners in `AsyncPlaywrightCrawlerStrategy._crawl_web()` to capture browser network events and console messages.
4. Added proper event-listener cleanup in the `finally` block to prevent resource leaks.
5. Modified the crawler flow to pass the captured data from `AsyncCrawlResponse` to `CrawlResult` (a usage sketch follows).
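A hedged usage sketch of the new flags; the URL is a placeholder, and the result fields are those listed above:

```python
import asyncio

from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    config = CrawlerRunConfig(
        capture_network_requests=True,
        capture_console_messages=True,
    )

    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", config=config)

    # Both fields are lists of dicts carrying event metadata and timestamps.
    print(f"network events: {len(result.network_requests or [])}")
    for msg in (result.console_messages or [])[:5]:
        print(msg.get("type"), msg.get("text"))

asyncio.run(main())
```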
**Implementation Details:**

- Network capture uses Playwright event listeners (`request`, `response`, and `requestfailed`) to record all network activity.
- Console capture uses Playwright event listeners (`console` and `pageerror`) to record console messages and errors.
- Each network event includes metadata such as URL, headers, status, and timing information.
- Each console message includes its type, text content, and source location when available.
- All captured events include timestamps for chronological analysis.
- Error handling ensures that even failed capture attempts won't crash the main crawling process; the listener pattern is sketched below.
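A minimal sketch of the listener pattern, assuming a recent Playwright for Python (where `ConsoleMessage.type` and `.text` are properties); the handlers are synchronous callbacks that only append to lists, and they are detached in `finally` to avoid leaks:

```python
import time
from playwright.async_api import async_playwright

async def crawl_with_capture(url: str):
    network_events, console_messages = [], []

    def on_request(request):
        network_events.append({
            "event": "request", "url": request.url,
            "method": request.method, "timestamp": time.time(),
        })

    def on_console(message):
        console_messages.append({
            "type": message.type, "text": message.text,
            "timestamp": time.time(),
        })

    async with async_playwright() as p:
        browser = await p.chromium.launch()
        page = await browser.new_page()
        page.on("request", on_request)
        page.on("console", on_console)
        try:
            await page.goto(url)
        finally:
            # Detach listeners before teardown to prevent memory leaks.
            page.remove_listener("request", on_request)
            page.remove_listener("console", on_console)
            await browser.close()

    return network_events, console_messages
```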
**Files Modified:**

- `crawl4ai/models.py`: Added the new fields to `AsyncCrawlResponse` and `CrawlResult`.
- `crawl4ai/async_configs.py`: Added the new configuration parameters to `CrawlerRunConfig`.
- `crawl4ai/async_crawler_strategy.py`: Implemented the capture logic using event listeners.
- `crawl4ai/async_webcrawler.py`: Added the data transfer from `AsyncCrawlResponse` to `CrawlResult`.

**Documentation:**

- Created detailed documentation in `docs/md_v2/advanced/network-console-capture.md`.
- Added the feature to the site navigation in `mkdocs.yml`.
- Updated the `CrawlResult` documentation in `docs/md_v2/api/crawl-result.md`.
- Created a comprehensive example in `docs/examples/network_console_capture_example.py`.
**Testing:**

- Created `tests/general/test_network_console_capture.py` with tests for:
  - Verifying capture is disabled by default
  - Testing network request capturing
  - Testing console message capturing
  - Ensuring both capture types can be enabled simultaneously
  - Checking that the correct content is captured in the expected formats

**Challenges:**

- The initial implementation had synchronous/asynchronous mismatches in the event handlers.
- Needed to fix confusion between property access and method calls in the handlers.
- Required careful cleanup of event listeners to prevent memory leaks.
**Why This Feature:**

The network and console capture feature provides deep visibility into web page activity, enabling:

1. Debugging complex web applications by seeing all network requests and errors
2. Security analysis to detect unexpected third-party requests and data flows
3. Performance profiling to identify slow-loading resources
4. API discovery in single-page applications
5. Comprehensive analysis of web application behavior

**Future Enhancements to Consider:**

- An option to filter captured events by type, domain, or content
- Support for capturing response bodies (with size limits)
- Aggregate statistics calculation for performance metrics
- Integration with visualization tools for network waterfall analysis
- Exporting captures in HAR format for use with external tools
---

**LICENSE** (20 changed lines)

@@ -48,4 +48,22 @@ You may add Your own copyright statement to Your modifications and may provide a
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
---
Attribution Requirement

All distributions, publications, or public uses of this software, or derivative works based on this software, must include the following attribution:

"This product includes software developed by UncleCode (https://x.com/unclecode) as part of the Crawl4AI project (https://github.com/unclecode/crawl4ai)."

This attribution must be displayed in a prominent and easily accessible location, such as:

- For software distributions: In a NOTICE file, README file, or equivalent documentation.
- For publications (research papers, articles, blog posts): In the acknowledgments section or a footnote.
- For websites/web applications: In an "About" or "Credits" section.
- For command-line tools: In the help/usage output.

This requirement ensures proper credit is given for the use of Crawl4AI and helps promote the project.
---
**MANIFEST.in** (new file, 2 lines)

include requirements.txt
recursive-include crawl4ai/js_snippet *.js
**MISSION.md** (new file, 46 lines)

# Mission

|
||||
|
||||
### 1. The Data Capitalization Opportunity
|
||||
|
||||
We live in an unprecedented era of digital wealth creation. Every day, individuals and enterprises generate massive amounts of valuable digital footprints across various platforms, social media channels, messenger apps, and cloud services. While people can interact with their data within these platforms, there's an immense untapped opportunity to transform this data into true capital assets. Just as physical property became a foundational element of wealth creation, personal and enterprise data has the potential to become a new form of capital on balance sheets.
|
||||
|
||||
For individuals, this represents an opportunity to transform their digital activities into valuable assets. For enterprises, their internal communications, team discussions, and collaborative documents contain rich insights that could be structured and valued as intellectual capital. This wealth of information represents an unprecedented opportunity for value creation in the digital age.
|
||||
|
||||
### 2. The Potential of Authentic Data
|
||||
|
||||
While synthetic data has played a crucial role in AI development, there's an enormous untapped potential in the authentic data generated by individuals and organizations. Every message, document, and interaction contains unique insights and patterns that could enhance AI development. The challenge isn't a lack of data - it's that most authentic human-generated data remains inaccessible for productive use.
|
||||
|
||||
By enabling willing participation in data sharing, we can unlock this vast reservoir of authentic human knowledge. This represents an opportunity to enhance AI development with diverse, real-world data that reflects the full spectrum of human experience and knowledge.
|
||||
|
||||
## Our Pathway to Data Democracy
|
||||
|
||||
### 1. Open-Source Foundation
|
||||
|
||||
Our first step is creating an open-source data extraction engine that empowers developers and innovators to build tools for data structuring and organization. This foundation ensures transparency, security, and community-driven development. By making these tools openly available, we enable the technical infrastructure needed for true data ownership and capitalization.
|
||||
|
||||
### 2. Data Capitalization Platform
|
||||
|
||||
Building on this open-source foundation, we're developing a platform that helps individuals and enterprises transform their digital footprints into structured, valuable assets. This platform will provide the tools and frameworks needed to organize, understand, and value personal and organizational data as true capital assets.
|
||||
|
||||
### 3. Creating a Data Marketplace
|
||||
|
||||
The final piece is establishing a marketplace where individuals and organizations can willingly share their data assets. This creates opportunities for:
|
||||
- Individuals to earn equity, revenue, or other forms of value from their data
|
||||
- Enterprises to access diverse, high-quality data for AI development
|
||||
- Researchers to work with authentic human-generated data
|
||||
- Startups to build innovative solutions using real-world data
|
||||
|
||||
## Economic Vision: A Shared Data Economy
|
||||
|
||||
We envision a future where data becomes a fundamental asset class in a thriving shared economy. This transformation will democratize AI development by enabling willing participation in data sharing, ensuring that the benefits of AI advancement flow back to data creators. Just as property rights revolutionized economic systems, establishing data as a capital asset will create new opportunities for wealth creation and economic participation.
|
||||
|
||||
This shared data economy will:
|
||||
- Enable individuals to capitalize on their digital footprints
|
||||
- Create new revenue streams for data creators
|
||||
- Provide AI developers with access to diverse, authentic data
|
||||
- Foster innovation through broader access to real-world data
|
||||
- Ensure more equitable distribution of AI's economic benefits
|
||||
|
||||
Our vision is to facilitate this transformation from the ground up - starting with open-source tools, progressing to data capitalization platforms, and ultimately creating a thriving marketplace where data becomes a true asset class in a shared economy. This approach ensures that the future of AI is built on a foundation of authentic human knowledge, with benefits flowing back to the individuals and organizations who create and share their valuable data.
|
||||
**ROADMAP.md** (new file, 503 lines)

# Crawl4AI Strategic Roadmap
```mermaid
%%{init: {'themeVariables': { 'fontSize': '14px'}}}%%
graph TD
    subgraph A1[Advanced Crawling Systems 🔧]
        A["`
        • Graph Crawler ✓
        • Question-Based Crawler
        • Knowledge-Optimal Crawler
        • Agentic Crawler
        `"]
    end

    subgraph A2[Specialized Features 🛠️]
        B["`
        • Automated Schema Generator
        • Domain-Specific Scrapers
        •
        •
        `"]
    end

    subgraph A3[Development Tools 🔨]
        C["`
        • Interactive Playground
        • Performance Monitor
        • Cloud Integration
        •
        `"]
    end

    subgraph A4[Community & Growth 🌱]
        D["`
        • Sponsorship Program
        • Educational Content
        •
        •
        `"]
    end

    classDef default fill:#f9f9f9,stroke:#333,stroke-width:2px
    classDef section fill:#f0f0f0,stroke:#333,stroke-width:4px,rx:10
    class A1,A2,A3,A4 section

    %% Layout hints
    A1 --> A2[" "]
    A3 --> A4[" "]
    linkStyle 0,1 stroke:none
```
Crawl4AI is evolving to provide more intelligent, efficient, and versatile web crawling capabilities. This roadmap outlines the key developments and features planned for the project, organized into strategic sections that build upon our current foundation.

## 1. Advanced Crawling Systems 🔧

This section introduces three powerful crawling systems that extend Crawl4AI's capabilities from basic web crawling to intelligent, purpose-driven data extraction.

### 1.1 Question-Based Crawler
The Question-Based Crawler enhances our core engine by enabling automatic discovery and extraction of relevant web content based on natural language questions.

Key Features:
- SerpAPI integration for intelligent web search
- Relevancy scoring for search results
- Automatic URL discovery and prioritization
- Cross-source validation

```python
from crawl4ai import AsyncWebCrawler
from crawl4ai.discovery import QuestionBasedDiscovery

async with AsyncWebCrawler() as crawler:
    discovery = QuestionBasedDiscovery(crawler)
    results = await discovery.arun(
        question="What are the system requirements for major cloud providers' GPU instances?",
        max_urls=5,
        relevance_threshold=0.7
    )

    for result in results:
        print(f"Source: {result.url} (Relevance: {result.relevance_score})")
        print(f"Content: {result.markdown}\n")
```
### 1.2 Knowledge-Optimal Crawler
An intelligent crawling system that solves the optimization problem of minimizing data extraction while maximizing knowledge acquisition for specific objectives.

Key Features:
- Smart content prioritization
- Minimal data extraction for maximum knowledge
- Probabilistic relevance assessment
- Objective-driven crawling paths

```python
from crawl4ai import AsyncWebCrawler
from crawl4ai.optimization import KnowledgeOptimizer

async with AsyncWebCrawler() as crawler:
    optimizer = KnowledgeOptimizer(
        objective="Understand GPU instance pricing and limitations across cloud providers",
        required_knowledge=[
            "pricing structure",
            "GPU specifications",
            "usage limits",
            "availability zones"
        ],
        confidence_threshold=0.85
    )

    result = await crawler.arun(
        urls=[
            "https://aws.amazon.com/ec2/pricing/",
            "https://cloud.google.com/gpu",
            "https://azure.microsoft.com/pricing/"
        ],
        optimizer=optimizer,
        optimization_mode="minimal_extraction"
    )

    print(f"Knowledge Coverage: {result.knowledge_coverage}")
    print(f"Data Efficiency: {result.efficiency_ratio}")
    print(f"Extracted Content: {result.optimal_content}")
```
### 1.3 Agentic Crawler
An autonomous system capable of understanding complex goals and automatically planning and executing multi-step crawling operations.

Key Features:
- Autonomous goal interpretation
- Dynamic step planning
- Interactive navigation capabilities
- Visual recognition and interaction
- Automatic error recovery

```python
from crawl4ai import AsyncWebCrawler
from crawl4ai.agents import CrawlerAgent

async with AsyncWebCrawler() as crawler:
    agent = CrawlerAgent(crawler)

    # Automatic planning and execution
    result = await agent.arun(
        goal="Find research papers about quantum computing published in 2023 with more than 50 citations",
        auto_retry=True
    )
    print("Generated Plan:", result.executed_steps)
    print("Extracted Data:", result.data)

    # Using custom steps with automatic execution
    result = await agent.arun(
        goal="Extract conference deadlines from ML conferences",
        custom_plan=[
            "Navigate to conference page",
            "Find important dates section",
            "Extract submission deadlines",
            "Verify dates are for 2024"
        ]
    )

    # Monitoring execution
    print("Step Completion:", result.step_status)
    print("Execution Time:", result.execution_time)
    print("Success Rate:", result.success_rate)
```
# Section 2: Specialized Features 🛠️

This section introduces specialized tools and features that enhance Crawl4AI's capabilities for specific use cases and data extraction needs.

### 2.1 Automated Schema Generator
A system that automatically generates JsonCssExtractionStrategy schemas from natural language descriptions, making structured data extraction accessible to all users.

Key Features:
- Natural language schema generation
- Automatic pattern detection
- Predefined schema templates
- Chrome extension for visual schema building

```python
from crawl4ai import AsyncWebCrawler
from crawl4ai.schema import SchemaGenerator

# Generate schema from natural language description
generator = SchemaGenerator()
schema = await generator.generate(
    url="https://news-website.com",
    description="For each news article on the page, I need the headline, publication date, and main image"
)

# Use generated schema with crawler
async with AsyncWebCrawler() as crawler:
    result = await crawler.arun(
        url="https://news-website.com",
        extraction_strategy=schema
    )

# Example of generated schema:
"""
{
    "name": "News Article Extractor",
    "baseSelector": "article.news-item",
    "fields": [
        {
            "name": "headline",
            "selector": "h2.article-title",
            "type": "text"
        },
        {
            "name": "date",
            "selector": "span.publish-date",
            "type": "text"
        },
        {
            "name": "image",
            "selector": "img.article-image",
            "type": "attribute",
            "attribute": "src"
        }
    ]
}
"""
```
### 2.2 Domain Specific Scrapers
Specialized extraction strategies optimized for common website types and platforms, providing consistent and reliable data extraction without additional configuration.

Key Features:
- Pre-configured extractors for popular platforms
- Academic site specialization (arXiv, NCBI)
- E-commerce standardization
- Documentation site handling

```python
from crawl4ai import AsyncWebCrawler
from crawl4ai.extractors import AcademicExtractor, EcommerceExtractor

async with AsyncWebCrawler() as crawler:
    # Academic paper extraction
    papers = await crawler.arun(
        url="https://arxiv.org/list/cs.AI/recent",
        extractor="academic",  # Built-in extractor type
        site_type="arxiv",     # Specific site optimization
        extract_fields=[
            "title",
            "authors",
            "abstract",
            "citations"
        ]
    )

    # E-commerce product data
    products = await crawler.arun(
        url="https://store.example.com/products",
        extractor="ecommerce",
        extract_fields=[
            "name",
            "price",
            "availability",
            "reviews"
        ]
    )
```
### 2.3 Web Embedding Index
Creates and maintains a semantic search infrastructure for crawled content, enabling efficient retrieval and querying of web content through vector embeddings.

Key Features:
- Automatic embedding generation
- Intelligent content chunking
- Efficient vector storage and indexing
- Semantic search capabilities

```python
from crawl4ai import AsyncWebCrawler
from crawl4ai.indexing import WebIndex

# Initialize and build index
index = WebIndex(model="efficient-mini")

async with AsyncWebCrawler() as crawler:
    # Crawl and index content
    await index.build(
        urls=["https://docs.example.com"],
        crawler=crawler,
        options={
            "chunk_method": "semantic",
            "update_policy": "incremental",
            "embedding_batch_size": 100
        }
    )

    # Search through indexed content
    results = await index.search(
        query="How to implement OAuth authentication?",
        filters={
            "content_type": "technical",
            "recency": "6months"
        },
        top_k=5
    )

    # Get similar content
    similar = await index.find_similar(
        url="https://docs.example.com/auth/oauth",
        threshold=0.85
    )
```
Each of these specialized features builds upon Crawl4AI's core functionality while providing targeted solutions for specific use cases. They can be used independently or combined for more complex data extraction and processing needs.

# Section 3: Development Tools 🔧

This section covers tools designed to enhance the development experience, monitoring, and deployment of Crawl4AI applications.

### 3.1 Crawl4AI Playground 🎮

The Crawl4AI Playground is an interactive web-based development environment that simplifies web scraping experimentation, development, and deployment. With its intuitive interface and AI-powered assistance, users can quickly prototype, test, and deploy web scraping solutions.

#### Key Features 🌟

##### Visual Strategy Builder
- Interactive point-and-click interface for building extraction strategies
- Real-time preview of selected elements
- Side-by-side comparison of different extraction approaches
- Visual validation of CSS selectors and XPath queries

##### AI Assistant Integration
- Strategy recommendations based on target website analysis
- Parameter optimization suggestions
- Best practices guidance for specific use cases
- Automated error detection and resolution
- Performance optimization tips

##### Real-Time Testing & Validation
- Live preview of extraction results
- Side-by-side comparison of multiple strategies
- Performance metrics visualization
- Automatic validation of extracted data
- Error detection and debugging tools

##### Project Management
- Save and organize multiple scraping projects
- Version control for configurations
- Export/import project settings
- Share configurations with team members
- Project templates for common use cases

##### Deployment Pipeline
- One-click deployment to various environments
- Docker container generation
- Cloud deployment templates (AWS, GCP, Azure)
- Scaling configuration management
- Monitoring setup automation
### 3.2 Performance Monitoring System
A comprehensive monitoring solution providing real-time insights into crawler operations, resource usage, and system health through both CLI and GUI interfaces.

Key Features:
- Real-time resource tracking
- Active crawl monitoring
- Performance statistics
- Customizable alerting system

```python
from crawl4ai import AsyncWebCrawler
from crawl4ai.monitor import CrawlMonitor

# Initialize monitoring
monitor = CrawlMonitor()

# Start monitoring with CLI interface
await monitor.start(
    mode="cli",  # or "gui"
    refresh_rate="1s",
    metrics={
        "resources": ["cpu", "memory", "network"],
        "crawls": ["active", "queued", "completed"],
        "performance": ["success_rate", "response_times"]
    }
)

# Example CLI output:
"""
Crawl4AI Monitor (Live) - Press Q to exit
────────────────────────────────────────
System Usage:
├─ CPU: ███████░░░ 70%
└─ Memory: ████░░░░░ 2.1GB/8GB

Active Crawls:
ID    URL                Status     Progress
001   docs.example.com   🟢 Active   75%
002   api.service.com    🟡 Queue    -

Metrics (Last 5min):
├─ Success Rate: 98%
├─ Avg Response: 0.6s
└─ Pages/sec: 8.5
"""
```
### 3.3 Cloud Integration
Streamlined deployment tools for setting up Crawl4AI in various cloud environments, with support for scaling and monitoring.

Key Features:
- One-click deployment solutions
- Auto-scaling configuration
- Load balancing setup
- Cloud-specific optimizations
- Monitoring integration

```python
from crawl4ai import AsyncWebCrawler
from crawl4ai.deploy import CloudDeployer

# Initialize deployer
deployer = CloudDeployer()

# Deploy crawler service
deployment = await deployer.deploy(
    service_name="crawler-cluster",
    platform="aws",  # or "gcp", "azure"
    config={
        "instance_type": "compute-optimized",
        "auto_scaling": {
            "min_instances": 2,
            "max_instances": 10,
            "scale_based_on": "cpu_usage"
        },
        "region": "us-east-1",
        "monitoring": True
    }
)

# Get deployment status and endpoints
print(f"Service Status: {deployment.status}")
print(f"API Endpoint: {deployment.endpoint}")
print(f"Monitor URL: {deployment.monitor_url}")
```
These development tools work together to provide a comprehensive environment for developing, testing, monitoring, and deploying Crawl4AI applications. The Playground helps users experiment and generate optimal configurations, the Performance Monitor ensures smooth operation, and the Cloud Integration tools simplify deployment and scaling.

# Section 4: Community & Growth 🌱

This section outlines initiatives designed to build and support the Crawl4AI community, provide educational resources, and ensure sustainable project growth.

### 4.1 Sponsorship Program
A structured program to support ongoing development and maintenance of Crawl4AI while providing valuable benefits to sponsors.

Key Features:
- Multiple sponsorship tiers
- Sponsor recognition system
- Priority support for sponsors
- Early access to new features
- Custom feature development opportunities

Program Structure (not yet finalized):
```
Sponsorship Tiers:

🥉 Bronze Supporter
- GitHub Sponsor badge
- Priority issue response
- Community Discord role

🥈 Silver Supporter
- All Bronze benefits
- Technical support channel
- Vote on roadmap priorities
- Early access to beta features

🥇 Gold Supporter
- All Silver benefits
- Custom feature requests
- Direct developer access
- Private support sessions

💎 Diamond Partner
- All Gold benefits
- Custom development
- On-demand consulting
- Integration support
```

### 4.2 "How to Crawl" Video Series
A comprehensive educational resource teaching users how to effectively use Crawl4AI for various web scraping and data extraction scenarios.

Key Features:
- Step-by-step tutorials
- Real-world use cases
- Best practices
- Integration guides
- Advanced feature deep-dives

These community initiatives are designed to:
- Provide comprehensive learning resources
- Foster a supportive user community
- Ensure sustainable project development
- Share knowledge and best practices
- Create opportunities for collaboration

The combination of structured support through sponsorship, educational content through video series, and interactive learning through the playground creates a robust ecosystem for both new and experienced users of Crawl4AI.
**cliff.toml** (new file, 24 lines)

[changelog]
# Template format
header = """
# Changelog\n
All notable changes to this project will be documented in this file.\n
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n
"""

# Organize commits by type
[git]
conventional_commits = true
filter_unconventional = true
commit_parsers = [
    { message = "^feat", group = "Added"},
    { message = "^fix", group = "Fixed"},
    { message = "^doc", group = "Documentation"},
    { message = "^perf", group = "Performance"},
    { message = "^refactor", group = "Changed"},
    { message = "^style", group = "Changed"},
    { message = "^test", group = "Testing"},
    { message = "^chore\\(release\\): prepare for", skip = true},
    { message = "^chore", group = "Miscellaneous Tasks"},
]
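For context, this file matches the configuration format of the `git-cliff` changelog generator (an assumption based on its `[changelog]`/`[git]` tables and `commit_parsers` keys); under that assumption, running `git cliff --output CHANGELOG.md` would regenerate the changelog from the conventional commits parsed by these rules.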
**crawl4ai/__init__.py** (changed: 1 line removed, 170 lines added)

Removed line:

from .web_crawler import WebCrawler

New content:

# __init__.py
import warnings

from .async_webcrawler import AsyncWebCrawler, CacheMode
# MODIFIED: Add SeedingConfig here
from .async_configs import BrowserConfig, CrawlerRunConfig, HTTPCrawlerConfig, LLMConfig, ProxyConfig, GeolocationConfig, SeedingConfig

from .content_scraping_strategy import (
    ContentScrapingStrategy,
    WebScrapingStrategy,
    LXMLWebScrapingStrategy,
)
from .async_logger import (
    AsyncLoggerBase,
    AsyncLogger,
)
from .proxy_strategy import (
    ProxyRotationStrategy,
    RoundRobinProxyStrategy,
)
from .extraction_strategy import (
    ExtractionStrategy,
    LLMExtractionStrategy,
    CosineStrategy,
    JsonCssExtractionStrategy,
    JsonXPathExtractionStrategy,
    JsonLxmlExtractionStrategy,
    RegexExtractionStrategy
)
from .chunking_strategy import ChunkingStrategy, RegexChunking
from .markdown_generation_strategy import DefaultMarkdownGenerator
from .content_filter_strategy import (
    PruningContentFilter,
    BM25ContentFilter,
    LLMContentFilter,
    RelevantContentFilter,
)
from .models import CrawlResult, MarkdownGenerationResult, DisplayMode
from .components.crawler_monitor import CrawlerMonitor
from .async_dispatcher import (
    MemoryAdaptiveDispatcher,
    SemaphoreDispatcher,
    RateLimiter,
    BaseDispatcher,
)
from .docker_client import Crawl4aiDockerClient
from .hub import CrawlerHub
from .browser_profiler import BrowserProfiler
from .deep_crawling import (
    DeepCrawlStrategy,
    BFSDeepCrawlStrategy,
    FilterChain,
    URLPatternFilter,
    DomainFilter,
    ContentTypeFilter,
    URLFilter,
    FilterStats,
    SEOFilter,
    KeywordRelevanceScorer,
    URLScorer,
    CompositeScorer,
    DomainAuthorityScorer,
    FreshnessScorer,
    PathDepthScorer,
    BestFirstCrawlingStrategy,
    DFSDeepCrawlStrategy,
    DeepCrawlDecorator,
)
# NEW: Import AsyncUrlSeeder
from .async_url_seeder import AsyncUrlSeeder

from .utils import (
    start_colab_display_server,
    setup_colab_environment
)

__all__ = [
    "AsyncLoggerBase",
    "AsyncLogger",
    "AsyncWebCrawler",
    "BrowserProfiler",
    "LLMConfig",
    "GeolocationConfig",
    # NEW: Add SeedingConfig
    "SeedingConfig",
    # NEW: Add AsyncUrlSeeder
    "AsyncUrlSeeder",
    "DeepCrawlStrategy",
    "BFSDeepCrawlStrategy",
    "BestFirstCrawlingStrategy",
    "DFSDeepCrawlStrategy",
    "FilterChain",
    "URLPatternFilter",
    "ContentTypeFilter",
    "DomainFilter",
    "FilterStats",
    "URLFilter",
    "SEOFilter",
    "KeywordRelevanceScorer",
    "URLScorer",
    "CompositeScorer",
    "DomainAuthorityScorer",
    "FreshnessScorer",
    "PathDepthScorer",
    "DeepCrawlDecorator",
    "CrawlResult",
    "CrawlerHub",
    "CacheMode",
    "ContentScrapingStrategy",
    "WebScrapingStrategy",
    "LXMLWebScrapingStrategy",
    "BrowserConfig",
    "CrawlerRunConfig",
    "HTTPCrawlerConfig",
    "ExtractionStrategy",
    "LLMExtractionStrategy",
    "CosineStrategy",
    "JsonCssExtractionStrategy",
    "JsonXPathExtractionStrategy",
    "JsonLxmlExtractionStrategy",
    "RegexExtractionStrategy",
    "ChunkingStrategy",
    "RegexChunking",
    "DefaultMarkdownGenerator",
    "RelevantContentFilter",
    "PruningContentFilter",
    "BM25ContentFilter",
    "LLMContentFilter",
    "BaseDispatcher",
    "MemoryAdaptiveDispatcher",
    "SemaphoreDispatcher",
    "RateLimiter",
    "CrawlerMonitor",
    "DisplayMode",
    "MarkdownGenerationResult",
    "Crawl4aiDockerClient",
    "ProxyRotationStrategy",
    "RoundRobinProxyStrategy",
    "ProxyConfig",
    "start_colab_display_server",
    "setup_colab_environment",
]


# def is_sync_version_installed():
#     try:
#         import selenium  # noqa

#         return True
#     except ImportError:
#         return False


# if is_sync_version_installed():
#     try:
#         from .web_crawler import WebCrawler

#         __all__.append("WebCrawler")
#     except ImportError:
#         print(
#             "Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies."
#         )
# else:
#     WebCrawler = None
#     # import warnings
#     # print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")

# Disable all Pydantic warnings
warnings.filterwarnings("ignore", module="pydantic")
# pydantic_warnings.filter_warnings()
**crawl4ai/__version__.py** (new file, 3 lines)

# crawl4ai/_version.py
__version__ = "0.6.3"
**crawl4ai/async_configs.py** (new file, 1461 lines): diff suppressed because it is too large.

**crawl4ai/async_crawler_strategy.py** (new file, 2181 lines): diff suppressed because it is too large.

**crawl4ai/async_database.py** (new file, 564 lines)
import os
from pathlib import Path
import aiosqlite
import asyncio
from typing import Optional, Dict
from contextlib import asynccontextmanager
import json
from .models import CrawlResult, MarkdownGenerationResult, StringCompatibleMarkdown
import aiofiles
from .async_logger import AsyncLogger

from .utils import ensure_content_dirs, generate_content_hash
from .utils import VersionManager
from .utils import get_error_context, create_box_message

base_directory = DB_PATH = os.path.join(
    os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
)
os.makedirs(DB_PATH, exist_ok=True)
DB_PATH = os.path.join(base_directory, "crawl4ai.db")


class AsyncDatabaseManager:
    def __init__(self, pool_size: int = 10, max_retries: int = 3):
        self.db_path = DB_PATH
        self.content_paths = ensure_content_dirs(os.path.dirname(DB_PATH))
        self.pool_size = pool_size
        self.max_retries = max_retries
        self.connection_pool: Dict[int, aiosqlite.Connection] = {}
        self.pool_lock = asyncio.Lock()
        self.init_lock = asyncio.Lock()
        self.connection_semaphore = asyncio.Semaphore(pool_size)
        self._initialized = False
        self.version_manager = VersionManager()
        self.logger = AsyncLogger(
            log_file=os.path.join(base_directory, ".crawl4ai", "crawler_db.log"),
            verbose=False,
            tag_width=10,
        )

    async def initialize(self):
        """Initialize the database and connection pool"""
        try:
            self.logger.info("Initializing database", tag="INIT")
            # Ensure the database file exists
            os.makedirs(os.path.dirname(self.db_path), exist_ok=True)

            # Check if version update is needed
            needs_update = self.version_manager.needs_update()

            # Always ensure base table exists
            await self.ainit_db()

            # Verify the table exists
            async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
                async with db.execute(
                    "SELECT name FROM sqlite_master WHERE type='table' AND name='crawled_data'"
                ) as cursor:
                    result = await cursor.fetchone()
                    if not result:
                        raise Exception("crawled_data table was not created")

            # If version changed or fresh install, run updates
            if needs_update:
                self.logger.info("New version detected, running updates", tag="INIT")
                await self.update_db_schema()
                from .migrations import (
                    run_migration,
                )  # Import here to avoid circular imports

                await run_migration()
                self.version_manager.update_version()  # Update stored version after successful migration
                self.logger.success(
                    "Version update completed successfully", tag="COMPLETE"
                )
            else:
                self.logger.success(
                    "Database initialization completed successfully", tag="COMPLETE"
                )

        except Exception as e:
            self.logger.error(
                message="Database initialization error: {error}",
                tag="ERROR",
                params={"error": str(e)},
            )
            self.logger.info(
                message="Database will be initialized on first use", tag="INIT"
            )

            raise

    async def cleanup(self):
        """Cleanup connections when shutting down"""
        async with self.pool_lock:
            for conn in self.connection_pool.values():
                await conn.close()
            self.connection_pool.clear()

    @asynccontextmanager
    async def get_connection(self):
        """Connection pool manager with enhanced error handling"""
        if not self._initialized:
            async with self.init_lock:
                if not self._initialized:
                    try:
                        await self.initialize()
                        self._initialized = True
                    except Exception as e:
                        import sys

                        error_context = get_error_context(sys.exc_info())
                        self.logger.error(
                            message="Database initialization failed:\n{error}\n\nContext:\n{context}\n\nTraceback:\n{traceback}",
                            tag="ERROR",
                            force_verbose=True,
                            params={
                                "error": str(e),
                                "context": error_context["code_context"],
                                "traceback": error_context["full_traceback"],
                            },
                        )
                        raise

        await self.connection_semaphore.acquire()
        task_id = id(asyncio.current_task())

        try:
            async with self.pool_lock:
                if task_id not in self.connection_pool:
                    try:
                        conn = await aiosqlite.connect(self.db_path, timeout=30.0)
                        await conn.execute("PRAGMA journal_mode = WAL")
                        await conn.execute("PRAGMA busy_timeout = 5000")

                        # Verify database structure
                        async with conn.execute(
                            "PRAGMA table_info(crawled_data)"
                        ) as cursor:
                            columns = await cursor.fetchall()
                            column_names = [col[1] for col in columns]
                            expected_columns = {
                                "url",
                                "html",
                                "cleaned_html",
                                "markdown",
                                "extracted_content",
                                "success",
                                "media",
                                "links",
                                "metadata",
                                "screenshot",
                                "response_headers",
                                "downloaded_files",
                            }
                            missing_columns = expected_columns - set(column_names)
                            if missing_columns:
                                raise ValueError(
                                    f"Database missing columns: {missing_columns}"
                                )

                        self.connection_pool[task_id] = conn
                    except Exception as e:
                        import sys

                        error_context = get_error_context(sys.exc_info())
                        error_message = (
                            f"Unexpected error in db get_connection at line {error_context['line_no']} "
                            f"in {error_context['function']} ({error_context['filename']}):\n"
                            f"Error: {str(e)}\n\n"
                            f"Code context:\n{error_context['code_context']}"
                        )
                        self.logger.error(
                            message="{error}",
                            tag="ERROR",
                            params={"error": str(error_message)},
                            boxes=["error"],
                        )

                        raise

            yield self.connection_pool[task_id]

        except Exception as e:
            import sys

            error_context = get_error_context(sys.exc_info())
            error_message = (
                f"Unexpected error in db get_connection at line {error_context['line_no']} "
                f"in {error_context['function']} ({error_context['filename']}):\n"
                f"Error: {str(e)}\n\n"
                f"Code context:\n{error_context['code_context']}"
            )
            self.logger.error(
                message="{error}",
                tag="ERROR",
                params={"error": str(error_message)},
                boxes=["error"],
            )
            raise
        finally:
            async with self.pool_lock:
                if task_id in self.connection_pool:
                    await self.connection_pool[task_id].close()
                    del self.connection_pool[task_id]
            self.connection_semaphore.release()

    async def execute_with_retry(self, operation, *args):
        """Execute database operations with retry logic"""
        for attempt in range(self.max_retries):
            try:
                async with self.get_connection() as db:
                    result = await operation(db, *args)
                    await db.commit()
                    return result
            except Exception as e:
                if attempt == self.max_retries - 1:
                    self.logger.error(
                        message="Operation failed after {retries} attempts: {error}",
                        tag="ERROR",
                        force_verbose=True,
                        params={"retries": self.max_retries, "error": str(e)},
                    )
                    raise
                await asyncio.sleep(1 * (attempt + 1))  # Exponential backoff

    async def ainit_db(self):
        """Initialize database schema"""
        async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
            await db.execute(
                """
                CREATE TABLE IF NOT EXISTS crawled_data (
                    url TEXT PRIMARY KEY,
                    html TEXT,
                    cleaned_html TEXT,
                    markdown TEXT,
                    extracted_content TEXT,
                    success BOOLEAN,
                    media TEXT DEFAULT "{}",
                    links TEXT DEFAULT "{}",
                    metadata TEXT DEFAULT "{}",
                    screenshot TEXT DEFAULT "",
                    response_headers TEXT DEFAULT "{}",
                    downloaded_files TEXT DEFAULT "{}"  -- New column added
                )
            """
            )
            await db.commit()

    async def update_db_schema(self):
        """Update database schema if needed"""
        async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
            cursor = await db.execute("PRAGMA table_info(crawled_data)")
            columns = await cursor.fetchall()
            column_names = [column[1] for column in columns]

            # List of new columns to add
            new_columns = [
                "media",
                "links",
                "metadata",
                "screenshot",
                "response_headers",
                "downloaded_files",
            ]

            for column in new_columns:
                if column not in column_names:
                    await self.aalter_db_add_column(column, db)
            await db.commit()

    async def aalter_db_add_column(self, new_column: str, db):
        """Add new column to the database"""
        if new_column == "response_headers":
            await db.execute(
                f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT "{{}}"'
            )
        else:
            await db.execute(
                f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""'
            )
        self.logger.info(
            message="Added column '{column}' to the database",
            tag="INIT",
            params={"column": new_column},
        )

    async def aget_cached_url(self, url: str) -> Optional[CrawlResult]:
        """Retrieve cached URL data as CrawlResult"""

        async def _get(db):
            async with db.execute(
                "SELECT * FROM crawled_data WHERE url = ?", (url,)
            ) as cursor:
                row = await cursor.fetchone()
                if not row:
                    return None

                # Get column names
                columns = [description[0] for description in cursor.description]
                # Create dict from row data
                row_dict = dict(zip(columns, row))

                # Load content from files using stored hashes
                content_fields = {
                    "html": row_dict["html"],
                    "cleaned_html": row_dict["cleaned_html"],
                    "markdown": row_dict["markdown"],
                    "extracted_content": row_dict["extracted_content"],
                    "screenshot": row_dict["screenshot"],
                    "screenshots": row_dict["screenshot"],
                }

                for field, hash_value in content_fields.items():
                    if hash_value:
                        content = await self._load_content(
                            hash_value,
                            field.split("_")[0],  # Get content type from field name
                        )
                        row_dict[field] = content or ""
                    else:
                        row_dict[field] = ""

                # Parse JSON fields
                json_fields = [
                    "media",
                    "links",
                    "metadata",
                    "response_headers",
                    "markdown",
                ]
                for field in json_fields:
                    try:
                        row_dict[field] = (
                            json.loads(row_dict[field]) if row_dict[field] else {}
                        )
                    except json.JSONDecodeError:
                        # Very UGLY, never mention it to me please
                        if field == "markdown" and isinstance(row_dict[field], str):
                            row_dict[field] = MarkdownGenerationResult(
                                raw_markdown=row_dict[field] or "",
                                markdown_with_citations="",
                                references_markdown="",
                                fit_markdown="",
                                fit_html="",
                            )
                        else:
                            row_dict[field] = {}

                if isinstance(row_dict["markdown"], Dict):
                    if row_dict["markdown"].get("raw_markdown"):
                        row_dict["markdown"] = row_dict["markdown"]["raw_markdown"]

                # Parse downloaded_files
                try:
                    row_dict["downloaded_files"] = (
                        json.loads(row_dict["downloaded_files"])
                        if row_dict["downloaded_files"]
                        else []
                    )
                except json.JSONDecodeError:
                    row_dict["downloaded_files"] = []

                # Remove any fields not in CrawlResult model
                valid_fields = CrawlResult.__annotations__.keys()
                filtered_dict = {k: v for k, v in row_dict.items() if k in valid_fields}
                filtered_dict["markdown"] = row_dict["markdown"]
                return CrawlResult(**filtered_dict)

        try:
            return await self.execute_with_retry(_get)
        except Exception as e:
            self.logger.error(
                message="Error retrieving cached URL: {error}",
                tag="ERROR",
                force_verbose=True,
                params={"error": str(e)},
            )
            return None

    async def acache_url(self, result: CrawlResult):
        """Cache CrawlResult data"""
        # Store content files and get hashes
        content_map = {
            "html": (result.html, "html"),
            "cleaned_html": (result.cleaned_html or "", "cleaned"),
            "markdown": None,
            "extracted_content": (result.extracted_content or "", "extracted"),
            "screenshot": (result.screenshot or "", "screenshots"),
        }

        try:
            if isinstance(result.markdown, StringCompatibleMarkdown):
                content_map["markdown"] = (
                    result.markdown,
                    "markdown",
                )
            elif isinstance(result.markdown, MarkdownGenerationResult):
                content_map["markdown"] = (
                    result.markdown.model_dump_json(),
                    "markdown",
                )
            elif isinstance(result.markdown, str):
                markdown_result = MarkdownGenerationResult(raw_markdown=result.markdown)
                content_map["markdown"] = (
                    markdown_result.model_dump_json(),
                    "markdown",
                )
            else:
                content_map["markdown"] = (
                    MarkdownGenerationResult().model_dump_json(),
                    "markdown",
                )
        except Exception as e:
            self.logger.warning(
                message=f"Error processing markdown content: {str(e)}", tag="WARNING"
            )
            # Fallback to empty markdown result
            content_map["markdown"] = (
                MarkdownGenerationResult().model_dump_json(),
                "markdown",
            )

        content_hashes = {}
        for field, (content, content_type) in content_map.items():
            content_hashes[field] = await self._store_content(content, content_type)

        async def _cache(db):
            await db.execute(
                """
                INSERT INTO crawled_data (
                    url, html, cleaned_html, markdown,
                    extracted_content, success, media, links, metadata,
                    screenshot, response_headers, downloaded_files
                )
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                ON CONFLICT(url) DO UPDATE SET
                    html = excluded.html,
                    cleaned_html = excluded.cleaned_html,
                    markdown = excluded.markdown,
                    extracted_content = excluded.extracted_content,
                    success = excluded.success,
                    media = excluded.media,
                    links = excluded.links,
                    metadata = excluded.metadata,
                    screenshot = excluded.screenshot,
                    response_headers = excluded.response_headers,
                    downloaded_files = excluded.downloaded_files
            """,
                (
                    result.url,
                    content_hashes["html"],
                    content_hashes["cleaned_html"],
                    content_hashes["markdown"],
                    content_hashes["extracted_content"],
                    result.success,
                    json.dumps(result.media),
                    json.dumps(result.links),
                    json.dumps(result.metadata or {}),
                    content_hashes["screenshot"],
                    json.dumps(result.response_headers or {}),
                    json.dumps(result.downloaded_files or []),
                ),
            )

        try:
            await self.execute_with_retry(_cache)
        except Exception as e:
            self.logger.error(
                message="Error caching URL: {error}",
                tag="ERROR",
                force_verbose=True,
                params={"error": str(e)},
            )

    async def aget_total_count(self) -> int:
        """Get total number of cached URLs"""

        async def _count(db):
            async with db.execute("SELECT COUNT(*) FROM crawled_data") as cursor:
                result = await cursor.fetchone()
                return result[0] if result else 0

        try:
            return await self.execute_with_retry(_count)
        except Exception as e:
            self.logger.error(
                message="Error getting total count: {error}",
                tag="ERROR",
                force_verbose=True,
                params={"error": str(e)},
            )
            return 0

    async def aclear_db(self):
        """Clear all data from the database"""

        async def _clear(db):
            await db.execute("DELETE FROM crawled_data")

        try:
            await self.execute_with_retry(_clear)
        except Exception as e:
            self.logger.error(
                message="Error clearing database: {error}",
                tag="ERROR",
                force_verbose=True,
                params={"error": str(e)},
            )

    async def aflush_db(self):
        """Drop the entire table"""

        async def _flush(db):
            await db.execute("DROP TABLE IF EXISTS crawled_data")

        try:
            await self.execute_with_retry(_flush)
        except Exception as e:
            self.logger.error(
                message="Error flushing database: {error}",
                tag="ERROR",
                force_verbose=True,
                params={"error": str(e)},
            )

    async def _store_content(self, content: str, content_type: str) -> str:
        """Store content in filesystem and return hash"""
        if not content:
            return ""

        content_hash = generate_content_hash(content)
        file_path = os.path.join(self.content_paths[content_type], content_hash)

        # Only write if file doesn't exist
        if not os.path.exists(file_path):
            async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
|
||||
await f.write(content)
|
||||
|
||||
return content_hash
|
||||
|
||||
async def _load_content(
|
||||
self, content_hash: str, content_type: str
|
||||
) -> Optional[str]:
|
||||
"""Load content from filesystem by hash"""
|
||||
if not content_hash:
|
||||
return None
|
||||
|
||||
file_path = os.path.join(self.content_paths[content_type], content_hash)
|
||||
try:
|
||||
async with aiofiles.open(file_path, "r", encoding="utf-8") as f:
|
||||
return await f.read()
|
||||
except:
|
||||
self.logger.error(
|
||||
message="Failed to load content: {file_path}",
|
||||
tag="ERROR",
|
||||
force_verbose=True,
|
||||
params={"file_path": file_path},
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
# Create a singleton instance
|
||||
async_db_manager = AsyncDatabaseManager()
|
||||
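For orientation, a minimal usage sketch of the manager above (illustrative, not part of the diff; the URL and field values are made up, and the import paths assume the package layout used by the other files in this changeset):

import asyncio
from crawl4ai.models import CrawlResult
from crawl4ai.async_database import async_db_manager

async def demo():
    # Write a result through the retry-wrapped cache path, then read it back.
    result = CrawlResult(url="https://example.com", html="<html></html>", success=True)
    await async_db_manager.acache_url(result)
    cached = await async_db_manager.aget_cached_url("https://example.com")
    print(cached.success if cached else "cache miss")
    print(await async_db_manager.aget_total_count())

asyncio.run(demo())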
crawl4ai/async_dispatcher.py (new file, 646 lines)
@@ -0,0 +1,646 @@
from typing import Dict, Optional, List, Tuple
from .async_configs import CrawlerRunConfig
from .models import (
    CrawlResult,
    CrawlerTaskResult,
    CrawlStatus,
    DomainState,
)

from .components.crawler_monitor import CrawlerMonitor

from .types import AsyncWebCrawler

from collections.abc import AsyncGenerator

import time
import psutil
import asyncio
import uuid

from urllib.parse import urlparse
import random
from abc import ABC, abstractmethod


class RateLimiter:
    def __init__(
        self,
        base_delay: Tuple[float, float] = (1.0, 3.0),
        max_delay: float = 60.0,
        max_retries: int = 3,
        rate_limit_codes: List[int] = None,
    ):
        self.base_delay = base_delay
        self.max_delay = max_delay
        self.max_retries = max_retries
        self.rate_limit_codes = rate_limit_codes or [429, 503]
        self.domains: Dict[str, DomainState] = {}

    def get_domain(self, url: str) -> str:
        return urlparse(url).netloc

    async def wait_if_needed(self, url: str) -> None:
        domain = self.get_domain(url)
        state = self.domains.get(domain)

        if not state:
            self.domains[domain] = DomainState()
            state = self.domains[domain]

        now = time.time()
        if state.last_request_time:
            wait_time = max(0, state.current_delay - (now - state.last_request_time))
            if wait_time > 0:
                await asyncio.sleep(wait_time)

        # Random delay within base range if no current delay
        if state.current_delay == 0:
            state.current_delay = random.uniform(*self.base_delay)

        state.last_request_time = time.time()

    def update_delay(self, url: str, status_code: int) -> bool:
        domain = self.get_domain(url)
        state = self.domains[domain]

        if status_code in self.rate_limit_codes:
            state.fail_count += 1
            if state.fail_count > self.max_retries:
                return False

            # Exponential backoff with random jitter
            state.current_delay = min(
                state.current_delay * 2 * random.uniform(0.75, 1.25), self.max_delay
            )
        else:
            # Gradually reduce delay on success
            state.current_delay = max(
                random.uniform(*self.base_delay), state.current_delay * 0.75
            )
            state.fail_count = 0

        return True
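# Behavior sketch (illustrative, with the defaults above): the first request to
# a domain draws a random delay from base_delay=(1.0, 3.0). Each 429/503 passed
# to update_delay() doubles the current delay with +/-25% jitter, capped at
# max_delay=60.0, and bumps fail_count; once fail_count exceeds max_retries=3,
# update_delay() returns False and the caller is expected to give up on that
# URL. Any other status code decays the delay by 25% (floored at a fresh draw
# from base_delay) and resets fail_count.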
class BaseDispatcher(ABC):
    def __init__(
        self,
        rate_limiter: Optional[RateLimiter] = None,
        monitor: Optional[CrawlerMonitor] = None,
    ):
        self.crawler = None
        self._domain_last_hit: Dict[str, float] = {}
        self.concurrent_sessions = 0
        self.rate_limiter = rate_limiter
        self.monitor = monitor

    @abstractmethod
    async def crawl_url(
        self,
        url: str,
        config: CrawlerRunConfig,
        task_id: str,
        monitor: Optional[CrawlerMonitor] = None,
    ) -> CrawlerTaskResult:
        pass

    @abstractmethod
    async def run_urls(
        self,
        urls: List[str],
        crawler: AsyncWebCrawler,  # noqa: F821
        config: CrawlerRunConfig,
        monitor: Optional[CrawlerMonitor] = None,
    ) -> List[CrawlerTaskResult]:
        pass


class MemoryAdaptiveDispatcher(BaseDispatcher):
    def __init__(
        self,
        memory_threshold_percent: float = 90.0,
        critical_threshold_percent: float = 95.0,  # New critical threshold
        recovery_threshold_percent: float = 85.0,  # New recovery threshold
        check_interval: float = 1.0,
        max_session_permit: int = 20,
        fairness_timeout: float = 600.0,  # 10 minutes before prioritizing long-waiting URLs
        rate_limiter: Optional[RateLimiter] = None,
        monitor: Optional[CrawlerMonitor] = None,
    ):
        super().__init__(rate_limiter, monitor)
        self.memory_threshold_percent = memory_threshold_percent
        self.critical_threshold_percent = critical_threshold_percent
        self.recovery_threshold_percent = recovery_threshold_percent
        self.check_interval = check_interval
        self.max_session_permit = max_session_permit
        self.fairness_timeout = fairness_timeout
        self.result_queue = asyncio.Queue()
        self.task_queue = asyncio.PriorityQueue()  # Priority queue for better management
        self.memory_pressure_mode = False  # Flag to indicate when we're in memory pressure mode
        self.current_memory_percent = 0.0  # Track current memory usage

    async def _memory_monitor_task(self):
        """Background task to continuously monitor memory usage and update state"""
        while True:
            self.current_memory_percent = psutil.virtual_memory().percent

            # Enter memory pressure mode if we cross the threshold
            if not self.memory_pressure_mode and self.current_memory_percent >= self.memory_threshold_percent:
                self.memory_pressure_mode = True
                if self.monitor:
                    self.monitor.update_memory_status("PRESSURE")

            # Exit memory pressure mode if we go below recovery threshold
            elif self.memory_pressure_mode and self.current_memory_percent <= self.recovery_threshold_percent:
                self.memory_pressure_mode = False
                if self.monitor:
                    self.monitor.update_memory_status("NORMAL")

            # In critical mode, we might need to take more drastic action
            if self.current_memory_percent >= self.critical_threshold_percent:
                if self.monitor:
                    self.monitor.update_memory_status("CRITICAL")
                # We could implement additional memory-saving measures here

            await asyncio.sleep(self.check_interval)

    def _get_priority_score(self, wait_time: float, retry_count: int) -> float:
        """Calculate priority score (lower is higher priority).

        - URLs waiting longer than fairness_timeout get higher priority
        - More retry attempts lower a task's priority
        """
        if wait_time > self.fairness_timeout:
            # High priority for long-waiting URLs
            return -wait_time
        # Standard priority based on retries
        return retry_count
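    # Worked example (illustrative numbers): with fairness_timeout=600.0, a URL
    # that has waited 700s scores -700.0 and jumps ahead of every task with a
    # non-negative score, while a fresh URL on its second retry scores 2 and
    # sorts behind first-attempt tasks, because the PriorityQueue pops the
    # smallest score first.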
    async def crawl_url(
        self,
        url: str,
        config: CrawlerRunConfig,
        task_id: str,
        retry_count: int = 0,
    ) -> CrawlerTaskResult:
        start_time = time.time()
        error_message = ""
        memory_usage = peak_memory = 0.0

        # Get starting memory for accurate measurement
        process = psutil.Process()
        start_memory = process.memory_info().rss / (1024 * 1024)

        try:
            if self.monitor:
                self.monitor.update_task(
                    task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=start_time,
                    retry_count=retry_count
                )

            self.concurrent_sessions += 1

            if self.rate_limiter:
                await self.rate_limiter.wait_if_needed(url)

            # Check if we're in critical memory state
            if self.current_memory_percent >= self.critical_threshold_percent:
                # Requeue this task with increased priority and retry count
                enqueue_time = time.time()
                priority = self._get_priority_score(enqueue_time - start_time, retry_count + 1)
                await self.task_queue.put((priority, (url, task_id, retry_count + 1, enqueue_time)))

                # Update monitoring
                if self.monitor:
                    self.monitor.update_task(
                        task_id,
                        status=CrawlStatus.QUEUED,
                        error_message="Requeued due to critical memory pressure"
                    )

                # Return placeholder result with requeued status
                return CrawlerTaskResult(
                    task_id=task_id,
                    url=url,
                    result=CrawlResult(
                        url=url, html="", metadata={"status": "requeued"},
                        success=False, error_message="Requeued due to critical memory pressure"
                    ),
                    memory_usage=0,
                    peak_memory=0,
                    start_time=start_time,
                    end_time=time.time(),
                    error_message="Requeued due to critical memory pressure",
                    retry_count=retry_count + 1
                )

            # Execute the crawl
            result = await self.crawler.arun(url, config=config, session_id=task_id)

            # Measure memory usage
            end_memory = process.memory_info().rss / (1024 * 1024)
            memory_usage = peak_memory = end_memory - start_memory

            # Handle rate limiting
            if self.rate_limiter and result.status_code:
                if not self.rate_limiter.update_delay(url, result.status_code):
                    error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
                    if self.monitor:
                        self.monitor.update_task(task_id, status=CrawlStatus.FAILED)

            # Update status based on result
            if not result.success:
                error_message = result.error_message
                if self.monitor:
                    self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
            elif self.monitor:
                self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)

        except Exception as e:
            error_message = str(e)
            if self.monitor:
                self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
            result = CrawlResult(
                url=url, html="", metadata={}, success=False, error_message=str(e)
            )

        finally:
            end_time = time.time()
            if self.monitor:
                self.monitor.update_task(
                    task_id,
                    end_time=end_time,
                    memory_usage=memory_usage,
                    peak_memory=peak_memory,
                    error_message=error_message,
                    retry_count=retry_count
                )
            self.concurrent_sessions -= 1

        return CrawlerTaskResult(
            task_id=task_id,
            url=url,
            result=result,
            memory_usage=memory_usage,
            peak_memory=peak_memory,
            start_time=start_time,
            end_time=end_time,
            error_message=error_message,
            retry_count=retry_count
        )
    async def run_urls(
        self,
        urls: List[str],
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlerTaskResult]:
        self.crawler = crawler

        # Start the memory monitor task
        memory_monitor = asyncio.create_task(self._memory_monitor_task())

        if self.monitor:
            self.monitor.start()

        results = []

        try:
            # Initialize task queue
            for url in urls:
                task_id = str(uuid.uuid4())
                if self.monitor:
                    self.monitor.add_task(task_id, url)
                # Add to queue with initial priority 0, retry count 0, and current time
                await self.task_queue.put((0, (url, task_id, 0, time.time())))

            active_tasks = []

            # Process until both queues are empty
            while not self.task_queue.empty() or active_tasks:
                # If memory pressure is low, start new tasks
                if not self.memory_pressure_mode and len(active_tasks) < self.max_session_permit:
                    try:
                        # Try to get a task with timeout to avoid blocking indefinitely
                        priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
                            self.task_queue.get(), timeout=0.1
                        )

                        # Create and start the task
                        task = asyncio.create_task(
                            self.crawl_url(url, config, task_id, retry_count)
                        )
                        active_tasks.append(task)

                        # Update waiting time in monitor
                        if self.monitor:
                            wait_time = time.time() - enqueue_time
                            self.monitor.update_task(
                                task_id,
                                wait_time=wait_time,
                                status=CrawlStatus.IN_PROGRESS
                            )

                    except asyncio.TimeoutError:
                        # No tasks in queue, that's fine
                        pass

                # Wait for completion even if queue is starved
                if active_tasks:
                    done, pending = await asyncio.wait(
                        active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
                    )

                    # Process completed tasks
                    for completed_task in done:
                        result = await completed_task
                        results.append(result)

                    # Update active tasks list
                    active_tasks = list(pending)
                else:
                    # If no active tasks but still waiting, sleep briefly
                    await asyncio.sleep(self.check_interval / 2)

                # Update priorities for waiting tasks if needed
                await self._update_queue_priorities()

            return results

        except Exception as e:
            if self.monitor:
                self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")

        finally:
            # Clean up
            memory_monitor.cancel()
            if self.monitor:
                self.monitor.stop()

    async def _update_queue_priorities(self):
        """Periodically update priorities of items in the queue to prevent starvation"""
        # Skip if queue is empty
        if self.task_queue.empty():
            return

        # Use a drain-and-refill approach to update all priorities
        temp_items = []

        # Drain the queue (with a safety timeout to prevent blocking)
        try:
            drain_start = time.time()
            while not self.task_queue.empty() and time.time() - drain_start < 5.0:  # 5 second safety timeout
                try:
                    # Get item from queue with timeout
                    priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
                        self.task_queue.get(), timeout=0.1
                    )

                    # Calculate new priority based on current wait time
                    current_time = time.time()
                    wait_time = current_time - enqueue_time
                    new_priority = self._get_priority_score(wait_time, retry_count)

                    # Store with updated priority
                    temp_items.append((new_priority, (url, task_id, retry_count, enqueue_time)))

                    # Update monitoring stats for this task
                    if self.monitor and task_id in self.monitor.stats:
                        self.monitor.update_task(task_id, wait_time=wait_time)

                except asyncio.TimeoutError:
                    # Queue might be empty or very slow
                    break
        except Exception as e:
            # If anything goes wrong, fall through so the refill below still
            # restores whatever was drained
            if self.monitor:
                self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")

        # Calculate queue statistics
        if temp_items and self.monitor:
            total_queued = len(temp_items)
            wait_times = [item[1][3] for item in temp_items]
            highest_wait_time = time.time() - min(wait_times) if wait_times else 0
            avg_wait_time = sum(time.time() - t for t in wait_times) / len(wait_times) if wait_times else 0

            # Update queue statistics in monitor
            self.monitor.update_queue_statistics(
                total_queued=total_queued,
                highest_wait_time=highest_wait_time,
                avg_wait_time=avg_wait_time
            )

        # Sort by priority (lowest number = highest priority)
        temp_items.sort(key=lambda x: x[0])

        # Refill the queue with updated priorities
        for item in temp_items:
            await self.task_queue.put(item)

    async def run_urls_stream(
        self,
        urls: List[str],
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlerTaskResult, None]:
        self.crawler = crawler

        # Start the memory monitor task
        memory_monitor = asyncio.create_task(self._memory_monitor_task())

        if self.monitor:
            self.monitor.start()

        try:
            # Initialize task queue
            for url in urls:
                task_id = str(uuid.uuid4())
                if self.monitor:
                    self.monitor.add_task(task_id, url)
                # Add to queue with initial priority 0, retry count 0, and current time
                await self.task_queue.put((0, (url, task_id, 0, time.time())))

            active_tasks = []
            completed_count = 0
            total_urls = len(urls)

            while completed_count < total_urls:
                # If memory pressure is low, start new tasks
                if not self.memory_pressure_mode and len(active_tasks) < self.max_session_permit:
                    try:
                        # Try to get a task with timeout
                        priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
                            self.task_queue.get(), timeout=0.1
                        )

                        # Create and start the task
                        task = asyncio.create_task(
                            self.crawl_url(url, config, task_id, retry_count)
                        )
                        active_tasks.append(task)

                        # Update waiting time in monitor
                        if self.monitor:
                            wait_time = time.time() - enqueue_time
                            self.monitor.update_task(
                                task_id,
                                wait_time=wait_time,
                                status=CrawlStatus.IN_PROGRESS
                            )

                    except asyncio.TimeoutError:
                        # No tasks in queue, that's fine
                        pass

                # Process completed tasks and yield results
                if active_tasks:
                    done, pending = await asyncio.wait(
                        active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
                    )

                    for completed_task in done:
                        result = await completed_task

                        # Only count as completed if it wasn't requeued
                        if "requeued" not in result.error_message:
                            completed_count += 1
                            yield result

                    # Update active tasks list
                    active_tasks = list(pending)
                else:
                    # If no active tasks but still waiting, sleep briefly
                    await asyncio.sleep(self.check_interval / 2)

                # Update priorities for waiting tasks if needed
                await self._update_queue_priorities()

        finally:
            # Clean up
            memory_monitor.cancel()
            if self.monitor:
                self.monitor.stop()
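# Threshold behavior, in brief (illustrative): with the defaults above, the
# dispatcher stops launching new tasks once system memory crosses 90%
# (memory_threshold_percent), requeues work that is about to start at 95%
# (critical_threshold_percent), and resumes normal dispatch only after usage
# falls back under 85% (recovery_threshold_percent); the gap between the enter
# and exit thresholds keeps the pressure mode from flapping around one cutoff.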
class SemaphoreDispatcher(BaseDispatcher):
    def __init__(
        self,
        semaphore_count: int = 5,
        max_session_permit: int = 20,
        rate_limiter: Optional[RateLimiter] = None,
        monitor: Optional[CrawlerMonitor] = None,
    ):
        super().__init__(rate_limiter, monitor)
        self.semaphore_count = semaphore_count
        self.max_session_permit = max_session_permit

    async def crawl_url(
        self,
        url: str,
        config: CrawlerRunConfig,
        task_id: str,
        semaphore: asyncio.Semaphore = None,
    ) -> CrawlerTaskResult:
        start_time = time.time()
        error_message = ""
        memory_usage = peak_memory = 0.0

        try:
            if self.monitor:
                self.monitor.update_task(
                    task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
                )

            if self.rate_limiter:
                await self.rate_limiter.wait_if_needed(url)

            async with semaphore:
                process = psutil.Process()
                start_memory = process.memory_info().rss / (1024 * 1024)
                result = await self.crawler.arun(url, config=config, session_id=task_id)
                end_memory = process.memory_info().rss / (1024 * 1024)

                memory_usage = peak_memory = end_memory - start_memory

                if self.rate_limiter and result.status_code:
                    if not self.rate_limiter.update_delay(url, result.status_code):
                        error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
                        if self.monitor:
                            self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
                        return CrawlerTaskResult(
                            task_id=task_id,
                            url=url,
                            result=result,
                            memory_usage=memory_usage,
                            peak_memory=peak_memory,
                            start_time=start_time,
                            end_time=time.time(),
                            error_message=error_message,
                        )

                if not result.success:
                    error_message = result.error_message
                    if self.monitor:
                        self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
                elif self.monitor:
                    self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)

        except Exception as e:
            error_message = str(e)
            if self.monitor:
                self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
            result = CrawlResult(
                url=url, html="", metadata={}, success=False, error_message=str(e)
            )

        finally:
            end_time = time.time()
            if self.monitor:
                self.monitor.update_task(
                    task_id,
                    end_time=end_time,
                    memory_usage=memory_usage,
                    peak_memory=peak_memory,
                    error_message=error_message,
                )

        return CrawlerTaskResult(
            task_id=task_id,
            url=url,
            result=result,
            memory_usage=memory_usage,
            peak_memory=peak_memory,
            start_time=start_time,
            end_time=end_time,
            error_message=error_message,
        )

    async def run_urls(
        self,
        crawler: AsyncWebCrawler,  # noqa: F821
        urls: List[str],
        config: CrawlerRunConfig,
    ) -> List[CrawlerTaskResult]:
        self.crawler = crawler
        if self.monitor:
            self.monitor.start()

        try:
            semaphore = asyncio.Semaphore(self.semaphore_count)
            tasks = []

            for url in urls:
                task_id = str(uuid.uuid4())
                if self.monitor:
                    self.monitor.add_task(task_id, url)
                task = asyncio.create_task(
                    self.crawl_url(url, config, task_id, semaphore)
                )
                tasks.append(task)

            return await asyncio.gather(*tasks, return_exceptions=True)
        finally:
            if self.monitor:
                self.monitor.stop()
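A minimal end-to-end sketch of driving the memory-adaptive dispatcher above (illustrative, not part of the diff; the top-level imports assume crawl4ai's public API re-exports these names, as the wildcard import in async_webcrawler.py below suggests):

import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.async_dispatcher import MemoryAdaptiveDispatcher, RateLimiter

async def demo():
    dispatcher = MemoryAdaptiveDispatcher(
        memory_threshold_percent=85.0,
        max_session_permit=10,
        rate_limiter=RateLimiter(base_delay=(1.0, 2.0)),
    )
    async with AsyncWebCrawler() as crawler:
        task_results = await dispatcher.run_urls(
            ["https://example.com", "https://example.org"],
            crawler,
            CrawlerRunConfig(),
        )
    for tr in task_results:
        print(tr.url, tr.result.success, f"{tr.memory_usage:.1f} MB")

asyncio.run(demo())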
crawl4ai/async_logger.py (new file, 373 lines)
@@ -0,0 +1,373 @@
from abc import ABC, abstractmethod
from enum import Enum
from typing import Optional, Dict, Any, List
import os
from datetime import datetime
from urllib.parse import unquote
from rich.console import Console
from rich.text import Text
from .utils import create_box_message


class LogLevel(Enum):
    DEFAULT = 0
    DEBUG = 1
    INFO = 2
    SUCCESS = 3
    WARNING = 4
    ERROR = 5
    CRITICAL = 6
    ALERT = 7
    NOTICE = 8
    EXCEPTION = 9
    FATAL = 10

    def __str__(self):
        return self.name.lower()


class LogColor(str, Enum):
    """Enum for log colors."""

    DEBUG = "bright_black"
    INFO = "cyan"
    SUCCESS = "green"
    WARNING = "yellow"
    ERROR = "red"
    CYAN = "cyan"
    GREEN = "green"
    YELLOW = "yellow"
    MAGENTA = "magenta"
    DIM_MAGENTA = "dim magenta"

    def __str__(self):
        """Automatically convert rich color to string."""
        return self.value


class AsyncLoggerBase(ABC):
    @abstractmethod
    def debug(self, message: str, tag: str = "DEBUG", **kwargs):
        pass

    @abstractmethod
    def info(self, message: str, tag: str = "INFO", **kwargs):
        pass

    @abstractmethod
    def success(self, message: str, tag: str = "SUCCESS", **kwargs):
        pass

    @abstractmethod
    def warning(self, message: str, tag: str = "WARNING", **kwargs):
        pass

    @abstractmethod
    def error(self, message: str, tag: str = "ERROR", **kwargs):
        pass

    @abstractmethod
    def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
        pass

    @abstractmethod
    def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
        pass


class AsyncLogger(AsyncLoggerBase):
    """
    Asynchronous logger with support for colored console output and file logging.
    Supports templated messages with colored components.
    """

    DEFAULT_ICONS = {
        "INIT": "→",
        "READY": "✓",
        "FETCH": "↓",
        "SCRAPE": "◆",
        "EXTRACT": "■",
        "COMPLETE": "●",
        "ERROR": "×",
        "DEBUG": "⋯",
        "INFO": "ℹ",
        "WARNING": "⚠",
        "SUCCESS": "✔",
        "CRITICAL": "‼",
        "ALERT": "⚡",
        "NOTICE": "ℹ",
        "EXCEPTION": "❗",
        "FATAL": "☠",
        "DEFAULT": "•",
    }

    DEFAULT_COLORS = {
        LogLevel.DEBUG: LogColor.DEBUG,
        LogLevel.INFO: LogColor.INFO,
        LogLevel.SUCCESS: LogColor.SUCCESS,
        LogLevel.WARNING: LogColor.WARNING,
        LogLevel.ERROR: LogColor.ERROR,
    }

    def __init__(
        self,
        log_file: Optional[str] = None,
        log_level: LogLevel = LogLevel.DEBUG,
        tag_width: int = 10,
        icons: Optional[Dict[str, str]] = None,
        colors: Optional[Dict[LogLevel, LogColor]] = None,
        verbose: bool = True,
    ):
        """
        Initialize the logger.

        Args:
            log_file: Optional file path for logging
            log_level: Minimum log level to display
            tag_width: Width for tag formatting
            icons: Custom icons for different tags
            colors: Custom colors for different log levels
            verbose: Whether to output to console
        """
        self.log_file = log_file
        self.log_level = log_level
        self.tag_width = tag_width
        self.icons = icons or self.DEFAULT_ICONS
        self.colors = colors or self.DEFAULT_COLORS
        self.verbose = verbose
        self.console = Console()

        # Create log file directory if needed
        if log_file:
            os.makedirs(os.path.dirname(os.path.abspath(log_file)), exist_ok=True)

    def _format_tag(self, tag: str) -> str:
        """Format a tag with consistent width."""
        return f"[{tag}]".ljust(self.tag_width, ".")

    def _get_icon(self, tag: str) -> str:
        """Get the icon for a tag, defaulting to the info icon if not found."""
        return self.icons.get(tag, self.icons["INFO"])

    def _shorten(self, text, length, placeholder="..."):
        """Truncate text in the middle if longer than length, or pad if shorter."""
        if len(text) <= length:
            return text.ljust(length)  # Pad with spaces to reach desired length
        half = (length - len(placeholder)) // 2
        shortened = text[:half] + placeholder + text[-half:]
        return shortened.ljust(length)  # Also pad shortened text to consistent length

    def _write_to_file(self, message: str):
        """Write a message to the log file if configured."""
        if self.log_file:
            text = Text.from_markup(message)
            plain_text = text.plain
            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
            with open(self.log_file, "a", encoding="utf-8") as f:
                f.write(f"[{timestamp}] {plain_text}\n")

    def _log(
        self,
        level: LogLevel,
        message: str,
        tag: str,
        params: Optional[Dict[str, Any]] = None,
        colors: Optional[Dict[str, LogColor]] = None,
        boxes: Optional[List[str]] = None,
        base_color: Optional[LogColor] = None,
        **kwargs,
    ):
        """
        Core logging method that handles message formatting and output.

        Args:
            level: Log level for this message
            message: Message template string
            tag: Tag for the message
            params: Parameters to format into the message
            colors: Color overrides for specific parameters
            boxes: Box overrides for specific parameters
            base_color: Base color for the entire message
        """
        if level.value < self.log_level.value:
            return

        # Escape brackets to avoid conflicts with rich markup
        parsed_message = message.replace("[", "[[").replace("]", "]]")
        if params:
            # FIXME: When a template uses a format spec (e.g. {value:.2f} with
            # value 0.23333), the rendered text contains "0.23" while the
            # replace() below searches for "0.23333", so colors and boxes may
            # not be applied to that parameter.
            formatted_message = parsed_message.format(**params)
            for key, value in params.items():
                # Escape brackets in the value so rich doesn't treat them as markup
                value_str = str(value).replace("[", "[[").replace("]", "]]")
                # Apply a color override if one was given for this key
                if colors and key in colors:
                    color_str = f"[{colors[key]}]{value_str}[/{colors[key]}]"
                    formatted_message = formatted_message.replace(value_str, color_str)
                    value_str = color_str

                # Apply a box override if one was given for this key
                if boxes and key in boxes:
                    formatted_message = formatted_message.replace(
                        value_str, create_box_message(value_str, type=str(level))
                    )

        else:
            formatted_message = parsed_message

        # Construct the full log line
        color: LogColor = base_color or self.colors[level]
        log_line = f"[{color}]{self._format_tag(tag)} {self._get_icon(tag)} {formatted_message} [/{color}]"

        # Output to console if verbose
        if self.verbose or kwargs.get("force_verbose", False):
            self.console.print(log_line)

        # Write to file if configured
        self._write_to_file(log_line)

    def debug(self, message: str, tag: str = "DEBUG", **kwargs):
        """Log a debug message."""
        self._log(LogLevel.DEBUG, message, tag, **kwargs)

    def info(self, message: str, tag: str = "INFO", **kwargs):
        """Log an info message."""
        self._log(LogLevel.INFO, message, tag, **kwargs)

    def success(self, message: str, tag: str = "SUCCESS", **kwargs):
        """Log a success message."""
        self._log(LogLevel.SUCCESS, message, tag, **kwargs)

    def warning(self, message: str, tag: str = "WARNING", **kwargs):
        """Log a warning message."""
        self._log(LogLevel.WARNING, message, tag, **kwargs)

    def critical(self, message: str, tag: str = "CRITICAL", **kwargs):
        """Log a critical message."""
        self._log(LogLevel.ERROR, message, tag, **kwargs)

    def exception(self, message: str, tag: str = "EXCEPTION", **kwargs):
        """Log an exception message."""
        self._log(LogLevel.ERROR, message, tag, **kwargs)

    def fatal(self, message: str, tag: str = "FATAL", **kwargs):
        """Log a fatal message."""
        self._log(LogLevel.ERROR, message, tag, **kwargs)

    def alert(self, message: str, tag: str = "ALERT", **kwargs):
        """Log an alert message."""
        self._log(LogLevel.ERROR, message, tag, **kwargs)

    def notice(self, message: str, tag: str = "NOTICE", **kwargs):
        """Log a notice message."""
        self._log(LogLevel.INFO, message, tag, **kwargs)

    def error(self, message: str, tag: str = "ERROR", **kwargs):
        """Log an error message."""
        self._log(LogLevel.ERROR, message, tag, **kwargs)

    def url_status(
        self,
        url: str,
        success: bool,
        timing: float,
        tag: str = "FETCH",
        url_length: int = 100,
    ):
        """
        Convenience method for logging URL fetch status.

        Args:
            url: The URL being processed
            success: Whether the operation was successful
            timing: Time taken for the operation
            tag: Tag for the message
            url_length: Maximum length for URL in log
        """
        decoded_url = unquote(url)
        readable_url = self._shorten(decoded_url, url_length)
        self._log(
            level=LogLevel.SUCCESS if success else LogLevel.ERROR,
            message="{url} | {status} | ⏱: {timing:.2f}s",
            tag=tag,
            params={
                "url": readable_url,
                "status": "✓" if success else "✗",
                "timing": timing,
            },
            colors={
                "status": LogColor.SUCCESS if success else LogColor.ERROR,
                "timing": LogColor.WARNING,
            },
        )

    def error_status(
        self, url: str, error: str, tag: str = "ERROR", url_length: int = 50
    ):
        """
        Convenience method for logging error status.

        Args:
            url: The URL being processed
            error: Error message
            tag: Tag for the message
            url_length: Maximum length for URL in log
        """
        decoded_url = unquote(url)
        readable_url = self._shorten(decoded_url, url_length)
        self._log(
            level=LogLevel.ERROR,
            message="{url} | Error: {error}",
            tag=tag,
            params={"url": readable_url, "error": error},
        )


class AsyncFileLogger(AsyncLoggerBase):
    """
    File-only asynchronous logger that writes logs to a specified file.
    """

    def __init__(self, log_file: str):
        """
        Initialize the file logger.

        Args:
            log_file: File path for logging
        """
        self.log_file = log_file
        os.makedirs(os.path.dirname(os.path.abspath(log_file)), exist_ok=True)

    def _write_to_file(self, level: str, message: str, tag: str):
        """Write a message to the log file."""
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
        with open(self.log_file, "a", encoding="utf-8") as f:
            f.write(f"[{timestamp}] [{level}] [{tag}] {message}\n")

    def debug(self, message: str, tag: str = "DEBUG", **kwargs):
        """Log a debug message to file."""
        self._write_to_file("DEBUG", message, tag)

    def info(self, message: str, tag: str = "INFO", **kwargs):
        """Log an info message to file."""
        self._write_to_file("INFO", message, tag)

    def success(self, message: str, tag: str = "SUCCESS", **kwargs):
        """Log a success message to file."""
        self._write_to_file("SUCCESS", message, tag)

    def warning(self, message: str, tag: str = "WARNING", **kwargs):
        """Log a warning message to file."""
        self._write_to_file("WARNING", message, tag)

    def error(self, message: str, tag: str = "ERROR", **kwargs):
        """Log an error message to file."""
        self._write_to_file("ERROR", message, tag)

    def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
        """Log URL fetch status to file."""
        status = "SUCCESS" if success else "FAILED"
        message = f"{url[:url_length]}... | Status: {status} | Time: {timing:.2f}s"
        self._write_to_file("URL_STATUS", message, tag)

    def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
        """Log error status to file."""
        message = f"{url[:url_length]}... | Error: {error}"
        self._write_to_file("ERROR", message, tag)
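A short usage sketch of the templated logging above (illustrative; the message, tag, and params mirror the proxy-switch call that async_webcrawler.py below makes, while the log path is made up):

from crawl4ai.async_logger import AsyncLogger, LogColor, LogLevel

logger = AsyncLogger(log_file="/tmp/crawl4ai-demo.log", log_level=LogLevel.DEBUG)
# "{proxy}" is filled from params; the color override applies to that value only.
logger.info(
    message="Switch proxy: {proxy}",
    tag="PROXY",
    params={"proxy": "http://127.0.0.1:8080"},
    colors={"proxy": LogColor.YELLOW},
)
logger.url_status(url="https://example.com/some/page", success=True, timing=1.234)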
crawl4ai/async_url_seeder.py (new file, 1295 lines)
(File diff suppressed because it is too large.)
crawl4ai/async_webcrawler.py (new file, 840 lines)
@@ -0,0 +1,840 @@
|
||||
from .__version__ import __version__ as crawl4ai_version
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Optional, List
|
||||
import json
|
||||
import asyncio
|
||||
|
||||
# from contextlib import nullcontext, asynccontextmanager
|
||||
from contextlib import asynccontextmanager
|
||||
from .models import (
|
||||
CrawlResult,
|
||||
MarkdownGenerationResult,
|
||||
DispatchResult,
|
||||
ScrapingResult,
|
||||
CrawlResultContainer,
|
||||
RunManyReturn
|
||||
)
|
||||
from .async_database import async_db_manager
|
||||
from .chunking_strategy import * # noqa: F403
|
||||
from .chunking_strategy import IdentityChunking
|
||||
from .content_filter_strategy import * # noqa: F403
|
||||
from .extraction_strategy import * # noqa: F403
|
||||
from .extraction_strategy import NoExtractionStrategy
|
||||
from .async_crawler_strategy import (
|
||||
AsyncCrawlerStrategy,
|
||||
AsyncPlaywrightCrawlerStrategy,
|
||||
AsyncCrawlResponse,
|
||||
)
|
||||
from .cache_context import CacheMode, CacheContext
|
||||
from .markdown_generation_strategy import (
|
||||
DefaultMarkdownGenerator,
|
||||
MarkdownGenerationStrategy,
|
||||
)
|
||||
from .deep_crawling import DeepCrawlDecorator
|
||||
from .async_logger import AsyncLogger, AsyncLoggerBase
|
||||
from .async_configs import BrowserConfig, CrawlerRunConfig, ProxyConfig, SeedingConfig
|
||||
from .async_dispatcher import * # noqa: F403
|
||||
from .async_dispatcher import BaseDispatcher, MemoryAdaptiveDispatcher, RateLimiter
|
||||
from .async_url_seeder import AsyncUrlSeeder
|
||||
|
||||
from .utils import (
|
||||
sanitize_input_encode,
|
||||
InvalidCSSSelectorError,
|
||||
fast_format_html,
|
||||
get_error_context,
|
||||
RobotsParser,
|
||||
preprocess_html_for_schema,
|
||||
)
|
||||
|
||||
|
||||
class AsyncWebCrawler:
|
||||
"""
|
||||
Asynchronous web crawler with flexible caching capabilities.
|
||||
|
||||
There are two ways to use the crawler:
|
||||
|
||||
1. Using context manager (recommended for simple cases):
|
||||
```python
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
```
|
||||
|
||||
2. Using explicit lifecycle management (recommended for long-running applications):
|
||||
```python
|
||||
crawler = AsyncWebCrawler()
|
||||
await crawler.start()
|
||||
|
||||
# Use the crawler multiple times
|
||||
result1 = await crawler.arun(url="https://example.com")
|
||||
result2 = await crawler.arun(url="https://another.com")
|
||||
|
||||
await crawler.close()
|
||||
```
|
||||
|
||||
Attributes:
|
||||
browser_config (BrowserConfig): Configuration object for browser settings.
|
||||
crawler_strategy (AsyncCrawlerStrategy): Strategy for crawling web pages.
|
||||
logger (AsyncLogger): Logger instance for recording events and errors.
|
||||
crawl4ai_folder (str): Directory for storing cache.
|
||||
base_directory (str): Base directory for storing cache.
|
||||
ready (bool): Whether the crawler is ready for use.
|
||||
|
||||
Methods:
|
||||
start(): Start the crawler explicitly without using context manager.
|
||||
close(): Close the crawler explicitly without using context manager.
|
||||
arun(): Run the crawler for a single source: URL (web, local file, or raw HTML).
|
||||
awarmup(): Perform warmup sequence.
|
||||
arun_many(): Run the crawler for multiple sources.
|
||||
aprocess_html(): Process HTML content.
|
||||
|
||||
Typical Usage:
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
result = await crawler.arun(url="https://example.com")
|
||||
print(result.markdown)
|
||||
|
||||
Using configuration:
|
||||
browser_config = BrowserConfig(browser_type="chromium", headless=True)
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
crawler_config = CrawlerRunConfig(
|
||||
cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
result = await crawler.arun(url="https://example.com", config=crawler_config)
|
||||
print(result.markdown)
|
||||
"""
|
||||
|
||||
_domain_last_hit = {}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
crawler_strategy: AsyncCrawlerStrategy = None,
|
||||
config: BrowserConfig = None,
|
||||
base_directory: str = str(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home())),
|
||||
thread_safe: bool = False,
|
||||
logger: AsyncLoggerBase = None,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
Initialize the AsyncWebCrawler.
|
||||
|
||||
Args:
|
||||
crawler_strategy: Strategy for crawling web pages. Default AsyncPlaywrightCrawlerStrategy
|
||||
config: Configuration object for browser settings. Default BrowserConfig()
|
||||
base_directory: Base directory for storing cache
|
||||
thread_safe: Whether to use thread-safe operations
|
||||
**kwargs: Additional arguments for backwards compatibility
|
||||
"""
|
||||
# Handle browser configuration
|
||||
browser_config = config or BrowserConfig()
|
||||
|
||||
self.browser_config = browser_config
|
||||
|
||||
# Initialize logger first since other components may need it
|
||||
self.logger = logger or AsyncLogger(
|
||||
log_file=os.path.join(base_directory, ".crawl4ai", "crawler.log"),
|
||||
verbose=self.browser_config.verbose,
|
||||
tag_width=10,
|
||||
)
|
||||
|
||||
# Initialize crawler strategy
|
||||
params = {k: v for k, v in kwargs.items() if k in [
|
||||
"browser_config", "logger"]}
|
||||
self.crawler_strategy = crawler_strategy or AsyncPlaywrightCrawlerStrategy(
|
||||
browser_config=browser_config,
|
||||
logger=self.logger,
|
||||
**params, # Pass remaining kwargs for backwards compatibility
|
||||
)
|
||||
|
||||
# Thread safety setup
|
||||
self._lock = asyncio.Lock() if thread_safe else None
|
||||
|
||||
# Initialize directories
|
||||
self.crawl4ai_folder = os.path.join(base_directory, ".crawl4ai")
|
||||
os.makedirs(self.crawl4ai_folder, exist_ok=True)
|
||||
os.makedirs(f"{self.crawl4ai_folder}/cache", exist_ok=True)
|
||||
|
||||
# Initialize robots parser
|
||||
self.robots_parser = RobotsParser()
|
||||
|
||||
self.ready = False
|
||||
|
||||
# Decorate arun method with deep crawling capabilities
|
||||
self._deep_handler = DeepCrawlDecorator(self)
|
||||
self.arun = self._deep_handler(self.arun)
|
||||
|
||||
self.url_seeder: Optional[AsyncUrlSeeder] = None
|
||||
|
||||
async def start(self):
|
||||
"""
|
||||
Start the crawler explicitly without using context manager.
|
||||
This is equivalent to using 'async with' but gives more control over the lifecycle.
|
||||
Returns:
|
||||
AsyncWebCrawler: The initialized crawler instance
|
||||
"""
|
||||
await self.crawler_strategy.__aenter__()
|
||||
self.logger.info(f"Crawl4AI {crawl4ai_version}", tag="INIT")
|
||||
self.ready = True
|
||||
return self
|
||||
|
||||
async def close(self):
|
||||
"""
|
||||
Close the crawler explicitly without using context manager.
|
||||
This should be called when you're done with the crawler if you used start().
|
||||
|
||||
This method will:
|
||||
1. Clean up browser resources
|
||||
2. Close any open pages and contexts
|
||||
"""
|
||||
await self.crawler_strategy.__aexit__(None, None, None)
|
||||
|
||||
async def __aenter__(self):
|
||||
return await self.start()
|
||||
|
||||
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
||||
await self.close()
|
||||
|
||||
@asynccontextmanager
|
||||
async def nullcontext(self):
|
||||
"""异步空上下文管理器"""
|
||||
yield
|
||||
|
||||
async def arun(
|
||||
self,
|
||||
url: str,
|
||||
config: CrawlerRunConfig = None,
|
||||
**kwargs,
|
||||
) -> RunManyReturn:
|
||||
"""
|
||||
Runs the crawler for a single source: URL (web, local file, or raw HTML).
|
||||
|
||||
Migration Guide:
|
||||
Old way (deprecated):
|
||||
result = await crawler.arun(
|
||||
url="https://example.com",
|
||||
word_count_threshold=200,
|
||||
screenshot=True,
|
||||
...
|
||||
)
|
||||
|
||||
New way (recommended):
|
||||
config = CrawlerRunConfig(
|
||||
word_count_threshold=200,
|
||||
screenshot=True,
|
||||
...
|
||||
)
|
||||
result = await crawler.arun(url="https://example.com", crawler_config=config)
|
||||
|
||||
Args:
|
||||
url: The URL to crawl (http://, https://, file://, or raw:)
|
||||
crawler_config: Configuration object controlling crawl behavior
|
||||
[other parameters maintained for backwards compatibility]
|
||||
|
||||
Returns:
|
||||
CrawlResult: The result of crawling and processing
|
||||
"""
|
||||
# Auto-start if not ready
|
||||
if not self.ready:
|
||||
await self.start()
|
||||
|
||||
config = config or CrawlerRunConfig()
|
||||
if not isinstance(url, str) or not url:
|
||||
raise ValueError(
|
||||
"Invalid URL, make sure the URL is a non-empty string")
|
||||
|
||||
async with self._lock or self.nullcontext():
|
||||
try:
|
||||
self.logger.verbose = config.verbose
|
||||
|
||||
# Default to ENABLED if no cache mode specified
|
||||
if config.cache_mode is None:
|
||||
config.cache_mode = CacheMode.ENABLED
|
||||
|
||||
# Create cache context
|
||||
cache_context = CacheContext(url, config.cache_mode, False)
|
||||
|
||||
# Initialize processing variables
|
||||
async_response: AsyncCrawlResponse = None
|
||||
cached_result: CrawlResult = None
|
||||
screenshot_data = None
|
||||
pdf_data = None
|
||||
extracted_content = None
|
||||
start_time = time.perf_counter()
|
||||
|
||||
# Try to get cached result if appropriate
|
||||
if cache_context.should_read():
|
||||
cached_result = await async_db_manager.aget_cached_url(url)
|
||||
|
||||
if cached_result:
|
||||
html = sanitize_input_encode(cached_result.html)
|
||||
extracted_content = sanitize_input_encode(
|
||||
cached_result.extracted_content or ""
|
||||
)
|
||||
extracted_content = (
|
||||
None
|
||||
if not extracted_content or extracted_content == "[]"
|
||||
else extracted_content
|
||||
)
|
||||
# If screenshot is requested but its not in cache, then set cache_result to None
|
||||
screenshot_data = cached_result.screenshot
|
||||
pdf_data = cached_result.pdf
|
||||
# if config.screenshot and not screenshot or config.pdf and not pdf:
|
||||
if config.screenshot and not screenshot_data:
|
||||
cached_result = None
|
||||
|
||||
if config.pdf and not pdf_data:
|
||||
cached_result = None
|
||||
|
||||
self.logger.url_status(
|
||||
url=cache_context.display_url,
|
||||
success=bool(html),
|
||||
timing=time.perf_counter() - start_time,
|
||||
tag="FETCH",
|
||||
)
|
||||
|
||||
# Update proxy configuration from rotation strategy if available
|
||||
if config and config.proxy_rotation_strategy:
|
||||
next_proxy: ProxyConfig = await config.proxy_rotation_strategy.get_next_proxy()
|
||||
if next_proxy:
|
||||
self.logger.info(
|
||||
message="Switch proxy: {proxy}",
|
||||
tag="PROXY",
|
||||
params={"proxy": next_proxy.server}
|
||||
)
|
||||
config.proxy_config = next_proxy
|
||||
# config = config.clone(proxy_config=next_proxy)
|
||||
|
||||
# Fetch fresh content if needed
|
||||
if not cached_result or not html:
|
||||
t1 = time.perf_counter()
|
||||
|
||||
if config.user_agent:
|
||||
self.crawler_strategy.update_user_agent(
|
||||
config.user_agent)
|
||||
|
||||
# Check robots.txt if enabled
|
||||
if config and config.check_robots_txt:
|
||||
if not await self.robots_parser.can_fetch(
|
||||
url, self.browser_config.user_agent
|
||||
):
|
||||
return CrawlResult(
|
||||
url=url,
|
||||
html="",
|
||||
success=False,
|
||||
status_code=403,
|
||||
error_message="Access denied by robots.txt",
|
||||
response_headers={
|
||||
"X-Robots-Status": "Blocked by robots.txt"
|
||||
},
|
||||
)
|
||||
|
||||
##############################
|
||||
# Call CrawlerStrategy.crawl #
|
||||
##############################
|
||||
async_response = await self.crawler_strategy.crawl(
|
||||
url,
|
||||
config=config, # Pass the entire config object
|
||||
)
|
||||
|
||||
html = sanitize_input_encode(async_response.html)
|
||||
screenshot_data = async_response.screenshot
|
||||
pdf_data = async_response.pdf_data
|
||||
js_execution_result = async_response.js_execution_result
|
||||
|
||||
t2 = time.perf_counter()
|
||||
self.logger.url_status(
|
||||
url=cache_context.display_url,
|
||||
success=bool(html),
|
||||
timing=t2 - t1,
|
||||
tag="FETCH",
|
||||
)
|
||||
|
||||
###############################################################
|
||||
# Process the HTML content, Call CrawlerStrategy.process_html #
|
||||
###############################################################
|
||||
crawl_result: CrawlResult = await self.aprocess_html(
|
||||
url=url,
|
||||
html=html,
|
||||
extracted_content=extracted_content,
|
||||
config=config, # Pass the config object instead of individual parameters
|
||||
screenshot_data=screenshot_data,
|
||||
pdf_data=pdf_data,
|
||||
verbose=config.verbose,
|
||||
is_raw_html=True if url.startswith("raw:") else False,
|
||||
redirected_url=async_response.redirected_url,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
crawl_result.status_code = async_response.status_code
|
||||
crawl_result.redirected_url = async_response.redirected_url or url
|
||||
crawl_result.response_headers = async_response.response_headers
|
||||
crawl_result.downloaded_files = async_response.downloaded_files
|
||||
crawl_result.js_execution_result = js_execution_result
|
||||
crawl_result.mhtml = async_response.mhtml_data
|
||||
crawl_result.ssl_certificate = async_response.ssl_certificate
|
||||
# Add captured network and console data if available
|
||||
crawl_result.network_requests = async_response.network_requests
|
||||
crawl_result.console_messages = async_response.console_messages
|
||||
|
||||
crawl_result.success = bool(html)
|
||||
crawl_result.session_id = getattr(
|
||||
config, "session_id", None)
|
||||
|
||||
self.logger.url_status(
|
||||
url=cache_context.display_url,
|
||||
success=crawl_result.success,
|
||||
timing=time.perf_counter() - start_time,
|
||||
tag="COMPLETE",
|
||||
)
|
||||
|
||||
# Update cache if appropriate
|
||||
if cache_context.should_write() and not bool(cached_result):
|
||||
await async_db_manager.acache_url(crawl_result)
|
||||
|
||||
return CrawlResultContainer(crawl_result)
|
||||
|
||||
else:
|
||||
self.logger.url_status(
|
||||
url=cache_context.display_url,
|
||||
success=True,
|
||||
timing=time.perf_counter() - start_time,
|
||||
tag="COMPLETE"
|
||||
)
|
||||
cached_result.success = bool(html)
|
||||
cached_result.session_id = getattr(
|
||||
config, "session_id", None)
|
||||
cached_result.redirected_url = cached_result.redirected_url or url
|
||||
return CrawlResultContainer(cached_result)
|
||||
|
||||
except Exception as e:
|
||||
error_context = get_error_context(sys.exc_info())
|
||||
|
||||
error_message = (
|
||||
f"Unexpected error in _crawl_web at line {error_context['line_no']} "
|
||||
f"in {error_context['function']} ({error_context['filename']}):\n"
|
||||
f"Error: {str(e)}\n\n"
|
||||
f"Code context:\n{error_context['code_context']}"
|
||||
)
|
||||
|
||||
self.logger.error_status(
|
||||
url=url,
|
||||
error=error_message,
|
||||
tag="ERROR",
|
||||
)
|
||||
|
||||
return CrawlResultContainer(
|
||||
CrawlResult(
|
||||
url=url, html="", success=False, error_message=error_message
|
||||
)
|
||||
)

    async def aprocess_html(
        self,
        url: str,
        html: str,
        extracted_content: str,
        config: CrawlerRunConfig,
        screenshot_data: str,
        pdf_data: str,
        verbose: bool,
        **kwargs,
    ) -> CrawlResult:
        """
        Process HTML content using the provided configuration.

        Args:
            url: The URL being processed
            html: Raw HTML content
            extracted_content: Previously extracted content (if any)
            config: Configuration object controlling processing behavior
            screenshot_data: Screenshot data (if any)
            pdf_data: PDF data (if any)
            verbose: Whether to enable verbose logging
            **kwargs: Additional parameters for backwards compatibility

        Returns:
            CrawlResult: Processed result containing extracted and formatted content
        """
        cleaned_html = ""
        try:
            _url = url if not kwargs.get("is_raw_html", False) else "Raw HTML"
            t1 = time.perf_counter()

            # Get the scraping strategy and ensure it has a logger
            scraping_strategy = config.scraping_strategy
            if not scraping_strategy.logger:
                scraping_strategy.logger = self.logger

            # Process HTML content
            params = config.__dict__.copy()
            params.pop("url", None)
            # Add keys from kwargs that don't already exist in params
            params.update({k: v for k, v in kwargs.items() if k not in params.keys()})

            ################################
            # Scraping Strategy Execution  #
            ################################
            result: ScrapingResult = scraping_strategy.scrap(url, html, **params)

            if result is None:
                raise ValueError(
                    f"Process HTML, Failed to extract content from the website: {url}"
                )

        except InvalidCSSSelectorError as e:
            raise ValueError(str(e))
        except Exception as e:
            raise ValueError(
                f"Process HTML, Failed to extract content from the website: {url}, error: {str(e)}"
            )

        # Extract results - handle both dict and ScrapingResult
        if isinstance(result, dict):
            cleaned_html = sanitize_input_encode(result.get("cleaned_html", ""))
            media = result.get("media", {})
            tables = media.pop("tables", []) if isinstance(media, dict) else []
            links = result.get("links", {})
            metadata = result.get("metadata", {})
        else:
            cleaned_html = sanitize_input_encode(result.cleaned_html)
            media = result.media.model_dump()
            tables = media.pop("tables", [])
            links = result.links.model_dump()
            metadata = result.metadata

        fit_html = preprocess_html_for_schema(
            html_content=html, text_threshold=500, max_size=300_000
        )

        ################################
        #      Generate Markdown       #
        ################################
        markdown_generator: Optional[MarkdownGenerationStrategy] = (
            config.markdown_generator or DefaultMarkdownGenerator()
        )

        # --- SELECT HTML SOURCE BASED ON CONTENT_SOURCE ---
        # Get the desired source from the generator config, default to 'cleaned_html'
        selected_html_source = getattr(markdown_generator, 'content_source', 'cleaned_html')

        # Define the source selection logic using dict dispatch
        html_source_selector = {
            "raw_html": lambda: html,              # The original raw HTML
            "cleaned_html": lambda: cleaned_html,  # The HTML after the scraping strategy
            "fit_html": lambda: fit_html,          # The HTML after preprocessing for schema
        }

        markdown_input_html = cleaned_html  # Default to cleaned_html

        try:
            # Get the appropriate lambda, defaulting to cleaned_html if the key is not found
            source_lambda = html_source_selector.get(
                selected_html_source, lambda: cleaned_html
            )
            # Execute the lambda to get the selected HTML
            markdown_input_html = source_lambda()

            # Log which source is being used (optional, but helpful for debugging)
            # if self.logger and verbose:
            #     actual_source_used = selected_html_source if selected_html_source in html_source_selector else 'cleaned_html (default)'
            #     self.logger.debug(f"Using '{actual_source_used}' as source for Markdown generation for {url}", tag="MARKDOWN_SRC")

        except Exception as e:
            # Handle potential errors, especially from preprocess_html_for_schema
            if self.logger:
                self.logger.warning(
                    f"Error getting/processing '{selected_html_source}' for markdown source: {e}. Falling back to cleaned_html.",
                    tag="MARKDOWN_SRC",
                )
            # Ensure markdown_input_html is still the default cleaned_html in case of error
            markdown_input_html = cleaned_html
        # --- END: HTML SOURCE SELECTION ---

        # Uncomment if by default we want to use PruningContentFilter
        # if not config.content_filter and not markdown_generator.content_filter:
        #     markdown_generator.content_filter = PruningContentFilter()

        markdown_result: MarkdownGenerationResult = markdown_generator.generate_markdown(
            input_html=markdown_input_html,
            base_url=params.get("redirected_url", url),
            # html2text_options=kwargs.get('html2text', {})
        )

        # Log processing completion
        self.logger.url_status(
            url=_url,
            success=True,
            timing=int((time.perf_counter() - t1) * 1000) / 1000,
            tag="SCRAPE",
        )
        # self.logger.info(
        #     message="{url:.50}... | Time: {timing}s",
        #     tag="SCRAPE",
        #     params={"url": _url, "timing": int((time.perf_counter() - t1) * 1000) / 1000},
        # )

        ##################################
        # Structured Content Extraction  #
        ##################################
        if (
            not bool(extracted_content)
            and config.extraction_strategy
            and not isinstance(config.extraction_strategy, NoExtractionStrategy)
        ):
            t1 = time.perf_counter()
            # Choose content based on input_format
            content_format = config.extraction_strategy.input_format
            if content_format == "fit_markdown" and not markdown_result.fit_markdown:
                self.logger.warning(
                    message="Fit markdown requested but not available. Falling back to raw markdown.",
                    tag="EXTRACT",
                    params={"url": _url},
                )
                content_format = "markdown"

            content = {
                "markdown": markdown_result.raw_markdown,
                "html": html,
                "fit_html": fit_html,
                "cleaned_html": cleaned_html,
                "fit_markdown": markdown_result.fit_markdown,
            }.get(content_format, markdown_result.raw_markdown)

            # Use IdentityChunking for HTML input, otherwise use the provided chunking strategy
            chunking = (
                IdentityChunking()
                if content_format in ["html", "cleaned_html", "fit_html"]
                else config.chunking_strategy
            )
            sections = chunking.chunk(content)
            extracted_content = config.extraction_strategy.run(url, sections)
            extracted_content = json.dumps(
                extracted_content, indent=4, default=str, ensure_ascii=False
            )

            # Log extraction completion
            self.logger.info(
                message="Completed for {url:.50}... | Time: {timing}s",
                tag="EXTRACT",
                params={"url": _url, "timing": time.perf_counter() - t1},
            )

        # Apply HTML formatting if requested
        if config.prettiify:
            cleaned_html = fast_format_html(cleaned_html)

        # Return complete crawl result
        return CrawlResult(
            url=url,
            html=html,
            fit_html=fit_html,
            cleaned_html=cleaned_html,
            markdown=markdown_result,
            media=media,
            tables=tables,  # NEW
            links=links,
            metadata=metadata,
            screenshot=screenshot_data,
            pdf=pdf_data,
            extracted_content=extracted_content,
            success=True,
            error_message="",
        )
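    # Illustrative sketch (not part of the original class): the dict dispatch
    # above lets the markdown generator consume any of the three HTML variants
    # (raw, cleaned, or schema-fit). Assuming DefaultMarkdownGenerator accepts a
    # `content_source` argument, as the getattr above implies, a caller could
    # select `fit_html` like this (import paths are assumptions):
    #
    #     from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
    #     from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
    #
    #     config = CrawlerRunConfig(
    #         markdown_generator=DefaultMarkdownGenerator(content_source="fit_html"),
    #     )
    #     async with AsyncWebCrawler() as crawler:
    #         result = await crawler.arun(url="https://example.com", config=config)
    #         print(result.markdown.raw_markdown[:300])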

    async def arun_many(
        self,
        urls: List[str],
        config: Optional[CrawlerRunConfig] = None,
        dispatcher: Optional[BaseDispatcher] = None,
        # Legacy parameters maintained for backwards compatibility
        # word_count_threshold=MIN_WORD_THRESHOLD,
        # extraction_strategy: ExtractionStrategy = None,
        # chunking_strategy: ChunkingStrategy = RegexChunking(),
        # content_filter: RelevantContentFilter = None,
        # cache_mode: Optional[CacheMode] = None,
        # bypass_cache: bool = False,
        # css_selector: str = None,
        # screenshot: bool = False,
        # pdf: bool = False,
        # user_agent: str = None,
        # verbose=True,
        **kwargs,
    ) -> RunManyReturn:
        """
        Runs the crawler for multiple URLs concurrently using a configurable dispatcher strategy.

        Args:
            urls: List of URLs to crawl
            config: Configuration object controlling crawl behavior for all URLs
            dispatcher: The dispatcher strategy instance to use. Defaults to MemoryAdaptiveDispatcher
            [other parameters maintained for backwards compatibility]

        Returns:
            Union[List[CrawlResult], AsyncGenerator[CrawlResult, None]]:
                Either a list of all results or an async generator yielding results

        Examples:

            # Batch processing (default)
            results = await crawler.arun_many(
                urls=["https://example1.com", "https://example2.com"],
                config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
            )
            for result in results:
                print(f"Processed {result.url}: {len(result.markdown)} chars")

            # Streaming results
            async for result in await crawler.arun_many(
                urls=["https://example1.com", "https://example2.com"],
                config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS, stream=True),
            ):
                print(f"Processed {result.url}: {len(result.markdown)} chars")
        """
        config = config or CrawlerRunConfig()
        # if config is None:
        #     config = CrawlerRunConfig(
        #         word_count_threshold=word_count_threshold,
        #         extraction_strategy=extraction_strategy,
        #         chunking_strategy=chunking_strategy,
        #         content_filter=content_filter,
        #         cache_mode=cache_mode,
        #         bypass_cache=bypass_cache,
        #         css_selector=css_selector,
        #         screenshot=screenshot,
        #         pdf=pdf,
        #         verbose=verbose,
        #         **kwargs,
        #     )

        if dispatcher is None:
            dispatcher = MemoryAdaptiveDispatcher(
                rate_limiter=RateLimiter(
                    base_delay=(1.0, 3.0), max_delay=60.0, max_retries=3
                ),
            )

        def transform_result(task_result):
            # setattr returns None, so the `or` always yields the mutated result
            return (
                setattr(
                    task_result.result,
                    "dispatch_result",
                    DispatchResult(
                        task_id=task_result.task_id,
                        memory_usage=task_result.memory_usage,
                        peak_memory=task_result.peak_memory,
                        start_time=task_result.start_time,
                        end_time=task_result.end_time,
                        error_message=task_result.error_message,
                    ),
                )
                or task_result.result
            )

        stream = config.stream

        if stream:

            async def result_transformer():
                async for task_result in dispatcher.run_urls_stream(
                    crawler=self, urls=urls, config=config
                ):
                    yield transform_result(task_result)

            return result_transformer()
        else:
            _results = await dispatcher.run_urls(crawler=self, urls=urls, config=config)
            return [transform_result(res) for res in _results]
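    # Illustrative sketch (not part of the original class): because
    # transform_result attaches a DispatchResult to each CrawlResult under the
    # `dispatch_result` attribute, per-task telemetry travels with the crawl
    # output. Field names below come from the DispatchResult construction above;
    # the exact types of the timing fields are an assumption.
    #
    #     results = await crawler.arun_many(urls=urls, config=config)
    #     for result in results:
    #         dr = getattr(result, "dispatch_result", None)
    #         if dr:
    #             print(f"{result.url}: task {dr.task_id}, "
    #                   f"peak memory {dr.peak_memory:.1f} MB")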

    async def aseed_urls(
        self,
        domain_or_domains: Union[str, List[str]],
        config: Optional[SeedingConfig] = None,
        **kwargs
    ) -> Union[List[str], Dict[str, List[Union[str, Dict[str, Any]]]]]:
        """
        Discovers, filters, and optionally validates URLs for one or more domains
        using sitemaps and Common Crawl archives.

        Args:
            domain_or_domains: A single domain string (e.g., "iana.org") or a list of domains.
            config: A SeedingConfig object to control the seeding process.
                    Parameters passed directly via kwargs will override those in 'config'.
            **kwargs: Additional parameters (e.g., `source`, `live_check`, `extract_head`,
                      `pattern`, `concurrency`, `hits_per_sec`, `force_refresh`, `verbose`)
                      that will be used to construct or update the SeedingConfig.

        Returns:
            If `extract_head` is False:
                - For a single domain: `List[str]` of discovered URLs.
                - For multiple domains: `Dict[str, List[str]]` mapping each domain to its URLs.
            If `extract_head` is True:
                - For a single domain: `List[Dict[str, Any]]` where each dict contains 'url'
                  and 'head_data' (parsed <head> metadata).
                - For multiple domains: `Dict[str, List[Dict[str, Any]]]` mapping each domain
                  to a list of URL data dictionaries.

        Raises:
            ValueError: If `domain_or_domains` is not a string or a list of strings.
            Exception: Any underlying exceptions from AsyncUrlSeeder or network operations.

        Example:
            >>> # Discover URLs from sitemap with live check for 'example.com'
            >>> result = await crawler.aseed_urls("example.com", source="sitemap", live_check=True, hits_per_sec=10)

            >>> # Discover URLs from Common Crawl, extract head data for 'example.com' and 'python.org'
            >>> multi_domain_result = await crawler.aseed_urls(
            >>>     ["example.com", "python.org"],
            >>>     source="cc", extract_head=True, concurrency=200, hits_per_sec=50
            >>> )
        """
        # Initialize AsyncUrlSeeder here if it hasn't been already.
        if not self.url_seeder:
            # Pass the crawler's base_directory for the seeder's cache management
            # and the crawler's logger for consistent logging.
            self.url_seeder = AsyncUrlSeeder(
                base_directory=self.crawl4ai_folder,
                logger=self.logger
            )

        # Merge the config object with direct kwargs, giving kwargs precedence
        seeding_config = config.clone(**kwargs) if config else SeedingConfig.from_kwargs(kwargs)

        # Ensure base_directory is set for the seeder's cache
        seeding_config.base_directory = seeding_config.base_directory or self.crawl4ai_folder
        # Ensure the seeder uses the crawler's logger (if not already set)
        if not self.url_seeder.logger:
            self.url_seeder.logger = self.logger

        # Pass the verbose setting if explicitly provided in SeedingConfig or kwargs
        if seeding_config.verbose is not None:
            self.url_seeder.logger.verbose = seeding_config.verbose
        else:  # Default to the crawler's verbose setting
            self.url_seeder.logger.verbose = self.logger.verbose

        if isinstance(domain_or_domains, str):
            self.logger.info(
                message="Starting URL seeding for domain: {domain}",
                tag="SEED",
                params={"domain": domain_or_domains}
            )
            return await self.url_seeder.urls(
                domain_or_domains,
                seeding_config
            )
        elif isinstance(domain_or_domains, (list, tuple)):
            self.logger.info(
                message="Starting URL seeding for {count} domains",
                tag="SEED",
                params={"count": len(domain_or_domains)}
            )
            # AsyncUrlSeeder.many_urls directly accepts a list of domains and individual params.
            return await self.url_seeder.many_urls(
                domain_or_domains,
                seeding_config
            )
        else:
            raise ValueError("`domain_or_domains` must be a string or a list of strings.")
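Since aseed_urls returns plain URL lists when `extract_head` is False, discovery can feed arun_many directly. A sketch of that pipeline, using only parameters documented in the docstring above (the `pattern` value is illustrative):

    async def seed_and_crawl():
        async with AsyncWebCrawler() as crawler:
            urls = await crawler.aseed_urls(
                "example.com", source="sitemap", pattern="*/blog/*"
            )
            return await crawler.arun_many(urls=urls, config=CrawlerRunConfig())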
crawl4ai/browser_manager.py (new file, 1113 lines; diff suppressed because it is too large)
crawl4ai/browser_profiler.py (new file, 1033 lines; diff suppressed because it is too large)

crawl4ai/cache_context.py (new file, 117 lines)
@@ -0,0 +1,117 @@
from enum import Enum


class CacheMode(Enum):
    """
    Defines the caching behavior for web crawling operations.

    Modes:
    - ENABLED: Normal caching behavior (read and write)
    - DISABLED: No caching at all
    - READ_ONLY: Only read from cache, don't write
    - WRITE_ONLY: Only write to cache, don't read
    - BYPASS: Bypass cache for this operation
    """

    ENABLED = "enabled"
    DISABLED = "disabled"
    READ_ONLY = "read_only"
    WRITE_ONLY = "write_only"
    BYPASS = "bypass"


class CacheContext:
    """
    Encapsulates cache-related decisions and URL handling.

    This class centralizes all cache-related logic and URL type checking,
    making the caching behavior more predictable and maintainable.

    Attributes:
        url (str): The URL being processed.
        cache_mode (CacheMode): The cache mode for the current operation.
        always_bypass (bool): If True, bypasses caching for this operation.
        is_cacheable (bool): True if the URL is cacheable, False otherwise.
        is_web_url (bool): True if the URL is a web URL, False otherwise.
        is_local_file (bool): True if the URL is a local file, False otherwise.
        is_raw_html (bool): True if the URL is raw HTML, False otherwise.
        _url_display (str): The display name for the URL (web, local file, or raw HTML).
    """

    def __init__(self, url: str, cache_mode: CacheMode, always_bypass: bool = False):
        """
        Initializes the CacheContext with the provided URL and cache mode.

        Args:
            url (str): The URL being processed.
            cache_mode (CacheMode): The cache mode for the current operation.
            always_bypass (bool): If True, bypasses caching for this operation.
        """
        self.url = url
        self.cache_mode = cache_mode
        self.always_bypass = always_bypass
        self.is_cacheable = url.startswith(("http://", "https://", "file://"))
        self.is_web_url = url.startswith(("http://", "https://"))
        self.is_local_file = url.startswith("file://")
        self.is_raw_html = url.startswith("raw:")
        self._url_display = url if not self.is_raw_html else "Raw HTML"

    def should_read(self) -> bool:
        """
        Determines if cache should be read based on context.

        How it works:
        1. If always_bypass is True or is_cacheable is False, return False.
        2. If cache_mode is ENABLED or READ_ONLY, return True.

        Returns:
            bool: True if cache should be read, False otherwise.
        """
        if self.always_bypass or not self.is_cacheable:
            return False
        return self.cache_mode in [CacheMode.ENABLED, CacheMode.READ_ONLY]

    def should_write(self) -> bool:
        """
        Determines if cache should be written based on context.

        How it works:
        1. If always_bypass is True or is_cacheable is False, return False.
        2. If cache_mode is ENABLED or WRITE_ONLY, return True.

        Returns:
            bool: True if cache should be written, False otherwise.
        """
        if self.always_bypass or not self.is_cacheable:
            return False
        return self.cache_mode in [CacheMode.ENABLED, CacheMode.WRITE_ONLY]

    @property
    def display_url(self) -> str:
        """Returns the URL in display format."""
        return self._url_display


def _legacy_to_cache_mode(
    disable_cache: bool = False,
    bypass_cache: bool = False,
    no_cache_read: bool = False,
    no_cache_write: bool = False,
) -> CacheMode:
    """
    Converts legacy cache parameters to the new CacheMode enum.

    This is an internal function to help transition from the old boolean flags
    to the new CacheMode system.
    """
    if disable_cache:
        return CacheMode.DISABLED
    if bypass_cache:
        return CacheMode.BYPASS
    if no_cache_read and no_cache_write:
        return CacheMode.DISABLED
    if no_cache_read:
        return CacheMode.WRITE_ONLY
    if no_cache_write:
        return CacheMode.READ_ONLY
    return CacheMode.ENABLED
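A short sketch of how these pieces compose: the legacy boolean flags map to a CacheMode, and CacheContext then turns that mode into concrete read/write decisions for a given URL. Every assertion below follows directly from the code in this file:

    mode = _legacy_to_cache_mode(no_cache_read=True)   # -> CacheMode.WRITE_ONLY
    ctx = CacheContext("https://example.com", mode)
    assert ctx.should_read() is False
    assert ctx.should_write() is True

    # "raw:" URLs are never cacheable, regardless of mode
    raw_ctx = CacheContext("raw:<html></html>", CacheMode.ENABLED)
    assert raw_ctx.should_read() is False and raw_ctx.should_write() is False
    print(raw_ctx.display_url)  # "Raw HTML"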

@@ -6,19 +6,49 @@ from .model_loader import load_nltk_punkt

# Define the abstract base class for chunking strategies
class ChunkingStrategy(ABC):
    """
    Abstract base class for chunking strategies.
    """

    @abstractmethod
    def chunk(self, text: str) -> list:
        """
        Abstract method to chunk the given text.

        Args:
            text (str): The text to chunk.

        Returns:
            list: A list of chunks.
        """
        pass


# Create an identity chunking strategy f(x) = [x]
class IdentityChunking(ChunkingStrategy):
    """
    Chunking strategy that returns the input text as a single chunk.
    """

    def chunk(self, text: str) -> list:
        return [text]


# Regex-based chunking
class RegexChunking(ChunkingStrategy):
    """
    Chunking strategy that splits text based on regular expression patterns.
    """

    def __init__(self, patterns=None, **kwargs):
        """
        Initialize the RegexChunking object.

        Args:
            patterns (list): A list of regular expression patterns to split text.
        """
        if patterns is None:
            patterns = [r"\n\n"]  # Default split pattern
        self.patterns = patterns

    def chunk(self, text: str) -> list:
@@ -29,12 +59,20 @@ class RegexChunking(ChunkingStrategy):
            new_paragraphs.extend(re.split(pattern, paragraph))
        paragraphs = new_paragraphs
        return paragraphs


# NLP-based sentence chunking
class NlpSentenceChunking(ChunkingStrategy):
    """
    Chunking strategy that splits text into sentences using NLTK's sentence tokenizer.
    """

    def __init__(self, **kwargs):
        """
        Initialize the NlpSentenceChunking object.
        """
        from crawl4ai.legacy.model_loader import load_nltk_punkt

        load_nltk_punkt()

    def chunk(self, text: str) -> list:
        # Improved regex for sentence splitting
@@ -42,19 +80,35 @@ class NlpSentenceChunking(ChunkingStrategy):
        # r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<![A-Z][A-Z]\.)(?<![A-Za-z]\.)(?<=\.|\?|\!|\n)\s'
        # )
        # sentences = sentence_endings.split(text)
        # sens = [sent.strip() for sent in sentences if sent]
        from nltk.tokenize import sent_tokenize

        sentences = sent_tokenize(text)
        sens = [sent.strip() for sent in sentences]

        return list(set(sens))


# Topic-based segmentation using TextTiling
class TopicSegmentationChunking(ChunkingStrategy):
    """
    Chunking strategy that segments text into topics using NLTK's TextTilingTokenizer.

    How it works:
    1. Segment the text into topics using TextTilingTokenizer
    2. Extract keywords for each topic segment
    """

    def __init__(self, num_keywords=3, **kwargs):
        """
        Initialize the TopicSegmentationChunking object.

        Args:
            num_keywords (int): The number of keywords to extract for each topic segment.
        """
        import nltk as nl

        self.tokenizer = nl.tokenize.TextTilingTokenizer()
        self.num_keywords = num_keywords

    def chunk(self, text: str) -> list:
@@ -65,8 +119,14 @@ class TopicSegmentationChunking(ChunkingStrategy):
    def extract_keywords(self, text: str) -> list:
        # Tokenize and remove stopwords and punctuation
        import nltk as nl

        tokens = nl.tokenize.word_tokenize(text)
        tokens = [
            token.lower()
            for token in tokens
            if token not in nl.corpus.stopwords.words("english")
            and token not in string.punctuation
        ]

        # Calculate frequency distribution
        freq_dist = Counter(tokens)
@@ -77,29 +137,120 @@ class TopicSegmentationChunking(ChunkingStrategy):
        # Segment the text into topics
        segments = self.chunk(text)
        # Extract keywords for each topic segment
        segments_with_topics = [
            (segment, self.extract_keywords(segment)) for segment in segments
        ]
        return segments_with_topics


# Fixed-length word chunks
class FixedLengthWordChunking(ChunkingStrategy):
    """
    Chunking strategy that splits text into fixed-length word chunks.

    How it works:
    1. Split the text into words
    2. Create chunks of fixed length
    3. Return the list of chunks
    """

    def __init__(self, chunk_size=100, **kwargs):
        """
        Initialize the fixed-length word chunking strategy with the given chunk size.

        Args:
            chunk_size (int): The size of each chunk in words.
        """
        self.chunk_size = chunk_size

    def chunk(self, text: str) -> list:
        words = text.split()
        return [
            " ".join(words[i : i + self.chunk_size])
            for i in range(0, len(words), self.chunk_size)
        ]


# Sliding window chunking
class SlidingWindowChunking(ChunkingStrategy):
    """
    Chunking strategy that splits text into overlapping word chunks.

    How it works:
    1. Split the text into words
    2. Create chunks of fixed length
    3. Return the list of chunks
    """

    def __init__(self, window_size=100, step=50, **kwargs):
        """
        Initialize the sliding window chunking strategy with the given window size and
        step size.

        Args:
            window_size (int): The size of the sliding window in words.
            step (int): The step size for sliding the window in words.
        """
        self.window_size = window_size
        self.step = step

    def chunk(self, text: str) -> list:
        words = text.split()
        chunks = []

        if len(words) <= self.window_size:
            return [text]

        for i in range(0, len(words) - self.window_size + 1, self.step):
            chunk = " ".join(words[i : i + self.window_size])
            chunks.append(chunk)

        # Handle the last chunk if it doesn't align perfectly
        if i + self.window_size < len(words):
            chunks.append(" ".join(words[-self.window_size :]))

        return chunks


class OverlappingWindowChunking(ChunkingStrategy):
    """
    Chunking strategy that splits text into overlapping word chunks.

    How it works:
    1. Split the text into words using whitespace
    2. Create chunks of fixed length equal to the window size
    3. Slide the window by the overlap size
    4. Return the list of chunks
    """

    def __init__(self, window_size=1000, overlap=100, **kwargs):
        """
        Initialize the overlapping window chunking strategy with the given window size and
        overlap size.

        Args:
            window_size (int): The size of the window in words.
            overlap (int): The size of the overlap between consecutive chunks in words.
        """
        self.window_size = window_size
        self.overlap = overlap

    def chunk(self, text: str) -> list:
        words = text.split()
        chunks = []

        if len(words) <= self.window_size:
            return [text]

        start = 0
        while start < len(words):
            end = start + self.window_size
            chunk = " ".join(words[start:end])
            chunks.append(chunk)

            if end >= len(words):
                break

            start = end - self.overlap

        return chunks
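The difference between the two windowed strategies above is how the stride is expressed: SlidingWindowChunking takes an absolute step, while OverlappingWindowChunking takes the overlap between consecutive chunks (effectively step = window_size - overlap). A quick comparison on toy input:

    text = " ".join(f"w{i}" for i in range(12))

    # Windows start at words 0, 3, 6, plus a final window covering the tail
    print(SlidingWindowChunking(window_size=5, step=3).chunk(text))

    # Each new window re-reads the last 2 words of the previous one
    print(OverlappingWindowChunking(window_size=5, overlap=2).chunk(text))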
crawl4ai/cli.py (new file, 1419 lines; diff suppressed because it is too large)

crawl4ai/components/crawler_monitor.py (new file, 837 lines)
@@ -0,0 +1,837 @@
import time
import uuid
import threading
import psutil
from datetime import datetime, timedelta
from typing import Dict, Optional, List

from rich.console import Console
from rich.layout import Layout
from rich.panel import Panel
from rich.table import Table
from rich.text import Text
from rich.live import Live
from rich import box

from ..models import CrawlStatus


class TerminalUI:
    """Terminal user interface for CrawlerMonitor using the rich library."""

    def __init__(self, refresh_rate: float = 1.0, max_width: int = 120):
        """
        Initialize the terminal UI.

        Args:
            refresh_rate: How often to refresh the UI (in seconds)
            max_width: Maximum width of the UI in characters
        """
        self.console = Console(width=max_width)
        self.layout = Layout()
        self.refresh_rate = refresh_rate
        self.stop_event = threading.Event()
        self.ui_thread = None
        self.monitor = None  # Will be set by CrawlerMonitor
        self.max_width = max_width

        # Setup layout - vertical layout (top to bottom)
        self.layout.split(
            Layout(name="header", size=3),
            Layout(name="pipeline_status", size=10),
            Layout(name="task_details", ratio=1),
            Layout(name="footer", size=3),  # Increased footer size to fit all content
        )

    def start(self, monitor):
        """Start the UI thread."""
        self.monitor = monitor
        self.stop_event.clear()
        self.ui_thread = threading.Thread(target=self._ui_loop)
        self.ui_thread.daemon = True
        self.ui_thread.start()

    def stop(self):
        """Stop the UI thread."""
        if self.ui_thread and self.ui_thread.is_alive():
            self.stop_event.set()
            # Only try to join if we're not in the UI thread.
            # This prevents "cannot join current thread" errors.
            if threading.current_thread() != self.ui_thread:
                self.ui_thread.join(timeout=5.0)

    def _ui_loop(self):
        """Main UI rendering loop."""
        import sys
        import select
        import termios
        import tty

        # Setup terminal for non-blocking input
        old_settings = termios.tcgetattr(sys.stdin)
        try:
            tty.setcbreak(sys.stdin.fileno())

            # Use Live display to render the UI
            with Live(self.layout, refresh_per_second=1 / self.refresh_rate, screen=True) as live:
                self.live = live  # Store the live display for updates

                # Main UI loop
                while not self.stop_event.is_set():
                    self._update_display()

                    # Check for key press (non-blocking)
                    if select.select([sys.stdin], [], [], 0)[0]:
                        key = sys.stdin.read(1)
                        # Check for 'q' to quit
                        if key == 'q':
                            # Signal stop but don't call monitor.stop() from the UI thread,
                            # as it would cause the thread to try to join itself
                            self.stop_event.set()
                            self.monitor.is_running = False
                            break

                    time.sleep(self.refresh_rate)

                    # Just check if the monitor was stopped
                    if not self.monitor.is_running:
                        break
        finally:
            # Restore terminal settings
            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)

    def _update_display(self):
        """Update the terminal display with current statistics."""
        if not self.monitor:
            return

        # Update crawler status panel
        self.layout["header"].update(self._create_status_panel())

        # Update pipeline status panel and task details panel
        self.layout["pipeline_status"].update(self._create_pipeline_panel())
        self.layout["task_details"].update(self._create_task_details_panel())

        # Update footer
        self.layout["footer"].update(self._create_footer())

    def _create_status_panel(self) -> Panel:
        """Create the crawler status panel."""
        summary = self.monitor.get_summary()

        # Format memory status with icon
        memory_status = self.monitor.get_memory_status()
        memory_icon = "🟢"  # Default NORMAL
        if memory_status == "PRESSURE":
            memory_icon = "🟠"
        elif memory_status == "CRITICAL":
            memory_icon = "🔴"

        # Get current memory usage
        current_memory = psutil.Process().memory_info().rss / (1024 * 1024)  # MB
        memory_percent = (current_memory / psutil.virtual_memory().total) * 100

        # Format runtime
        runtime = self.monitor._format_time(
            time.time() - self.monitor.start_time if self.monitor.start_time else 0
        )

        # Create the status text
        status_text = Text()
        status_text.append(f"Web Crawler Dashboard | Runtime: {runtime} | Memory: {memory_percent:.1f}% {memory_icon}\n")
        status_text.append(f"Status: {memory_status} | URLs: {summary['urls_completed']}/{summary['urls_total']} | ")
        status_text.append(f"Peak Mem: {summary['peak_memory_percent']:.1f}% at {self.monitor._format_time(summary['peak_memory_time'])}")

        return Panel(status_text, title="Crawler Status", border_style="blue")

    def _create_pipeline_panel(self) -> Panel:
        """Create the pipeline status panel."""
        summary = self.monitor.get_summary()
        queue_stats = self.monitor.get_queue_stats()

        # Create a table for status counts
        table = Table(show_header=True, box=None)
        table.add_column("Status", style="cyan")
        table.add_column("Count", justify="right")
        table.add_column("Percentage", justify="right")
        table.add_column("Stat", style="cyan")
        table.add_column("Value", justify="right")

        # Calculate overall progress
        progress = f"{summary['urls_completed']}/{summary['urls_total']}"
        progress_percent = f"{summary['completion_percentage']:.1f}%"

        # Add the overall progress row
        table.add_row(
            "Overall Progress",
            progress,
            progress_percent,
            "Est. Completion",
            summary.get('estimated_completion_time', "N/A"),
        )

        # Add rows for each status
        status_counts = summary['status_counts']
        total = summary['urls_total'] or 1  # Avoid division by zero

        # Status rows
        table.add_row(
            "Completed",
            str(status_counts.get(CrawlStatus.COMPLETED.name, 0)),
            f"{status_counts.get(CrawlStatus.COMPLETED.name, 0) / total * 100:.1f}%",
            "Avg. Time/URL",
            f"{summary.get('avg_task_duration', 0):.2f}s",
        )

        table.add_row(
            "Failed",
            str(status_counts.get(CrawlStatus.FAILED.name, 0)),
            f"{status_counts.get(CrawlStatus.FAILED.name, 0) / total * 100:.1f}%",
            "Concurrent Tasks",
            str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0)),
        )

        table.add_row(
            "In Progress",
            str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0)),
            f"{status_counts.get(CrawlStatus.IN_PROGRESS.name, 0) / total * 100:.1f}%",
            "Queue Size",
            str(queue_stats['total_queued']),
        )

        table.add_row(
            "Queued",
            str(status_counts.get(CrawlStatus.QUEUED.name, 0)),
            f"{status_counts.get(CrawlStatus.QUEUED.name, 0) / total * 100:.1f}%",
            "Max Wait Time",
            f"{queue_stats['highest_wait_time']:.1f}s",
        )

        # Requeued is a special case as it's not a status
        requeued_count = summary.get('requeued_count', 0)
        table.add_row(
            "Requeued",
            str(requeued_count),
            f"{summary.get('requeue_rate', 0):.1f}%",
            "Avg Wait Time",
            f"{queue_stats['avg_wait_time']:.1f}s",
        )

        # Add empty row for spacing
        table.add_row(
            "",
            "",
            "",
            "Requeue Rate",
            f"{summary.get('requeue_rate', 0):.1f}%",
        )

        return Panel(table, title="Pipeline Status", border_style="green")

    def _create_task_details_panel(self) -> Panel:
        """Create the task details panel."""
        # Create a table for task details
        table = Table(show_header=True, expand=True)
        table.add_column("Task ID", style="cyan", no_wrap=True, width=10)
        table.add_column("URL", style="blue", ratio=3)
        table.add_column("Status", style="green", width=15)
        table.add_column("Memory", justify="right", width=8)
        table.add_column("Peak", justify="right", width=8)
        table.add_column("Duration", justify="right", width=10)

        # Get all task stats
        task_stats = self.monitor.get_all_task_stats()

        # Add summary row
        active_tasks = sum(
            1 for stats in task_stats.values()
            if stats['status'] == CrawlStatus.IN_PROGRESS.name
        )

        total_memory = sum(stats['memory_usage'] for stats in task_stats.values())
        total_peak = sum(stats['peak_memory'] for stats in task_stats.values())

        # Summary row with separators
        table.add_row(
            "SUMMARY",
            f"Total: {len(task_stats)}",
            f"Active: {active_tasks}",
            f"{total_memory:.1f}",
            f"{total_peak:.1f}",
            "N/A",
        )

        # Add a separator
        table.add_row("—" * 10, "—" * 20, "—" * 10, "—" * 8, "—" * 8, "—" * 10)

        # Status icons
        status_icons = {
            CrawlStatus.QUEUED.name: "⏳",
            CrawlStatus.IN_PROGRESS.name: "🔄",
            CrawlStatus.COMPLETED.name: "✅",
            CrawlStatus.FAILED.name: "❌",
        }

        # Calculate how many rows we can display based on available space.
        # We can display more rows now that we have a dedicated panel.
        display_count = min(len(task_stats), 20)  # Display up to 20 tasks

        # Add rows for each task
        for task_id, stats in sorted(
            list(task_stats.items())[:display_count],
            # Sort: 1. IN_PROGRESS first, 2. QUEUED, 3. COMPLETED/FAILED by recency
            key=lambda x: (
                0 if x[1]['status'] == CrawlStatus.IN_PROGRESS.name else
                1 if x[1]['status'] == CrawlStatus.QUEUED.name else
                2,
                -1 * (x[1].get('end_time', 0) or 0),  # Most recent first
            ),
        ):
            # Truncate task_id and URL for display
            short_id = task_id[:8]
            url = stats['url']
            if len(url) > 50:  # Allow longer URLs in the dedicated panel
                url = url[:47] + "..."

            # Format status with icon
            status = f"{status_icons.get(stats['status'], '?')} {stats['status']}"

            # Add row
            table.add_row(
                short_id,
                url,
                status,
                f"{stats['memory_usage']:.1f}",
                f"{stats['peak_memory']:.1f}",
                stats['duration'] if 'duration' in stats else "0:00",
            )

        return Panel(table, title="Task Details", border_style="yellow")

    def _create_footer(self) -> Panel:
        """Create the footer panel."""
        from rich.columns import Columns
        from rich.align import Align

        memory_status = self.monitor.get_memory_status()
        memory_icon = "🟢"  # Default NORMAL
        if memory_status == "PRESSURE":
            memory_icon = "🟠"
        elif memory_status == "CRITICAL":
            memory_icon = "🔴"

        # Left section - memory status
        left_text = Text()
        left_text.append("Memory Status: ", style="bold")
        status_style = "green" if memory_status == "NORMAL" else "yellow" if memory_status == "PRESSURE" else "red bold"
        left_text.append(f"{memory_icon} {memory_status}", style=status_style)

        # Center section - copyright
        center_text = Text("© Crawl4AI 2025 | Made by UncleCode", style="cyan italic")

        # Right section - quit instruction
        right_text = Text()
        right_text.append("Press ", style="bold")
        right_text.append("q", style="white on blue")
        right_text.append(" to quit", style="bold")

        # Create columns with the three sections
        footer_content = Columns(
            [
                Align.left(left_text),
                Align.center(center_text),
                Align.right(right_text),
            ],
            expand=True,
        )

        # Create a more visible footer panel
        return Panel(
            footer_content,
            border_style="white",
            padding=(0, 1),  # Add padding for better visibility
        )


class CrawlerMonitor:
    """
    Comprehensive monitoring and visualization system for tracking web crawler operations in real time.
    Provides a terminal-based dashboard that displays task statuses, memory usage, queue statistics,
    and performance metrics.
    """

    def __init__(
        self,
        urls_total: int = 0,
        refresh_rate: float = 1.0,
        enable_ui: bool = True,
        max_width: int = 120,
    ):
        """
        Initialize the CrawlerMonitor.

        Args:
            urls_total: Total number of URLs to be crawled
            refresh_rate: How often to refresh the UI (in seconds)
            enable_ui: Whether to display the terminal UI
            max_width: Maximum width of the UI in characters
        """
        # Core monitoring attributes
        self.stats = {}  # Task ID -> stats dict
        self.memory_status = "NORMAL"
        self.start_time = None
        self.end_time = None
        self.is_running = False
        self.queue_stats = {
            "total_queued": 0,
            "highest_wait_time": 0.0,
            "avg_wait_time": 0.0,
        }
        self.urls_total = urls_total
        self.urls_completed = 0
        self.peak_memory_percent = 0.0
        self.peak_memory_time = 0.0

        # Status counts
        self.status_counts = {
            CrawlStatus.QUEUED.name: 0,
            CrawlStatus.IN_PROGRESS.name: 0,
            CrawlStatus.COMPLETED.name: 0,
            CrawlStatus.FAILED.name: 0,
        }

        # Requeue tracking
        self.requeued_count = 0

        # Thread-safety
        self._lock = threading.RLock()

        # Terminal UI
        self.enable_ui = enable_ui
        self.terminal_ui = TerminalUI(
            refresh_rate=refresh_rate,
            max_width=max_width,
        ) if enable_ui else None

    def start(self):
        """
        Start the monitoring session.

        - Initializes the start_time
        - Sets is_running to True
        - Starts the terminal UI if enabled
        """
        with self._lock:
            self.start_time = time.time()
            self.is_running = True

        # Start the terminal UI
        if self.enable_ui and self.terminal_ui:
            self.terminal_ui.start(self)

    def stop(self):
        """
        Stop the monitoring session.

        - Records end_time
        - Sets is_running to False
        - Stops the terminal UI
        - Generates final summary statistics
        """
        with self._lock:
            self.end_time = time.time()
            self.is_running = False

        # Stop the terminal UI
        if self.enable_ui and self.terminal_ui:
            self.terminal_ui.stop()

    def add_task(self, task_id: str, url: str):
        """
        Register a new task with the monitor.

        Args:
            task_id: Unique identifier for the task
            url: URL being crawled

        The task is initialized with:
        - status: QUEUED
        - url: The URL to crawl
        - enqueue_time: Current time
        - memory_usage: 0
        - peak_memory: 0
        - wait_time: 0
        - retry_count: 0
        """
        with self._lock:
            self.stats[task_id] = {
                "task_id": task_id,
                "url": url,
                "status": CrawlStatus.QUEUED.name,
                "enqueue_time": time.time(),
                "start_time": None,
                "end_time": None,
                "memory_usage": 0.0,
                "peak_memory": 0.0,
                "error_message": "",
                "wait_time": 0.0,
                "retry_count": 0,
                "duration": "0:00",
                "counted_requeue": False,
            }

            # Update status counts
            self.status_counts[CrawlStatus.QUEUED.name] += 1

    def update_task(
        self,
        task_id: str,
        status: Optional[CrawlStatus] = None,
        start_time: Optional[float] = None,
        end_time: Optional[float] = None,
        memory_usage: Optional[float] = None,
        peak_memory: Optional[float] = None,
        error_message: Optional[str] = None,
        retry_count: Optional[int] = None,
        wait_time: Optional[float] = None,
    ):
        """
        Update statistics for a specific task.

        Args:
            task_id: Unique identifier for the task
            status: New status (QUEUED, IN_PROGRESS, COMPLETED, FAILED)
            start_time: When task execution started
            end_time: When task execution ended
            memory_usage: Current memory usage in MB
            peak_memory: Maximum memory usage in MB
            error_message: Error description if failed
            retry_count: Number of retry attempts
            wait_time: Time spent in queue

        Updates task statistics and status counts.
        If the status changes, decrements the old status count and
        increments the new status count.
        """
        with self._lock:
            # Check if task exists
            if task_id not in self.stats:
                return

            task_stats = self.stats[task_id]

            # Update status counts if status is changing
            old_status = task_stats["status"]
            if status and status.name != old_status:
                self.status_counts[old_status] -= 1
                self.status_counts[status.name] += 1

                # Track completion
                if status == CrawlStatus.COMPLETED:
                    self.urls_completed += 1

                # Track requeues
                if old_status in [CrawlStatus.COMPLETED.name, CrawlStatus.FAILED.name] and not task_stats.get("counted_requeue", False):
                    self.requeued_count += 1
                    task_stats["counted_requeue"] = True

            # Update task statistics
            if status:
                task_stats["status"] = status.name
            if start_time is not None:
                task_stats["start_time"] = start_time
            if end_time is not None:
                task_stats["end_time"] = end_time
            if memory_usage is not None:
                task_stats["memory_usage"] = memory_usage

                # Update peak memory if necessary
                current_percent = (memory_usage / psutil.virtual_memory().total) * 100
                if current_percent > self.peak_memory_percent:
                    self.peak_memory_percent = current_percent
                    self.peak_memory_time = time.time()

            if peak_memory is not None:
                task_stats["peak_memory"] = peak_memory
            if error_message is not None:
                task_stats["error_message"] = error_message
            if retry_count is not None:
                task_stats["retry_count"] = retry_count
            if wait_time is not None:
                task_stats["wait_time"] = wait_time

            # Calculate duration
            if task_stats["start_time"]:
                end = task_stats["end_time"] or time.time()
                duration = end - task_stats["start_time"]
                task_stats["duration"] = self._format_time(duration)

    def update_memory_status(self, status: str):
        """
        Update the current memory status.

        Args:
            status: Memory status (NORMAL, PRESSURE, CRITICAL, or custom)

        Also updates the UI to reflect the new status.
        """
        with self._lock:
            self.memory_status = status

    def update_queue_statistics(
        self,
        total_queued: int,
        highest_wait_time: float,
        avg_wait_time: float,
    ):
        """
        Update statistics related to the task queue.

        Args:
            total_queued: Number of tasks currently in queue
            highest_wait_time: Longest wait time of any queued task
            avg_wait_time: Average wait time across all queued tasks
        """
        with self._lock:
            self.queue_stats = {
                "total_queued": total_queued,
                "highest_wait_time": highest_wait_time,
                "avg_wait_time": avg_wait_time,
            }

    def get_task_stats(self, task_id: str) -> Dict:
        """
        Get statistics for a specific task.

        Args:
            task_id: Unique identifier for the task

        Returns:
            Dictionary containing all task statistics
        """
        with self._lock:
            return self.stats.get(task_id, {}).copy()

    def get_all_task_stats(self) -> Dict[str, Dict]:
        """
        Get statistics for all tasks.

        Returns:
            Dictionary mapping task_ids to their statistics
        """
        with self._lock:
            return self.stats.copy()

    def get_memory_status(self) -> str:
        """
        Get the current memory status.

        Returns:
            Current memory status string
        """
        with self._lock:
            return self.memory_status

    def get_queue_stats(self) -> Dict:
        """
        Get current queue statistics.

        Returns:
            Dictionary with queue statistics including:
            - total_queued: Number of tasks in queue
            - highest_wait_time: Longest wait time
            - avg_wait_time: Average wait time
        """
        with self._lock:
            return self.queue_stats.copy()

    def get_summary(self) -> Dict:
        """
        Get a summary of all crawler statistics.

        Returns:
            Dictionary containing:
            - runtime: Total runtime in seconds
            - urls_total: Total URLs to process
            - urls_completed: Number of completed URLs
            - completion_percentage: Percentage complete
            - status_counts: Count of tasks in each status
            - memory_status: Current memory status
            - peak_memory_percent: Highest memory usage
            - peak_memory_time: When peak memory occurred
            - avg_task_duration: Average task processing time
            - estimated_completion_time: Projected finish time
            - requeue_rate: Percentage of tasks requeued
        """
        with self._lock:
            # Calculate runtime
            current_time = time.time()
            runtime = current_time - (self.start_time or current_time)

            # Calculate completion percentage
            completion_percentage = 0
            if self.urls_total > 0:
                completion_percentage = (self.urls_completed / self.urls_total) * 100

            # Calculate average task duration for completed tasks
            completed_tasks = [
                task for task in self.stats.values()
                if task["status"] == CrawlStatus.COMPLETED.name and task.get("start_time") and task.get("end_time")
            ]

            avg_task_duration = 0
            if completed_tasks:
                total_duration = sum(task["end_time"] - task["start_time"] for task in completed_tasks)
                avg_task_duration = total_duration / len(completed_tasks)

            # Calculate requeue rate
            requeue_rate = 0
            if len(self.stats) > 0:
                requeue_rate = (self.requeued_count / len(self.stats)) * 100

            # Calculate estimated completion time
            estimated_completion_time = "N/A"
            if avg_task_duration > 0 and self.urls_total > 0 and self.urls_completed > 0:
                remaining_tasks = self.urls_total - self.urls_completed
                estimated_seconds = remaining_tasks * avg_task_duration
                estimated_completion_time = self._format_time(estimated_seconds)

            return {
                "runtime": runtime,
                "urls_total": self.urls_total,
                "urls_completed": self.urls_completed,
                "completion_percentage": completion_percentage,
                "status_counts": self.status_counts.copy(),
                "memory_status": self.memory_status,
                "peak_memory_percent": self.peak_memory_percent,
                "peak_memory_time": self.peak_memory_time,
                "avg_task_duration": avg_task_duration,
                "estimated_completion_time": estimated_completion_time,
                "requeue_rate": requeue_rate,
                "requeued_count": self.requeued_count,
            }

    def render(self):
        """
        Render the terminal UI.

        This is the main UI rendering loop that:
        1. Updates all statistics
        2. Formats the display
        3. Renders the ASCII interface
        4. Handles keyboard input

        Note: The actual rendering is handled by the TerminalUI class,
        which uses the rich library's Live display.
        """
        if self.enable_ui and self.terminal_ui:
            # Force an update of the UI
            if hasattr(self.terminal_ui, '_update_display'):
                self.terminal_ui._update_display()

    def _format_time(self, seconds: float) -> str:
        """
        Format time in hours:minutes:seconds.

        Args:
            seconds: Time in seconds

        Returns:
            Formatted time string (e.g., "1:23:45")
        """
        delta = timedelta(seconds=int(seconds))
        hours, remainder = divmod(delta.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)

        if hours > 0:
            return f"{hours}:{minutes:02}:{seconds:02}"
        else:
            return f"{minutes}:{seconds:02}"

    def _calculate_estimated_completion(self) -> str:
        """
        Calculate estimated completion time based on current progress.

        Returns:
            Formatted time string
        """
        summary = self.get_summary()
        return summary.get("estimated_completion_time", "N/A")


# Example code for testing
if __name__ == "__main__":
    # Initialize the monitor
    monitor = CrawlerMonitor(urls_total=100)

    # Start monitoring
    monitor.start()

    try:
        # Simulate some tasks
        for i in range(20):
            task_id = str(uuid.uuid4())
            url = f"https://example.com/page{i}"
            monitor.add_task(task_id, url)

            # Simulate 20% of tasks are already running
            if i < 4:
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=time.time() - 30,  # Started 30 seconds ago
                    memory_usage=10.5,
                )

            # Simulate 10% of tasks are completed
            if i >= 4 and i < 6:
                start_time = time.time() - 60
                end_time = time.time() - 15
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=start_time,
                    memory_usage=8.2,
                )
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.COMPLETED,
                    end_time=end_time,
                    memory_usage=0,
                    peak_memory=15.7,
                )

            # Simulate 5% of tasks fail
            if i >= 6 and i < 7:
                start_time = time.time() - 45
                end_time = time.time() - 20
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=start_time,
                    memory_usage=12.3,
                )
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.FAILED,
                    end_time=end_time,
                    memory_usage=0,
                    peak_memory=18.2,
                    error_message="Connection timeout",
                )

        # Simulate memory pressure
        monitor.update_memory_status("PRESSURE")

        # Simulate queue statistics
        monitor.update_queue_statistics(
            total_queued=16,  # 20 - 4 (in progress)
            highest_wait_time=120.5,
            avg_wait_time=60.2,
        )

        # Keep the monitor running for a demonstration
        print("Crawler Monitor is running. Press 'q' to exit.")
        while monitor.is_running:
            time.sleep(0.1)

    except KeyboardInterrupt:
        print("\nExiting crawler monitor...")
    finally:
        # Stop the monitor
        monitor.stop()
        print("Crawler monitor exited successfully.")
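In normal use the monitor is not driven by hand as in the demo above; the intended wiring is to hand it to a dispatcher, which calls add_task/update_task as it schedules URLs. A sketch, assuming MemoryAdaptiveDispatcher accepts a `monitor` argument (the dispatcher calls shown in arun_many do not spell this out):

    async def monitored_crawl(urls, config):
        monitor = CrawlerMonitor(urls_total=len(urls), refresh_rate=0.5)
        dispatcher = MemoryAdaptiveDispatcher(monitor=monitor)  # `monitor` kwarg is an assumption
        async with AsyncWebCrawler() as crawler:
            return await crawler.arun_many(urls=urls, config=config, dispatcher=dispatcher)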
@@ -4,24 +4,143 @@ from dotenv import load_dotenv
|
||||
load_dotenv() # Load environment variables from .env file
|
||||
|
||||
# Default provider, ONLY used when the extraction strategy is LLMExtractionStrategy
|
||||
DEFAULT_PROVIDER = "openai/gpt-4-turbo"
|
||||
DEFAULT_PROVIDER = "openai/gpt-4o"
|
||||
DEFAULT_PROVIDER_API_KEY = "OPENAI_API_KEY"
|
||||
MODEL_REPO_BRANCH = "new-release-0.0.2"
|
||||
# Provider-model dictionary, ONLY used when the extraction strategy is LLMExtractionStrategy
|
||||
PROVIDER_MODELS = {
|
||||
"ollama/llama3": "no-token-needed", # Any model from Ollama no need for API token
|
||||
"ollama/llama3": "no-token-needed", # Any model from Ollama no need for API token
|
||||
"groq/llama3-70b-8192": os.getenv("GROQ_API_KEY"),
|
||||
"groq/llama3-8b-8192": os.getenv("GROQ_API_KEY"),
|
||||
"openai/gpt-3.5-turbo": os.getenv("OPENAI_API_KEY"),
|
||||
"openai/gpt-4-turbo": os.getenv("OPENAI_API_KEY"),
|
||||
"openai/gpt-4o-mini": os.getenv("OPENAI_API_KEY"),
|
||||
"openai/gpt-4o": os.getenv("OPENAI_API_KEY"),
|
||||
"openai/o1-mini": os.getenv("OPENAI_API_KEY"),
|
||||
"openai/o1-preview": os.getenv("OPENAI_API_KEY"),
|
||||
"openai/o3-mini": os.getenv("OPENAI_API_KEY"),
|
||||
"openai/o3-mini-high": os.getenv("OPENAI_API_KEY"),
|
||||
"anthropic/claude-3-haiku-20240307": os.getenv("ANTHROPIC_API_KEY"),
|
||||
"anthropic/claude-3-opus-20240229": os.getenv("ANTHROPIC_API_KEY"),
|
||||
"anthropic/claude-3-sonnet-20240229": os.getenv("ANTHROPIC_API_KEY"),
|
||||
"anthropic/claude-3-5-sonnet-20240620": os.getenv("ANTHROPIC_API_KEY"),
|
||||
"gemini/gemini-pro": os.getenv("GEMINI_API_KEY"),
|
||||
'gemini/gemini-1.5-pro': os.getenv("GEMINI_API_KEY"),
|
||||
'gemini/gemini-2.0-flash': os.getenv("GEMINI_API_KEY"),
|
||||
'gemini/gemini-2.0-flash-exp': os.getenv("GEMINI_API_KEY"),
|
||||
'gemini/gemini-2.0-flash-lite-preview-02-05': os.getenv("GEMINI_API_KEY"),
|
||||
"deepseek/deepseek-chat": os.getenv("DEEPSEEK_API_KEY"),
|
||||
}
|
||||
PROVIDER_MODELS_PREFIXES = {
|
||||
"ollama": "no-token-needed", # Any model from Ollama no need for API token
|
||||
"groq": os.getenv("GROQ_API_KEY"),
|
||||
"openai": os.getenv("OPENAI_API_KEY"),
|
||||
"anthropic": os.getenv("ANTHROPIC_API_KEY"),
|
||||
"gemini": os.getenv("GEMINI_API_KEY"),
|
||||
"deepseek": os.getenv("DEEPSEEK_API_KEY"),
|
||||
}
|
||||
|
||||
|
||||
# Chunk token threshold
|
||||
CHUNK_TOKEN_THRESHOLD = 1000
|
||||
CHUNK_TOKEN_THRESHOLD = 2**11 # 2048 tokens
|
||||
OVERLAP_RATE = 0.1
|
||||
WORD_TOKEN_RATE = 1.3

# Threshold for the minimum number of words in an HTML tag to be considered
MIN_WORD_THRESHOLD = 5
MIN_WORD_THRESHOLD = 1
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD = 1

IMPORTANT_ATTRS = ["src", "href", "alt", "title", "width", "height"]
ONLY_TEXT_ELIGIBLE_TAGS = [
    "b",
    "i",
    "u",
    "span",
    "del",
    "ins",
    "sub",
    "sup",
    "strong",
    "em",
    "code",
    "kbd",
    "var",
    "s",
    "q",
    "abbr",
    "cite",
    "dfn",
    "time",
    "small",
    "mark",
]
SOCIAL_MEDIA_DOMAINS = [
    "facebook.com",
    "twitter.com",
    "x.com",
    "linkedin.com",
    "instagram.com",
    "pinterest.com",
    "tiktok.com",
    "snapchat.com",
    "reddit.com",
]

# Threshold for image extraction - range is 1 to 6
# Images are scored on a point-based system to filter by usefulness. Points are
# assigned to each image based on the following criteria:
#   - either height or width exceeds 150px
#   - image size is greater than 10 KB
#   - the alt property is set
#   - the image format is jpg, png, or webp
#   - the image is in the first half of the total images extracted from the page
IMAGE_SCORE_THRESHOLD = 2
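A minimal sketch of the point system described above, assuming one point per listed criterion (the signature is illustrative; note the comments list five criteria while the stated range is 1 to 6, so the real scorer likely includes one more rule):

def score_image(width, height, size_bytes, alt, fmt, index, total_images):
    score = 0
    if height > 150 or width > 150:
        score += 1  # large enough to matter
    if size_bytes > 10 * 1024:
        score += 1  # bigger than 10 KB
    if alt:
        score += 1  # alt text present
    if fmt.lower() in ("jpg", "jpeg", "png", "webp"):
        score += 1  # useful format
    if index < total_images / 2:
        score += 1  # appears in the first half of the page
    return score

# An image would be kept when score_image(...) >= IMAGE_SCORE_THRESHOLD.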

MAX_METRICS_HISTORY = 1000

NEED_MIGRATION = True
URL_LOG_SHORTEN_LENGTH = 30
SHOW_DEPRECATION_WARNINGS = True
SCREENSHOT_HEIGHT_TRESHOLD = 10000
PAGE_TIMEOUT = 60000
DOWNLOAD_PAGE_TIMEOUT = 60000

# Global user settings with descriptions and default values
USER_SETTINGS = {
    "DEFAULT_LLM_PROVIDER": {
        "default": "openai/gpt-4o",
        "description": "Default LLM provider in 'company/model' format (e.g., 'openai/gpt-4o', 'anthropic/claude-3-sonnet')",
        "type": "string"
    },
    "DEFAULT_LLM_PROVIDER_TOKEN": {
        "default": "",
        "description": "API token for the default LLM provider",
        "type": "string",
        "secret": True
    },
    "VERBOSE": {
        "default": False,
        "description": "Enable verbose output for all commands",
        "type": "boolean"
    },
    "BROWSER_HEADLESS": {
        "default": True,
        "description": "Run browser in headless mode by default",
        "type": "boolean"
    },
    "BROWSER_TYPE": {
        "default": "chromium",
        "description": "Default browser type (chromium or firefox)",
        "type": "string",
        "options": ["chromium", "firefox"]
    },
    "CACHE_MODE": {
        "default": "bypass",
        "description": "Default cache mode (bypass, use, or refresh)",
        "type": "string",
        "options": ["bypass", "use", "refresh"]
    },
    "USER_AGENT_MODE": {
        "default": "default",
        "description": "Default user agent mode (default, random, or mobile)",
        "type": "string",
        "options": ["default", "random", "mobile"]
    }
}
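A minimal sketch of how a caller would presumably resolve one of these settings, falling back to the declared default (get_setting is a hypothetical helper, not part of this diff):

def get_setting(name, overrides=None):
    spec = USER_SETTINGS[name]
    if overrides and name in overrides:
        return overrides[name]  # explicit override wins
    return spec["default"]      # otherwise the declared default

# e.g. get_setting("BROWSER_TYPE") -> "chromium"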
1079  crawl4ai/content_filter_strategy.py  Normal file (diff suppressed because it is too large)
1673  crawl4ai/content_scraping_strategy.py  Normal file (diff suppressed because it is too large)
@@ -1,92 +0,0 @@
from abc import ABC, abstractmethod
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import InvalidArgumentException

from typing import List
import requests
import os
from pathlib import Path

class CrawlerStrategy(ABC):
    @abstractmethod
    def crawl(self, url: str, **kwargs) -> str:
        pass

class CloudCrawlerStrategy(CrawlerStrategy):
    def __init__(self, use_cached_html=False):
        super().__init__()
        self.use_cached_html = use_cached_html

    def crawl(self, url: str) -> str:
        data = {
            "urls": [url],
            "include_raw_html": True,
            "forced": True,
            "extract_blocks": False,
        }

        response = requests.post("http://crawl4ai.uccode.io/crawl", json=data)
        response = response.json()
        html = response["results"][0]["html"]
        return html

class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
    def __init__(self, use_cached_html=False, js_code=None):
        super().__init__()
        print("[LOG] 🚀 Initializing LocalSeleniumCrawlerStrategy")
        self.options = Options()
        self.options.headless = True
        self.options.add_argument("--no-sandbox")
        self.options.add_argument("--disable-dev-shm-usage")
        self.options.add_argument("--disable-gpu")
        self.options.add_argument("--disable-extensions")
        self.options.add_argument("--headless")
        self.use_cached_html = use_cached_html
        self.js_code = js_code

        # chromedriver_autoinstaller.install()
        import chromedriver_autoinstaller
        self.service = Service(chromedriver_autoinstaller.install())
        self.driver = webdriver.Chrome(service=self.service, options=self.options)

    def crawl(self, url: str) -> str:
        if self.use_cached_html:
            cache_file_path = os.path.join(Path.home(), ".crawl4ai", "cache", url.replace("/", "_"))
            if os.path.exists(cache_file_path):
                with open(cache_file_path, "r") as f:
                    return f.read()

        try:
            self.driver.get(url)
            WebDriverWait(self.driver, 10).until(
                EC.presence_of_all_elements_located((By.TAG_NAME, "html"))
            )

            # Execute JS code if provided
            if self.js_code:
                self.driver.execute_script(self.js_code)
                # Optionally, wait for some condition after executing the JS code
                WebDriverWait(self.driver, 10).until(
                    lambda driver: driver.execute_script("return document.readyState") == "complete"
                )

            html = self.driver.page_source

            # Store in cache
            cache_file_path = os.path.join(Path.home(), ".crawl4ai", "cache", url.replace("/", "_"))
            with open(cache_file_path, "w") as f:
                f.write(html)

            return html
        except InvalidArgumentException:
            raise InvalidArgumentException(f"Invalid URL {url}")
        except Exception as e:
            raise Exception(f"Failed to crawl {url}: {str(e)}")

    def quit(self):
        self.driver.quit()
0   crawl4ai/crawlers/__init__.py  Normal file
0   crawl4ai/crawlers/amazon_product/__init__.py  Normal file
20  crawl4ai/crawlers/amazon_product/crawler.py  Normal file
@@ -0,0 +1,20 @@
from crawl4ai.hub import BaseCrawler
import json  # needed for json.dumps in the error path below

__meta__ = {
    "version": "1.2.0",
    "tested_on": ["amazon.com"],
    "rate_limit": "50 RPM",
    "schema": {"product": ["name", "price"]}
}

class AmazonProductCrawler(BaseCrawler):
    async def run(self, url: str, **kwargs) -> str:
        try:
            self.logger.info(f"Crawling {url}")
            return '{"product": {"name": "Test Amazon Product"}}'
        except Exception as e:
            self.logger.error(f"Crawl failed: {str(e)}")
            return json.dumps({
                "error": str(e),
                "metadata": self.meta  # Include meta in error response
            })
0    crawl4ai/crawlers/google_search/__init__.py  Normal file
131  crawl4ai/crawlers/google_search/crawler.py  Normal file
@@ -0,0 +1,131 @@
from crawl4ai import BrowserConfig, AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.hub import BaseCrawler
from crawl4ai.utils import optimize_html, get_home_folder, preprocess_html_for_schema
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
from pathlib import Path
import json
import os
from typing import Dict


class GoogleSearchCrawler(BaseCrawler):
    __meta__ = {
        "version": "1.0.0",
        "tested_on": ["google.com/search*"],
        "rate_limit": "10 RPM",
        "description": "Crawls Google Search results (text + images)",
    }

    def __init__(self):
        super().__init__()
        self.js_script = (Path(__file__).parent / "script.js").read_text()

    async def run(self, url="", query: str = "", search_type: str = "text", schema_cache_path=None, **kwargs) -> str:
        """Crawl Google Search results for a query"""
        url = f"https://www.google.com/search?q={query}&gl=sg&hl=en" if search_type == "text" else f"https://www.google.com/search?q={query}&gl=sg&hl=en&tbs=qdr:d&udm=2"
        if kwargs.get("page_start", 1) > 1:
            url = f"{url}&start={kwargs['page_start'] * 10}"
        if kwargs.get("page_length", 1) > 1:
            url = f"{url}&num={kwargs['page_length']}"

        browser_config = BrowserConfig(headless=True, verbose=True)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            config = CrawlerRunConfig(
                cache_mode=kwargs.get("cache_mode", CacheMode.BYPASS),
                keep_attrs=["id", "class"],
                keep_data_attributes=True,
                delay_before_return_html=kwargs.get(
                    "delay", 2 if search_type == "image" else 1),
                js_code=self.js_script if search_type == "image" else None,
            )

            result = await crawler.arun(url=url, config=config)
            if not result.success:
                return json.dumps({"error": result.error})

            if search_type == "image":
                if result.js_execution_result.get("success", False) is False:
                    return json.dumps({"error": result.js_execution_result.get("error", "Unknown error")})
                if "results" in result.js_execution_result:
                    image_result = result.js_execution_result['results'][0]
                    if image_result.get("success", False) is False:
                        return json.dumps({"error": image_result.get("error", "Unknown error")})
                    return json.dumps(image_result["result"], indent=4)

            # For text search, extract structured data
            schemas = await self._build_schemas(result.cleaned_html, schema_cache_path)
            extracted = {
                key: JsonCssExtractionStrategy(schema=schemas[key]).run(
                    url=url, sections=[result.html]
                )
                for key in schemas
            }
            return json.dumps(extracted, indent=4)

    async def _build_schemas(self, html: str, schema_cache_path: str = None) -> Dict[str, Dict]:
        """Build extraction schemas (organic, top stories, etc.)"""
        home_dir = get_home_folder() if not schema_cache_path else schema_cache_path
        os.makedirs(f"{home_dir}/schema", exist_ok=True)

        # cleaned_html = optimize_html(html, threshold=100)
        cleaned_html = preprocess_html_for_schema(html)

        organic_schema = None
        if os.path.exists(f"{home_dir}/schema/organic_schema.json"):
            with open(f"{home_dir}/schema/organic_schema.json", "r") as f:
                organic_schema = json.load(f)
        else:
            organic_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
                    "title": "...",
                    "link": "...",
                    "snippet": "...",
                    "date": "1 hour ago",
                }""",
                query="""The given html is the crawled html from a Google search result. Please find the schema for an organic search item in the given html. I am interested in title, link, snippet text, and date."""
            )

            with open(f"{home_dir}/schema/organic_schema.json", "w") as f:
                f.write(json.dumps(organic_schema))

        top_stories_schema = None
        if os.path.exists(f"{home_dir}/schema/top_stories_schema.json"):
            with open(f"{home_dir}/schema/top_stories_schema.json", "r") as f:
                top_stories_schema = json.load(f)
        else:
            top_stories_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
                    "title": "...",
                    "link": "...",
                    "source": "Insider Monkey",
                    "date": "1 hour ago",
                }""",
                query="""The given html is the crawled html from a Google search result. Please find the schema for a Top Story item in the given html. I am interested in title, link, source, date, and imageUrl."""
            )

            with open(f"{home_dir}/schema/top_stories_schema.json", "w") as f:
                f.write(json.dumps(top_stories_schema))

        suggested_query_schema = None
        if os.path.exists(f"{home_dir}/schema/suggested_query_schema.json"):
            with open(f"{home_dir}/schema/suggested_query_schema.json", "r") as f:
                suggested_query_schema = json.load(f)
        else:
            suggested_query_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
                    "query": "A for Apple",
                }""",
                query="""The given HTML contains the crawled HTML from Google search results. Please find the schema for each suggested query in the section "People also search for" within the given HTML. I am interested in the queries only."""
            )
            with open(f"{home_dir}/schema/suggested_query_schema.json", "w") as f:
                f.write(json.dumps(suggested_query_schema))

        return {
            "organic_schema": organic_schema,
            "top_stories_schema": top_stories_schema,
            "suggested_query_schema": suggested_query_schema,
        }
115  crawl4ai/crawlers/google_search/script.js  Normal file
@@ -0,0 +1,115 @@
(() => {
    // Function to extract image data from Google Images page
    function extractImageData() {
        const keys = Object.keys(window.W_jd);
        let allImageData = [];
        let currentPosition = 0;

        // Get the symbol we'll use (from first valid entry)
        let targetSymbol;
        for (let key of keys) {
            try {
                const symbols = Object.getOwnPropertySymbols(window.W_jd[key]);
                if (symbols.length > 0) {
                    targetSymbol = symbols[0];
                    break;
                }
            } catch (e) {
                continue;
            }
        }

        if (!targetSymbol) return [];

        // Iterate through ALL keys
        for (let key of keys) {
            try {
                const o1 = window.W_jd[key][targetSymbol];
                if (!o1) continue;
                const data = Object.values(o1)[0];
                // const data = window.W_jd[key][targetSymbol]?.Ws;
                // Check if this is a valid image data entry
                if (data && Array.isArray(data[1])) {
                    const processedData = processImageEntry(data, currentPosition);
                    if (processedData) {
                        allImageData.push(processedData);
                        currentPosition++;
                    }
                }
            } catch (e) {
                continue;
            }
        }

        return allImageData;
    }

    function processImageEntry(entry, position) {
        const imageData = entry[1];
        if (!Array.isArray(imageData)) return null;

        // Extract the image ID
        const imageId = imageData[1];
        if (!imageId) return null;

        // Find the corresponding DOM element
        const domElement = document.querySelector(`[data-docid="${imageId}"]`);
        if (!domElement) return null;

        // Extract data from the array structure
        const [
            _,
            id,
            thumbnailInfo,
            imageInfo,
            __,
            ___,
            rgb,
            ____,
            _____,
            metadata
        ] = imageData;

        // Ensure we have the required data
        if (!thumbnailInfo || !imageInfo) return null;

        // Extract metadata from DOM
        const title = domElement?.querySelector('.toI8Rb')?.textContent?.trim();
        const source = domElement?.querySelector('.guK3rf')?.textContent?.trim();
        const link = domElement?.querySelector('a.EZAeBe')?.href;

        if (!link) return null;

        // Build Google Image URL
        const googleUrl = buildGoogleImageUrl(imageInfo[0], link, imageId, imageInfo[1], imageInfo[2]);

        return {
            title,
            imageUrl: imageInfo[0],
            imageWidth: imageInfo[2],
            imageHeight: imageInfo[1],
            thumbnailUrl: thumbnailInfo[0],
            thumbnailWidth: thumbnailInfo[2],
            thumbnailHeight: thumbnailInfo[1],
            source,
            domain: metadata['2000']?.[1] || new URL(link).hostname,
            link,
            googleUrl,
            position: position + 1
        };
    }

    function buildGoogleImageUrl(imgUrl, refUrl, tbnid, height, width) {
        const params = new URLSearchParams({
            imgurl: imgUrl,
            tbnid: tbnid,
            imgrefurl: refUrl,
            docid: tbnid,
            w: width.toString(),
            h: height.toString(),
        });

        return `https://www.google.com/imgres?${params.toString()}`;
    }
    return extractImageData();
})();
@@ -1,98 +0,0 @@
import os
from pathlib import Path
import sqlite3
from typing import Optional, Tuple

DB_PATH = os.path.join(Path.home(), ".crawl4ai")
os.makedirs(DB_PATH, exist_ok=True)
DB_PATH = os.path.join(DB_PATH, "crawl4ai.db")

def init_db():
    global DB_PATH
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    cursor.execute('''
        CREATE TABLE IF NOT EXISTS crawled_data (
            url TEXT PRIMARY KEY,
            html TEXT,
            cleaned_html TEXT,
            markdown TEXT,
            extracted_content TEXT,
            success BOOLEAN
        )
    ''')
    conn.commit()
    conn.close()

def check_db_path():
    if not DB_PATH:
        raise ValueError("Database path is not set or is empty.")

def get_cached_url(url: str) -> Optional[Tuple[str, str, str, str, str, bool]]:
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute('SELECT url, html, cleaned_html, markdown, extracted_content, success FROM crawled_data WHERE url = ?', (url,))
        result = cursor.fetchone()
        conn.close()
        return result
    except Exception as e:
        print(f"Error retrieving cached URL: {e}")
        return None

def cache_url(url: str, html: str, cleaned_html: str, markdown: str, extracted_content: str, success: bool):
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute('''
            INSERT INTO crawled_data (url, html, cleaned_html, markdown, extracted_content, success)
            VALUES (?, ?, ?, ?, ?, ?)
            ON CONFLICT(url) DO UPDATE SET
                html = excluded.html,
                cleaned_html = excluded.cleaned_html,
                markdown = excluded.markdown,
                extracted_content = excluded.extracted_content,
                success = excluded.success
        ''', (url, html, cleaned_html, markdown, extracted_content, success))
        conn.commit()
        conn.close()
    except Exception as e:
        print(f"Error caching URL: {e}")

def get_total_count() -> int:
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute('SELECT COUNT(*) FROM crawled_data')
        result = cursor.fetchone()
        conn.close()
        return result[0]
    except Exception as e:
        print(f"Error getting total count: {e}")
        return 0

def clear_db():
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute('DELETE FROM crawled_data')
        conn.commit()
        conn.close()
    except Exception as e:
        print(f"Error clearing database: {e}")

def flush_db():
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute('DROP TABLE crawled_data')
        conn.commit()
        conn.close()
    except Exception as e:
        print(f"Error flushing database: {e}")
47  crawl4ai/deep_crawling/__init__.py  Normal file
@@ -0,0 +1,47 @@
# deep_crawling/__init__.py
from .base_strategy import DeepCrawlDecorator, DeepCrawlStrategy
from .bfs_strategy import BFSDeepCrawlStrategy
from .bff_strategy import BestFirstCrawlingStrategy
from .dfs_strategy import DFSDeepCrawlStrategy
from .filters import (
    FilterChain,
    ContentTypeFilter,
    DomainFilter,
    URLFilter,
    URLPatternFilter,
    FilterStats,
    ContentRelevanceFilter,
    SEOFilter
)
from .scorers import (
    KeywordRelevanceScorer,
    URLScorer,
    CompositeScorer,
    DomainAuthorityScorer,
    FreshnessScorer,
    PathDepthScorer,
    ContentTypeScorer
)

__all__ = [
    "DeepCrawlDecorator",
    "DeepCrawlStrategy",
    "BFSDeepCrawlStrategy",
    "BestFirstCrawlingStrategy",
    "DFSDeepCrawlStrategy",
    "FilterChain",
    "ContentTypeFilter",
    "DomainFilter",
    "URLFilter",
    "URLPatternFilter",
    "FilterStats",
    "ContentRelevanceFilter",
    "SEOFilter",
    "KeywordRelevanceScorer",
    "URLScorer",
    "CompositeScorer",
    "DomainAuthorityScorer",
    "FreshnessScorer",
    "PathDepthScorer",
    "ContentTypeScorer",
]
159  crawl4ai/deep_crawling/base_strategy.py  Normal file
@@ -0,0 +1,159 @@
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import AsyncGenerator, Optional, Set, List, Dict
from functools import wraps
from contextvars import ContextVar
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn


class DeepCrawlDecorator:
    """Decorator that adds deep crawling capability to the arun method."""
    deep_crawl_active = ContextVar("deep_crawl_active", default=False)

    def __init__(self, crawler: AsyncWebCrawler):
        self.crawler = crawler

    def __call__(self, original_arun):
        @wraps(original_arun)
        async def wrapped_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
            # Only start a deep crawl when a strategy is set and none is already
            # active; otherwise fall through to the original method (avoids recursion).
            if config and config.deep_crawl_strategy and not self.deep_crawl_active.get():
                token = self.deep_crawl_active.set(True)
                # Await the arun call to get the actual result object.
                result_obj = await config.deep_crawl_strategy.arun(
                    crawler=self.crawler,
                    start_url=url,
                    config=config
                )
                if config.stream:
                    async def result_wrapper():
                        try:
                            async for result in result_obj:
                                yield result
                        finally:
                            self.deep_crawl_active.reset(token)
                    return result_wrapper()
                else:
                    try:
                        return result_obj
                    finally:
                        self.deep_crawl_active.reset(token)
            return await original_arun(url, config=config, **kwargs)
        return wrapped_arun
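A minimal sketch of how this decorator is presumably wired into the crawler; the exact attachment point inside AsyncWebCrawler is not shown in this diff, so treat the wiring below as an assumption:

crawler = AsyncWebCrawler()
crawler.arun = DeepCrawlDecorator(crawler)(crawler.arun)
# From here on, passing a config with deep_crawl_strategy set makes arun delegate
# to the strategy; a plain config falls through to the original arun.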
class DeepCrawlStrategy(ABC):
    """
    Abstract base class for deep crawling strategies.

    Core functions:
    - arun: Main entry point that returns an async generator of CrawlResults.
    - shutdown: Clean up resources.
    - can_process_url: Validate a URL and decide whether to process it.
    - link_discovery: Extract and process links from a CrawlResult.
    """

    @abstractmethod
    async def _arun_batch(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlResult]:
        """
        Batch (non-streaming) mode:
        Processes one BFS level at a time, then returns all the results.
        """
        pass

    @abstractmethod
    async def _arun_stream(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Streaming mode:
        Processes one BFS level at a time and yields results immediately as they arrive.
        """
        pass

    async def arun(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: Optional[CrawlerRunConfig] = None,
    ) -> RunManyReturn:
        """
        Traverse the given URL using the specified crawler.

        Args:
            start_url (str): The URL from which to start crawling.
            crawler (AsyncWebCrawler): The crawler instance to use.
            config (Optional[CrawlerRunConfig]): Crawler configuration.

        Returns:
            Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
        """
        if config is None:
            raise ValueError("CrawlerRunConfig must be provided")

        if config.stream:
            return self._arun_stream(start_url, crawler, config)
        else:
            return await self._arun_batch(start_url, crawler, config)

    def __call__(self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig):
        return self.arun(start_url, crawler, config)

    @abstractmethod
    async def shutdown(self) -> None:
        """
        Clean up resources used by the deep crawl strategy.
        """
        pass

    @abstractmethod
    async def can_process_url(self, url: str, depth: int) -> bool:
        """
        Validate the URL format and apply custom filtering logic.

        Args:
            url (str): The URL to validate.
            depth (int): The current depth in the crawl.

        Returns:
            bool: True if the URL should be processed, False otherwise.
        """
        pass

    @abstractmethod
    async def link_discovery(
        self,
        result: CrawlResult,
        source_url: str,
        current_depth: int,
        visited: Set[str],
        next_level: List[tuple],
        depths: Dict[str, int],
    ) -> None:
        """
        Extract and process links from the given crawl result.

        This method should:
        - Validate each extracted URL using can_process_url.
        - Optionally score URLs.
        - Append valid URLs (and their parent references) to the next_level list.
        - Update the depths dictionary with the new depth for each URL.

        Args:
            result (CrawlResult): The result from a crawl operation.
            source_url (str): The URL from which this result was obtained.
            current_depth (int): The depth at which the source URL was processed.
            visited (Set[str]): Set of already visited URLs.
            next_level (List[tuple]): List of tuples (url, parent_url) for the next BFS level.
            depths (Dict[str, int]): Mapping of URLs to their current depth.
        """
        pass
257  crawl4ai/deep_crawling/bff_strategy.py  Normal file
@@ -0,0 +1,257 @@
# best_first_crawling_strategy.py
import asyncio
import logging
from datetime import datetime
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
from urllib.parse import urlparse

from ..models import TraversalStats
from .filters import FilterChain
from .scorers import URLScorer
from . import DeepCrawlStrategy

from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn
from ..utils import normalize_url_for_deep_crawl

from math import inf as infinity

# Configurable batch size for processing items from the priority queue
BATCH_SIZE = 10


class BestFirstCrawlingStrategy(DeepCrawlStrategy):
    """
    Best-First Crawling Strategy using a priority queue.

    This strategy prioritizes URLs based on their score, ensuring that higher-value
    pages are crawled first. It reimplements the core traversal loop to use a priority
    queue while keeping URL validation and link discovery consistent with our design.

    Core methods:
    - arun: Returns either a list (batch mode) or an async generator (stream mode).
    - _arun_best_first: Core generator that uses a priority queue to yield CrawlResults.
    - can_process_url: Validates URLs and applies filtering (inherited behavior).
    - link_discovery: Extracts and validates links from a CrawlResult.
    """
    def __init__(
        self,
        max_depth: int,
        filter_chain: FilterChain = FilterChain(),
        url_scorer: Optional[URLScorer] = None,
        include_external: bool = False,
        max_pages: int = infinity,
        logger: Optional[logging.Logger] = None,
    ):
        self.max_depth = max_depth
        self.filter_chain = filter_chain
        self.url_scorer = url_scorer
        self.include_external = include_external
        self.max_pages = max_pages
        self.logger = logger or logging.getLogger(__name__)
        self.stats = TraversalStats(start_time=datetime.now())
        self._cancel_event = asyncio.Event()
        self._pages_crawled = 0

    async def can_process_url(self, url: str, depth: int) -> bool:
        """
        Validate the URL format and apply filtering.
        For the starting URL (depth 0), filtering is bypassed.
        """
        try:
            parsed = urlparse(url)
            if not parsed.scheme or not parsed.netloc:
                raise ValueError("Missing scheme or netloc")
            if parsed.scheme not in ("http", "https"):
                raise ValueError("Invalid scheme")
            if "." not in parsed.netloc:
                raise ValueError("Invalid domain")
        except Exception as e:
            self.logger.warning(f"Invalid URL: {url}, error: {e}")
            return False

        if depth != 0 and not await self.filter_chain.apply(url):
            return False

        return True

    async def link_discovery(
        self,
        result: CrawlResult,
        source_url: str,
        current_depth: int,
        visited: Set[str],
        next_links: List[Tuple[str, Optional[str]]],
        depths: Dict[str, int],
    ) -> None:
        """
        Extract links from the crawl result, validate them, and append new URLs
        (with their parent references) to next_links.
        Also updates the depths dictionary.
        """
        new_depth = current_depth + 1
        if new_depth > self.max_depth:
            return

        # If we've reached the max pages limit, don't discover new links
        remaining_capacity = self.max_pages - self._pages_crawled
        if remaining_capacity <= 0:
            self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
            return

        # Retrieve internal links; include external links if enabled.
        links = result.links.get("internal", [])
        if self.include_external:
            links += result.links.get("external", [])

        # If we have more links than remaining capacity, limit how many we'll process
        valid_links = []
        for link in links:
            url = link.get("href")
            base_url = normalize_url_for_deep_crawl(url, source_url)
            if base_url in visited:
                continue
            if not await self.can_process_url(url, new_depth):
                self.stats.urls_skipped += 1
                continue

            valid_links.append(base_url)

        # If we have more valid links than capacity, limit them
        if len(valid_links) > remaining_capacity:
            valid_links = valid_links[:remaining_capacity]
            self.logger.info(f"Limiting to {remaining_capacity} URLs due to max_pages limit")

        # Record the new depths and add to next_links
        for url in valid_links:
            depths[url] = new_depth
            next_links.append((url, source_url))

    async def _arun_best_first(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Core best-first crawl method using a priority queue.

        The queue items are tuples of (score, depth, url, parent_url). Lower scores
        are treated as higher priority. URLs are processed in batches for efficiency.
        """
        queue: asyncio.PriorityQueue = asyncio.PriorityQueue()
        # Push the initial URL with score 0 and depth 0.
        await queue.put((0, 0, start_url, None))
        visited: Set[str] = set()
        depths: Dict[str, int] = {start_url: 0}

        while not queue.empty() and not self._cancel_event.is_set():
            # Stop if we've reached the max pages limit
            if self._pages_crawled >= self.max_pages:
                self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
                break

            batch: List[Tuple[float, int, str, Optional[str]]] = []
            # Retrieve up to BATCH_SIZE items from the priority queue.
            for _ in range(BATCH_SIZE):
                if queue.empty():
                    break
                item = await queue.get()
                score, depth, url, parent_url = item
                if url in visited:
                    continue
                visited.add(url)
                batch.append(item)

            if not batch:
                continue

            # Process the current batch of URLs.
            urls = [item[2] for item in batch]
            batch_config = config.clone(deep_crawl_strategy=None, stream=True)
            stream_gen = await crawler.arun_many(urls=urls, config=batch_config)
            async for result in stream_gen:
                result_url = result.url
                # Find the corresponding tuple from the batch.
                corresponding = next((item for item in batch if item[2] == result_url), None)
                if not corresponding:
                    continue
                score, depth, url, parent_url = corresponding
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                result.metadata["parent_url"] = parent_url
                result.metadata["score"] = score

                # Count only successful crawls toward max_pages limit
                if result.success:
                    self._pages_crawled += 1

                yield result

                # Only discover links from successful crawls
                if result.success:
                    # Discover new links from this result
                    new_links: List[Tuple[str, Optional[str]]] = []
                    await self.link_discovery(result, result_url, depth, visited, new_links, depths)

                    for new_url, new_parent in new_links:
                        new_depth = depths.get(new_url, depth + 1)
                        new_score = self.url_scorer.score(new_url) if self.url_scorer else 0
                        await queue.put((new_score, new_depth, new_url, new_parent))

        # End of crawl.

    async def _arun_batch(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlResult]:
        """
        Best-first crawl in batch mode.

        Aggregates all CrawlResults into a list.
        """
        results: List[CrawlResult] = []
        async for result in self._arun_best_first(start_url, crawler, config):
            results.append(result)
        return results

    async def _arun_stream(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Best-first crawl in streaming mode.

        Yields CrawlResults as they become available.
        """
        async for result in self._arun_best_first(start_url, crawler, config):
            yield result

    async def arun(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: Optional[CrawlerRunConfig] = None,
    ) -> "RunManyReturn":
        """
        Main entry point for best-first crawling.

        Returns either a list (batch mode) or an async generator (stream mode)
        of CrawlResults.
        """
        if config is None:
            raise ValueError("CrawlerRunConfig must be provided")
        if config.stream:
            return self._arun_stream(start_url, crawler, config)
        else:
            return await self._arun_batch(start_url, crawler, config)

    async def shutdown(self) -> None:
        """
        Signal cancellation and clean up resources.
        """
        self._cancel_event.set()
        self.stats.end_time = datetime.now()
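A minimal usage sketch of this strategy in streaming mode, assuming the standard AsyncWebCrawler entry point and the decorator wiring shown earlier; the target URL is a placeholder:

import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    strategy = BestFirstCrawlingStrategy(max_depth=2, max_pages=20)
    config = CrawlerRunConfig(deep_crawl_strategy=strategy, stream=True)
    async with AsyncWebCrawler() as crawler:
        # In stream mode the decorated arun returns an async generator of results.
        async for result in await crawler.arun("https://example.com", config=config):
            print(result.metadata["depth"], result.metadata["score"], result.url)

asyncio.run(main())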
245  crawl4ai/deep_crawling/bfs_strategy.py  Normal file
@@ -0,0 +1,245 @@
# bfs_deep_crawl_strategy.py
import asyncio
import logging
from datetime import datetime
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
from urllib.parse import urlparse

from ..models import TraversalStats
from .filters import FilterChain
from .scorers import URLScorer
from . import DeepCrawlStrategy
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult
from ..utils import normalize_url_for_deep_crawl, efficient_normalize_url_for_deep_crawl
from math import inf as infinity

class BFSDeepCrawlStrategy(DeepCrawlStrategy):
    """
    Breadth-First Search deep crawling strategy.

    Core functions:
    - arun: Main entry point; splits execution into batch or stream modes.
    - link_discovery: Extracts, filters, and (if needed) scores the outgoing URLs.
    - can_process_url: Validates URL format and applies the filter chain.
    """
    def __init__(
        self,
        max_depth: int,
        filter_chain: FilterChain = FilterChain(),
        url_scorer: Optional[URLScorer] = None,
        include_external: bool = False,
        score_threshold: float = -infinity,
        max_pages: int = infinity,
        logger: Optional[logging.Logger] = None,
    ):
        self.max_depth = max_depth
        self.filter_chain = filter_chain
        self.url_scorer = url_scorer
        self.include_external = include_external
        self.score_threshold = score_threshold
        self.max_pages = max_pages
        self.logger = logger or logging.getLogger(__name__)
        self.stats = TraversalStats(start_time=datetime.now())
        self._cancel_event = asyncio.Event()
        self._pages_crawled = 0

    async def can_process_url(self, url: str, depth: int) -> bool:
        """
        Validates the URL and applies the filter chain.
        For the start URL (depth 0) filtering is bypassed.
        """
        try:
            parsed = urlparse(url)
            if not parsed.scheme or not parsed.netloc:
                raise ValueError("Missing scheme or netloc")
            if parsed.scheme not in ("http", "https"):
                raise ValueError("Invalid scheme")
            if "." not in parsed.netloc:
                raise ValueError("Invalid domain")
        except Exception as e:
            self.logger.warning(f"Invalid URL: {url}, error: {e}")
            return False

        if depth != 0 and not await self.filter_chain.apply(url):
            return False

        return True

    async def link_discovery(
        self,
        result: CrawlResult,
        source_url: str,
        current_depth: int,
        visited: Set[str],
        next_level: List[Tuple[str, Optional[str]]],
        depths: Dict[str, int],
    ) -> None:
        """
        Extracts links from the crawl result, validates and scores them, and
        prepares the next level of URLs.
        Each valid URL is appended to next_level as a tuple (url, parent_url)
        and its depth is tracked.
        """
        next_depth = current_depth + 1
        if next_depth > self.max_depth:
            return

        # If we've reached the max pages limit, don't discover new links
        remaining_capacity = self.max_pages - self._pages_crawled
        if remaining_capacity <= 0:
            self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
            return

        # Get internal links and, if enabled, external links.
        links = result.links.get("internal", [])
        if self.include_external:
            links += result.links.get("external", [])

        valid_links = []

        # First collect all valid links
        for link in links:
            url = link.get("href")
            # Strip URL fragments to avoid duplicate crawling
            # base_url = url.split('#')[0] if url else url
            base_url = normalize_url_for_deep_crawl(url, source_url)
            if base_url in visited:
                continue
            if not await self.can_process_url(url, next_depth):
                self.stats.urls_skipped += 1
                continue

            # Score the URL if a scorer is provided
            score = self.url_scorer.score(base_url) if self.url_scorer else 0

            # Skip URLs with scores below the threshold
            if score < self.score_threshold:
                self.logger.debug(f"URL {url} skipped: score {score} below threshold {self.score_threshold}")
                self.stats.urls_skipped += 1
                continue

            visited.add(base_url)
            valid_links.append((base_url, score))

        # If we have more valid links than capacity, sort by score and take the top ones
        if len(valid_links) > remaining_capacity:
            if self.url_scorer:
                # Sort by score in descending order
                valid_links.sort(key=lambda x: x[1], reverse=True)
            # Take only as many as we have capacity for
            valid_links = valid_links[:remaining_capacity]
            self.logger.info(f"Limiting to {remaining_capacity} URLs due to max_pages limit")

        # Process the final selected links
        for url, score in valid_links:
            # attach the score to metadata if needed
            if score:
                result.metadata = result.metadata or {}
                result.metadata["score"] = score
            next_level.append((url, source_url))
            depths[url] = next_depth

    async def _arun_batch(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlResult]:
        """
        Batch (non-streaming) mode:
        Processes one BFS level at a time, then returns all the results.
        """
        visited: Set[str] = set()
        # current_level holds tuples: (url, parent_url)
        current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
        depths: Dict[str, int] = {start_url: 0}

        results: List[CrawlResult] = []

        while current_level and not self._cancel_event.is_set():
            next_level: List[Tuple[str, Optional[str]]] = []
            urls = [url for url, _ in current_level]

            # Clone the config to disable deep crawling recursion and enforce batch mode.
            batch_config = config.clone(deep_crawl_strategy=None, stream=False)
            batch_results = await crawler.arun_many(urls=urls, config=batch_config)

            # Update pages crawled counter - count only successful crawls
            successful_results = [r for r in batch_results if r.success]
            self._pages_crawled += len(successful_results)

            for result in batch_results:
                url = result.url
                depth = depths.get(url, 0)
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                parent_url = next((parent for (u, parent) in current_level if u == url), None)
                result.metadata["parent_url"] = parent_url
                results.append(result)

                # Only discover links from successful crawls
                if result.success:
                    # Link discovery will handle the max pages limit internally
                    await self.link_discovery(result, url, depth, visited, next_level, depths)

            current_level = next_level

        return results

    async def _arun_stream(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Streaming mode:
        Processes one BFS level at a time and yields results immediately as they arrive.
        """
        visited: Set[str] = set()
        current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
        depths: Dict[str, int] = {start_url: 0}

        while current_level and not self._cancel_event.is_set():
            next_level: List[Tuple[str, Optional[str]]] = []
            urls = [url for url, _ in current_level]
            visited.update(urls)

            stream_config = config.clone(deep_crawl_strategy=None, stream=True)
            stream_gen = await crawler.arun_many(urls=urls, config=stream_config)

            # Keep track of processed results for this batch
            results_count = 0
            async for result in stream_gen:
                url = result.url
                depth = depths.get(url, 0)
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                parent_url = next((parent for (u, parent) in current_level if u == url), None)
                result.metadata["parent_url"] = parent_url

                # Count only successful crawls
                if result.success:
                    self._pages_crawled += 1

                results_count += 1
                yield result

                # Only discover links from successful crawls
                if result.success:
                    # Link discovery will handle the max pages limit internally
                    await self.link_discovery(result, url, depth, visited, next_level, depths)

            # If we didn't get results back (e.g. due to errors), avoid getting stuck in an infinite loop
            # by considering these URLs as visited but not counting them toward the max_pages limit
            if results_count == 0 and urls:
                self.logger.warning(f"No results returned for {len(urls)} URLs, marking as visited")

            current_level = next_level

    async def shutdown(self) -> None:
        """
        Clean up resources and signal cancellation of the crawl.
        """
        self._cancel_event.set()
        self.stats.end_time = datetime.now()
432  crawl4ai/deep_crawling/crazy.py  Normal file
@@ -0,0 +1,432 @@
from __future__ import annotations
# I just got crazy, trying to write K&R C but in Python. Right now I feel like I'm in a quantum state.
# I probably won't use this; I just want to leave it here. A century later, the future human race will be like, "WTF?"

# ------ Imports That Will Make You Question Reality ------ #
from functools import wraps
from contextvars import ContextVar
import inspect

from crawl4ai import CacheMode
from crawl4ai.async_configs import CrawlerRunConfig
from crawl4ai.models import CrawlResult, TraversalStats
from crawl4ai.deep_crawling.filters import FilterChain
from crawl4ai.async_webcrawler import AsyncWebCrawler
import time
import logging
from urllib.parse import urlparse

from abc import ABC, abstractmethod
from collections import deque
import asyncio
from typing import (
    AsyncGenerator,
    Dict,
    List,
    TypeVar,
    Generic,
    Tuple,
    Callable,
    Awaitable,
    Union,
)
from functools import lru_cache
import mmh3
from bitarray import bitarray
import numpy as np
from heapq import heappush, heappop

# ------ Type Algebra Mastery ------ #
CrawlResultT = TypeVar("CrawlResultT", bound="CrawlResult")
PriorityT = TypeVar("PriorityT")
P = TypeVar("P")

# ------ Hyperscalar Context Management ------ #
deep_crawl_ctx = ContextVar("deep_crawl_stack", default=deque())

# ------ Algebraic Crawler Monoid ------ #
class TraversalContext:
    __slots__ = ('visited', 'frontier', 'depths', 'priority_fn', 'current_depth')

    def __init__(self,
                 priority_fn: Callable[[str], Awaitable[float]] = lambda _: 1.0):
        self.visited: BloomFilter = BloomFilter(10**6, 0.01)  # 1M items, 1% FP
        self.frontier: PriorityQueue = PriorityQueue()
        self.depths: Dict[str, int] = {}
        self.priority_fn = priority_fn
        self.current_depth = 0

    def clone_for_level(self) -> TraversalContext:
        """Monadic context propagation"""
        new_ctx = TraversalContext(self.priority_fn)
        new_ctx.visited = self.visited.copy()
        new_ctx.depths = self.depths.copy()
        new_ctx.current_depth = self.current_depth
        return new_ctx

class PriorityQueue(Generic[PriorityT]):
    """Fibonacci heap-inspired priority queue with O(1) amortized operations"""
    __slots__ = ('_heap', '_index')

    def __init__(self):
        self._heap: List[Tuple[PriorityT, float, P]] = []
        self._index: Dict[P, int] = {}

    def insert(self, priority: PriorityT, item: P) -> None:
        tiebreaker = time.time()  # Ensure FIFO for equal priorities
        heappush(self._heap, (priority, tiebreaker, item))
        self._index[item] = len(self._heap) - 1

    def extract(self, top_n=1) -> P:
        items = []
        for _ in range(top_n):
            if not self._heap:
                break
            priority, _, item = heappop(self._heap)
            del self._index[item]
            items.append(item)
        if not items:
            raise IndexError("Priority queue empty")
        return items
        # Earlier single-item variant, kept for reference:
        # while self._heap:
        #     _, _, item = heappop(self._heap)
        #     if item in self._index:
        #         del self._index[item]
        #         return item
        # raise IndexError("Priority queue empty")

    def is_empty(self) -> bool:
        return not bool(self._heap)

class BloomFilter:
    """Optimal Bloom filter using murmur3 hash avalanche"""
    __slots__ = ('size', 'hashes', 'bits')

    def __init__(self, capacity: int, error_rate: float):
        self.size = self._optimal_size(capacity, error_rate)
        self.hashes = self._optimal_hashes(capacity, self.size)
        self.bits = bitarray(self.size)
        self.bits.setall(False)

    @staticmethod
    def _optimal_size(n: int, p: float) -> int:
        m = -(n * np.log(p)) / (np.log(2) ** 2)
        return int(np.ceil(m))

    @staticmethod
    def _optimal_hashes(n: int, m: int) -> int:
        k = (m / n) * np.log(2)
        return int(np.ceil(k))

    def add(self, item: str) -> None:
        for seed in range(self.hashes):
            digest = mmh3.hash(item, seed) % self.size
            self.bits[digest] = True

    def __contains__(self, item: str) -> bool:
        return all(
            self.bits[mmh3.hash(item, seed) % self.size]
            for seed in range(self.hashes)
        )

    def copy(self) -> BloomFilter:
        new = object.__new__(BloomFilter)
        new.size = self.size
        new.hashes = self.hashes
        new.bits = self.bits.copy()
        return new

    def __len__(self) -> int:
        """
        Estimates the number of items in the filter using the
        count of set bits and the formula:
            n = -m/k * ln(1 - X/m)
        where:
            m = size of bit array
            k = number of hash functions
            X = count of set bits
        """
        set_bits = self.bits.count(True)
        if set_bits == 0:
            return 0

        # Use the inverse bloom filter formula to estimate cardinality
        return int(
            -(self.size / self.hashes) *
            np.log(1 - set_bits / self.size)
        )

    def bit_count(self) -> int:
        """Returns the raw count of set bits in the filter"""
        return self.bits.count(True)

    def __repr__(self) -> str:
        return f"BloomFilter(est_items={len(self)}, bits={self.bit_count()}/{self.size})"
|
||||
|
||||
# ------ Hyper-Optimal Deep Crawl Core ------ #
|
||||
class DeepCrawlDecorator:
|
||||
"""Metaprogramming marvel: Zero-cost deep crawl abstraction"""
|
||||
def __init__(self, crawler: AsyncWebCrawler):
|
||||
self.crawler = crawler
|
||||
|
||||
def __call__(self, original_arun: Callable) -> Callable:
|
||||
@wraps(original_arun)
|
||||
async def quantum_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
|
||||
stack = deep_crawl_ctx.get()
|
||||
if config and config.deep_crawl_strategy and not stack:
|
||||
stack.append(self.crawler)
|
||||
try:
|
||||
deep_crawl_ctx.set(stack)
|
||||
async for result in config.deep_crawl_strategy.traverse(
|
||||
start_url=url,
|
||||
crawler=self.crawler,
|
||||
config=config
|
||||
):
|
||||
yield result
|
||||
finally:
|
||||
stack.pop()
|
||||
deep_crawl_ctx.set(stack)
|
||||
else:
|
||||
result = await original_arun(url, config=config, **kwargs)
|
||||
yield result
|
||||
return quantum_arun
|
||||
|
||||
|
||||
async def collect_results(url, crawler, config):
|
||||
if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")):
|
||||
setattr(crawler, "arun", getattr(crawler, "original_arun"))
|
||||
|
||||
ret = crawler.arun(url, config=config)
|
||||
# If arun is an async generator, iterate over it
|
||||
if inspect.isasyncgen(ret):
|
||||
return [r async for r in ret]
|
||||
# Otherwise, await the coroutine and normalize to a list
|
||||
result = await ret
|
||||
return result if isinstance(result, list) else [result]
|
||||
|
||||
async def collect_many_results(url, crawler, config):
|
||||
# Replace back arun to its original implementation
|
||||
if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")):
|
||||
setattr(crawler, "arun", getattr(crawler, "original_arun"))
|
||||
ret = crawler.arun_many(url, config=config)
|
||||
# If arun is an async generator, iterate over it
|
||||
if inspect.isasyncgen(ret):
|
||||
return [r async for r in ret]
|
||||
# Otherwise, await the coroutine and normalize to a list
|
||||
result = await ret
|
||||
return result if isinstance(result, list) else [result]
|
||||
|
||||
|
||||
# ------ Deep Crawl Strategy Interface ------ #
|
||||
CrawlResultT = TypeVar("CrawlResultT", bound=CrawlResult)
|
||||
# In batch mode we return List[CrawlResult] and in stream mode an AsyncGenerator.
|
||||
RunManyReturn = Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
|
||||
|
||||
|
||||
class DeepCrawlStrategy(ABC):
|
||||
"""Abstract base class that will make Dijkstra smile"""
|
||||
@abstractmethod
|
||||
async def traverse(self,
|
||||
start_url: str,
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig) -> RunManyReturn:
|
||||
"""Traverse with O(1) memory complexity via generator fusion"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
def precompute_priority(self, url: str) -> Awaitable[float]:
|
||||
"""Quantum-inspired priority precomputation"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
|
||||
"""Hilbert-curve optimized link generation"""
|
||||
pass

# ------ BFS That Would Make Knuth Proud ------ #

def calculate_quantum_batch_size(
    depth: int,
    max_depth: int,
    frontier_size: int,
    visited_size: int
) -> int:
    """
    Calculates optimal batch size for URL processing using quantum-inspired mathematical principles.

    This function implements a sophisticated batch size calculation using:
    1. Golden Ratio (φ) based scaling for optimal irrationality
    2. Depth-aware amplitude modulation
    3. Harmonic series dampening
    4. Logarithmic growth control
    5. Dynamic frontier adaptation

    The formula follows the quantum harmonic oscillator principle:
        N = ⌈φ^(2d) * log₂(|V|) * H(d)⁻¹ * min(20, |F|/10)⌉
    where:
        φ    = Golden Ratio ((1 + √5) / 2)
        d    = depth factor (normalized remaining depth)
        |V|  = size of visited set
        H(d) = d-th harmonic number
        |F|  = frontier size

    Args:
        depth (int): Current traversal depth
        max_depth (int): Maximum allowed depth
        frontier_size (int): Current size of frontier queue
        visited_size (int): Number of URLs visited so far

    Returns:
        int: Optimal batch size bounded between 1 and 100

    Mathematical Properties:
        - Maintains O(log n) growth with respect to visited size
        - Provides φ-optimal distribution of resources
        - Ensures quantum-like state transitions between depths
        - Harmonically dampened to prevent exponential explosion
    """
    # Golden ratio φ = (1 + √5) / 2
    φ = (1 + 5 ** 0.5) / 2

    # Calculate normalized depth factor in [0, 1]
    depth_factor = (max_depth - depth) / max_depth if depth < max_depth else 0

    # Compute harmonic number for current depth
    harmonic = sum(1 / k for k in range(1, depth + 2))

    # Calculate quantum batch size
    batch_size = int(np.ceil(
        (φ ** (depth_factor * 2)) *           # Golden ratio scaling
        np.log2(visited_size + 2) *           # Logarithmic growth factor
        (1 / harmonic) *                      # Harmonic dampening
        max(1, min(20, frontier_size / 10))   # Frontier-aware scaling
    ))

    # Enforce practical bounds
    return max(1, min(100, batch_size))
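
As a quick sanity check of the formula: with depth=1, max_depth=3, frontier_size=50 and visited_size=10, depth_factor = 2/3, φ^(4/3) ≈ 1.90, log₂(12) ≈ 3.58, the harmonic term is 1/1.5, and the frontier term is min(20, 5) = 5, so the result is ⌈1.90 · 3.58 · (1/1.5) · 5⌉ = 23, inside the [1, 100] bounds:

#   calculate_quantum_batch_size(depth=1, max_depth=3, frontier_size=50, visited_size=10)  # -> 23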


class BFSDeepCrawlStrategy(DeepCrawlStrategy):
    """Breadth-First Search with Einstein-Rosen bridge optimization"""
    __slots__ = ('max_depth', 'filter_chain', 'priority_fn', 'stats', '_cancel', 'semaphore')

    def __init__(self,
                 max_depth: int,
                 filter_chain: FilterChain = FilterChain(),
                 priority_fn: Callable[[str], Awaitable[float]] = lambda url: 1.0,
                 logger: logging.Logger = None):
        self.max_depth = max_depth
        self.filter_chain = filter_chain
        self.priority_fn = priority_fn
        self.stats = TraversalStats()
        self._cancel = asyncio.Event()
        self.semaphore = asyncio.Semaphore(1000)

    async def traverse(self,
                       start_url: str,
                       crawler: AsyncWebCrawler,
                       config: CrawlerRunConfig) -> RunManyReturn:
        """Non-blocking BFS with O(b^d) time complexity awareness"""
        ctx = TraversalContext(self.priority_fn)
        ctx.frontier.insert(self.priority_fn(start_url), (start_url, None, 0))
        ctx.visited.add(start_url)
        ctx.depths[start_url] = 0

        while not ctx.frontier.is_empty() and not self._cancel.is_set():
            # Use the batch-size heuristic above to pick the top_n value
            top_n = calculate_quantum_batch_size(
                depth=ctx.current_depth,
                max_depth=self.max_depth,
                frontier_size=len(ctx.frontier._heap),
                visited_size=len(ctx.visited)
            )

            urls = ctx.frontier.extract(top_n=top_n)
            # url, parent, depth = ctx.frontier.extract(top_n=top_n)
            if urls:
                ctx.current_depth = urls[0][2]

            async with self.semaphore:
                results = await collect_many_results([url for (url, parent, depth) in urls], crawler, config)
                # results = await asyncio.gather(*[
                #     collect_results(url, crawler, config) for (url, parent, depth) in urls
                # ])
                # result = _result[0]
                for ix, result in enumerate(results):
                    url, parent, depth = result.url, urls[ix][1], urls[ix][2]
                    result.metadata['depth'] = depth
                    result.metadata['parent'] = parent
                    yield result

                    if depth < self.max_depth:
                        async for link in self.link_hypercube(result):
                            if link not in ctx.visited:
                                priority = self.priority_fn(link)
                                ctx.frontier.insert(priority, (link, url, depth + 1))
                                ctx.visited.add(link)
                                ctx.depths[link] = depth + 1

    @lru_cache(maxsize=65536)
    async def validate_url(self, url: str) -> bool:
        """Memoized URL validation with λ-calculus purity"""
        try:
            parsed = urlparse(url)
            return (parsed.scheme in {'http', 'https'}
                    and '.' in parsed.netloc
                    and await self.filter_chain.apply(url))
        except Exception:
            return False

    async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
        """Hilbert-ordered link generation with O(1) yield latency"""
        links = (link['href'] for link in result.links.get('internal', []))
        validated = filter(self.validate_url, links)
        for link in sorted(validated, key=lambda x: -self.priority_fn(x)):
            yield link

    def __aiter__(self) -> AsyncGenerator[CrawlResult, None]:
        """Native async iterator interface"""
        return self.traverse()

    async def __anext__(self) -> CrawlResult:
        """True async iterator protocol implementation"""
        result = await self.traverse().__anext__()
        if result:
            return result
        raise StopAsyncIteration

    async def precompute_priority(self, url):
        return super().precompute_priority(url)

    async def shutdown(self):
        self._cancel.set()


# ------ Usage That Will Drop Jaws ------ #
async def main():
    """Quantum crawl example"""
    strategy = BFSDeepCrawlStrategy(
        max_depth=2,
        priority_fn=lambda url: 1.0 / (len(url) + 1e-9),  # Inverse length priority
        # filter_chain=FilterChain(...)
    )

    config: CrawlerRunConfig = CrawlerRunConfig(
        deep_crawl_strategy=strategy,
        stream=False,
        verbose=True,
        cache_mode=CacheMode.BYPASS
    )

    async with AsyncWebCrawler() as crawler:
        run_decorator = DeepCrawlDecorator(crawler)
        setattr(crawler, "original_arun", crawler.arun)
        crawler.arun = run_decorator(crawler.arun)
        start_time = time.perf_counter()
        async for result in crawler.arun("https://docs.crawl4ai.com", config=config):
            print(f"🌀 {result.url} (Depth: {result.metadata['depth']})")
        print(f"Deep crawl completed in {time.perf_counter() - start_time:.2f}s")


if __name__ == "__main__":
    asyncio.run(main())

102
crawl4ai/deep_crawling/dfs_strategy.py
Normal file
@@ -0,0 +1,102 @@
# dfs_deep_crawl_strategy.py
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple

from ..models import CrawlResult
from .bfs_strategy import BFSDeepCrawlStrategy  # noqa
from ..types import AsyncWebCrawler, CrawlerRunConfig


class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
    """
    Depth-First Search (DFS) deep crawling strategy.

    Inherits URL validation and link discovery from BFSDeepCrawlStrategy.
    Overrides _arun_batch and _arun_stream to use a stack (LIFO) for DFS traversal.
    """
    async def _arun_batch(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlResult]:
        """
        Batch (non-streaming) DFS mode.
        Uses a stack to traverse URLs in DFS order, aggregating CrawlResults into a list.
        """
        visited: Set[str] = set()
        # Stack items: (url, parent_url, depth)
        stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
        depths: Dict[str, int] = {start_url: 0}
        results: List[CrawlResult] = []

        while stack and not self._cancel_event.is_set():
            url, parent, depth = stack.pop()
            if url in visited or depth > self.max_depth:
                continue
            visited.add(url)

            # Clone config to disable recursive deep crawling.
            batch_config = config.clone(deep_crawl_strategy=None, stream=False)
            url_results = await crawler.arun_many(urls=[url], config=batch_config)

            for result in url_results:
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                result.metadata["parent_url"] = parent
                if self.url_scorer:
                    result.metadata["score"] = self.url_scorer.score(url)
                results.append(result)

                # Count only successful crawls toward the max_pages limit
                if result.success:
                    self._pages_crawled += 1

                    # Only discover links from successful crawls
                    new_links: List[Tuple[str, Optional[str]]] = []
                    await self.link_discovery(result, url, depth, visited, new_links, depths)

                    # Push new links in reverse order so the first discovered is processed next.
                    for new_url, new_parent in reversed(new_links):
                        new_depth = depths.get(new_url, depth + 1)
                        stack.append((new_url, new_parent, new_depth))
        return results

    async def _arun_stream(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Streaming DFS mode.
        Uses a stack to traverse URLs in DFS order and yields CrawlResults as they become available.
        """
        visited: Set[str] = set()
        stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
        depths: Dict[str, int] = {start_url: 0}

        while stack and not self._cancel_event.is_set():
            url, parent, depth = stack.pop()
            if url in visited or depth > self.max_depth:
                continue
            visited.add(url)

            stream_config = config.clone(deep_crawl_strategy=None, stream=True)
            stream_gen = await crawler.arun_many(urls=[url], config=stream_config)
            async for result in stream_gen:
                result.metadata = result.metadata or {}
                result.metadata["depth"] = depth
                result.metadata["parent_url"] = parent
                if self.url_scorer:
                    result.metadata["score"] = self.url_scorer.score(url)
                yield result

                # Only count successful crawls toward the max_pages limit
                # and only discover links from successful crawls
                if result.success:
                    self._pages_crawled += 1

                    new_links: List[Tuple[str, Optional[str]]] = []
                    await self.link_discovery(result, url, depth, visited, new_links, depths)
                    for new_url, new_parent in reversed(new_links):
                        new_depth = depths.get(new_url, depth + 1)
                        stack.append((new_url, new_parent, new_depth))
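
Note how both modes push discovered links with reversed(): if link_discovery fills new_links with [a, b, c], the stack receives c, b, a, so pop() returns a first and the first-discovered link is crawled next. Usage mirrors the BFS example earlier; a hedged sketch, assuming the constructor arguments are inherited unchanged from the BFS parent:

strategy = DFSDeepCrawlStrategy(max_depth=2)
config = CrawlerRunConfig(deep_crawl_strategy=strategy, stream=True)
# async with AsyncWebCrawler() as crawler:
#     async for result in crawler.arun("https://docs.crawl4ai.com", config=config):
#         print(result.url, result.metadata["depth"])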

666
crawl4ai/deep_crawling/filters.py
Normal file
@@ -0,0 +1,666 @@
from abc import ABC, abstractmethod
from typing import Dict, List, Pattern, Set, Union
from urllib.parse import urlparse
from array import array
import re
import logging
from functools import lru_cache
import fnmatch
from dataclasses import dataclass
import weakref
import math
from collections import defaultdict
from ..utils import HeadPeekr
import asyncio
import inspect


@dataclass
class FilterStats:
    __slots__ = ("_counters",)

    def __init__(self):
        # Use array of unsigned ints for atomic operations
        self._counters = array("I", [0, 0, 0])  # total, passed, rejected

    @property
    def total_urls(self):
        return self._counters[0]

    @property
    def passed_urls(self):
        return self._counters[1]

    @property
    def rejected_urls(self):
        return self._counters[2]


class URLFilter(ABC):
    """Optimized base filter class"""

    __slots__ = ("name", "stats", "_logger_ref")

    def __init__(self, name: str = None):
        self.name = name or self.__class__.__name__
        self.stats = FilterStats()
        # Lazy logger initialization using weakref
        self._logger_ref = None

    @property
    def logger(self):
        if self._logger_ref is None or self._logger_ref() is None:
            logger = logging.getLogger(f"urlfilter.{self.name}")
            self._logger_ref = weakref.ref(logger)
        return self._logger_ref()

    @abstractmethod
    def apply(self, url: str) -> bool:
        pass

    def _update_stats(self, passed: bool):
        # Use direct array index for speed
        self.stats._counters[0] += 1           # total
        self.stats._counters[1] += passed      # passed
        self.stats._counters[2] += not passed  # rejected
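
A custom filter only needs apply() plus a _update_stats() call to report its decision; a minimal sketch (a hypothetical HTTPSOnlyFilter, illustrative only):

class HTTPSOnlyFilter(URLFilter):
    """Sketch: accept only https:// URLs."""

    def apply(self, url: str) -> bool:
        passed = url.startswith("https://")
        self._update_stats(passed)
        return passed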


class FilterChain:
    """Optimized filter chain"""

    __slots__ = ("filters", "stats", "_logger_ref")

    def __init__(self, filters: List[URLFilter] = None):
        self.filters = tuple(filters or [])  # Immutable tuple for speed
        self.stats = FilterStats()
        self._logger_ref = None

    @property
    def logger(self):
        if self._logger_ref is None or self._logger_ref() is None:
            logger = logging.getLogger("urlfilter.chain")
            self._logger_ref = weakref.ref(logger)
        return self._logger_ref()

    def add_filter(self, filter_: URLFilter) -> "FilterChain":
        """Add a filter to the chain"""
        # filters is an immutable tuple, so rebuild it rather than appending in place
        self.filters = self.filters + (filter_,)
        return self  # Enable method chaining

    async def apply(self, url: str) -> bool:
        """Apply all filters, concurrently when possible"""
        self.stats._counters[0] += 1  # Total processed URLs

        tasks = []
        for f in self.filters:
            result = f.apply(url)

            if inspect.isawaitable(result):
                tasks.append(result)  # Collect async tasks
            elif not result:  # Sync rejection
                self.stats._counters[2] += 1  # Sync rejected
                return False

        if tasks:
            results = await asyncio.gather(*tasks)

            # Count how many filters rejected
            rejections = results.count(False)
            self.stats._counters[2] += rejections

            if not all(results):
                return False  # Stop early if any filter rejected

        self.stats._counters[1] += 1  # Passed
        return True
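
Because apply() accepts both sync and awaitable filter results, a chain can mix the two freely; an illustrative combination using filter classes defined later in this file:

chain = FilterChain([
    DomainFilter(allowed_domains=["example.com"]),
    ContentTypeFilter(allowed_types=["text/html"]),
])
# ok = await chain.apply("https://example.com/docs/index.html")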


class URLPatternFilter(URLFilter):
    """Pattern filter balancing speed and completeness"""

    __slots__ = (
        "_simple_suffixes",
        "_simple_prefixes",
        "_domain_patterns",
        "_path_patterns",
        "_reverse",
    )

    PATTERN_TYPES = {
        "SUFFIX": 1,  # *.html
        "PREFIX": 2,  # /foo/*
        "DOMAIN": 3,  # *.example.com
        "PATH": 4,    # Everything else
        "REGEX": 5,
    }

    def __init__(
        self,
        patterns: Union[str, Pattern, List[Union[str, Pattern]]],
        use_glob: bool = True,
        reverse: bool = False,
    ):
        super().__init__()
        self._reverse = reverse
        patterns = [patterns] if isinstance(patterns, (str, Pattern)) else patterns

        self._simple_suffixes = set()
        self._simple_prefixes = set()
        self._domain_patterns = []
        self._path_patterns = []

        for pattern in patterns:
            pattern_type = self._categorize_pattern(pattern)
            self._add_pattern(pattern, pattern_type)

    def _categorize_pattern(self, pattern: str) -> int:
        """Categorize pattern for specialized handling"""
        if not isinstance(pattern, str):
            return self.PATTERN_TYPES["PATH"]

        # Check if it's a regex pattern
        if pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern:
            return self.PATTERN_TYPES["REGEX"]

        if pattern.count("*") == 1:
            if pattern.startswith("*."):
                return self.PATTERN_TYPES["SUFFIX"]
            if pattern.endswith("/*"):
                return self.PATTERN_TYPES["PREFIX"]

        if "://" in pattern and pattern.startswith("*."):
            return self.PATTERN_TYPES["DOMAIN"]

        return self.PATTERN_TYPES["PATH"]

    def _add_pattern(self, pattern: str, pattern_type: int):
        """Add pattern to the appropriate matcher"""
        if pattern_type == self.PATTERN_TYPES["REGEX"]:
            # For regex patterns, compile directly without glob translation
            if isinstance(pattern, str) and (
                pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern
            ):
                self._path_patterns.append(re.compile(pattern))
                return
        elif pattern_type == self.PATTERN_TYPES["SUFFIX"]:
            self._simple_suffixes.add(pattern[2:])
        elif pattern_type == self.PATTERN_TYPES["PREFIX"]:
            self._simple_prefixes.add(pattern[:-2])
        elif pattern_type == self.PATTERN_TYPES["DOMAIN"]:
            self._domain_patterns.append(re.compile(pattern.replace("*.", r"[^/]+\.")))
        else:
            if isinstance(pattern, str):
                # Handle complex glob patterns
                if "**" in pattern:
                    pattern = pattern.replace("**", ".*")
                if "{" in pattern:
                    # Convert {a,b} to (a|b)
                    pattern = re.sub(
                        r"\{([^}]+)\}",
                        lambda m: f'({"|".join(m.group(1).split(","))})',
                        pattern,
                    )
                pattern = fnmatch.translate(pattern)
            self._path_patterns.append(
                pattern if isinstance(pattern, Pattern) else re.compile(pattern)
            )

    @lru_cache(maxsize=10000)
    def apply(self, url: str) -> bool:
        # Quick suffix check (*.html)
        if self._simple_suffixes:
            path = url.split("?")[0]
            if path.split("/")[-1].split(".")[-1] in self._simple_suffixes:
                result = True
                self._update_stats(result)
                return not result if self._reverse else result

        # Domain check
        if self._domain_patterns:
            for pattern in self._domain_patterns:
                if pattern.match(url):
                    result = True
                    self._update_stats(result)
                    return not result if self._reverse else result

        # Prefix check (/foo/*)
        if self._simple_prefixes:
            path = url.split("?")[0]
            if any(path.startswith(p) for p in self._simple_prefixes):
                result = True
                self._update_stats(result)
                return not result if self._reverse else result

        # Complex patterns
        if self._path_patterns:
            if any(p.search(url) for p in self._path_patterns):
                result = True
                self._update_stats(result)
                return not result if self._reverse else result

        result = False
        self._update_stats(result)
        return not result if self._reverse else result
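
How concrete patterns land in the fast paths (illustrative; note that prefix patterns are compared against the full URL minus the query string, not just the path):

f = URLPatternFilter(["*.html", "https://example.com/blog/*"])
# "*.html" goes to the suffix set: any URL whose last path segment ends in .html passes.
# The second pattern goes to the prefix set and matches by URL prefix:
# f.apply("https://example.com/blog/post.php")  -> True   (prefix hit)
# f.apply("https://other.com/readme.txt")       -> False  (no bucket matches)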


class ContentTypeFilter(URLFilter):
    """Optimized content type filter using fast lookups"""

    __slots__ = ("allowed_types", "_ext_map", "_check_extension")

    # Fast extension to mime type mapping
    _MIME_MAP = {
        # Text Formats
        "txt": "text/plain",
        "html": "text/html",
        "htm": "text/html",
        "xhtml": "application/xhtml+xml",
        "css": "text/css",
        "csv": "text/csv",
        "ics": "text/calendar",
        "js": "application/javascript",
        # Images
        "bmp": "image/bmp",
        "gif": "image/gif",
        "jpeg": "image/jpeg",
        "jpg": "image/jpeg",
        "png": "image/png",
        "svg": "image/svg+xml",
        "tiff": "image/tiff",
        "ico": "image/x-icon",
        "webp": "image/webp",
        # Audio
        "mp3": "audio/mpeg",
        "wav": "audio/wav",
        "ogg": "audio/ogg",
        "m4a": "audio/mp4",
        "aac": "audio/aac",
        # Video
        "mp4": "video/mp4",
        "mpeg": "video/mpeg",
        "webm": "video/webm",
        "avi": "video/x-msvideo",
        "mov": "video/quicktime",
        "flv": "video/x-flv",
        "wmv": "video/x-ms-wmv",
        "mkv": "video/x-matroska",
        # Applications
        "json": "application/json",
        "xml": "application/xml",
        "pdf": "application/pdf",
        "zip": "application/zip",
        "gz": "application/gzip",
        "tar": "application/x-tar",
        "rar": "application/vnd.rar",
        "7z": "application/x-7z-compressed",
        "exe": "application/vnd.microsoft.portable-executable",
        "msi": "application/x-msdownload",
        # Fonts
        "woff": "font/woff",
        "woff2": "font/woff2",
        "ttf": "font/ttf",
        "otf": "font/otf",
        # Microsoft Office
        "doc": "application/msword",
        "dot": "application/msword",
        "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        "xls": "application/vnd.ms-excel",
        "ppt": "application/vnd.ms-powerpoint",
        "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
        # OpenDocument Formats
        "odt": "application/vnd.oasis.opendocument.text",
        "ods": "application/vnd.oasis.opendocument.spreadsheet",
        "odp": "application/vnd.oasis.opendocument.presentation",
        # Archives
        "tar.gz": "application/gzip",
        "tgz": "application/gzip",
        "bz2": "application/x-bzip2",
        # Others
        "rtf": "application/rtf",
        "apk": "application/vnd.android.package-archive",
        "epub": "application/epub+zip",
        "jar": "application/java-archive",
        "swf": "application/x-shockwave-flash",
        "midi": "audio/midi",
        "mid": "audio/midi",
        "ps": "application/postscript",
        "ai": "application/postscript",
        "eps": "application/postscript",
        # Custom or less common
        "bin": "application/octet-stream",
        "dmg": "application/x-apple-diskimage",
        "iso": "application/x-iso9660-image",
        "deb": "application/x-debian-package",
        "rpm": "application/x-rpm",
        "sqlite": "application/vnd.sqlite3",
        # Placeholder
        "unknown": "application/octet-stream",  # Fallback for unknown file types
    }

    @staticmethod
    @lru_cache(maxsize=1000)
    def _extract_extension(url: str) -> str:
        """Extracts the file extension from a URL."""
        # Remove scheme (http://, https://) if present
        if "://" in url:
            url = url.split("://", 1)[-1]  # Get everything after '://'

        # Remove domain (everything up to the first '/')
        path_start = url.find("/")
        path = url[path_start:] if path_start != -1 else ""

        # Extract the last filename in the path
        filename = path.rsplit("/", 1)[-1] if "/" in path else ""

        # Extract and validate the extension
        if "." not in filename:
            return ""

        return filename.rpartition(".")[-1].lower()

    def __init__(
        self,
        allowed_types: Union[str, List[str]],
        check_extension: bool = True,
        ext_map: Dict[str, str] = _MIME_MAP,
    ):
        super().__init__()
        # Normalize and store as a frozenset for fast lookup
        self.allowed_types = frozenset(
            t.lower()
            for t in (
                allowed_types if isinstance(allowed_types, list) else [allowed_types]
            )
        )
        self._check_extension = check_extension

        # Pre-compute the extension map for the allowed types
        self._ext_map = frozenset(
            ext
            for ext, mime in ext_map.items()
            if any(allowed in mime for allowed in self.allowed_types)
        )

    @lru_cache(maxsize=1000)
    def _check_url_cached(self, url: str) -> bool:
        """Cached URL checking"""
        if not self._check_extension:
            return True
        ext = self._extract_extension(url)
        if not ext:
            return True

        return ext in self._ext_map

    def apply(self, url: str) -> bool:
        """Fast extension check with caching"""
        result = self._check_url_cached(url)
        self._update_stats(result)
        return result
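
Illustrative behavior (based on the _MIME_MAP above): extensions whose MIME type contains an allowed type pass, and extension-less URLs pass through unchecked:

html_only = ContentTypeFilter(allowed_types=["text/html"])
# html_only.apply("https://example.com/index.html")  -> True   ("html" -> text/html)
# html_only.apply("https://example.com/logo.png")    -> False  ("png" -> image/png)
# html_only.apply("https://example.com/about")       -> True   (no extension)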


class DomainFilter(URLFilter):
    """Optimized domain filter with fast lookups and caching"""

    __slots__ = ("_allowed_domains", "_blocked_domains", "_domain_cache")

    # Regex for fast domain extraction
    _DOMAIN_REGEX = re.compile(r"://([^/]+)")

    def __init__(
        self,
        allowed_domains: Union[str, List[str]] = None,
        blocked_domains: Union[str, List[str]] = None,
    ):
        super().__init__()

        # Convert inputs to frozensets for immutable, fast lookups
        self._allowed_domains = (
            frozenset(self._normalize_domains(allowed_domains))
            if allowed_domains
            else None
        )
        self._blocked_domains = (
            frozenset(self._normalize_domains(blocked_domains))
            if blocked_domains
            else frozenset()
        )

    @staticmethod
    def _normalize_domains(domains: Union[str, List[str]]) -> Set[str]:
        """Fast domain normalization"""
        if isinstance(domains, str):
            return {domains.lower()}
        return {d.lower() for d in domains}

    @staticmethod
    def _is_subdomain(domain: str, parent_domain: str) -> bool:
        """Check if domain is a subdomain of parent_domain"""
        return domain == parent_domain or domain.endswith(f".{parent_domain}")

    @staticmethod
    @lru_cache(maxsize=10000)
    def _extract_domain(url: str) -> str:
        """Ultra-fast domain extraction with regex and caching"""
        match = DomainFilter._DOMAIN_REGEX.search(url)
        return match.group(1).lower() if match else ""

    def apply(self, url: str) -> bool:
        """Optimized domain checking with early returns"""
        # Skip processing if no filters
        if not self._blocked_domains and self._allowed_domains is None:
            self._update_stats(True)
            return True

        domain = self._extract_domain(url)

        # Check for blocked domains, including subdomains
        for blocked in self._blocked_domains:
            if self._is_subdomain(domain, blocked):
                self._update_stats(False)
                return False

        # If no allowed domains specified, accept all non-blocked
        if self._allowed_domains is None:
            self._update_stats(True)
            return True

        # Check if domain matches any allowed domain (including subdomains)
        for allowed in self._allowed_domains:
            if self._is_subdomain(domain, allowed):
                self._update_stats(True)
                return True

        # No matches found
        self._update_stats(False)
        return False
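
Blocked domains win over allowed ones, and both sides honor subdomains; illustrative expectations:

df = DomainFilter(allowed_domains=["example.com"], blocked_domains=["ads.example.com"])
# df.apply("https://docs.example.com/a")  -> True   (subdomain of an allowed domain)
# df.apply("https://ads.example.com/b")   -> False  (blocked domains are checked first)
# df.apply("https://other.org/")          -> False  (not under any allowed domain)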


class ContentRelevanceFilter(URLFilter):
    """BM25-based relevance filter using head section content"""

    __slots__ = ("query_terms", "threshold", "k1", "b", "avgdl")

    def __init__(
        self,
        query: str,
        threshold: float,
        k1: float = 1.2,
        b: float = 0.75,
        avgdl: int = 1000,
    ):
        super().__init__(name="BM25RelevanceFilter")
        self.query_terms = self._tokenize(query)
        self.threshold = threshold
        self.k1 = k1        # TF saturation parameter
        self.b = b          # Length normalization parameter
        self.avgdl = avgdl  # Average document length (empirical value)

    async def apply(self, url: str) -> bool:
        head_content = await HeadPeekr.peek_html(url)
        if not head_content:
            self._update_stats(False)
            return False

        # Field extraction with weighting
        fields = {
            "title": HeadPeekr.get_title(head_content) or "",
            "meta": HeadPeekr.extract_meta_tags(head_content),
        }
        doc_text = self._build_document(fields)

        score = self._bm25(doc_text)
        decision = score >= self.threshold
        self._update_stats(decision)
        return decision

    def _build_document(self, fields: Dict) -> str:
        """Weighted document construction"""
        return " ".join(
            [
                fields["title"] * 3,  # Title weight
                fields["meta"].get("description", "") * 2,
                fields["meta"].get("keywords", ""),
                " ".join(fields["meta"].values()),
            ]
        )

    def _tokenize(self, text: str) -> List[str]:
        """Fast case-insensitive tokenization"""
        return text.lower().split()

    def _bm25(self, document: str) -> float:
        """Optimized BM25 implementation for head sections"""
        doc_terms = self._tokenize(document)
        doc_len = len(doc_terms)
        tf = defaultdict(int)

        for term in doc_terms:
            tf[term] += 1

        score = 0.0
        for term in set(self.query_terms):
            term_freq = tf[term]
            idf = math.log((1 + 1) / (term_freq + 0.5) + 1)  # Simplified IDF
            numerator = term_freq * (self.k1 + 1)
            denominator = term_freq + self.k1 * (
                1 - self.b + self.b * (doc_len / self.avgdl)
            )
            score += idf * (numerator / denominator)

        return score
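
Note the IDF is deliberately simplified: with no corpus statistics available for a single head section, it only dampens terms that are frequent within the document itself. Since apply() awaits a network peek of the page head, the filter is async by construction; hypothetical usage:

relevance = ContentRelevanceFilter(query="async web crawling", threshold=1.0)
# keep = await relevance.apply("https://example.com/article")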


class SEOFilter(URLFilter):
    """Quantitative SEO quality assessment filter using head section analysis"""

    __slots__ = ("threshold", "_weights", "_kw_patterns")

    # Based on SEMrush/Google ranking factors research
    DEFAULT_WEIGHTS = {
        "title_length": 0.15,
        "title_kw": 0.18,
        "meta_description": 0.12,
        "canonical": 0.10,
        "robot_ok": 0.20,  # Most critical factor
        "schema_org": 0.10,
        "url_quality": 0.15,
    }

    def __init__(
        self,
        threshold: float = 0.65,
        keywords: List[str] = None,
        weights: Dict[str, float] = None,
    ):
        super().__init__(name="SEOFilter")
        self.threshold = threshold
        self._weights = weights or self.DEFAULT_WEIGHTS
        self._kw_patterns = (
            re.compile(
                r"\b({})\b".format("|".join(map(re.escape, keywords or []))), re.I
            )
            if keywords
            else None
        )

    async def apply(self, url: str) -> bool:
        head_content = await HeadPeekr.peek_html(url)
        if not head_content:
            self._update_stats(False)
            return False

        meta = HeadPeekr.extract_meta_tags(head_content)
        title = HeadPeekr.get_title(head_content) or ""
        parsed_url = urlparse(url)

        scores = {
            "title_length": self._score_title_length(title),
            "title_kw": self._score_keyword_presence(title),
            "meta_description": self._score_meta_description(
                meta.get("description", "")
            ),
            "canonical": self._score_canonical(meta.get("canonical"), url),
            "robot_ok": 1.0 if "noindex" not in meta.get("robots", "") else 0.0,
            "schema_org": self._score_schema_org(head_content),
            "url_quality": self._score_url_quality(parsed_url),
        }

        total_score = sum(
            weight * scores[factor] for factor, weight in self._weights.items()
        )

        decision = total_score >= self.threshold
        self._update_stats(decision)
        return decision

    def _score_title_length(self, title: str) -> float:
        length = len(title)
        if 50 <= length <= 60:
            return 1.0
        if 40 <= length < 50 or 60 < length <= 70:
            return 0.7
        return 0.3  # Poor length

    def _score_keyword_presence(self, text: str) -> float:
        if not self._kw_patterns:
            return 0.0
        matches = len(self._kw_patterns.findall(text))
        return min(matches * 0.3, 1.0)  # Max 3 matches

    def _score_meta_description(self, desc: str) -> float:
        length = len(desc)
        if 140 <= length <= 160:
            return 1.0
        return 0.5 if 120 <= length <= 200 else 0.2

    def _score_canonical(self, canonical: str, original: str) -> float:
        if not canonical:
            return 0.5  # Neutral score
        return 1.0 if canonical == original else 0.2

    def _score_schema_org(self, html: str) -> float:
        # Detect any schema.org markup in head
        return (
            1.0
            if re.search(r'<script[^>]+type=["\']application/ld\+json', html)
            else 0.0
        )

    def _score_url_quality(self, parsed_url) -> float:
        score = 1.0
        path = parsed_url.path.lower()

        # Penalty factors
        if len(path) > 80:
            score *= 0.7
        if re.search(r"\d{4}", path):
            score *= 0.8  # Numbers in path
        if parsed_url.query:
            score *= 0.6  # URL parameters
        if "_" in path:
            score *= 0.9  # Underscores vs hyphens

        return score
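
Like the relevance filter, this one peeks at the page head and is therefore async; hypothetical usage with the default weights:

seo = SEOFilter(threshold=0.65, keywords=["crawler", "async"])
# keep = await seo.apply("https://example.com/blog/async-crawler")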

519
crawl4ai/deep_crawling/scorers.py
Normal file
@@ -0,0 +1,519 @@
from abc import ABC, abstractmethod
from typing import List, Dict, Optional
from dataclasses import dataclass
from urllib.parse import urlparse, unquote
import re
import logging
from functools import lru_cache
from array import array
import ctypes
import platform

PLATFORM = platform.system()

# Pre-computed scores for small distances from the optimal path depth
_SCORE_LOOKUP = [1.0, 0.5, 0.3333333333333333, 0.25]

# Pre-computed scores for common year differences
_FRESHNESS_SCORES = [
    1.0,  # Current year
    0.9,  # Last year
    0.8,  # 2 years ago
    0.7,  # 3 years ago
    0.6,  # 4 years ago
    0.5,  # 5 years ago
]


class ScoringStats:
    __slots__ = ('_urls_scored', '_total_score', '_min_score', '_max_score')

    def __init__(self):
        self._urls_scored = 0
        self._total_score = 0.0
        self._min_score = None  # Lazy initialization
        self._max_score = None

    def update(self, score: float) -> None:
        """Optimized update with minimal operations"""
        self._urls_scored += 1
        self._total_score += score

        # Lazy min/max tracking - only if actually accessed
        if self._min_score is not None:
            if score < self._min_score:
                self._min_score = score
        if self._max_score is not None:
            if score > self._max_score:
                self._max_score = score

    def get_average(self) -> float:
        """Direct calculation instead of a property"""
        return self._total_score / self._urls_scored if self._urls_scored else 0.0

    def get_min(self) -> float:
        """Lazy min calculation"""
        if self._min_score is None:
            self._min_score = self._total_score / self._urls_scored if self._urls_scored else 0.0
        return self._min_score

    def get_max(self) -> float:
        """Lazy max calculation"""
        if self._max_score is None:
            self._max_score = self._total_score / self._urls_scored if self._urls_scored else 0.0
        return self._max_score


class URLScorer(ABC):
    __slots__ = ('_weight', '_stats')

    def __init__(self, weight: float = 1.0):
        # Store weight directly as float32 for memory efficiency
        self._weight = ctypes.c_float(weight).value
        self._stats = ScoringStats()

    @abstractmethod
    def _calculate_score(self, url: str) -> float:
        """Calculate raw score for URL."""
        pass

    def score(self, url: str) -> float:
        """Calculate weighted score with minimal overhead."""
        score = self._calculate_score(url) * self._weight
        self._stats.update(score)
        return score

    @property
    def stats(self):
        """Access to scoring statistics."""
        return self._stats

    @property
    def weight(self):
        return self._weight


class CompositeScorer(URLScorer):
    __slots__ = ('_scorers', '_normalize', '_weights_array', '_score_array')

    def __init__(self, scorers: List[URLScorer], normalize: bool = True):
        """Initialize composite scorer combining multiple scoring strategies.

        Optimized for:
        - Fast parallel scoring
        - Memory efficient score aggregation
        - Quick short-circuit conditions
        - Pre-allocated arrays

        Args:
            scorers: List of scoring strategies to combine
            normalize: Whether to normalize final score by scorer count
        """
        super().__init__(weight=1.0)
        self._scorers = scorers
        self._normalize = normalize

        # Pre-allocate arrays for scores and weights
        self._weights_array = array('f', [s.weight for s in scorers])
        self._score_array = array('f', [0.0] * len(scorers))

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate combined score from all scoring strategies.

        Uses:
        1. Pre-allocated arrays for scores
        2. Short-circuit on zero scores
        3. Optimized normalization
        4. Vectorized operations where possible

        Args:
            url: URL to score

        Returns:
            Combined and optionally normalized score
        """
        total_score = 0.0
        scores = self._score_array

        # Get scores from all scorers
        for i, scorer in enumerate(self._scorers):
            # Use the public score() method, which applies the weight
            scores[i] = scorer.score(url)
            total_score += scores[i]

        # Normalize if requested
        if self._normalize and self._scorers:
            count = len(self._scorers)
            return total_score / count

        return total_score

    def score(self, url: str) -> float:
        """Public scoring interface with stats tracking.

        Args:
            url: URL to score

        Returns:
            Final combined score
        """
        score = self._calculate_score(url)
        self.stats.update(score)
        return score
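
An illustrative combination (scorer classes defined below in this file); with normalize=True the result is the mean of the weighted component scores:

scorer = CompositeScorer([
    KeywordRelevanceScorer(["crawl", "async"], weight=1.0),
    PathDepthScorer(optimal_depth=2, weight=0.5),
], normalize=True)
# combined = scorer.score("https://example.com/docs/async-crawl")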


class KeywordRelevanceScorer(URLScorer):
    __slots__ = ('_weight', '_stats', '_keywords', '_case_sensitive')

    def __init__(self, keywords: List[str], weight: float = 1.0, case_sensitive: bool = False):
        super().__init__(weight=weight)
        self._case_sensitive = case_sensitive
        # Pre-process keywords once
        self._keywords = [k if case_sensitive else k.lower() for k in keywords]

    @lru_cache(maxsize=10000)
    def _url_bytes(self, url: str) -> bytes:
        """Cache decoded URL bytes"""
        return url.encode('utf-8') if self._case_sensitive else url.lower().encode('utf-8')

    def _calculate_score(self, url: str) -> float:
        """Fast string matching without regex or byte conversion"""
        if not self._case_sensitive:
            url = url.lower()

        matches = sum(1 for k in self._keywords if k in url)

        # Fast return paths
        if not matches:
            return 0.0
        if matches == len(self._keywords):
            return 1.0

        return matches / len(self._keywords)


class PathDepthScorer(URLScorer):
    __slots__ = ('_weight', '_stats', '_optimal_depth')  # No _url_cache

    def __init__(self, optimal_depth: int = 3, weight: float = 1.0):
        super().__init__(weight=weight)
        self._optimal_depth = optimal_depth

    @staticmethod
    @lru_cache(maxsize=10000)
    def _quick_depth(path: str) -> int:
        """Ultra fast path depth calculation.

        Examples:
        - "http://example.com"     -> 0  # No path segments
        - "http://example.com/"    -> 0  # Empty path
        - "http://example.com/a"   -> 1
        - "http://example.com/a/b" -> 2
        """
        if not path or path == '/':
            return 0

        if '/' not in path:
            return 0

        depth = 0
        last_was_slash = True

        for c in path:
            if c == '/':
                if not last_was_slash:
                    depth += 1
                last_was_slash = True
            else:
                last_was_slash = False

        if not last_was_slash:
            depth += 1

        return depth

    @lru_cache(maxsize=10000)  # Cache the whole calculation
    def _calculate_score(self, url: str) -> float:
        pos = url.find('/', url.find('://') + 3)
        if pos == -1:
            depth = 0
        else:
            depth = self._quick_depth(url[pos:])

        # Use lookup table for common distances
        distance = depth - self._optimal_depth
        distance = distance if distance >= 0 else -distance  # Faster than abs()

        if distance < 4:
            return _SCORE_LOOKUP[distance]

        return 1.0 / (1.0 + distance)
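
Worked example: with optimal_depth=3, a depth-1 URL is distance 2 from optimal and scores 1/3 from the lookup table, while a depth-3 URL scores 1.0:

scorer = PathDepthScorer(optimal_depth=3)
# scorer.score("https://example.com/a")      -> 0.333...  (depth 1, distance 2)
# scorer.score("https://example.com/a/b/c")  -> 1.0       (depth 3, distance 0)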


class ContentTypeScorer(URLScorer):
    __slots__ = ('_weight', '_exact_types', '_regex_types')

    def __init__(self, type_weights: Dict[str, float], weight: float = 1.0):
        """Initialize scorer with type weights map.

        Args:
            type_weights: Dict mapping file extensions/patterns to scores (e.g. {'.html$': 1.0})
            weight: Overall weight multiplier for this scorer
        """
        super().__init__(weight=weight)
        self._exact_types = {}  # Fast lookup for simple extensions
        self._regex_types = []  # Fallback for complex patterns

        # Split into exact vs regex matchers for performance
        for pattern, score in type_weights.items():
            if pattern.startswith('.') and pattern.endswith('$'):
                ext = pattern[1:-1]
                self._exact_types[ext] = score
            else:
                self._regex_types.append((re.compile(pattern), score))

        # Sort complex patterns by score for early exit
        self._regex_types.sort(key=lambda x: -x[1])

    @staticmethod
    @lru_cache(maxsize=10000)
    def _quick_extension(url: str) -> str:
        """Extract file extension ultra-fast without regex/splits.

        Handles:
        - Basic extensions: "example.html" -> "html"
        - Query strings: "page.php?id=1" -> "php"
        - Fragments: "doc.pdf#page=1" -> "pdf"
        - Path params: "file.jpg;width=100" -> "jpg"

        Args:
            url: URL to extract extension from

        Returns:
            Extension without dot, or empty string if none found
        """
        pos = url.rfind('.')
        if pos == -1:
            return ''

        # Find first non-alphanumeric char after the extension
        end = len(url)
        for i in range(pos + 1, len(url)):
            c = url[i]
            # Stop at query string, fragment, path param or any non-alphanumeric
            if c in '?#;' or not c.isalnum():
                end = i
                break

        return url[pos + 1:end].lower()

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate content type score for URL.

        Uses staged approach:
        1. Try exact extension match (fast path)
        2. Fall back to regex patterns if needed

        Args:
            url: URL to score

        Returns:
            Score between 0.0 and 1.0 * weight
        """
        # Fast path: direct extension lookup
        ext = self._quick_extension(url)
        if ext:
            score = self._exact_types.get(ext, None)
            if score is not None:
                return score

        # Slow path: regex patterns
        for pattern, score in self._regex_types:
            if pattern.search(url):
                return score

        return 0.0


class FreshnessScorer(URLScorer):
    __slots__ = ('_weight', '_date_pattern', '_current_year')

    def __init__(self, weight: float = 1.0, current_year: int = 2024):
        """Initialize freshness scorer.

        Extracts and scores dates from URLs using formats:
        - YYYY/MM/DD
        - YYYY-MM-DD
        - YYYY_MM_DD
        - YYYY (year only)

        Args:
            weight: Score multiplier
            current_year: Year to calculate freshness against (default 2024)
        """
        super().__init__(weight=weight)
        self._current_year = current_year

        # Combined pattern for all date formats
        # Uses non-capturing groups (?:) and alternation
        self._date_pattern = re.compile(
            r'(?:/'              # Path separator
            r'|[-_])'            # or date separators
            r'((?:19|20)\d{2})'  # Year group (1900-2099)
            r'(?:'               # Optional month/day group
            r'(?:/|[-_])'        # Date separator
            r'(?:\d{2})'         # Month
            r'(?:'               # Optional day
            r'(?:/|[-_])'        # Date separator
            r'(?:\d{2})'         # Day
            r')?'                # Day is optional
            r')?'                # Month/day group is optional
        )

    @lru_cache(maxsize=10000)
    def _extract_year(self, url: str) -> Optional[int]:
        """Extract the most recent year from URL.

        Args:
            url: URL to extract year from

        Returns:
            Year as int or None if no valid year found
        """
        matches = self._date_pattern.finditer(url)
        latest_year = None

        # Find most recent year
        for match in matches:
            year = int(match.group(1))
            if (year <= self._current_year and  # Sanity check
                    (latest_year is None or year > latest_year)):
                latest_year = year

        return latest_year

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate freshness score based on URL date.

        More recent years score higher. Uses a pre-computed scoring
        table for common year differences.

        Args:
            url: URL to score

        Returns:
            Score between 0.0 and 1.0 * weight
        """
        year = self._extract_year(url)
        if year is None:
            return 0.5  # Default score

        # Use lookup table for common year differences
        year_diff = self._current_year - year
        if year_diff < len(_FRESHNESS_SCORES):
            return _FRESHNESS_SCORES[year_diff]

        # Fallback calculation for older content
        return max(0.1, 1.0 - year_diff * 0.1)


class DomainAuthorityScorer(URLScorer):
    __slots__ = ('_weight', '_domain_weights', '_default_weight', '_top_domains')

    def __init__(
        self,
        domain_weights: Dict[str, float],
        default_weight: float = 0.5,
        weight: float = 1.0,
    ):
        """Initialize domain authority scorer.

        Args:
            domain_weights: Dict mapping domains to authority scores
            default_weight: Score for unknown domains
            weight: Overall scorer weight multiplier

        Example:
            {
                'python.org': 1.0,
                'github.com': 0.9,
                'medium.com': 0.7
            }
        """
        super().__init__(weight=weight)

        # Pre-process domains for faster lookup
        self._domain_weights = {
            domain.lower(): score
            for domain, score in domain_weights.items()
        }
        self._default_weight = default_weight

        # Cache top domains for the fast path
        self._top_domains = {
            domain: score
            for domain, score in sorted(
                domain_weights.items(),
                key=lambda x: -x[1]
            )[:5]  # Keep top 5 highest scoring domains
        }

    @staticmethod
    @lru_cache(maxsize=10000)
    def _extract_domain(url: str) -> str:
        """Extract domain from URL ultra-fast.

        Handles:
        - Basic domains: "example.com"
        - Subdomains: "sub.example.com"
        - Ports: "example.com:8080"
        - IPv4: "192.168.1.1"

        Args:
            url: Full URL to extract domain from

        Returns:
            Lowercase domain without port
        """
        # Find domain start
        start = url.find('://')
        if start == -1:
            start = 0
        else:
            start += 3

        # Find domain end
        end = url.find('/', start)
        if end == -1:
            end = url.find('?', start)
            if end == -1:
                end = url.find('#', start)
                if end == -1:
                    end = len(url)

        # Extract domain and remove port
        domain = url[start:end]
        port_idx = domain.rfind(':')
        if port_idx != -1:
            domain = domain[:port_idx]

        return domain.lower()

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate domain authority score.

        Uses staged approach:
        1. Check top domains (fastest)
        2. Check full domain weights
        3. Return default weight

        Args:
            url: URL to score

        Returns:
            Authority score between 0.0 and 1.0 * weight
        """
        domain = self._extract_domain(url)

        # Fast path: check top domains first
        score = self._top_domains.get(domain)
        if score is not None:
            return score

        # Regular path: check all domains
        return self._domain_weights.get(domain, self._default_weight)
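
Illustrative usage: known domains get their configured authority, everything else falls back to default_weight:

authority = DomainAuthorityScorer(
    domain_weights={"python.org": 1.0, "github.com": 0.9},
    default_weight=0.4,
)
# authority.score("https://github.com/unclecode/crawl4ai")  -> 0.9
# authority.score("https://example.net/")                   -> 0.4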

170
crawl4ai/docker_client.py
Normal file
@@ -0,0 +1,170 @@
from typing import List, Optional, Union, AsyncGenerator, Dict, Any
import httpx
import json
from urllib.parse import urljoin
import asyncio

from .async_configs import BrowserConfig, CrawlerRunConfig
from .models import CrawlResult
from .async_logger import AsyncLogger, LogLevel


class Crawl4aiClientError(Exception):
    """Base exception for Crawl4ai Docker client errors."""
    pass


class ConnectionError(Crawl4aiClientError):
    """Raised when connection to the Docker server fails."""
    pass


class RequestError(Crawl4aiClientError):
    """Raised when the server returns an error response."""
    pass


class Crawl4aiDockerClient:
    """Client for interacting with the Crawl4AI Docker server with token authentication."""

    def __init__(
        self,
        base_url: str = "http://localhost:8000",
        timeout: float = 30.0,
        verify_ssl: bool = True,
        verbose: bool = True,
        log_file: Optional[str] = None
    ):
        self.base_url = base_url.rstrip('/')
        self.timeout = timeout
        self.logger = AsyncLogger(log_file=log_file, log_level=LogLevel.DEBUG, verbose=verbose)
        self._http_client = httpx.AsyncClient(
            timeout=timeout,
            verify=verify_ssl,
            headers={"Content-Type": "application/json"}
        )
        self._token: Optional[str] = None

    async def authenticate(self, email: str) -> None:
        """Authenticate with the server and store the token."""
        url = urljoin(self.base_url, "/token")
        try:
            self.logger.info(f"Authenticating with email: {email}", tag="AUTH")
            response = await self._http_client.post(url, json={"email": email})
            response.raise_for_status()
            data = response.json()
            self._token = data["access_token"]
            self._http_client.headers["Authorization"] = f"Bearer {self._token}"
            self.logger.success("Authentication successful", tag="AUTH")
        except (httpx.RequestError, httpx.HTTPStatusError) as e:
            error_msg = f"Authentication failed: {str(e)}"
            self.logger.error(error_msg, tag="ERROR")
            raise ConnectionError(error_msg)

    async def _check_server(self) -> None:
        """Check if the server is reachable, raising an error if not."""
        try:
            await self._http_client.get(urljoin(self.base_url, "/health"))
            self.logger.success(f"Connected to {self.base_url}", tag="READY")
        except httpx.RequestError as e:
            self.logger.error(f"Server unreachable: {str(e)}", tag="ERROR")
            raise ConnectionError(f"Cannot connect to server: {str(e)}")

    def _prepare_request(self, urls: List[str], browser_config: Optional[BrowserConfig] = None,
                         crawler_config: Optional[CrawlerRunConfig] = None) -> Dict[str, Any]:
        """Prepare request data from configs."""
        return {
            "urls": urls,
            "browser_config": browser_config.dump() if browser_config else {},
            "crawler_config": crawler_config.dump() if crawler_config else {}
        }

    async def _request(self, method: str, endpoint: str, **kwargs) -> httpx.Response:
        """Make an HTTP request with error handling."""
        url = urljoin(self.base_url, endpoint)
        try:
            response = await self._http_client.request(method, url, **kwargs)
            response.raise_for_status()
            return response
        except httpx.TimeoutException as e:
            raise ConnectionError(f"Request timed out: {str(e)}")
        except httpx.RequestError as e:
            raise ConnectionError(f"Failed to connect: {str(e)}")
        except httpx.HTTPStatusError as e:
            error_msg = (e.response.json().get("detail", str(e))
                         if "application/json" in e.response.headers.get("content-type", "")
                         else str(e))
            raise RequestError(f"Server error {e.response.status_code}: {error_msg}")

    async def crawl(
        self,
        urls: List[str],
        browser_config: Optional[BrowserConfig] = None,
        crawler_config: Optional[CrawlerRunConfig] = None
    ) -> Union[CrawlResult, List[CrawlResult], AsyncGenerator[CrawlResult, None]]:
        """Execute a crawl operation."""
        if not self._token:
            raise Crawl4aiClientError("Authentication required. Call authenticate() first.")
        await self._check_server()

        data = self._prepare_request(urls, browser_config, crawler_config)
        is_streaming = crawler_config and crawler_config.stream

        self.logger.info(f"Crawling {len(urls)} URLs {'(streaming)' if is_streaming else ''}", tag="CRAWL")

        if is_streaming:
            async def stream_results() -> AsyncGenerator[CrawlResult, None]:
                async with self._http_client.stream("POST", f"{self.base_url}/crawl/stream", json=data) as response:
                    response.raise_for_status()
                    async for line in response.aiter_lines():
                        if line.strip():
                            result = json.loads(line)
                            if "error" in result:
                                self.logger.error_status(url=result.get("url", "unknown"), error=result["error"])
                                continue
                            self.logger.url_status(url=result.get("url", "unknown"), success=True, timing=result.get("timing", 0.0))
                            # Skip the final "completed" marker; everything else is a result
                            if result.get("status") != "completed":
                                yield CrawlResult(**result)
            return stream_results()

        response = await self._request("POST", "/crawl", json=data)
        result_data = response.json()
        if not result_data.get("success", False):
            raise RequestError(f"Crawl failed: {result_data.get('msg', 'Unknown error')}")

        results = [CrawlResult(**r) for r in result_data.get("results", [])]
        self.logger.success(f"Crawl completed with {len(results)} results", tag="CRAWL")
        return results[0] if len(results) == 1 else results

    async def get_schema(self) -> Dict[str, Any]:
        """Retrieve configuration schemas."""
        if not self._token:
            raise Crawl4aiClientError("Authentication required. Call authenticate() first.")
        response = await self._request("GET", "/schema")
        return response.json()

    async def close(self) -> None:
        """Close the HTTP client session."""
        self.logger.info("Closing client", tag="CLOSE")
        await self._http_client.aclose()

    async def __aenter__(self) -> "Crawl4aiDockerClient":
        return self

    async def __aexit__(self, exc_type: Optional[type], exc_val: Optional[Exception], exc_tb: Optional[Any]) -> None:
        await self.close()


# Example usage
async def main():
    async with Crawl4aiDockerClient(verbose=True) as client:
        await client.authenticate("user@example.com")
        result = await client.crawl(["https://example.com"])
        print(result)
        schema = await client.get_schema()
        print(schema)

if __name__ == "__main__":
    asyncio.run(main())

1170
crawl4ai/html2text/__init__.py
Normal file
File diff suppressed because it is too large

3
crawl4ai/html2text/__main__.py
Normal file
@@ -0,0 +1,3 @@
from .cli import main

main()

3
crawl4ai/html2text/_typing.py
Normal file
@@ -0,0 +1,3 @@
class OutCallback:
    def __call__(self, s: str) -> None:
        ...

330
crawl4ai/html2text/cli.py
Normal file
@@ -0,0 +1,330 @@
import argparse
import sys

from . import HTML2Text, __version__, config


def main() -> None:
    baseurl = ""

    class bcolors:
        HEADER = "\033[95m"
        OKBLUE = "\033[94m"
        OKGREEN = "\033[92m"
        WARNING = "\033[93m"
        FAIL = "\033[91m"
        ENDC = "\033[0m"
        BOLD = "\033[1m"
        UNDERLINE = "\033[4m"

    p = argparse.ArgumentParser()
    p.add_argument(
        "--default-image-alt",
        dest="default_image_alt",
        default=config.DEFAULT_IMAGE_ALT,
        help="The default alt string for images with missing ones",
    )
    p.add_argument(
        "--pad-tables",
        dest="pad_tables",
        action="store_true",
        default=config.PAD_TABLES,
        help="pad the cells to equal column width in tables",
    )
    p.add_argument(
        "--no-wrap-links",
        dest="wrap_links",
        action="store_false",
        default=config.WRAP_LINKS,
        help="don't wrap links during conversion",
    )
    p.add_argument(
        "--wrap-list-items",
        dest="wrap_list_items",
        action="store_true",
        default=config.WRAP_LIST_ITEMS,
        help="wrap list items during conversion",
    )
    p.add_argument(
        "--wrap-tables",
        dest="wrap_tables",
        action="store_true",
        default=config.WRAP_TABLES,
        help="wrap tables",
    )
    p.add_argument(
        "--ignore-emphasis",
        dest="ignore_emphasis",
        action="store_true",
        default=config.IGNORE_EMPHASIS,
        help="don't include any formatting for emphasis",
    )
    p.add_argument(
        "--reference-links",
        dest="inline_links",
        action="store_false",
        default=config.INLINE_LINKS,
        help="use reference style links instead of inline links",
    )
    p.add_argument(
        "--ignore-links",
        dest="ignore_links",
        action="store_true",
        default=config.IGNORE_ANCHORS,
        help="don't include any formatting for links",
    )
    p.add_argument(
        "--ignore-mailto-links",
        action="store_true",
        dest="ignore_mailto_links",
        default=config.IGNORE_MAILTO_LINKS,
        help="don't include mailto: links",
    )
    p.add_argument(
        "--protect-links",
        dest="protect_links",
        action="store_true",
        default=config.PROTECT_LINKS,
        help="protect links from line breaks by surrounding them with angle brackets",
    )
    p.add_argument(
        "--ignore-images",
        dest="ignore_images",
        action="store_true",
        default=config.IGNORE_IMAGES,
        help="don't include any formatting for images",
    )
    p.add_argument(
        "--images-as-html",
        dest="images_as_html",
        action="store_true",
        default=config.IMAGES_AS_HTML,
        help=(
            "Always write image tags as raw html; preserves `height`, `width` and "
            "`alt` if possible."
        ),
    )
    p.add_argument(
        "--images-to-alt",
        dest="images_to_alt",
        action="store_true",
        default=config.IMAGES_TO_ALT,
        help="Discard image data, only keep alt text",
    )
    p.add_argument(
        "--images-with-size",
        dest="images_with_size",
        action="store_true",
        default=config.IMAGES_WITH_SIZE,
        help=(
            "Write image tags with height and width attrs as raw html to retain "
            "dimensions"
        ),
    )
    p.add_argument(
        "-g",
        "--google-doc",
        action="store_true",
        dest="google_doc",
        default=False,
        help="convert an html-exported Google Document",
    )
    p.add_argument(
        "-d",
        "--dash-unordered-list",
        action="store_true",
        dest="ul_style_dash",
        default=False,
        help="use a dash rather than a star for unordered list items",
    )
    p.add_argument(
        "-e",
        "--asterisk-emphasis",
        action="store_true",
        dest="em_style_asterisk",
        default=False,
        help="use an asterisk rather than an underscore for emphasized text",
    )
    p.add_argument(
        "-b",
        "--body-width",
        dest="body_width",
        type=int,
        default=config.BODY_WIDTH,
        help="number of characters per output line, 0 for no wrap",
    )
    p.add_argument(
        "-i",
        "--google-list-indent",
        dest="list_indent",
        type=int,
        default=config.GOOGLE_LIST_INDENT,
        help="number of pixels Google indents nested lists",
    )
    p.add_argument(
        "-s",
        "--hide-strikethrough",
        action="store_true",
        dest="hide_strikethrough",
        default=False,
        help="hide strike-through text. Only relevant when -g is specified as well",
    )
    p.add_argument(
        "--escape-all",
        action="store_true",
        dest="escape_snob",
        default=False,
        help=(
            "Escape all special characters. Output is less readable, but avoids "
            "corner case formatting issues."
        ),
    )
    p.add_argument(
        "--bypass-tables",
        action="store_true",
        dest="bypass_tables",
        default=config.BYPASS_TABLES,
        help="Format tables in HTML rather than Markdown syntax.",
    )
    p.add_argument(
        "--ignore-tables",
        action="store_true",
        dest="ignore_tables",
        default=config.IGNORE_TABLES,
        help="Ignore table-related tags (table, th, td, tr) while keeping rows.",
    )
    p.add_argument(
        "--single-line-break",
        action="store_true",
        dest="single_line_break",
        default=config.SINGLE_LINE_BREAK,
        help=(
            "Use a single line break after a block element rather than two line "
            "breaks. NOTE: Requires --body-width=0"
        ),
    )
    p.add_argument(
        "--unicode-snob",
        action="store_true",
        dest="unicode_snob",
        default=config.UNICODE_SNOB,
        help="Use unicode throughout the document",
    )
    p.add_argument(
        "--no-automatic-links",
        action="store_false",
        dest="use_automatic_links",
        default=config.USE_AUTOMATIC_LINKS,
        help="Do not use automatic links wherever applicable",
    )
    p.add_argument(
        "--no-skip-internal-links",
        action="store_false",
        dest="skip_internal_links",
        default=config.SKIP_INTERNAL_LINKS,
        help="Do not skip internal links",
    )
    p.add_argument(
        "--links-after-para",
        action="store_true",
        dest="links_each_paragraph",
        default=config.LINKS_EACH_PARAGRAPH,
        help="Put links after each paragraph instead of at the end of the document",
    )
    p.add_argument(
        "--mark-code",
        action="store_true",
        dest="mark_code",
        default=config.MARK_CODE,
        help="Mark program code blocks with [code]...[/code]",
    )
    p.add_argument(
        "--decode-errors",
        dest="decode_errors",
        default=config.DECODE_ERRORS,
        help=(
            "What to do in case of decode errors. 'ignore', 'strict' and 'replace' "
            "are acceptable values"
        ),
    )
    p.add_argument(
        "--open-quote",
        dest="open_quote",
        default=config.OPEN_QUOTE,
        help="The character used to open quotes",
    )
    p.add_argument(
        "--close-quote",
        dest="close_quote",
        default=config.CLOSE_QUOTE,
        help="The character used to close quotes",
    )
    p.add_argument(
        "--version", action="version", version=".".join(map(str, __version__))
    )
    p.add_argument("filename", nargs="?")
    p.add_argument("encoding", nargs="?", default="utf-8")
    p.add_argument(
        "--include-sup-sub",
        dest="include_sup_sub",
        action="store_true",
        default=config.INCLUDE_SUP_SUB,
        help="Include the sup and sub tags",
    )
    args = p.parse_args()

    if args.filename and args.filename != "-":
        with open(args.filename, "rb") as fp:
            data = fp.read()
    else:
        data = sys.stdin.buffer.read()

    try:
        html = data.decode(args.encoding, args.decode_errors)
    except UnicodeDecodeError as err:
        warning = bcolors.WARNING + "Warning:" + bcolors.ENDC
        warning += " Use the " + bcolors.OKGREEN
        warning += "--decode-errors=ignore" + bcolors.ENDC + " flag."
        print(warning)
        raise err

    h = HTML2Text(baseurl=baseurl)
    # handle options
    if args.ul_style_dash:
        h.ul_item_mark = "-"
    if args.em_style_asterisk:
        h.emphasis_mark = "*"
        h.strong_mark = "__"

    h.body_width = args.body_width
    h.google_list_indent = args.list_indent
    h.ignore_emphasis = args.ignore_emphasis
    h.ignore_links = args.ignore_links
    h.ignore_mailto_links = args.ignore_mailto_links
    h.protect_links = args.protect_links
    h.ignore_images = args.ignore_images
    h.images_as_html = args.images_as_html
    h.images_to_alt = args.images_to_alt
    h.images_with_size = args.images_with_size
    h.google_doc = args.google_doc
    h.hide_strikethrough = args.hide_strikethrough
    h.escape_snob = args.escape_snob
    h.bypass_tables = args.bypass_tables
    h.ignore_tables = args.ignore_tables
    h.single_line_break = args.single_line_break
    h.inline_links = args.inline_links
    h.unicode_snob = args.unicode_snob
    h.use_automatic_links = args.use_automatic_links
    h.skip_internal_links = args.skip_internal_links
    h.links_each_paragraph = args.links_each_paragraph
    h.mark_code = args.mark_code
    h.wrap_links = args.wrap_links
    h.wrap_list_items = args.wrap_list_items
    h.wrap_tables = args.wrap_tables
    h.pad_tables = args.pad_tables
    h.default_image_alt = args.default_image_alt
    h.open_quote = args.open_quote
    h.close_quote = args.close_quote
    h.include_sup_sub = args.include_sup_sub

    sys.stdout.write(h.handle(html))
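Every CLI flag above maps onto a plain attribute of HTML2Text, so the converter can be driven programmatically with the same knobs. A minimal sketch, using only attributes the CLI itself assigns:

    from crawl4ai.html2text import HTML2Text

    h = HTML2Text(baseurl="")
    h.body_width = 0        # equivalent to --body-width=0 (no wrapping)
    h.ignore_links = True   # equivalent to --ignore-links
    print(h.handle("<h1>Title</h1><p>Hello <a href='https://example.com'>world</a></p>"))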
172 crawl4ai/html2text/config.py Normal file
@@ -0,0 +1,172 @@
import re

# Use Unicode characters instead of their ascii pseudo-replacements
UNICODE_SNOB = False

# Marker to use for marking tables for padding post processing
TABLE_MARKER_FOR_PAD = "special_marker_for_table_padding"
# Escape all special characters. Output is less readable, but avoids
# corner case formatting issues.
ESCAPE_SNOB = False
ESCAPE_BACKSLASH = False
ESCAPE_DOT = False
ESCAPE_PLUS = False
ESCAPE_DASH = False

# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = False

# Wrap long lines at position. 0 for no wrapping.
BODY_WIDTH = 78

# Don't show internal links (href="#local-anchor") -- corresponding link
# targets won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = True

# Use inline, rather than reference, formatting for images and links
INLINE_LINKS = True

# Protect links from line breaks surrounding them with angle brackets (in
# addition to their square brackets)
PROTECT_LINKS = False
WRAP_LINKS = True

# Wrap list items.
WRAP_LIST_ITEMS = False

# Wrap tables
WRAP_TABLES = False

# Number of pixels Google indents nested lists
GOOGLE_LIST_INDENT = 36

# Values Google and others may use to indicate bold text
BOLD_TEXT_STYLE_VALUES = ("bold", "700", "800", "900")

IGNORE_ANCHORS = False
IGNORE_MAILTO_LINKS = False
IGNORE_IMAGES = False
IMAGES_AS_HTML = False
IMAGES_TO_ALT = False
IMAGES_WITH_SIZE = False
IGNORE_EMPHASIS = False
MARK_CODE = False
DECODE_ERRORS = "strict"
DEFAULT_IMAGE_ALT = ""
PAD_TABLES = False

# Convert links with same href and text to <href> format
# if they are absolute links
USE_AUTOMATIC_LINKS = True

# For checking space-only lines on line 771
RE_SPACE = re.compile(r"\s\+")

RE_ORDERED_LIST_MATCHER = re.compile(r"\d+\.\s")
RE_UNORDERED_LIST_MATCHER = re.compile(r"[-\*\+]\s")
RE_MD_CHARS_MATCHER = re.compile(r"([\\\[\]\(\)])")
RE_MD_CHARS_MATCHER_ALL = re.compile(r"([`\*_{}\[\]\(\)#!])")

# to find links in the text
RE_LINK = re.compile(r"(\[.*?\] ?\(.*?\))|(\[.*?\]:.*?)")

# to find table separators
RE_TABLE = re.compile(r" \| ")

RE_MD_DOT_MATCHER = re.compile(
    r"""
    ^             # start of line
    (\s*\d+)      # optional whitespace and a number
    (\.)          # dot
    (?=\s)        # lookahead assert whitespace
    """,
    re.MULTILINE | re.VERBOSE,
)
RE_MD_PLUS_MATCHER = re.compile(
    r"""
    ^
    (\s*)
    (\+)
    (?=\s)
    """,
    flags=re.MULTILINE | re.VERBOSE,
)
RE_MD_DASH_MATCHER = re.compile(
    r"""
    ^
    (\s*)
    (-)
    (?=\s|\-)     # followed by whitespace (bullet list, or spaced out hr)
                  # or another dash (header or hr)
    """,
    flags=re.MULTILINE | re.VERBOSE,
)
RE_SLASH_CHARS = r"\`*_{}[]()#+-.!"
RE_MD_BACKSLASH_MATCHER = re.compile(
    r"""
    (\\)          # match one slash
    (?=[%s])      # followed by a char that requires escaping
    """
    % re.escape(RE_SLASH_CHARS),
    flags=re.VERBOSE,
)

UNIFIABLE = {
    "rsquo": "'",
    "lsquo": "'",
    "rdquo": '"',
    "ldquo": '"',
    "copy": "(C)",
    "mdash": "--",
    "nbsp": " ",
    "rarr": "->",
    "larr": "<-",
    "middot": "*",
    "ndash": "-",
    "oelig": "oe",
    "aelig": "ae",
    "agrave": "a",
    "aacute": "a",
    "acirc": "a",
    "atilde": "a",
    "auml": "a",
    "aring": "a",
    "egrave": "e",
    "eacute": "e",
    "ecirc": "e",
    "euml": "e",
    "igrave": "i",
    "iacute": "i",
    "icirc": "i",
    "iuml": "i",
    "ograve": "o",
    "oacute": "o",
    "ocirc": "o",
    "otilde": "o",
    "ouml": "o",
    "ugrave": "u",
    "uacute": "u",
    "ucirc": "u",
    "uuml": "u",
    "lrm": "",
    "rlm": "",
}

# Format tables in HTML rather than Markdown syntax
BYPASS_TABLES = False
# Ignore table-related tags (table, th, td, tr) while keeping rows
IGNORE_TABLES = False


# Use a single line break after a block element rather than two line breaks.
# NOTE: Requires body width setting to be 0.
SINGLE_LINE_BREAK = False


# Use double quotation marks when converting the <q> tag.
OPEN_QUOTE = '"'
CLOSE_QUOTE = '"'

# Include the <sup> and <sub> tags
INCLUDE_SUP_SUB = False
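These module-level constants serve as the argparse defaults in cli.py, and the compiled patterns are shared by utils.py. A quick sketch of how the list and table matchers behave:

    from crawl4ai.html2text import config

    print(bool(config.RE_ORDERED_LIST_MATCHER.match("1. first item")))    # True
    print(bool(config.RE_UNORDERED_LIST_MATCHER.match("- bullet item")))  # True
    print(bool(config.RE_TABLE.search("cell a | cell b")))                # True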
18 crawl4ai/html2text/elements.py Normal file
@@ -0,0 +1,18 @@
from typing import Dict, Optional


class AnchorElement:
    __slots__ = ["attrs", "count", "outcount"]

    def __init__(self, attrs: Dict[str, Optional[str]], count: int, outcount: int):
        self.attrs = attrs
        self.count = count
        self.outcount = outcount


class ListElement:
    __slots__ = ["name", "num"]

    def __init__(self, name: str, num: int):
        self.name = name
        self.num = num
304 crawl4ai/html2text/utils.py Normal file
@@ -0,0 +1,304 @@
import html.entities
from typing import Dict, List, Optional

from . import config

unifiable_n = {
    html.entities.name2codepoint[k]: v
    for k, v in config.UNIFIABLE.items()
    if k != "nbsp"
}


def hn(tag: str) -> int:
    if tag[0] == "h" and len(tag) == 2:
        n = tag[1]
        if "0" < n <= "9":
            return int(n)
    return 0


def dumb_property_dict(style: str) -> Dict[str, str]:
    """
    :returns: A hash of css attributes
    """
    return {
        x.strip().lower(): y.strip().lower()
        for x, y in [z.split(":", 1) for z in style.split(";") if ":" in z]
    }


def dumb_css_parser(data: str) -> Dict[str, Dict[str, str]]:
    """
    :type data: str

    :returns: A hash of css selectors, each of which contains a hash of
        css attributes.
    :rtype: dict
    """
    # remove @import sentences
    data += ";"
    importIndex = data.find("@import")
    while importIndex != -1:
        data = data[0:importIndex] + data[data.find(";", importIndex) + 1 :]
        importIndex = data.find("@import")

    # parse the css. reverted from dictionary comprehension in order to
    # support older pythons
    pairs = [x.split("{") for x in data.split("}") if "{" in x.strip()]
    try:
        elements = {a.strip(): dumb_property_dict(b) for a, b in pairs}
    except ValueError:
        elements = {}  # not that important

    return elements


def element_style(
    attrs: Dict[str, Optional[str]],
    style_def: Dict[str, Dict[str, str]],
    parent_style: Dict[str, str],
) -> Dict[str, str]:
    """
    :type attrs: dict
    :type style_def: dict
    :type parent_style: dict

    :returns: A hash of the 'final' style attributes of the element
    :rtype: dict
    """
    style = parent_style.copy()
    if "class" in attrs:
        assert attrs["class"] is not None
        for css_class in attrs["class"].split():
            css_style = style_def.get("." + css_class, {})
            style.update(css_style)
    if "style" in attrs:
        assert attrs["style"] is not None
        immediate_style = dumb_property_dict(attrs["style"])
        style.update(immediate_style)

    return style


def google_list_style(style: Dict[str, str]) -> str:
    """
    Finds out whether this is an ordered or unordered list

    :type style: dict

    :rtype: str
    """
    if "list-style-type" in style:
        list_style = style["list-style-type"]
        if list_style in ["disc", "circle", "square", "none"]:
            return "ul"

    return "ol"


def google_has_height(style: Dict[str, str]) -> bool:
    """
    Check if the style of the element has the 'height' attribute
    explicitly defined

    :type style: dict

    :rtype: bool
    """
    return "height" in style


def google_text_emphasis(style: Dict[str, str]) -> List[str]:
    """
    :type style: dict

    :returns: A list of all emphasis modifiers of the element
    :rtype: list
    """
    emphasis = []
    if "text-decoration" in style:
        emphasis.append(style["text-decoration"])
    if "font-style" in style:
        emphasis.append(style["font-style"])
    if "font-weight" in style:
        emphasis.append(style["font-weight"])

    return emphasis


def google_fixed_width_font(style: Dict[str, str]) -> bool:
    """
    Check if the css of the current element defines a fixed width font

    :type style: dict

    :rtype: bool
    """
    font_family = ""
    if "font-family" in style:
        font_family = style["font-family"]
    return "courier new" == font_family or "consolas" == font_family


def list_numbering_start(attrs: Dict[str, Optional[str]]) -> int:
    """
    Extract numbering from list element attributes

    :type attrs: dict

    :rtype: int
    """
    if "start" in attrs:
        assert attrs["start"] is not None
        try:
            return int(attrs["start"]) - 1
        except ValueError:
            pass

    return 0


def skipwrap(
    para: str, wrap_links: bool, wrap_list_items: bool, wrap_tables: bool
) -> bool:
    # If it appears to contain a link, don't wrap
    if not wrap_links and config.RE_LINK.search(para):
        return True
    # If the text begins with four spaces or one tab, it's a code block;
    # don't wrap
    if para[0:4] == "    " or para[0] == "\t":
        return True

    # If the text begins with only two "--", possibly preceded by
    # whitespace, that's an emdash; so wrap.
    stripped = para.lstrip()
    if stripped[0:2] == "--" and len(stripped) > 2 and stripped[2] != "-":
        return False

    # I'm not sure what this is for; I thought it was to detect lists,
    # but there's a <br>-inside-<span> case in one of the tests that
    # also depends upon it.
    if stripped[0:1] in ("-", "*") and not stripped[0:2] == "**":
        return not wrap_list_items

    # If the text contains a pipe character it is likely a table
    if not wrap_tables and config.RE_TABLE.search(para):
        return True

    # If the text begins with a single -, *, or +, followed by a space,
    # or an integer, followed by a ., followed by a space (in either
    # case optionally preceded by whitespace), it's a list; don't wrap.
    return bool(
        config.RE_ORDERED_LIST_MATCHER.match(stripped)
        or config.RE_UNORDERED_LIST_MATCHER.match(stripped)
    )


def escape_md(text: str) -> str:
    """
    Escapes markdown-sensitive characters within other markdown
    constructs.
    """
    return config.RE_MD_CHARS_MATCHER.sub(r"\\\1", text)


def escape_md_section(
    text: str,
    escape_backslash: bool = True,
    snob: bool = False,
    escape_dot: bool = True,
    escape_plus: bool = True,
    escape_dash: bool = True,
) -> str:
    """
    Escapes markdown-sensitive characters across whole document sections.
    Each escaping operation can be controlled individually.
    """
    if escape_backslash:
        text = config.RE_MD_BACKSLASH_MATCHER.sub(r"\\\1", text)

    if snob:
        text = config.RE_MD_CHARS_MATCHER_ALL.sub(r"\\\1", text)

    if escape_dot:
        text = config.RE_MD_DOT_MATCHER.sub(r"\1\\\2", text)

    if escape_plus:
        text = config.RE_MD_PLUS_MATCHER.sub(r"\1\\\2", text)

    if escape_dash:
        text = config.RE_MD_DASH_MATCHER.sub(r"\1\\\2", text)

    return text


def reformat_table(lines: List[str], right_margin: int) -> List[str]:
    """
    Given the lines of a table, pads the cells and returns the new lines.
    """
    # find the maximum width of the columns
    max_width = [len(x.rstrip()) + right_margin for x in lines[0].split("|")]
    max_cols = len(max_width)
    for line in lines:
        cols = [x.rstrip() for x in line.split("|")]
        num_cols = len(cols)

        # don't drop any data if colspan attributes result in unequal lengths
        if num_cols < max_cols:
            cols += [""] * (max_cols - num_cols)
        elif max_cols < num_cols:
            max_width += [len(x) + right_margin for x in cols[-(num_cols - max_cols) :]]
            max_cols = num_cols

        max_width = [
            max(len(x) + right_margin, old_len) for x, old_len in zip(cols, max_width)
        ]

    # reformat
    new_lines = []
    for line in lines:
        cols = [x.rstrip() for x in line.split("|")]
        if set(line.strip()) == set("-|"):
            filler = "-"
            new_cols = [
                x.rstrip() + (filler * (M - len(x.rstrip())))
                for x, M in zip(cols, max_width)
            ]
            new_lines.append("|-" + "|".join(new_cols) + "|")
        else:
            filler = " "
            new_cols = [
                x.rstrip() + (filler * (M - len(x.rstrip())))
                for x, M in zip(cols, max_width)
            ]
            new_lines.append("| " + "|".join(new_cols) + "|")
    return new_lines


def pad_tables_in_text(text: str, right_margin: int = 1) -> str:
    """
    Provide padding for tables in the text.
    """
    lines = text.split("\n")
    table_buffer = []  # type: List[str]
    table_started = False
    new_lines = []
    for line in lines:
        # Toggle table started
        if config.TABLE_MARKER_FOR_PAD in line:
            table_started = not table_started
            if not table_started:
                table = reformat_table(table_buffer, right_margin)
                new_lines.extend(table)
                table_buffer = []
                new_lines.append("")
            continue
        # Process lines
        if table_started:
            table_buffer.append(line)
        else:
            new_lines.append(line)
    return "\n".join(new_lines)
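A short sketch of the escaping helpers in isolation, using the patterns defined in config.py:

    from crawl4ai.html2text.utils import escape_md, escape_md_section

    print(escape_md("see [docs](here)"))       # backslash-escapes the brackets and parens
    print(escape_md_section("1. not a list"))  # "1\. not a list": the dot is escaped so Markdown won't render a list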
69 crawl4ai/hub.py Normal file
@@ -0,0 +1,69 @@
# crawl4ai/hub.py
from abc import ABC, abstractmethod
from typing import Dict, Type, Union
import logging
import importlib
from pathlib import Path
import inspect

logger = logging.getLogger(__name__)


class BaseCrawler(ABC):
    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)

    @abstractmethod
    async def run(self, url: str = "", **kwargs) -> str:
        """
        Implement this method to return a JSON string.
        Must accept a URL plus arbitrary kwargs for flexibility.
        """
        pass

    def __init_subclass__(cls, **kwargs):
        """Enforce interface validation on subclassing."""
        super().__init_subclass__(**kwargs)

        # Verify the run method signature
        run_method = cls.run
        if not run_method.__code__.co_argcount >= 2:  # self + url
            raise TypeError(f"{cls.__name__} must implement 'run(self, url: str, **kwargs)'")

        # Verify async nature
        if not inspect.iscoroutinefunction(run_method):
            raise TypeError(f"{cls.__name__}.run must be async")


class CrawlerHub:
    _crawlers: Dict[str, Type[BaseCrawler]] = {}

    @classmethod
    def _discover_crawlers(cls):
        """Dynamically load crawlers from the crawlers/ package."""
        base_path = Path(__file__).parent / "crawlers"
        for crawler_dir in base_path.iterdir():
            if crawler_dir.is_dir():
                try:
                    module = importlib.import_module(
                        f"crawl4ai.crawlers.{crawler_dir.name}.crawler"
                    )
                    for attr in dir(module):
                        cls._maybe_register_crawler(
                            getattr(module, attr), crawler_dir.name
                        )
                except Exception as e:
                    logger.warning(f"Failed {crawler_dir.name}: {str(e)}")

    @classmethod
    def _maybe_register_crawler(cls, obj, name: str):
        """Register obj under name if it is a concrete BaseCrawler subclass."""
        if isinstance(obj, type) and issubclass(obj, BaseCrawler) and obj != BaseCrawler:
            module = importlib.import_module(obj.__module__)
            obj.meta = getattr(module, "__meta__", {})
            cls._crawlers[name] = obj

    @classmethod
    def get(cls, name: str) -> Union[Type[BaseCrawler], None]:
        if not cls._crawlers:
            cls._discover_crawlers()
        return cls._crawlers.get(name)
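A hedged sketch of how a crawler plugs into the hub. The module path crawl4ai/crawlers/example_site/crawler.py and the class name are hypothetical, chosen only to match the discovery convention in _discover_crawlers:

    # crawl4ai/crawlers/example_site/crawler.py (hypothetical location)
    from crawl4ai.hub import BaseCrawler, CrawlerHub

    class ExampleSiteCrawler(BaseCrawler):
        async def run(self, url: str = "", **kwargs) -> str:
            return '{"crawled": "%s"}' % url  # must return a JSON string

    # Elsewhere: discovery happens lazily on the first get()
    crawler_cls = CrawlerHub.get("example_site")
    # result = await crawler_cls().run(url="https://example.com")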
186 crawl4ai/install.py Normal file
@@ -0,0 +1,186 @@
import subprocess
import sys
import asyncio
from .async_logger import AsyncLogger, LogLevel
from pathlib import Path
import os
import shutil

# Initialize logger
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)


def setup_home_directory():
    """Set up the .crawl4ai folder structure in the user's home directory."""
    base_dir = os.getenv("CRAWL4_AI_BASE_DIRECTORY")
    crawl4ai_folder = Path(base_dir) if base_dir else Path.home()
    crawl4ai_config = crawl4ai_folder / "global.yml"
    crawl4ai_folder = crawl4ai_folder / ".crawl4ai"
    cache_folder = crawl4ai_folder / "cache"
    content_folders = [
        "html_content",
        "cleaned_html",
        "markdown_content",
        "extracted_content",
        "screenshots",
    ]

    # Clean up the old cache if it exists
    if cache_folder.exists():
        shutil.rmtree(cache_folder)

    # Create the new folder structure
    crawl4ai_folder.mkdir(exist_ok=True)
    cache_folder.mkdir(exist_ok=True)
    for folder in content_folders:
        (crawl4ai_folder / folder).mkdir(exist_ok=True)

    # If the config file does not exist, create it
    if not crawl4ai_config.exists():
        with open(crawl4ai_config, "w") as f:
            f.write("")


def post_install():
    """
    Run all post-installation tasks.
    Checks the CRAWL4AI_MODE environment variable. If set to 'api',
    skips Playwright browser installation.
    """
    logger.info("Running post-installation setup...", tag="INIT")
    setup_home_directory()

    # Check the environment variable to conditionally skip the Playwright install
    run_mode = os.getenv('CRAWL4AI_MODE')
    if run_mode == 'api':
        logger.warning(
            "CRAWL4AI_MODE=api detected. Skipping Playwright browser installation.",
            tag="SETUP"
        )
    else:
        # Proceed with installation only if mode is not 'api'
        install_playwright()

    run_migration()
    # TODO: Will be added in the future
    # setup_builtin_browser()
    logger.success("Post-installation setup completed!", tag="COMPLETE")


def setup_builtin_browser():
    """Set up a builtin browser for use with Crawl4AI."""
    try:
        logger.info("Setting up builtin browser...", tag="INIT")
        asyncio.run(_setup_builtin_browser())
        logger.success("Builtin browser setup completed!", tag="COMPLETE")
    except Exception as e:
        logger.warning(f"Failed to set up builtin browser: {e}")
        logger.warning("You can manually set up a builtin browser using 'crawl4ai-doctor builtin-browser-start'")


async def _setup_builtin_browser():
    try:
        # Import BrowserProfiler here to avoid circular imports
        from .browser_profiler import BrowserProfiler
        profiler = BrowserProfiler(logger=logger)

        # Launch the builtin browser
        cdp_url = await profiler.launch_builtin_browser(headless=True)
        if cdp_url:
            logger.success(f"Builtin browser launched at {cdp_url}", tag="BROWSER")
        else:
            logger.warning("Failed to launch builtin browser", tag="BROWSER")
    except Exception as e:
        logger.warning(f"Error setting up builtin browser: {e}", tag="BROWSER")
        raise


def install_playwright():
    logger.info("Installing Playwright browsers...", tag="INIT")
    try:
        subprocess.check_call(
            [
                sys.executable,
                "-m",
                "playwright",
                "install",
                "--with-deps",
                "--force",
                "chromium",
            ]
        )
        logger.success(
            "Playwright installation completed successfully.", tag="COMPLETE"
        )
    except subprocess.CalledProcessError:
        logger.warning(
            f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
        )
    except Exception:
        logger.warning(
            f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
        )


def run_migration():
    """Initialize the database during installation."""
    try:
        logger.info("Starting database initialization...", tag="INIT")
        from crawl4ai.async_database import async_db_manager

        asyncio.run(async_db_manager.initialize())
        logger.success(
            "Database initialization completed successfully.", tag="COMPLETE"
        )
    except ImportError:
        logger.warning("Database module not found. Will initialize on first use.")
    except Exception as e:
        logger.warning(f"Database initialization failed: {e}")
        logger.warning("Database will be initialized on first use")


async def run_doctor():
    """Test if Crawl4AI is working properly."""
    logger.info("Running Crawl4AI health check...", tag="INIT")
    try:
        from .async_webcrawler import (
            AsyncWebCrawler,
            BrowserConfig,
            CrawlerRunConfig,
            CacheMode,
        )

        browser_config = BrowserConfig(
            headless=True,
            browser_type="chromium",
            ignore_https_errors=True,
            light_mode=True,
            viewport_width=1280,
            viewport_height=720,
        )

        run_config = CrawlerRunConfig(
            cache_mode=CacheMode.BYPASS,
            screenshot=True,
        )

        async with AsyncWebCrawler(config=browser_config) as crawler:
            logger.info("Testing crawling capabilities...", tag="TEST")
            result = await crawler.arun(url="https://crawl4ai.com", config=run_config)

            if result and result.markdown:
                logger.success("✅ Crawling test passed!", tag="COMPLETE")
                return True
            else:
                raise Exception("Failed to get content")

    except Exception as e:
        logger.error(f"❌ Test failed: {e}", tag="ERROR")
        return False


def doctor():
    """Entry point for the doctor command."""
    asyncio.run(run_doctor())
    sys.exit(0)
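A sketch of the API-mode path defined above: with CRAWL4AI_MODE=api exported before post_install() runs, the Playwright browser download is skipped while the home directory and database are still set up.

    import os

    os.environ["CRAWL4AI_MODE"] = "api"  # skip the Playwright browser download

    from crawl4ai.install import post_install
    post_install()  # still runs setup_home_directory() and run_migration()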
18 crawl4ai/js_snippet/__init__.py Normal file
@@ -0,0 +1,18 @@
import os


def load_js_script(script_name):
    """Load the named JS script from the folder containing this file and return its content as a string."""
    # Get the path of the current script
    current_script_path = os.path.dirname(os.path.realpath(__file__))
    # Get the path of the script to load
    script_path = os.path.join(current_script_path, script_name + ".js")
    # Check if the script exists
    if not os.path.exists(script_path):
        raise ValueError(
            f"Script {script_name} not found in the folder {current_script_path}"
        )
    # Load the content of the script
    with open(script_path, "r") as f:
        script_content = f.read()
    return script_content
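Usage mirrors the file names in this package, minus the .js extension:

    from crawl4ai.js_snippet import load_js_script

    script = load_js_script("navigator_overrider")  # reads navigator_overrider.js from this package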
25 crawl4ai/js_snippet/navigator_overrider.js Normal file
@@ -0,0 +1,25 @@
// Pass the Permissions Test.
const originalQuery = window.navigator.permissions.query;
window.navigator.permissions.query = (parameters) =>
  parameters.name === "notifications"
    ? Promise.resolve({ state: Notification.permission })
    : originalQuery(parameters);
Object.defineProperty(navigator, "webdriver", {
  get: () => undefined,
});
window.navigator.chrome = {
  runtime: {},
  // Add other properties if necessary
};
Object.defineProperty(navigator, "plugins", {
  get: () => [1, 2, 3, 4, 5],
});
Object.defineProperty(navigator, "languages", {
  get: () => ["en-US", "en"],
});
Object.defineProperty(document, "hidden", {
  get: () => false,
});
Object.defineProperty(document, "visibilityState", {
  get: () => "visible",
});
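One plausible way to use this snippet outside Crawl4AI's own pipeline (a sketch, not the library's documented API) is to register it as a Playwright init script so the overrides run before any page code:

    # Minimal sketch; assumes a Playwright BrowserContext named `context`
    # is already available inside an async function.
    from crawl4ai.js_snippet import load_js_script

    await context.add_init_script(load_js_script("navigator_overrider"))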
120 crawl4ai/js_snippet/remove_overlay_elements.js Normal file
@@ -0,0 +1,120 @@
async () => {
  // Function to check if element is visible
  const isVisible = (elem) => {
    const style = window.getComputedStyle(elem);
    return style.display !== "none" && style.visibility !== "hidden" && style.opacity !== "0";
  };

  // Common selectors for popups and overlays
  const commonSelectors = [
    // Close buttons first
    'button[class*="close" i]',
    'button[class*="dismiss" i]',
    'button[aria-label*="close" i]',
    'button[title*="close" i]',
    'a[class*="close" i]',
    'span[class*="close" i]',

    // Cookie notices
    '[class*="cookie-banner" i]',
    '[id*="cookie-banner" i]',
    '[class*="cookie-consent" i]',
    '[id*="cookie-consent" i]',

    // Newsletter/subscription dialogs
    '[class*="newsletter" i]',
    '[class*="subscribe" i]',

    // Generic popups/modals
    '[class*="popup" i]',
    '[class*="modal" i]',
    '[class*="overlay" i]',
    '[class*="dialog" i]',
    '[role="dialog"]',
    '[role="alertdialog"]',
  ];

  // Try to click close buttons first
  for (const selector of commonSelectors.slice(0, 6)) {
    const closeButtons = document.querySelectorAll(selector);
    for (const button of closeButtons) {
      if (isVisible(button)) {
        try {
          button.click();
          await new Promise((resolve) => setTimeout(resolve, 100));
        } catch (e) {
          console.log("Error clicking button:", e);
        }
      }
    }
  }

  // Remove remaining overlay elements
  const removeOverlays = () => {
    // Find elements with high z-index
    const allElements = document.querySelectorAll("*");
    for (const elem of allElements) {
      const style = window.getComputedStyle(elem);
      const zIndex = parseInt(style.zIndex);
      const position = style.position;

      if (
        isVisible(elem) &&
        (zIndex > 999 || position === "fixed" || position === "absolute") &&
        (elem.offsetWidth > window.innerWidth * 0.5 ||
          elem.offsetHeight > window.innerHeight * 0.5 ||
          style.backgroundColor.includes("rgba") ||
          parseFloat(style.opacity) < 1)
      ) {
        elem.remove();
      }
    }

    // Remove elements matching common selectors
    for (const selector of commonSelectors) {
      const elements = document.querySelectorAll(selector);
      elements.forEach((elem) => {
        if (isVisible(elem)) {
          elem.remove();
        }
      });
    }
  };

  // Remove overlay elements
  removeOverlays();

  // Remove any fixed/sticky position elements at the top/bottom
  const removeFixedElements = () => {
    const elements = document.querySelectorAll("*");
    elements.forEach((elem) => {
      const style = window.getComputedStyle(elem);
      if ((style.position === "fixed" || style.position === "sticky") && isVisible(elem)) {
        elem.remove();
      }
    });
  };

  removeFixedElements();

  // Remove empty block elements such as: div, p, span, etc.
  const removeEmptyBlockElements = () => {
    const blockElements = document.querySelectorAll(
      "div, p, span, section, article, header, footer, aside, nav, main, ul, ol, li, dl, dt, dd, h1, h2, h3, h4, h5, h6"
    );
    blockElements.forEach((elem) => {
      if (elem.innerText.trim() === "") {
        elem.remove();
      }
    });
  };

  // Remove margin-right and padding-right from body (often added by modal scripts)
  document.body.style.marginRight = "0px";
  document.body.style.paddingRight = "0px";
  document.body.style.overflow = "auto";

  // Wait a bit for any animations to complete
  document.body.scrollIntoView(false);
  await new Promise((resolve) => setTimeout(resolve, 50));
};
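Because the whole file is a single async arrow function, it can be run against an already-loaded page. A hedged sketch using Playwright, which invokes a function expression passed to evaluate():

    # Sketch: run the overlay-removal snippet on a loaded Playwright page.
    from crawl4ai.js_snippet import load_js_script

    await page.evaluate(load_js_script("remove_overlay_elements"))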
54 crawl4ai/js_snippet/update_image_dimensions.js Normal file
@@ -0,0 +1,54 @@
() => {
  return new Promise((resolve) => {
    const filterImage = (img) => {
      // Filter out images that are too small
      if (img.width < 100 && img.height < 100) return false;

      // Filter out images that are not visible
      const rect = img.getBoundingClientRect();
      if (rect.width === 0 || rect.height === 0) return false;

      // Filter out images with certain class names (e.g., icons, thumbnails)
      if (img.classList.contains("icon") || img.classList.contains("thumbnail")) return false;

      // Filter out images with certain patterns in their src (e.g., placeholder images)
      if (img.src.includes("placeholder") || img.src.includes("icon")) return false;

      return true;
    };

    const images = Array.from(document.querySelectorAll("img")).filter(filterImage);
    let imagesLeft = images.length;

    if (imagesLeft === 0) {
      resolve();
      return;
    }

    const checkImage = (img) => {
      if (img.complete && img.naturalWidth !== 0) {
        img.setAttribute("width", img.naturalWidth);
        img.setAttribute("height", img.naturalHeight);
        imagesLeft--;
        if (imagesLeft === 0) resolve();
      }
    };

    images.forEach((img) => {
      checkImage(img);
      if (!img.complete) {
        img.onload = () => {
          checkImage(img);
        };
        img.onerror = () => {
          imagesLeft--;
          if (imagesLeft === 0) resolve();
        };
      }
    });

    // The 5-second fallback timeout is disabled; resolve immediately instead
    // setTimeout(() => resolve(), 5000);
    resolve();
  });
};
0 crawl4ai/legacy/__init__.py Normal file
123 crawl4ai/legacy/cli.py Normal file
@@ -0,0 +1,123 @@
import click
import sys
import asyncio
from typing import List
from .docs_manager import DocsManager
from .async_logger import AsyncLogger

logger = AsyncLogger(verbose=True)
docs_manager = DocsManager(logger)


def print_table(headers: List[str], rows: List[List[str]], padding: int = 2):
    """Print a formatted table with headers and rows"""
    widths = [max(len(str(cell)) for cell in col) for col in zip(headers, *rows)]
    border = "+" + "+".join("-" * (w + 2 * padding) for w in widths) + "+"

    def format_row(row):
        return (
            "|"
            + "|".join(
                f"{' ' * padding}{str(cell):<{w}}{' ' * padding}"
                for cell, w in zip(row, widths)
            )
            + "|"
        )

    click.echo(border)
    click.echo(format_row(headers))
    click.echo(border)
    for row in rows:
        click.echo(format_row(row))
    click.echo(border)


@click.group()
def cli():
    """Crawl4AI Command Line Interface"""
    pass


@cli.group()
def docs():
    """Documentation operations"""
    pass


@docs.command()
@click.argument("sections", nargs=-1)
@click.option(
    "--mode", type=click.Choice(["extended", "condensed"]), default="extended"
)
def combine(sections: tuple, mode: str):
    """Combine documentation sections"""
    try:
        asyncio.run(docs_manager.ensure_docs_exist())
        click.echo(docs_manager.generate(sections, mode))
    except Exception as e:
        logger.error(str(e), tag="ERROR")
        sys.exit(1)


@docs.command()
@click.argument("query")
@click.option("--top-k", "-k", default=5)
@click.option("--build-index", is_flag=True, help="Build index if missing")
def search(query: str, top_k: int, build_index: bool):
    """Search documentation"""
    try:
        result = docs_manager.search(query, top_k)
        if result == "No search index available. Call build_search_index() first.":
            if build_index or click.confirm("No search index found. Build it now?"):
                asyncio.run(docs_manager.llm_text.generate_index_files())
                result = docs_manager.search(query, top_k)
        click.echo(result)
    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


@docs.command()
def update():
    """Update docs from GitHub"""
    try:
        asyncio.run(docs_manager.fetch_docs())
        click.echo("Documentation updated successfully")
    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


@docs.command()
@click.option("--force-facts", is_flag=True, help="Force regenerate fact files")
@click.option("--clear-cache", is_flag=True, help="Clear BM25 cache")
def index(force_facts: bool, clear_cache: bool):
    """Build or rebuild search indexes"""
    try:
        asyncio.run(docs_manager.ensure_docs_exist())
        asyncio.run(
            docs_manager.llm_text.generate_index_files(
                force_generate_facts=force_facts, clear_bm25_cache=clear_cache
            )
        )
        click.echo("Search indexes built successfully")
    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


# Add docs list command
@docs.command()
def list():
    """List available documentation sections"""
    try:
        sections = docs_manager.list()
        print_table(["Sections"], [[section] for section in sections])

    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


if __name__ == "__main__":
    cli()
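Since the commands are ordinary click groups, they can be exercised in-process with click's test runner; a minimal sketch:

    from click.testing import CliRunner
    from crawl4ai.legacy.cli import cli

    result = CliRunner().invoke(cli, ["docs", "list"])
    print(result.output)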
394 crawl4ai/legacy/crawler_strategy.py Normal file
@@ -0,0 +1,394 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.chrome.service import Service
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.support.ui import WebDriverWait
|
||||
from selenium.webdriver.support import expected_conditions as EC
|
||||
from selenium.webdriver.chrome.options import Options
|
||||
from selenium.common.exceptions import InvalidArgumentException, WebDriverException
|
||||
# from selenium.webdriver.chrome.service import Service as ChromeService
|
||||
# from webdriver_manager.chrome import ChromeDriverManager
|
||||
# from urllib3.exceptions import MaxRetryError
|
||||
|
||||
from .config import *
|
||||
import logging, time
|
||||
import base64
|
||||
from PIL import Image, ImageDraw, ImageFont
|
||||
from io import BytesIO
|
||||
from typing import Callable
|
||||
import requests
|
||||
import os
|
||||
from pathlib import Path
|
||||
from .utils import *
|
||||
|
||||
logger = logging.getLogger("selenium.webdriver.remote.remote_connection")
|
||||
logger.setLevel(logging.WARNING)
|
||||
|
||||
logger_driver = logging.getLogger("selenium.webdriver.common.service")
|
||||
logger_driver.setLevel(logging.WARNING)
|
||||
|
||||
urllib3_logger = logging.getLogger("urllib3.connectionpool")
|
||||
urllib3_logger.setLevel(logging.WARNING)
|
||||
|
||||
# Disable http.client logging
|
||||
http_client_logger = logging.getLogger("http.client")
|
||||
http_client_logger.setLevel(logging.WARNING)
|
||||
|
||||
# Disable driver_finder and service logging
|
||||
driver_finder_logger = logging.getLogger("selenium.webdriver.common.driver_finder")
|
||||
driver_finder_logger.setLevel(logging.WARNING)
|
||||
|
||||
|
||||
class CrawlerStrategy(ABC):
|
||||
@abstractmethod
|
||||
def crawl(self, url: str, **kwargs) -> str:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def take_screenshot(self, save_path: str):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def update_user_agent(self, user_agent: str):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def set_hook(self, hook_type: str, hook: Callable):
|
||||
pass
|
||||
|
||||
|
||||
class CloudCrawlerStrategy(CrawlerStrategy):
|
||||
def __init__(self, use_cached_html=False):
|
||||
super().__init__()
|
||||
self.use_cached_html = use_cached_html
|
||||
|
||||
def crawl(self, url: str) -> str:
|
||||
data = {
|
||||
"urls": [url],
|
||||
"include_raw_html": True,
|
||||
"forced": True,
|
||||
"extract_blocks": False,
|
||||
}
|
||||
|
||||
response = requests.post("http://crawl4ai.uccode.io/crawl", json=data)
|
||||
response = response.json()
|
||||
html = response["results"][0]["html"]
|
||||
return sanitize_input_encode(html)
|
||||
|
||||
|
||||
class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
|
||||
def __init__(self, use_cached_html=False, js_code=None, **kwargs):
|
||||
super().__init__()
|
||||
print("[LOG] 🚀 Initializing LocalSeleniumCrawlerStrategy")
|
||||
self.options = Options()
|
||||
self.options.headless = True
|
||||
if kwargs.get("proxy"):
|
||||
self.options.add_argument("--proxy-server={}".format(kwargs.get("proxy")))
|
||||
if kwargs.get("user_agent"):
|
||||
self.options.add_argument("--user-agent=" + kwargs.get("user_agent"))
|
||||
else:
|
||||
user_agent = kwargs.get(
|
||||
"user_agent",
|
||||
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
|
||||
)
|
||||
self.options.add_argument(f"--user-agent={user_agent}")
|
||||
self.options.add_argument(
|
||||
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
|
||||
)
|
||||
|
||||
self.options.headless = kwargs.get("headless", True)
|
||||
if self.options.headless:
|
||||
self.options.add_argument("--headless")
|
||||
|
||||
self.options.add_argument("--disable-gpu")
|
||||
self.options.add_argument("--window-size=1920,1080")
|
||||
self.options.add_argument("--no-sandbox")
|
||||
self.options.add_argument("--disable-dev-shm-usage")
|
||||
self.options.add_argument("--disable-blink-features=AutomationControlled")
|
||||
|
||||
# self.options.add_argument("--disable-dev-shm-usage")
|
||||
self.options.add_argument("--disable-gpu")
|
||||
# self.options.add_argument("--disable-extensions")
|
||||
# self.options.add_argument("--disable-infobars")
|
||||
# self.options.add_argument("--disable-logging")
|
||||
# self.options.add_argument("--disable-popup-blocking")
|
||||
# self.options.add_argument("--disable-translate")
|
||||
# self.options.add_argument("--disable-default-apps")
|
||||
# self.options.add_argument("--disable-background-networking")
|
||||
# self.options.add_argument("--disable-sync")
|
||||
# self.options.add_argument("--disable-features=NetworkService,NetworkServiceInProcess")
|
||||
# self.options.add_argument("--disable-browser-side-navigation")
|
||||
# self.options.add_argument("--dns-prefetch-disable")
|
||||
# self.options.add_argument("--disable-web-security")
|
||||
self.options.add_argument("--log-level=3")
|
||||
self.use_cached_html = use_cached_html
|
||||
self.use_cached_html = use_cached_html
|
||||
self.js_code = js_code
|
||||
self.verbose = kwargs.get("verbose", False)
|
||||
|
||||
# Hooks
|
||||
self.hooks = {
|
||||
"on_driver_created": None,
|
||||
"on_user_agent_updated": None,
|
||||
"before_get_url": None,
|
||||
"after_get_url": None,
|
||||
"before_return_html": None,
|
||||
}
|
||||
|
||||
# chromedriver_autoinstaller.install()
|
||||
# import chromedriver_autoinstaller
|
||||
# crawl4ai_folder = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai")
|
||||
# driver = webdriver.Chrome(service=ChromeService(ChromeDriverManager().install()), options=self.options)
|
||||
# chromedriver_path = chromedriver_autoinstaller.install()
|
||||
# chromedriver_path = chromedriver_autoinstaller.utils.download_chromedriver()
|
||||
# self.service = Service(chromedriver_autoinstaller.install())
|
||||
|
||||
# chromedriver_path = ChromeDriverManager().install()
|
||||
# self.service = Service(chromedriver_path)
|
||||
# self.service.log_path = "NUL"
|
||||
# self.driver = webdriver.Chrome(service=self.service, options=self.options)
|
||||
|
||||
# Use selenium-manager (built into Selenium 4.10.0+)
|
||||
self.service = Service()
|
||||
self.driver = webdriver.Chrome(options=self.options)
|
||||
|
||||
self.driver = self.execute_hook("on_driver_created", self.driver)
|
||||
|
||||
if kwargs.get("cookies"):
|
||||
for cookie in kwargs.get("cookies"):
|
||||
self.driver.add_cookie(cookie)
|
||||
|
||||
def set_hook(self, hook_type: str, hook: Callable):
|
||||
if hook_type in self.hooks:
|
||||
self.hooks[hook_type] = hook
|
||||
else:
|
||||
raise ValueError(f"Invalid hook type: {hook_type}")
|
||||
|
||||
def execute_hook(self, hook_type: str, *args):
|
||||
hook = self.hooks.get(hook_type)
|
||||
if hook:
|
||||
result = hook(*args)
|
||||
if result is not None:
|
||||
if isinstance(result, webdriver.Chrome):
|
||||
return result
|
||||
else:
|
||||
raise TypeError(
|
||||
f"Hook {hook_type} must return an instance of webdriver.Chrome or None."
|
||||
)
|
||||
# If the hook returns None or there is no hook, return self.driver
|
||||
return self.driver
|
||||
|
||||
def update_user_agent(self, user_agent: str):
|
||||
self.options.add_argument(f"user-agent={user_agent}")
|
||||
self.driver.quit()
|
||||
self.driver = webdriver.Chrome(service=self.service, options=self.options)
|
||||
self.driver = self.execute_hook("on_user_agent_updated", self.driver)
|
||||
|
||||
def set_custom_headers(self, headers: dict):
|
||||
# Enable Network domain for sending headers
|
||||
self.driver.execute_cdp_cmd("Network.enable", {})
|
||||
# Set extra HTTP headers
|
||||
self.driver.execute_cdp_cmd("Network.setExtraHTTPHeaders", {"headers": headers})
|
||||
|
||||
def _ensure_page_load(self, max_checks=6, check_interval=0.01):
|
||||
initial_length = len(self.driver.page_source)
|
||||
|
||||
for ix in range(max_checks):
|
||||
# print(f"Checking page load: {ix}")
|
||||
time.sleep(check_interval)
|
||||
current_length = len(self.driver.page_source)
|
||||
|
||||
if current_length != initial_length:
|
||||
break
|
||||
|
||||
return self.driver.page_source
|
||||
|
||||
def crawl(self, url: str, **kwargs) -> str:
|
||||
# Create md5 hash of the URL
|
||||
import hashlib
|
||||
|
||||
url_hash = hashlib.md5(url.encode()).hexdigest()
|
||||
|
||||
if self.use_cached_html:
|
||||
cache_file_path = os.path.join(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()),
|
||||
".crawl4ai",
|
||||
"cache",
|
||||
url_hash,
|
||||
)
|
||||
if os.path.exists(cache_file_path):
|
||||
with open(cache_file_path, "r") as f:
|
||||
return sanitize_input_encode(f.read())
|
||||
|
||||
try:
|
||||
self.driver = self.execute_hook("before_get_url", self.driver)
|
||||
if self.verbose:
|
||||
print(f"[LOG] 🕸️ Crawling {url} using LocalSeleniumCrawlerStrategy...")
|
||||
self.driver.get(url) # <html><head></head><body></body></html>
|
||||
|
||||
WebDriverWait(self.driver, 20).until(
|
||||
lambda d: d.execute_script("return document.readyState") == "complete"
|
||||
)
|
||||
WebDriverWait(self.driver, 10).until(
|
||||
EC.presence_of_all_elements_located((By.TAG_NAME, "body"))
|
||||
)
|
||||
|
||||
self.driver.execute_script(
|
||||
"window.scrollTo(0, document.body.scrollHeight);"
|
||||
)
|
||||
|
||||
self.driver = self.execute_hook("after_get_url", self.driver)
|
||||
html = sanitize_input_encode(
|
||||
self._ensure_page_load()
|
||||
) # self.driver.page_source
|
||||
can_not_be_done_headless = (
|
||||
False # Look at my creativity for naming variables
|
||||
)
|
||||
|
||||
# TODO: Very ugly approach, but promise to change it!
|
||||
if (
|
||||
kwargs.get("bypass_headless", False)
|
||||
or html == "<html><head></head><body></body></html>"
|
||||
):
|
||||
print(
|
||||
"[LOG] 🙌 Page could not be loaded in headless mode. Trying non-headless mode..."
|
||||
)
|
||||
can_not_be_done_headless = True
|
||||
options = Options()
|
||||
options.headless = False
|
||||
# set window size very small
|
||||
options.add_argument("--window-size=5,5")
|
||||
driver = webdriver.Chrome(service=self.service, options=options)
|
||||
driver.get(url)
|
||||
self.driver = self.execute_hook("after_get_url", driver)
|
||||
html = sanitize_input_encode(driver.page_source)
|
||||
driver.quit()
|
||||
|
||||
# Execute JS code if provided
|
||||
self.js_code = kwargs.get("js_code", self.js_code)
|
||||
if self.js_code and type(self.js_code) == str:
|
||||
self.driver.execute_script(self.js_code)
|
||||
# Optionally, wait for some condition after executing the JS code
|
||||
WebDriverWait(self.driver, 10).until(
|
||||
lambda driver: driver.execute_script("return document.readyState")
|
||||
== "complete"
|
||||
)
|
||||
elif self.js_code and type(self.js_code) == list:
|
||||
for js in self.js_code:
|
||||
self.driver.execute_script(js)
|
||||
WebDriverWait(self.driver, 10).until(
|
||||
lambda driver: driver.execute_script(
|
||||
"return document.readyState"
|
||||
)
|
||||
== "complete"
|
||||
)
|
||||
|
||||
# Optionally, wait for some condition after executing the JS code : Contributed by (https://github.com/jonymusky)
|
||||
wait_for = kwargs.get("wait_for", False)
|
||||
if wait_for:
|
||||
if callable(wait_for):
|
||||
print("[LOG] 🔄 Waiting for condition...")
|
||||
WebDriverWait(self.driver, 20).until(wait_for)
|
||||
else:
|
||||
print("[LOG] 🔄 Waiting for condition...")
|
||||
WebDriverWait(self.driver, 20).until(
|
||||
EC.presence_of_element_located((By.CSS_SELECTOR, wait_for))
|
||||
)
|
||||
|
||||
if not can_not_be_done_headless:
|
||||
html = sanitize_input_encode(self.driver.page_source)
|
||||
self.driver = self.execute_hook("before_return_html", self.driver, html)
|
||||
|
||||
# Store in cache
|
||||
cache_file_path = os.path.join(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()),
|
||||
".crawl4ai",
|
||||
"cache",
|
||||
url_hash,
|
||||
)
|
||||
with open(cache_file_path, "w", encoding="utf-8") as f:
|
||||
f.write(html)
|
||||
|
||||
if self.verbose:
|
||||
print(f"[LOG] ✅ Crawled {url} successfully!")
|
||||
|
||||
return html
|
||||
except InvalidArgumentException as e:
|
||||
if not hasattr(e, "msg"):
|
||||
e.msg = sanitize_input_encode(str(e))
|
||||
raise InvalidArgumentException(f"Failed to crawl {url}: {e.msg}")
|
||||
except WebDriverException as e:
|
||||
# If e does not have a msg attribute, create it and set it to str(e)
|
||||
if not hasattr(e, "msg"):
|
||||
e.msg = sanitize_input_encode(str(e))
|
||||
raise WebDriverException(f"Failed to crawl {url}: {e.msg}")
|
||||
except Exception as e:
|
||||
if not hasattr(e, "msg"):
|
||||
e.msg = sanitize_input_encode(str(e))
|
||||
raise Exception(f"Failed to crawl {url}: {e.msg}")
|
||||
|
||||
def take_screenshot(self) -> str:
|
||||
try:
|
||||
# Get the dimensions of the page
|
||||
total_width = self.driver.execute_script("return document.body.scrollWidth")
|
||||
total_height = self.driver.execute_script(
|
||||
"return document.body.scrollHeight"
|
||||
)
|
||||
|
||||
# Set the window size to the dimensions of the page
|
||||
self.driver.set_window_size(total_width, total_height)
|
||||
|
||||
# Take screenshot
|
||||
screenshot = self.driver.get_screenshot_as_png()
|
||||
|
||||
# Open the screenshot with PIL
|
||||
image = Image.open(BytesIO(screenshot))
|
||||
|
||||
# Convert image to RGB mode (this will handle both RGB and RGBA images)
|
||||
rgb_image = image.convert("RGB")
|
||||
|
||||
# Convert to JPEG and compress
|
||||
buffered = BytesIO()
|
||||
rgb_image.save(buffered, format="JPEG", quality=85)
|
||||
img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||
|
||||
if self.verbose:
|
||||
print("[LOG] 📸 Screenshot taken and converted to base64")
|
||||
|
||||
return img_base64
|
||||
except Exception as e:
|
||||
error_message = sanitize_input_encode(
|
||||
f"Failed to take screenshot: {str(e)}"
|
||||
)
|
||||
print(error_message)
|
||||
|
||||
# Generate an image with black background
|
||||
img = Image.new("RGB", (800, 600), color="black")
|
||||
draw = ImageDraw.Draw(img)
|
||||
|
||||
# Load a font
|
||||
try:
|
||||
font = ImageFont.truetype("arial.ttf", 40)
|
||||
except IOError:
|
||||
font = ImageFont.load_default()
|
||||
|
||||
# Define text color and wrap the text
|
||||
text_color = (255, 255, 255)
|
||||
max_width = 780
|
||||
wrapped_text = wrap_text(draw, error_message, font, max_width)
|
||||
|
||||
# Calculate text position
|
||||
text_position = (10, 10)
|
||||
|
||||
# Draw the text on the image
|
||||
draw.text(text_position, wrapped_text, fill=text_color, font=font)
|
||||
|
||||
# Convert to base64
|
||||
buffered = BytesIO()
|
||||
img.save(buffered, format="JPEG")
|
||||
img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
|
||||
|
||||
return img_base64
|
||||
|
||||
def quit(self):
|
||||
self.driver.quit()
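# Illustrative usage sketch (import path assumed; adjust to where this class lives):
#
#   from crawl4ai.legacy.crawler_strategy import LocalSeleniumCrawlerStrategy
#
#   strategy = LocalSeleniumCrawlerStrategy(verbose=True)
#   html = strategy.crawl("https://example.com")
#   screenshot_b64 = strategy.take_screenshot()
#   strategy.quit()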
|
||||
180
crawl4ai/legacy/database.py
Normal file
@@ -0,0 +1,180 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
import sqlite3
|
||||
from typing import Optional, Tuple
|
||||
|
||||
DB_PATH = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai")
|
||||
os.makedirs(DB_PATH, exist_ok=True)
|
||||
DB_PATH = os.path.join(DB_PATH, "crawl4ai.db")
|
||||
|
||||
|
||||
def init_db():
|
||||
global DB_PATH
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS crawled_data (
|
||||
url TEXT PRIMARY KEY,
|
||||
html TEXT,
|
||||
cleaned_html TEXT,
|
||||
markdown TEXT,
|
||||
extracted_content TEXT,
|
||||
success BOOLEAN,
|
||||
media TEXT DEFAULT "{}",
|
||||
links TEXT DEFAULT "{}",
|
||||
metadata TEXT DEFAULT "{}",
|
||||
screenshot TEXT DEFAULT ""
|
||||
)
|
||||
"""
|
||||
)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
|
||||
def alter_db_add_screenshot(new_column: str = "media"):
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(
|
||||
f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""'
|
||||
)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
print(f"Error altering database to add screenshot column: {e}")
|
||||
|
||||
|
||||
def check_db_path():
|
||||
if not DB_PATH:
|
||||
raise ValueError("Database path is not set or is empty.")
|
||||
|
||||
|
||||
def get_cached_url(
|
||||
url: str,
|
||||
) -> Optional[Tuple[str, str, str, str, str, str, str, bool, str]]:
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(
|
||||
"SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot FROM crawled_data WHERE url = ?",
|
||||
(url,),
|
||||
)
|
||||
result = cursor.fetchone()
|
||||
conn.close()
|
||||
return result
|
||||
except Exception as e:
|
||||
print(f"Error retrieving cached URL: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def cache_url(
|
||||
url: str,
|
||||
html: str,
|
||||
cleaned_html: str,
|
||||
markdown: str,
|
||||
extracted_content: str,
|
||||
success: bool,
|
||||
media: str = "{}",
|
||||
links: str = "{}",
|
||||
metadata: str = "{}",
|
||||
screenshot: str = "",
|
||||
):
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT INTO crawled_data (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(url) DO UPDATE SET
|
||||
html = excluded.html,
|
||||
cleaned_html = excluded.cleaned_html,
|
||||
markdown = excluded.markdown,
|
||||
extracted_content = excluded.extracted_content,
|
||||
success = excluded.success,
|
||||
media = excluded.media,
|
||||
links = excluded.links,
|
||||
metadata = excluded.metadata,
|
||||
screenshot = excluded.screenshot
|
||||
""",
|
||||
(
|
||||
url,
|
||||
html,
|
||||
cleaned_html,
|
||||
markdown,
|
||||
extracted_content,
|
||||
success,
|
||||
media,
|
||||
links,
|
||||
metadata,
|
||||
screenshot,
|
||||
),
|
||||
)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
print(f"Error caching URL: {e}")
|
||||
|
||||
|
||||
def get_total_count() -> int:
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("SELECT COUNT(*) FROM crawled_data")
|
||||
result = cursor.fetchone()
|
||||
conn.close()
|
||||
return result[0]
|
||||
except Exception as e:
|
||||
print(f"Error getting total count: {e}")
|
||||
return 0
|
||||
|
||||
|
||||
def clear_db():
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("DELETE FROM crawled_data")
|
||||
conn.commit()
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
print(f"Error clearing database: {e}")
|
||||
|
||||
|
||||
def flush_db():
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("DROP TABLE crawled_data")
|
||||
conn.commit()
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
print(f"Error flushing database: {e}")
|
||||
|
||||
|
||||
def update_existing_records(new_column: str = "media", default_value: str = "{}"):
|
||||
check_db_path()
|
||||
try:
|
||||
conn = sqlite3.connect(DB_PATH)
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(
|
||||
f'UPDATE crawled_data SET {new_column} = "{default_value}" WHERE screenshot IS NULL'
|
||||
)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
print(f"Error updating existing records: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Delete the existing database file
|
||||
if os.path.exists(DB_PATH):
|
||||
os.remove(DB_PATH)
|
||||
init_db()
|
||||
# alter_db_add_screenshot("COL_NAME")
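    # Round-trip sketch for the helpers above (illustrative values):
    #
    #   cache_url(
    #       "https://example.com", html="<html>...</html>", cleaned_html="...",
    #       markdown="# Example", extracted_content="[]", success=True,
    #   )
    #   row = get_cached_url("https://example.com")
    #   if row:
    #       url, html, cleaned, md, extracted, ok, media, links, meta, shot = row
    #   print(get_total_count())  # -> 1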
|
||||
75
crawl4ai/legacy/docs_manager.py
Normal file
@@ -0,0 +1,75 @@
|
||||
import requests
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from crawl4ai.async_logger import AsyncLogger
|
||||
from crawl4ai.llmtxt import AsyncLLMTextManager
|
||||
|
||||
|
||||
class DocsManager:
|
||||
def __init__(self, logger=None):
|
||||
self.docs_dir = Path.home() / ".crawl4ai" / "docs"
|
||||
self.local_docs = Path(__file__).parent.parent / "docs" / "llm.txt"
|
||||
self.docs_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.logger = logger or AsyncLogger(verbose=True)
|
||||
self.llm_text = AsyncLLMTextManager(self.docs_dir, self.logger)
|
||||
|
||||
async def ensure_docs_exist(self):
|
||||
"""Fetch docs if not present"""
|
||||
if not any(self.docs_dir.iterdir()):
|
||||
await self.fetch_docs()
|
||||
|
||||
async def fetch_docs(self) -> bool:
|
||||
"""Copy from local docs or download from GitHub"""
|
||||
try:
|
||||
# Try local first
|
||||
if self.local_docs.exists() and (
|
||||
any(self.local_docs.glob("*.md"))
|
||||
or any(self.local_docs.glob("*.tokens"))
|
||||
):
|
||||
# Empty the local docs directory
|
||||
for file_path in self.docs_dir.glob("*.md"):
|
||||
file_path.unlink()
|
||||
# for file_path in self.docs_dir.glob("*.tokens"):
|
||||
# file_path.unlink()
|
||||
for file_path in self.local_docs.glob("*.md"):
|
||||
shutil.copy2(file_path, self.docs_dir / file_path.name)
|
||||
# for file_path in self.local_docs.glob("*.tokens"):
|
||||
# shutil.copy2(file_path, self.docs_dir / file_path.name)
|
||||
return True
|
||||
|
||||
# Fallback to GitHub
|
||||
response = requests.get(
|
||||
"https://api.github.com/repos/unclecode/crawl4ai/contents/docs/llm.txt",
|
||||
headers={"Accept": "application/vnd.github.v3+json"},
|
||||
)
|
||||
response.raise_for_status()
|
||||
|
||||
for item in response.json():
|
||||
if item["type"] == "file" and item["name"].endswith(".md"):
|
||||
content = requests.get(item["download_url"]).text
|
||||
with open(self.docs_dir / item["name"], "w", encoding="utf-8") as f:
|
||||
f.write(content)
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Failed to fetch docs: {str(e)}")
|
||||
raise
|
||||
|
||||
def list(self) -> list[str]:
|
||||
"""List available topics"""
|
||||
names = [file_path.stem for file_path in self.docs_dir.glob("*.md")]
|
||||
# Remove [0-9]+_ prefix
|
||||
names = [name.split("_", 1)[1] if name[0].isdigit() else name for name in names]
|
||||
# Exclude those that end with .xs.md and .q.md
|
||||
names = [
|
||||
name
|
||||
for name in names
|
||||
if not name.endswith(".xs") and not name.endswith(".q")
|
||||
]
|
||||
return names
|
||||
|
||||
def generate(self, sections, mode="extended"):
|
||||
return self.llm_text.generate(sections, mode)
|
||||
|
||||
def search(self, query: str, top_k: int = 5):
|
||||
return self.llm_text.search(query, top_k)
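# Typical flow (illustrative; fetching is async, searching assumes a built index):
#
#   import asyncio
#
#   dm = DocsManager()
#   asyncio.run(dm.ensure_docs_exist())
#   print(dm.list())                     # available topic names
#   print(dm.search("how do I set a proxy?", top_k=3))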
|
||||
546
crawl4ai/legacy/llmtxt.py
Normal file
@@ -0,0 +1,546 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
import re
|
||||
from typing import Dict, List, Tuple, Optional, Any
|
||||
import json
|
||||
from tqdm import tqdm
|
||||
import time
|
||||
import psutil
|
||||
import numpy as np
|
||||
from rank_bm25 import BM25Okapi
|
||||
from nltk.tokenize import word_tokenize
|
||||
from nltk.corpus import stopwords
|
||||
from nltk.stem import WordNetLemmatizer
|
||||
from litellm import batch_completion
|
||||
from .async_logger import AsyncLogger
|
||||
import litellm
|
||||
import pickle
|
||||
import hashlib # <--- ADDED for file-hash
|
||||
import glob
|
||||
|
||||
litellm.set_verbose = False
|
||||
|
||||
|
||||
def _compute_file_hash(file_path: Path) -> str:
|
||||
"""Compute MD5 hash for the file's entire content."""
|
||||
hash_md5 = hashlib.md5()
|
||||
with file_path.open("rb") as f:
|
||||
for chunk in iter(lambda: f.read(4096), b""):
|
||||
hash_md5.update(chunk)
|
||||
return hash_md5.hexdigest()
|
||||
|
||||
|
||||
class AsyncLLMTextManager:
|
||||
def __init__(
|
||||
self,
|
||||
docs_dir: Path,
|
||||
logger: Optional[AsyncLogger] = None,
|
||||
max_concurrent_calls: int = 5,
|
||||
batch_size: int = 3,
|
||||
) -> None:
|
||||
self.docs_dir = docs_dir
|
||||
self.logger = logger
|
||||
self.max_concurrent_calls = max_concurrent_calls
|
||||
self.batch_size = batch_size
|
||||
self.bm25_index = None
|
||||
self.document_map: Dict[str, Any] = {}
|
||||
self.tokenized_facts: List[str] = []
|
||||
self.bm25_index_file = self.docs_dir / "bm25_index.pkl"
|
||||
|
||||
async def _process_document_batch(self, doc_batch: List[Path]) -> None:
|
||||
"""Process a batch of documents in parallel"""
|
||||
contents = []
|
||||
for file_path in doc_batch:
|
||||
try:
|
||||
with open(file_path, "r", encoding="utf-8") as f:
|
||||
contents.append(f.read())
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error reading {file_path}: {str(e)}")
|
||||
contents.append("") # Add empty content to maintain batch alignment
|
||||
|
||||
prompt = """Given a documentation file, generate a list of atomic facts where each fact:
|
||||
1. Represents a single piece of knowledge
|
||||
2. Contains variations in terminology for the same concept
|
||||
3. References relevant code patterns if they exist
|
||||
4. Is written in a way that would match natural language queries
|
||||
|
||||
Each fact should follow this format:
|
||||
<main_concept>: <fact_statement> | <related_terms> | <code_reference>
|
||||
|
||||
Example Facts:
|
||||
browser_config: Configure headless mode and browser type for AsyncWebCrawler | headless, browser_type, chromium, firefox | BrowserConfig(browser_type="chromium", headless=True)
|
||||
redis_connection: Redis client connection requires host and port configuration | redis setup, redis client, connection params | Redis(host='localhost', port=6379, db=0)
|
||||
pandas_filtering: Filter DataFrame rows using boolean conditions | dataframe filter, query, boolean indexing | df[df['column'] > 5]
|
||||
|
||||
Wrap your response in <index>...</index> tags.
|
||||
"""
|
||||
|
||||
# Prepare messages for batch processing
|
||||
messages_list = [
|
||||
[
|
||||
{
|
||||
"role": "user",
|
||||
"content": f"{prompt}\n\nGenerate index for this documentation:\n\n{content}",
|
||||
}
|
||||
]
|
||||
for content in contents
|
||||
if content
|
||||
]
|
||||
|
||||
try:
|
||||
responses = batch_completion(
|
||||
model="anthropic/claude-3-5-sonnet-latest",
|
||||
messages=messages_list,
|
||||
logger_fn=None,
|
||||
)
|
||||
|
||||
# Process responses and save index files
|
||||
for response, file_path in zip(responses, doc_batch):
|
||||
try:
|
||||
index_content_match = re.search(
|
||||
r"<index>(.*?)</index>",
|
||||
response.choices[0].message.content,
|
||||
re.DOTALL,
|
||||
)
|
||||
if not index_content_match:
|
||||
self.logger.warning(
|
||||
f"No <index>...</index> content found for {file_path}"
|
||||
)
|
||||
continue
|
||||
|
||||
index_content = re.sub(
|
||||
r"\n\s*\n", "\n", index_content_match.group(1)
|
||||
).strip()
|
||||
if index_content:
|
||||
index_file = file_path.with_suffix(".q.md")
|
||||
with open(index_file, "w", encoding="utf-8") as f:
|
||||
f.write(index_content)
|
||||
self.logger.info(f"Created index file: {index_file}")
|
||||
else:
|
||||
self.logger.warning(
|
||||
f"No index content found in response for {file_path}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(
|
||||
f"Error processing response for {file_path}: {str(e)}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error in batch completion: {str(e)}")
|
||||
|
||||
def _validate_fact_line(self, line: str) -> Tuple[bool, Optional[str]]:
|
||||
if "|" not in line:
|
||||
return False, "Missing separator '|'"
|
||||
|
||||
parts = [p.strip() for p in line.split("|")]
|
||||
if len(parts) != 3:
|
||||
return False, f"Expected 3 parts, got {len(parts)}"
|
||||
|
||||
concept_part = parts[0]
|
||||
if ":" not in concept_part:
|
||||
return False, "Missing ':' in concept definition"
|
||||
|
||||
return True, None
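    # For reference, a line that passes the checks above:
    #
    #   self._validate_fact_line(
    #       'browser_config: Configure headless mode | headless, chromium | BrowserConfig(headless=True)'
    #   )  # -> (True, None)
    #   self._validate_fact_line("no separators here")  # -> (False, "Missing separator '|'")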
|
||||
|
||||
def _load_or_create_token_cache(self, fact_file: Path) -> Dict:
|
||||
"""
|
||||
Load token cache from .q.tokens if present and matching file hash.
|
||||
Otherwise return a new structure with updated file-hash.
|
||||
"""
|
||||
cache_file = fact_file.with_suffix(".q.tokens")
|
||||
current_hash = _compute_file_hash(fact_file)
|
||||
|
||||
if cache_file.exists():
|
||||
try:
|
||||
with open(cache_file, "r") as f:
|
||||
cache = json.load(f)
|
||||
# If the hash matches, return it directly
|
||||
if cache.get("content_hash") == current_hash:
|
||||
return cache
|
||||
# Otherwise, we signal that it's changed
|
||||
self.logger.info(f"Hash changed for {fact_file}, reindex needed.")
|
||||
except json.JSONDecodeError:
|
||||
self.logger.warning(f"Corrupt token cache for {fact_file}, rebuilding.")
|
||||
except Exception as e:
|
||||
self.logger.warning(f"Error reading cache for {fact_file}: {str(e)}")
|
||||
|
||||
# Return a fresh cache
|
||||
return {"facts": {}, "content_hash": current_hash}
|
||||
|
||||
def _save_token_cache(self, fact_file: Path, cache: Dict) -> None:
|
||||
cache_file = fact_file.with_suffix(".q.tokens")
|
||||
# Always ensure we're saving the correct file-hash
|
||||
cache["content_hash"] = _compute_file_hash(fact_file)
|
||||
with open(cache_file, "w") as f:
|
||||
json.dump(cache, f)
|
||||
|
||||
def preprocess_text(self, text: str) -> List[str]:
|
||||
parts = [x.strip() for x in text.split("|")] if "|" in text else [text]
|
||||
# Remove : after the first word of parts[0]
|
||||
parts[0] = re.sub(r"^(.*?):", r"\1", parts[0])
|
||||
|
||||
lemmatizer = WordNetLemmatizer()
|
||||
stop_words = set(stopwords.words("english")) - {
|
||||
"how",
|
||||
"what",
|
||||
"when",
|
||||
"where",
|
||||
"why",
|
||||
"which",
|
||||
}
|
||||
|
||||
tokens = []
|
||||
for part in parts:
|
||||
if "(" in part and ")" in part:
|
||||
code_tokens = re.findall(
|
||||
r'[\w_]+(?=\()|[\w_]+(?==[\'"]{1}[\w_]+[\'"]{1})', part
|
||||
)
|
||||
tokens.extend(code_tokens)
|
||||
|
||||
words = word_tokenize(part.lower())
|
||||
tokens.extend(
|
||||
[
|
||||
lemmatizer.lemmatize(token)
|
||||
for token in words
|
||||
if token not in stop_words
|
||||
]
|
||||
)
|
||||
|
||||
return tokens
|
||||
|
||||
def maybe_load_bm25_index(self, clear_cache=False) -> bool:
|
||||
"""
|
||||
Load existing BM25 index from disk, if present and clear_cache=False.
|
||||
"""
|
||||
if not clear_cache and os.path.exists(self.bm25_index_file):
|
||||
self.logger.info("Loading existing BM25 index from disk.")
|
||||
with open(self.bm25_index_file, "rb") as f:
|
||||
data = pickle.load(f)
|
||||
self.tokenized_facts = data["tokenized_facts"]
|
||||
self.bm25_index = data["bm25_index"]
|
||||
return True
|
||||
return False
|
||||
|
||||
def build_search_index(self, clear_cache=False) -> None:
|
||||
"""
|
||||
Checks for new or modified .q.md files by comparing file-hash.
|
||||
If none need reindexing and clear_cache is False, loads existing index if available.
|
||||
Otherwise, reindexes only changed/new files and merges or creates a new index.
|
||||
"""
|
||||
# If clear_cache is True, we skip partial logic: rebuild everything from scratch
|
||||
if clear_cache:
|
||||
self.logger.info("Clearing cache and rebuilding full search index.")
|
||||
if self.bm25_index_file.exists():
|
||||
self.bm25_index_file.unlink()
|
||||
|
||||
process = psutil.Process()
|
||||
self.logger.info("Checking which .q.md files need (re)indexing...")
|
||||
|
||||
# Gather all .q.md files
|
||||
q_files = [
|
||||
self.docs_dir / f for f in os.listdir(self.docs_dir) if f.endswith(".q.md")
|
||||
]
|
||||
|
||||
# We'll store known (unchanged) facts in these lists
|
||||
existing_facts: List[str] = []
|
||||
existing_tokens: List[List[str]] = []
|
||||
|
||||
# Keep track of invalid lines for logging
|
||||
invalid_lines = []
|
||||
needSet = [] # files that must be (re)indexed
|
||||
|
||||
for qf in q_files:
|
||||
token_cache_file = qf.with_suffix(".q.tokens")
|
||||
|
||||
# If no .q.tokens or clear_cache is True → definitely reindex
|
||||
if clear_cache or not token_cache_file.exists():
|
||||
needSet.append(qf)
|
||||
continue
|
||||
|
||||
# Otherwise, load the existing cache and compare hash
|
||||
cache = self._load_or_create_token_cache(qf)
|
||||
# If the .q.tokens was out of date (i.e. changed hash), we reindex
|
||||
if len(cache["facts"]) == 0 or cache.get(
|
||||
"content_hash"
|
||||
) != _compute_file_hash(qf):
|
||||
needSet.append(qf)
|
||||
else:
|
||||
# File is unchanged → retrieve cached token data
|
||||
for line, cache_data in cache["facts"].items():
|
||||
existing_facts.append(line)
|
||||
existing_tokens.append(cache_data["tokens"])
|
||||
self.document_map[line] = qf # track the doc for that fact
|
||||
|
||||
if not needSet and not clear_cache:
|
||||
# If no file needs reindexing, try loading existing index
|
||||
if self.maybe_load_bm25_index(clear_cache=False):
|
||||
self.logger.info(
|
||||
"No new/changed .q.md files found. Using existing BM25 index."
|
||||
)
|
||||
return
|
||||
else:
|
||||
# If there's no existing index, we must build a fresh index from the old caches
|
||||
self.logger.info(
|
||||
"No existing BM25 index found. Building from cached facts."
|
||||
)
|
||||
if existing_facts:
|
||||
self.logger.info(
|
||||
f"Building BM25 index with {len(existing_facts)} cached facts."
|
||||
)
|
||||
self.bm25_index = BM25Okapi(existing_tokens)
|
||||
self.tokenized_facts = existing_facts
|
||||
with open(self.bm25_index_file, "wb") as f:
|
||||
pickle.dump(
|
||||
{
|
||||
"bm25_index": self.bm25_index,
|
||||
"tokenized_facts": self.tokenized_facts,
|
||||
},
|
||||
f,
|
||||
)
|
||||
else:
|
||||
self.logger.warning("No facts found at all. Index remains empty.")
|
||||
return
|
||||
|
||||
# -----------------------------------------------------
|
||||
# If we reach here, we have new or changed .q.md files
|
||||
# We'll parse them, reindex them, and then combine with existing_facts
|
||||
# -----------------------------------------------------
|
||||
|
||||
self.logger.info(f"{len(needSet)} file(s) need reindexing. Parsing now...")
|
||||
|
||||
# 1) Parse the new or changed .q.md files
|
||||
new_facts = []
|
||||
new_tokens = []
|
||||
with tqdm(total=len(needSet), desc="Indexing changed files") as file_pbar:
|
||||
for file in needSet:
|
||||
# We'll build up a fresh cache
|
||||
fresh_cache = {"facts": {}, "content_hash": _compute_file_hash(file)}
|
||||
try:
|
||||
with open(file, "r", encoding="utf-8") as f_obj:
|
||||
content = f_obj.read().strip()
|
||||
lines = [l.strip() for l in content.split("\n") if l.strip()]
|
||||
|
||||
for line in lines:
|
||||
is_valid, error = self._validate_fact_line(line)
|
||||
if not is_valid:
|
||||
invalid_lines.append((file, line, error))
|
||||
continue
|
||||
|
||||
tokens = self.preprocess_text(line)
|
||||
fresh_cache["facts"][line] = {
|
||||
"tokens": tokens,
|
||||
"added": time.time(),
|
||||
}
|
||||
new_facts.append(line)
|
||||
new_tokens.append(tokens)
|
||||
self.document_map[line] = file
|
||||
|
||||
# Save the new .q.tokens with updated hash
|
||||
self._save_token_cache(file, fresh_cache)
|
||||
|
||||
mem_usage = process.memory_info().rss / 1024 / 1024
|
||||
self.logger.debug(
|
||||
f"Memory usage after {file.name}: {mem_usage:.2f}MB"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error processing {file}: {str(e)}")
|
||||
|
||||
file_pbar.update(1)
|
||||
|
||||
if invalid_lines:
|
||||
self.logger.warning(f"Found {len(invalid_lines)} invalid fact lines:")
|
||||
for file, line, error in invalid_lines:
|
||||
self.logger.warning(f"{file}: {error} in line: {line[:50]}...")
|
||||
|
||||
# 2) Merge newly tokenized facts with the existing ones
|
||||
all_facts = existing_facts + new_facts
|
||||
all_tokens = existing_tokens + new_tokens
|
||||
|
||||
# 3) Build BM25 index from combined facts
|
||||
self.logger.info(
|
||||
f"Building BM25 index with {len(all_facts)} total facts (old + new)."
|
||||
)
|
||||
self.bm25_index = BM25Okapi(all_tokens)
|
||||
self.tokenized_facts = all_facts
|
||||
|
||||
# 4) Save the updated BM25 index to disk
|
||||
with open(self.bm25_index_file, "wb") as f:
|
||||
pickle.dump(
|
||||
{
|
||||
"bm25_index": self.bm25_index,
|
||||
"tokenized_facts": self.tokenized_facts,
|
||||
},
|
||||
f,
|
||||
)
|
||||
|
||||
final_mem = process.memory_info().rss / 1024 / 1024
|
||||
self.logger.info(f"Search index updated. Final memory usage: {final_mem:.2f}MB")
|
||||
|
||||
async def generate_index_files(
|
||||
self, force_generate_facts: bool = False, clear_bm25_cache: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
Generate index files for all documents in parallel batches
|
||||
|
||||
Args:
|
||||
force_generate_facts (bool): If True, regenerate indexes even if they exist
|
||||
clear_bm25_cache (bool): If True, clear existing BM25 index cache
|
||||
"""
|
||||
self.logger.info("Starting index generation for documentation files.")
|
||||
|
||||
md_files = [
|
||||
self.docs_dir / f
|
||||
for f in os.listdir(self.docs_dir)
|
||||
if f.endswith(".md") and not any(f.endswith(x) for x in [".q.md", ".xs.md"])
|
||||
]
|
||||
|
||||
# Filter out files that already have .q files unless force=True
|
||||
if not force_generate_facts:
|
||||
md_files = [
|
||||
f
|
||||
for f in md_files
|
||||
if not (self.docs_dir / f.name.replace(".md", ".q.md")).exists()
|
||||
]
|
||||
|
||||
if not md_files:
|
||||
self.logger.info("All index files exist. Use force=True to regenerate.")
|
||||
else:
|
||||
# Process documents in batches
|
||||
for i in range(0, len(md_files), self.batch_size):
|
||||
batch = md_files[i : i + self.batch_size]
|
||||
self.logger.info(
|
||||
f"Processing batch {i//self.batch_size + 1}/{(len(md_files)//self.batch_size) + 1}"
|
||||
)
|
||||
await self._process_document_batch(batch)
|
||||
|
||||
self.logger.info("Index generation complete, building/updating search index.")
|
||||
self.build_search_index(clear_cache=clear_bm25_cache)
|
||||
|
||||
def generate(self, sections: List[str], mode: str = "extended") -> str:
|
||||
# Get all markdown files
|
||||
all_files = glob.glob(str(self.docs_dir / "[0-9]*.md")) + glob.glob(
|
||||
str(self.docs_dir / "[0-9]*.xs.md")
|
||||
)
|
||||
|
||||
# Extract base names without extensions
|
||||
base_docs = {
|
||||
Path(f).name.split(".")[0]
|
||||
for f in all_files
|
||||
if not Path(f).name.endswith(".q.md")
|
||||
}
|
||||
|
||||
# Filter by sections if provided
|
||||
if sections:
|
||||
base_docs = {
|
||||
doc
|
||||
for doc in base_docs
|
||||
if any(section.lower() in doc.lower() for section in sections)
|
||||
}
|
||||
|
||||
# Get file paths based on mode
|
||||
files = []
|
||||
for doc in sorted(
|
||||
base_docs,
|
||||
key=lambda x: int(x.split("_")[0]) if x.split("_")[0].isdigit() else 999999,
|
||||
):
|
||||
if mode == "condensed":
|
||||
xs_file = self.docs_dir / f"{doc}.xs.md"
|
||||
regular_file = self.docs_dir / f"{doc}.md"
|
||||
files.append(str(xs_file if xs_file.exists() else regular_file))
|
||||
else:
|
||||
files.append(str(self.docs_dir / f"{doc}.md"))
|
||||
|
||||
# Read and format content
|
||||
content = []
|
||||
for file in files:
|
||||
try:
|
||||
with open(file, "r", encoding="utf-8") as f:
|
||||
fname = Path(file).name
|
||||
content.append(f"{'#'*20}\n# {fname}\n{'#'*20}\n\n{f.read()}")
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error reading {file}: {str(e)}")
|
||||
|
||||
return "\n\n---\n\n".join(content) if content else ""
|
||||
|
||||
def search(self, query: str, top_k: int = 5) -> str:
|
||||
if not self.bm25_index:
|
||||
return "No search index available. Call build_search_index() first."
|
||||
|
||||
query_tokens = self.preprocess_text(query)
|
||||
doc_scores = self.bm25_index.get_scores(query_tokens)
|
||||
|
||||
mean_score = np.mean(doc_scores)
|
||||
std_score = np.std(doc_scores)
|
||||
score_threshold = mean_score + (0.25 * std_score)
|
||||
|
||||
file_data = self._aggregate_search_scores(
|
||||
doc_scores=doc_scores,
|
||||
score_threshold=score_threshold,
|
||||
query_tokens=query_tokens,
|
||||
)
|
||||
|
||||
ranked_files = sorted(
|
||||
file_data.items(),
|
||||
key=lambda x: (
|
||||
x[1]["code_match_score"] * 2.0
|
||||
+ x[1]["match_count"] * 1.5
|
||||
+ x[1]["total_score"]
|
||||
),
|
||||
reverse=True,
|
||||
)[:top_k]
|
||||
|
||||
results = []
|
||||
for file, _ in ranked_files:
|
||||
main_doc = str(file).replace(".q.md", ".md")
|
||||
if os.path.exists(self.docs_dir / main_doc):
|
||||
with open(self.docs_dir / main_doc, "r", encoding="utf-8") as f:
|
||||
only_file_name = main_doc.split("/")[-1]
|
||||
content = ["#" * 20, f"# {only_file_name}", "#" * 20, "", f.read()]
|
||||
results.append("\n".join(content))
|
||||
|
||||
return "\n\n---\n\n".join(results)
|
||||
|
||||
def _aggregate_search_scores(
|
||||
self, doc_scores: List[float], score_threshold: float, query_tokens: List[str]
|
||||
) -> Dict:
|
||||
file_data = {}
|
||||
|
||||
for idx, score in enumerate(doc_scores):
|
||||
if score <= score_threshold:
|
||||
continue
|
||||
|
||||
fact = self.tokenized_facts[idx]
|
||||
file_path = self.document_map[fact]
|
||||
|
||||
if file_path not in file_data:
|
||||
file_data[file_path] = {
|
||||
"total_score": 0,
|
||||
"match_count": 0,
|
||||
"code_match_score": 0,
|
||||
"matched_facts": [],
|
||||
}
|
||||
|
||||
components = fact.split("|") if "|" in fact else [fact]
|
||||
|
||||
code_match_score = 0
|
||||
if len(components) == 3:
|
||||
code_ref = components[2].strip()
|
||||
code_tokens = self.preprocess_text(code_ref)
|
||||
code_match_score = len(set(query_tokens) & set(code_tokens)) / len(
|
||||
query_tokens
|
||||
)
|
||||
|
||||
file_data[file_path]["total_score"] += score
|
||||
file_data[file_path]["match_count"] += 1
|
||||
file_data[file_path]["code_match_score"] = max(
|
||||
file_data[file_path]["code_match_score"], code_match_score
|
||||
)
|
||||
file_data[file_path]["matched_facts"].append(fact)
|
||||
|
||||
return file_data
|
||||
|
||||
def refresh_index(self) -> None:
|
||||
"""Convenience method for a full rebuild."""
|
||||
self.build_search_index(clear_cache=True)
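# End-to-end sketch (illustrative; generate_index_files is async and calls an LLM):
#
#   import asyncio
#   from pathlib import Path
#
#   manager = AsyncLLMTextManager(Path.home() / ".crawl4ai" / "docs", AsyncLogger())
#   asyncio.run(manager.generate_index_files())  # writes *.q.md fact files, builds BM25
#   print(manager.search("how to configure a proxy", top_k=3))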
|
||||
29
crawl4ai/legacy/version_manager.py
Normal file
@@ -0,0 +1,29 @@
|
||||
# version_manager.py
|
||||
from pathlib import Path
|
||||
from packaging import version
|
||||
from . import __version__
|
||||
|
||||
|
||||
class VersionManager:
|
||||
def __init__(self):
|
||||
self.home_dir = Path.home() / ".crawl4ai"
|
||||
self.version_file = self.home_dir / "version.txt"
|
||||
|
||||
def get_installed_version(self):
|
||||
"""Get the version recorded in home directory"""
|
||||
if not self.version_file.exists():
|
||||
return None
|
||||
try:
|
||||
return version.parse(self.version_file.read_text().strip())
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def update_version(self):
|
||||
"""Update the version file to current library version"""
|
||||
self.version_file.write_text(__version__.__version__)
|
||||
|
||||
def needs_update(self):
|
||||
"""Check if database needs update based on version"""
|
||||
installed = self.get_installed_version()
|
||||
current = version.parse(__version__.__version__)
|
||||
return installed is None or installed < current
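# Usage sketch: gate one-time migrations on a version bump.
#
#   vm = VersionManager()
#   if vm.needs_update():
#       ...  # e.g. run crawl4ai.migrations before continuing
#       vm.update_version()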
|
||||
294
crawl4ai/legacy/web_crawler.py
Normal file
@@ -0,0 +1,294 @@
|
||||
import os, time
|
||||
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
||||
from pathlib import Path
|
||||
|
||||
from .models import UrlModel, CrawlResult
|
||||
from .database import init_db, get_cached_url, cache_url
|
||||
from .utils import *
|
||||
from .chunking_strategy import *
|
||||
from .extraction_strategy import *
|
||||
from .crawler_strategy import *
|
||||
from typing import List
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from .content_scraping_strategy import WebScrapingStrategy
|
||||
from .config import *
|
||||
import warnings
|
||||
import json
|
||||
|
||||
warnings.filterwarnings(
|
||||
"ignore",
|
||||
message='Field "model_name" has conflict with protected namespace "model_".',
|
||||
)
|
||||
|
||||
|
||||
class WebCrawler:
|
||||
def __init__(
|
||||
self,
|
||||
crawler_strategy: CrawlerStrategy = None,
|
||||
always_by_pass_cache: bool = False,
|
||||
verbose: bool = False,
|
||||
):
|
||||
self.crawler_strategy = crawler_strategy or LocalSeleniumCrawlerStrategy(
|
||||
verbose=verbose
|
||||
)
|
||||
self.always_by_pass_cache = always_by_pass_cache
|
||||
self.crawl4ai_folder = os.path.join(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
|
||||
)
|
||||
os.makedirs(self.crawl4ai_folder, exist_ok=True)
|
||||
os.makedirs(f"{self.crawl4ai_folder}/cache", exist_ok=True)
|
||||
init_db()
|
||||
self.ready = False
|
||||
|
||||
def warmup(self):
|
||||
print("[LOG] 🌤️ Warming up the WebCrawler")
|
||||
self.run(
|
||||
url="https://google.com/",
|
||||
word_count_threshold=5,
|
||||
extraction_strategy=NoExtractionStrategy(),
|
||||
bypass_cache=False,
|
||||
verbose=False,
|
||||
)
|
||||
self.ready = True
|
||||
print("[LOG] 🌞 WebCrawler is ready to crawl")
|
||||
|
||||
def fetch_page(
|
||||
self,
|
||||
url_model: UrlModel,
|
||||
provider: str = DEFAULT_PROVIDER,
|
||||
api_token: str = None,
|
||||
extract_blocks_flag: bool = True,
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
use_cached_html: bool = False,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
return self.run(
|
||||
url_model.url,
|
||||
word_count_threshold,
|
||||
extraction_strategy or NoExtractionStrategy(),
|
||||
chunking_strategy,
|
||||
bypass_cache=url_model.forced,
|
||||
css_selector=css_selector,
|
||||
screenshot=screenshot,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
||||
def fetch_pages(
|
||||
self,
|
||||
url_models: List[UrlModel],
|
||||
provider: str = DEFAULT_PROVIDER,
|
||||
api_token: str = None,
|
||||
extract_blocks_flag: bool = True,
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
use_cached_html: bool = False,
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
**kwargs,
|
||||
) -> List[CrawlResult]:
|
||||
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
||||
|
||||
def fetch_page_wrapper(url_model, *args):
    # executor.map supplies only positional iterables; the final iterable
    # carries the shared kwargs dict for each call
    *positional, kw = args
    return self.fetch_page(url_model, *positional, **kw)
|
||||
|
||||
with ThreadPoolExecutor() as executor:
|
||||
results = list(
|
||||
executor.map(
|
||||
fetch_page_wrapper,
|
||||
url_models,
|
||||
[provider] * len(url_models),
|
||||
[api_token] * len(url_models),
|
||||
[extract_blocks_flag] * len(url_models),
|
||||
[word_count_threshold] * len(url_models),
|
||||
[css_selector] * len(url_models),
|
||||
[screenshot] * len(url_models),
|
||||
[use_cached_html] * len(url_models),
|
||||
[extraction_strategy] * len(url_models),
|
||||
[chunking_strategy] * len(url_models),
|
||||
[kwargs] * len(url_models),
|
||||
)
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
def run(
|
||||
self,
|
||||
url: str,
|
||||
word_count_threshold=MIN_WORD_THRESHOLD,
|
||||
extraction_strategy: ExtractionStrategy = None,
|
||||
chunking_strategy: ChunkingStrategy = RegexChunking(),
|
||||
bypass_cache: bool = False,
|
||||
css_selector: str = None,
|
||||
screenshot: bool = False,
|
||||
user_agent: str = None,
|
||||
verbose=True,
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
try:
|
||||
extraction_strategy = extraction_strategy or NoExtractionStrategy()
|
||||
extraction_strategy.verbose = verbose
|
||||
if not isinstance(extraction_strategy, ExtractionStrategy):
|
||||
raise ValueError("Unsupported extraction strategy")
|
||||
if not isinstance(chunking_strategy, ChunkingStrategy):
|
||||
raise ValueError("Unsupported chunking strategy")
|
||||
|
||||
word_count_threshold = max(word_count_threshold, MIN_WORD_THRESHOLD)
|
||||
|
||||
cached = None
html = None
|
||||
screenshot_data = None
|
||||
extracted_content = None
|
||||
if not bypass_cache and not self.always_by_pass_cache:
|
||||
cached = get_cached_url(url)
|
||||
|
||||
if kwargs.get("warmup", True) and not self.ready:
|
||||
return None
|
||||
|
||||
if cached:
|
||||
html = sanitize_input_encode(cached[1])
|
||||
extracted_content = sanitize_input_encode(cached[4])
|
||||
if screenshot:
|
||||
screenshot_data = cached[9]
|
||||
if not screenshot_data:
|
||||
cached = None
|
||||
|
||||
if not cached or not html:
|
||||
if user_agent:
|
||||
self.crawler_strategy.update_user_agent(user_agent)
|
||||
t1 = time.time()
|
||||
html = sanitize_input_encode(self.crawler_strategy.crawl(url, **kwargs))
|
||||
t2 = time.time()
|
||||
if verbose:
|
||||
print(
|
||||
f"[LOG] 🚀 Crawling done for {url}, success: {bool(html)}, time taken: {t2 - t1:.2f} seconds"
|
||||
)
|
||||
if screenshot:
|
||||
screenshot_data = self.crawler_strategy.take_screenshot()
|
||||
|
||||
crawl_result = self.process_html(
|
||||
url,
|
||||
html,
|
||||
extracted_content,
|
||||
word_count_threshold,
|
||||
extraction_strategy,
|
||||
chunking_strategy,
|
||||
css_selector,
|
||||
screenshot_data,
|
||||
verbose,
|
||||
bool(cached),
|
||||
**kwargs,
|
||||
)
|
||||
crawl_result.success = bool(html)
|
||||
return crawl_result
|
||||
except Exception as e:
|
||||
if not hasattr(e, "msg"):
|
||||
e.msg = str(e)
|
||||
print(f"[ERROR] 🚫 Failed to crawl {url}, error: {e.msg}")
|
||||
return CrawlResult(url=url, html="", success=False, error_message=e.msg)
|
||||
|
||||
def process_html(
|
||||
self,
|
||||
url: str,
|
||||
html: str,
|
||||
extracted_content: str,
|
||||
word_count_threshold: int,
|
||||
extraction_strategy: ExtractionStrategy,
|
||||
chunking_strategy: ChunkingStrategy,
|
||||
css_selector: str,
|
||||
screenshot: bool,
|
||||
verbose: bool,
|
||||
is_cached: bool,
|
||||
**kwargs,
|
||||
) -> CrawlResult:
|
||||
t = time.time()
|
||||
# Extract content from HTML
|
||||
try:
|
||||
t1 = time.time()
|
||||
scraping_strategy = WebScrapingStrategy()
|
||||
extra_params = {
|
||||
k: v
|
||||
for k, v in kwargs.items()
|
||||
if k not in ["only_text", "image_description_min_word_threshold"]
|
||||
}
|
||||
result = scraping_strategy.scrap(
|
||||
url,
|
||||
html,
|
||||
word_count_threshold=word_count_threshold,
|
||||
css_selector=css_selector,
|
||||
only_text=kwargs.get("only_text", False),
|
||||
image_description_min_word_threshold=kwargs.get(
|
||||
"image_description_min_word_threshold",
|
||||
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
|
||||
),
|
||||
**extra_params,
|
||||
)
|
||||
|
||||
# result = get_content_of_website_optimized(url, html, word_count_threshold, css_selector=css_selector, only_text=kwargs.get("only_text", False))
|
||||
if verbose:
|
||||
print(
|
||||
f"[LOG] 🚀 Content extracted for {url}, success: True, time taken: {time.time() - t1:.2f} seconds"
|
||||
)
|
||||
|
||||
if result is None:
|
||||
raise ValueError(f"Failed to extract content from the website: {url}")
|
||||
except InvalidCSSSelectorError as e:
|
||||
raise ValueError(str(e))
|
||||
|
||||
cleaned_html = sanitize_input_encode(result.get("cleaned_html", ""))
|
||||
markdown = sanitize_input_encode(result.get("markdown", ""))
|
||||
media = result.get("media", [])
|
||||
links = result.get("links", [])
|
||||
metadata = result.get("metadata", {})
|
||||
|
||||
if extracted_content is None:
|
||||
if verbose:
|
||||
print(
|
||||
f"[LOG] 🔥 Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}"
|
||||
)
|
||||
|
||||
sections = chunking_strategy.chunk(markdown)
|
||||
extracted_content = extraction_strategy.run(url, sections)
|
||||
extracted_content = json.dumps(
|
||||
extracted_content, indent=4, default=str, ensure_ascii=False
|
||||
)
|
||||
|
||||
if verbose:
|
||||
print(
|
||||
f"[LOG] 🚀 Extraction done for {url}, time taken: {time.time() - t:.2f} seconds."
|
||||
)
|
||||
|
||||
screenshot = None if not screenshot else screenshot
|
||||
|
||||
if not is_cached:
|
||||
cache_url(
|
||||
url,
|
||||
html,
|
||||
cleaned_html,
|
||||
markdown,
|
||||
extracted_content,
|
||||
True,
|
||||
json.dumps(media),
|
||||
json.dumps(links),
|
||||
json.dumps(metadata),
|
||||
screenshot=screenshot,
|
||||
)
|
||||
|
||||
return CrawlResult(
|
||||
url=url,
|
||||
html=html,
|
||||
cleaned_html=format_html(cleaned_html),
|
||||
markdown=markdown,
|
||||
media=media,
|
||||
links=links,
|
||||
metadata=metadata,
|
||||
screenshot=screenshot,
|
||||
extracted_content=extracted_content,
|
||||
success=True,
|
||||
error_message="",
|
||||
)
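# Minimal usage sketch for this legacy synchronous crawler:
#
#   crawler = WebCrawler(verbose=True)
#   crawler.warmup()
#   result = crawler.run(url="https://example.com", screenshot=True)
#   if result.success:
#       print(result.markdown[:200])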
|
||||
260
crawl4ai/markdown_generation_strategy.py
Normal file
@@ -0,0 +1,260 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Optional, Dict, Any, Tuple
|
||||
from .models import MarkdownGenerationResult
|
||||
from .html2text import CustomHTML2Text
|
||||
# from .types import RelevantContentFilter
|
||||
from .content_filter_strategy import RelevantContentFilter
|
||||
import re
|
||||
from urllib.parse import urljoin
|
||||
|
||||
# Pre-compile the regex pattern
|
||||
LINK_PATTERN = re.compile(r'!?\[([^\]]+)\]\(([^)]+?)(?:\s+"([^"]*)")?\)')
|
||||
|
||||
|
||||
def fast_urljoin(base: str, url: str) -> str:
|
||||
"""Fast URL joining for common cases."""
|
||||
if url.startswith(("http://", "https://", "mailto:", "//")):
|
||||
return url
|
||||
if url.startswith("/"):
|
||||
# Handle absolute paths
|
||||
if base.endswith("/"):
|
||||
return base[:-1] + url
|
||||
return base + url
|
||||
return urljoin(base, url)
|
||||
|
||||
|
||||
class MarkdownGenerationStrategy(ABC):
|
||||
"""Abstract base class for markdown generation strategies."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
verbose: bool = False,
|
||||
content_source: str = "cleaned_html",
|
||||
):
|
||||
self.content_filter = content_filter
|
||||
self.options = options or {}
|
||||
self.verbose = verbose
|
||||
self.content_source = content_source
|
||||
|
||||
@abstractmethod
|
||||
def generate_markdown(
|
||||
self,
|
||||
input_html: str,
|
||||
base_url: str = "",
|
||||
html2text_options: Optional[Dict[str, Any]] = None,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
citations: bool = True,
|
||||
**kwargs,
|
||||
) -> MarkdownGenerationResult:
|
||||
"""Generate markdown from the selected input HTML."""
|
||||
pass
|
||||
|
||||
|
||||
class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
|
||||
"""
|
||||
Default implementation of markdown generation strategy.
|
||||
|
||||
How it works:
|
||||
1. Generate raw markdown from cleaned HTML.
|
||||
2. Convert links to citations.
|
||||
3. Generate fit markdown if content filter is provided.
|
||||
4. Return MarkdownGenerationResult.
|
||||
|
||||
Args:
|
||||
content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
|
||||
options (Optional[Dict[str, Any]]): Additional options for markdown generation. Defaults to None.
|
||||
content_source (str): Source of content to generate markdown from. Options: "cleaned_html", "raw_html", "fit_html". Defaults to "cleaned_html".
|
||||
|
||||
Returns:
|
||||
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
content_source: str = "cleaned_html",
|
||||
):
|
||||
super().__init__(content_filter, options, verbose=False, content_source=content_source)
|
||||
|
||||
def convert_links_to_citations(
|
||||
self, markdown: str, base_url: str = ""
|
||||
) -> Tuple[str, str]:
|
||||
"""
|
||||
Convert links in markdown to citations.
|
||||
|
||||
How it works:
|
||||
1. Find all links in the markdown.
|
||||
2. Convert links to citations.
|
||||
3. Return converted markdown and references markdown.
|
||||
|
||||
Note:
|
||||
This function uses a regex pattern to find links in markdown.
|
||||
|
||||
Args:
|
||||
markdown (str): Markdown text.
|
||||
base_url (str): Base URL for URL joins.
|
||||
|
||||
Returns:
|
||||
Tuple[str, str]: Converted markdown and references markdown.
|
||||
"""
|
||||
link_map = {}
|
||||
url_cache = {} # Cache for URL joins
|
||||
parts = []
|
||||
last_end = 0
|
||||
counter = 1
|
||||
|
||||
for match in LINK_PATTERN.finditer(markdown):
|
||||
parts.append(markdown[last_end : match.start()])
|
||||
text, url, title = match.groups()
|
||||
|
||||
# Use cached URL if available, otherwise compute and cache
|
||||
if base_url and not url.startswith(("http://", "https://", "mailto:")):
|
||||
if url not in url_cache:
|
||||
url_cache[url] = fast_urljoin(base_url, url)
|
||||
url = url_cache[url]
|
||||
|
||||
if url not in link_map:
|
||||
desc = []
|
||||
if title:
|
||||
desc.append(title)
|
||||
if text and text != title:
|
||||
desc.append(text)
|
||||
link_map[url] = (counter, ": " + " - ".join(desc) if desc else "")
|
||||
counter += 1
|
||||
|
||||
num = link_map[url][0]
|
||||
parts.append(
|
||||
f"{text}⟨{num}⟩"
|
||||
if not match.group(0).startswith("!")
|
||||
else f"![{text}⟨{num}⟩]"
|
||||
)
|
||||
last_end = match.end()
|
||||
|
||||
parts.append(markdown[last_end:])
|
||||
converted_text = "".join(parts)
|
||||
|
||||
# Pre-build reference strings
|
||||
references = ["\n\n## References\n\n"]
|
||||
references.extend(
|
||||
f"⟨{num}⟩ {url}{desc}\n"
|
||||
for url, (num, desc) in sorted(link_map.items(), key=lambda x: x[1][0])
|
||||
)
|
||||
|
||||
return converted_text, "".join(references)
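    # Worked example of the transform above (illustrative input/output):
    #
    #   md = 'See [the docs](/docs "Crawl4AI Docs") and [home](https://example.com).'
    #   text, refs = self.convert_links_to_citations(md, "https://example.com")
    #   # text == 'See the docs⟨1⟩ and home⟨2⟩.'
    #   # refs lists '⟨1⟩ https://example.com/docs: Crawl4AI Docs - the docs'
    #   #        and '⟨2⟩ https://example.com: home' under a '## References' heading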
|
||||
|
||||
def generate_markdown(
|
||||
self,
|
||||
input_html: str,
|
||||
base_url: str = "",
|
||||
html2text_options: Optional[Dict[str, Any]] = None,
|
||||
options: Optional[Dict[str, Any]] = None,
|
||||
content_filter: Optional[RelevantContentFilter] = None,
|
||||
citations: bool = True,
|
||||
**kwargs,
|
||||
) -> MarkdownGenerationResult:
|
||||
"""
|
||||
Generate markdown with citations from the provided input HTML.
|
||||
|
||||
How it works:
|
||||
1. Generate raw markdown from the input HTML.
|
||||
2. Convert links to citations.
|
||||
3. Generate fit markdown if content filter is provided.
|
||||
4. Return MarkdownGenerationResult.
|
||||
|
||||
Args:
|
||||
input_html (str): The HTML content to process (selected based on content_source).
|
||||
base_url (str): Base URL for URL joins.
|
||||
html2text_options (Optional[Dict[str, Any]]): HTML2Text options.
|
||||
options (Optional[Dict[str, Any]]): Additional options for markdown generation.
|
||||
content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
|
||||
citations (bool): Whether to generate citations.
|
||||
|
||||
Returns:
|
||||
MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
|
||||
"""
|
||||
try:
|
||||
# Initialize HTML2Text with default options for better conversion
|
||||
h = CustomHTML2Text(baseurl=base_url)
|
||||
default_options = {
|
||||
"body_width": 0, # Disable text wrapping
|
||||
"ignore_emphasis": False,
|
||||
"ignore_links": False,
|
||||
"ignore_images": False,
|
||||
"protect_links": False,
|
||||
"single_line_break": True,
|
||||
"mark_code": True,
|
||||
"escape_snob": False,
|
||||
}
|
||||
|
||||
# Update with custom options if provided
|
||||
if html2text_options:
|
||||
default_options.update(html2text_options)
|
||||
elif options:
|
||||
default_options.update(options)
|
||||
elif self.options:
|
||||
default_options.update(self.options)
|
||||
|
||||
h.update_params(**default_options)
|
||||
|
||||
# Ensure we have valid input
|
||||
if not input_html:
|
||||
input_html = ""
|
||||
elif not isinstance(input_html, str):
|
||||
input_html = str(input_html)
|
||||
|
||||
# Generate raw markdown
|
||||
try:
|
||||
raw_markdown = h.handle(input_html)
|
||||
except Exception as e:
|
||||
raw_markdown = f"Error converting HTML to markdown: {str(e)}"
|
||||
|
||||
raw_markdown = raw_markdown.replace(" ```", "```")
|
||||
|
||||
# Convert links to citations
|
||||
markdown_with_citations: str = raw_markdown
|
||||
references_markdown: str = ""
|
||||
if citations:
|
||||
try:
|
||||
(
|
||||
markdown_with_citations,
|
||||
references_markdown,
|
||||
) = self.convert_links_to_citations(raw_markdown, base_url)
|
||||
except Exception as e:
|
||||
markdown_with_citations = raw_markdown
|
||||
references_markdown = f"Error generating citations: {str(e)}"
|
||||
|
||||
# Generate fit markdown if content filter is provided
|
||||
fit_markdown: Optional[str] = ""
|
||||
filtered_html: Optional[str] = ""
|
||||
if content_filter or self.content_filter:
|
||||
try:
|
||||
content_filter = content_filter or self.content_filter
|
||||
filtered_html = content_filter.filter_content(input_html)
|
||||
filtered_html = "\n".join(
|
||||
"<div>{}</div>".format(s) for s in filtered_html
|
||||
)
|
||||
fit_markdown = h.handle(filtered_html)
|
||||
except Exception as e:
|
||||
fit_markdown = f"Error generating fit markdown: {str(e)}"
|
||||
filtered_html = ""
|
||||
|
||||
return MarkdownGenerationResult(
|
||||
raw_markdown=raw_markdown or "",
|
||||
markdown_with_citations=markdown_with_citations or "",
|
||||
references_markdown=references_markdown or "",
|
||||
fit_markdown=fit_markdown or "",
|
||||
fit_html=filtered_html or "",
|
||||
)
|
||||
except Exception as e:
|
||||
# If anything fails, return empty strings with error message
|
||||
error_msg = f"Error in markdown generation: {str(e)}"
|
||||
return MarkdownGenerationResult(
|
||||
raw_markdown=error_msg,
|
||||
markdown_with_citations=error_msg,
|
||||
references_markdown="",
|
||||
fit_markdown="",
|
||||
fit_html="",
|
||||
)
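# Quick usage sketch for the default generator:
#
#   gen = DefaultMarkdownGenerator()
#   res = gen.generate_markdown(
#       "<h1>Title</h1><p>See <a href='/docs'>the docs</a>.</p>",
#       base_url="https://example.com",
#   )
#   print(res.raw_markdown)
#   print(res.markdown_with_citations)
#   print(res.references_markdown)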
|
||||
194
crawl4ai/migrations.py
Normal file
@@ -0,0 +1,194 @@
|
||||
import os
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
import aiosqlite
|
||||
from typing import Optional
|
||||
import xxhash
|
||||
import aiofiles
|
||||
import shutil
|
||||
from datetime import datetime
|
||||
from .async_logger import AsyncLogger, LogLevel
|
||||
|
||||
# Initialize logger
|
||||
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)
|
||||
|
||||
# logging.basicConfig(level=logging.INFO)
|
||||
# logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DatabaseMigration:
|
||||
def __init__(self, db_path: str):
|
||||
self.db_path = db_path
|
||||
self.content_paths = self._ensure_content_dirs(os.path.dirname(db_path))
|
||||
|
||||
def _ensure_content_dirs(self, base_path: str) -> dict:
|
||||
dirs = {
|
||||
"html": "html_content",
|
||||
"cleaned": "cleaned_html",
|
||||
"markdown": "markdown_content",
|
||||
"extracted": "extracted_content",
|
||||
"screenshots": "screenshots",
|
||||
}
|
||||
content_paths = {}
|
||||
for key, dirname in dirs.items():
|
||||
path = os.path.join(base_path, dirname)
|
||||
os.makedirs(path, exist_ok=True)
|
||||
content_paths[key] = path
|
||||
return content_paths
|
||||
|
||||
def _generate_content_hash(self, content: str) -> str:
|
||||
x = xxhash.xxh64()
|
||||
x.update(content.encode())
|
||||
content_hash = x.hexdigest()
|
||||
return content_hash
|
||||
# return hashlib.sha256(content.encode()).hexdigest()
|
||||
|
||||
async def _store_content(self, content: str, content_type: str) -> str:
|
||||
if not content:
|
||||
return ""
|
||||
|
||||
content_hash = self._generate_content_hash(content)
|
||||
file_path = os.path.join(self.content_paths[content_type], content_hash)
|
||||
|
||||
if not os.path.exists(file_path):
|
||||
async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
|
||||
await f.write(content)
|
||||
|
||||
return content_hash
|
||||
|
||||
async def migrate_database(self):
|
||||
"""Migrate existing database to file-based storage"""
|
||||
# logger.info("Starting database migration...")
|
||||
logger.info("Starting database migration...", tag="INIT")
|
||||
|
||||
try:
|
||||
async with aiosqlite.connect(self.db_path) as db:
|
||||
# Get all rows
|
||||
async with db.execute(
|
||||
"""SELECT url, html, cleaned_html, markdown,
|
||||
extracted_content, screenshot FROM crawled_data"""
|
||||
) as cursor:
|
||||
rows = await cursor.fetchall()
|
||||
|
||||
migrated_count = 0
|
||||
for row in rows:
|
||||
(
|
||||
url,
|
||||
html,
|
||||
cleaned_html,
|
||||
markdown,
|
||||
extracted_content,
|
||||
screenshot,
|
||||
) = row
|
||||
|
||||
# Store content in files and get hashes
|
||||
html_hash = await self._store_content(html, "html")
|
||||
cleaned_hash = await self._store_content(cleaned_html, "cleaned")
|
||||
markdown_hash = await self._store_content(markdown, "markdown")
|
||||
extracted_hash = await self._store_content(
|
||||
extracted_content, "extracted"
|
||||
)
|
||||
screenshot_hash = await self._store_content(
|
||||
screenshot, "screenshots"
|
||||
)
|
||||
|
||||
# Update database with hashes
|
||||
await db.execute(
|
||||
"""
|
||||
UPDATE crawled_data
|
||||
SET html = ?,
|
||||
cleaned_html = ?,
|
||||
markdown = ?,
|
||||
extracted_content = ?,
|
||||
screenshot = ?
|
||||
WHERE url = ?
|
||||
""",
|
||||
(
|
||||
html_hash,
|
||||
cleaned_hash,
|
||||
markdown_hash,
|
||||
extracted_hash,
|
||||
screenshot_hash,
|
||||
url,
|
||||
),
|
||||
)
|
||||
|
||||
migrated_count += 1
|
||||
if migrated_count % 100 == 0:
|
||||
logger.info(f"Migrated {migrated_count} records...", tag="INIT")
|
||||
|
||||
await db.commit()
|
||||
logger.success(
|
||||
f"Migration completed. {migrated_count} records processed.",
|
||||
tag="COMPLETE",
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
# logger.error(f"Migration failed: {e}")
|
||||
logger.error(
|
||||
message="Migration failed: {error}",
|
||||
tag="ERROR",
|
||||
params={"error": str(e)},
|
||||
)
|
||||
raise e
|
||||
|
||||
|
||||
async def backup_database(db_path: str) -> str:
|
||||
"""Create backup of existing database"""
|
||||
if not os.path.exists(db_path):
|
||||
logger.info("No existing database found. Skipping backup.", tag="INIT")
|
||||
return None
|
||||
|
||||
# Create backup with timestamp
|
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
backup_path = f"{db_path}.backup_{timestamp}"
|
||||
|
||||
try:
|
||||
# Wait for any potential write operations to finish
|
||||
await asyncio.sleep(1)
|
||||
|
||||
# Create backup
|
||||
shutil.copy2(db_path, backup_path)
|
||||
logger.info(f"Database backup created at: {backup_path}", tag="COMPLETE")
|
||||
return backup_path
|
||||
except Exception as e:
|
||||
# logger.error(f"Backup failed: {e}")
|
||||
logger.error(
|
||||
message="Migration failed: {error}", tag="ERROR", params={"error": str(e)}
|
||||
)
|
||||
raise e
|
||||
|
||||
|
||||
async def run_migration(db_path: Optional[str] = None):
|
||||
"""Run database migration"""
|
||||
if db_path is None:
|
||||
db_path = os.path.join(Path.home(), ".crawl4ai", "crawl4ai.db")
|
||||
|
||||
if not os.path.exists(db_path):
|
||||
logger.info("No existing database found. Skipping migration.", tag="INIT")
|
||||
return
|
||||
|
||||
# Create backup first
|
||||
backup_path = await backup_database(db_path)
|
||||
if not backup_path:
|
||||
return
|
||||
|
||||
migration = DatabaseMigration(db_path)
|
||||
await migration.migrate_database()
|
||||
|
||||
|
||||
def main():
|
||||
"""CLI entry point for migration"""
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Migrate Crawl4AI database to file-based storage"
|
||||
)
|
||||
parser.add_argument("--db-path", help="Custom database path")
|
||||
args = parser.parse_args()
|
||||
|
||||
asyncio.run(run_migration(args.db_path))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
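# Invocation sketch (module path assumed from this file's location in the package):
#
#   python -m crawl4ai.migrations
#   python -m crawl4ai.migrations --db-path /custom/path/crawl4ai.db
#
# A timestamped backup (crawl4ai.db.backup_YYYYmmdd_HHMMSS) is created before any
# rows are rewritten.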
|
||||
@@ -2,67 +2,157 @@ from functools import lru_cache
|
||||
from pathlib import Path
|
||||
import subprocess, os
|
||||
import shutil
|
||||
from crawl4ai.config import MODEL_REPO_BRANCH
|
||||
from .model_loader import *
|
||||
import argparse
|
||||
|
||||
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
|
||||
|
||||
def get_home_folder():
|
||||
home_folder = os.path.join(Path.home(), ".crawl4ai")
|
||||
os.makedirs(home_folder, exist_ok=True)
|
||||
os.makedirs(f"{home_folder}/cache", exist_ok=True)
|
||||
os.makedirs(f"{home_folder}/models", exist_ok=True)
|
||||
return home_folder
|
||||
|
||||
@lru_cache()
|
||||
def load_bert_base_uncased():
|
||||
from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel
|
||||
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', resume_download=None)
|
||||
model = BertModel.from_pretrained('bert-base-uncased', resume_download=None)
|
||||
return tokenizer, model
|
||||
|
||||
@lru_cache()
|
||||
def load_bge_small_en_v1_5():
|
||||
from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel
|
||||
tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-small-en-v1.5', resume_download=None)
|
||||
model = AutoModel.from_pretrained('BAAI/bge-small-en-v1.5', resume_download=None)
|
||||
model.eval()
|
||||
return tokenizer, model
|
||||
|
||||
@lru_cache()
|
||||
def load_text_classifier():
|
||||
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
||||
from transformers import pipeline
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news")
|
||||
model = AutoModelForSequenceClassification.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news")
|
||||
pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
|
||||
|
||||
return pipe
|
||||
|
||||
@lru_cache()
|
||||
def load_text_multilabel_classifier():
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
||||
import numpy as np
|
||||
from scipy.special import expit
|
||||
def get_available_memory(device):
import torch

if device.type == "cuda":
return torch.cuda.get_device_properties(device).total_memory
elif device.type == "mps":
return 48 * 1024**3  # assume 48GB of unified memory on Apple Silicon (MPS)
else:
return 0
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def calculate_batch_size(device):
|
||||
available_memory = get_available_memory(device)
|
||||
|
||||
if device.type == "cpu":
|
||||
return 16
|
||||
elif device.type in ["cuda", "mps"]:
|
||||
# Adjust these thresholds based on your model size and available memory
|
||||
if available_memory >= 31 * 1024**3:  # roughly 32GB or more
return 256
elif available_memory >= 15 * 1024**3:  # roughly 16GB to 32GB
return 128
elif available_memory >= 8 * 1024**3:  # roughly 8GB to 16GB
return 64
|
||||
else:
|
||||
return 32
|
||||
else:
|
||||
return 16 # Default batch size
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def get_device():
|
||||
import torch
|
||||
|
||||
# Check for available device: CUDA, MPS (for Apple Silicon), or CPU
|
||||
if torch.cuda.is_available():
|
||||
device = torch.device("cuda")
|
||||
elif torch.backends.mps.is_available():
|
||||
device = torch.device("mps")
|
||||
else:
|
||||
device = torch.device("cpu")
|
||||
return device
|
||||
|
||||
|
||||
def set_model_device(model):
|
||||
device = get_device()
|
||||
model.to(device)
|
||||
return model, device
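# Usage sketch (illustrative; printed values depend on the host hardware):
#
#   device = get_device()                     # cuda / mps / cpu
#   batch_size = calculate_batch_size(device)
#   print(f"Inference on {device} with batch size {batch_size}")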
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def get_home_folder():
|
||||
home_folder = os.path.join(
|
||||
os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
|
||||
)
|
||||
os.makedirs(home_folder, exist_ok=True)
|
||||
os.makedirs(f"{home_folder}/cache", exist_ok=True)
|
||||
os.makedirs(f"{home_folder}/models", exist_ok=True)
|
||||
return home_folder
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_bert_base_uncased():
|
||||
from transformers import BertTokenizer, BertModel
|
||||
|
||||
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", resume_download=None)
|
||||
model = BertModel.from_pretrained("bert-base-uncased", resume_download=None)
|
||||
model.eval()
|
||||
model, device = set_model_device(model)
|
||||
return tokenizer, model
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_HF_embedding_model(model_name="BAAI/bge-small-en-v1.5") -> tuple:
|
||||
"""Load the Hugging Face model for embedding.
|
||||
|
||||
Args:
|
||||
model_name (str, optional): The model name to load. Defaults to "BAAI/bge-small-en-v1.5".
|
||||
|
||||
Returns:
|
||||
tuple: The tokenizer and model.
|
||||
"""
|
||||
from transformers import AutoTokenizer, AutoModel
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name, resume_download=None)
|
||||
model = AutoModel.from_pretrained(model_name, resume_download=None)
|
||||
model.eval()
|
||||
model, device = set_model_device(model)
|
||||
return tokenizer, model
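# Usage sketch (illustrative): embedding a sentence with the loaded model.
# CLS-token pooling is one common choice for BGE-style models, not something
# this module prescribes.
#
#   import torch
#
#   tokenizer, model = load_HF_embedding_model("BAAI/bge-small-en-v1.5")
#   device = next(model.parameters()).device
#   inputs = tokenizer(["a sample sentence"], return_tensors="pt",
#                      padding=True, truncation=True)
#   inputs = {k: v.to(device) for k, v in inputs.items()}
#   with torch.no_grad():
#       embedding = model(**inputs).last_hidden_state[:, 0]  # CLS pooling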
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_text_classifier():
|
||||
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
||||
from transformers import pipeline
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
"dstefa/roberta-base_topic_classification_nyt_news"
|
||||
)
|
||||
model = AutoModelForSequenceClassification.from_pretrained(
|
||||
"dstefa/roberta-base_topic_classification_nyt_news"
|
||||
)
|
||||
model.eval()
|
||||
model, device = set_model_device(model)
|
||||
pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
|
||||
return pipe
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_text_multilabel_classifier():
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
||||
from scipy.special import expit
|
||||
import torch
|
||||
|
||||
# # Check for available device: CUDA, MPS (for Apple Silicon), or CPU
|
||||
# if torch.cuda.is_available():
|
||||
# device = torch.device("cuda")
|
||||
# elif torch.backends.mps.is_available():
|
||||
# device = torch.device("mps")
|
||||
# else:
|
||||
# device = torch.device("cpu")
|
||||
# # return load_spacy_model(), torch.device("cpu")
|
||||
|
||||
MODEL = "cardiffnlp/tweet-topic-21-multi"
|
||||
tokenizer = AutoTokenizer.from_pretrained(MODEL, resume_download=None)
|
||||
model = AutoModelForSequenceClassification.from_pretrained(
|
||||
MODEL, resume_download=None
|
||||
)
|
||||
model.eval()
|
||||
model, device = set_model_device(model)
|
||||
class_mapping = model.config.id2label
|
||||
|
||||
def _classifier(texts, threshold=0.5, max_length=64):
|
||||
|
||||
tokens = tokenizer(
|
||||
texts,
|
||||
return_tensors="pt",
|
||||
padding=True,
|
||||
truncation=True,
|
||||
max_length=max_length,
|
||||
)
|
||||
tokens = {
|
||||
key: val.to(device) for key, val in tokens.items()
|
||||
} # Move tokens to the selected device
|
||||
|
||||
with torch.no_grad():
|
||||
output = model(**tokens)
|
||||
@@ -73,21 +163,91 @@ def load_text_multilabel_classifier():
|
||||
|
||||
batch_labels = []
|
||||
for prediction in predictions:
|
||||
|
||||
labels = [
|
||||
class_mapping[i] for i, value in enumerate(prediction) if value == 1
|
||||
]
|
||||
batch_labels.append(labels)
|
||||
|
||||
return batch_labels
|
||||
|
||||
return _classifier, device
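# Usage sketch (illustrative; the sample text and resulting label are made up):
#
#   classify, device = load_text_multilabel_classifier()
#   labels = classify(["The team clinched the title in extra time"], threshold=0.5)
#   print(labels)  # one label list per input text, e.g. [['sports']]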
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_nltk_punkt():
|
||||
import nltk
|
||||
|
||||
try:
|
||||
nltk.data.find("tokenizers/punkt")
except LookupError:
nltk.download("punkt")
return nltk.data.find("tokenizers/punkt")
|
||||
|
||||
|
||||
@lru_cache()
|
||||
def load_spacy_model():
|
||||
import spacy
|
||||
|
||||
name = "models/reuters"
|
||||
home_folder = get_home_folder()
|
||||
model_folder = Path(home_folder) / name
|
||||
|
||||
# Check if the model directory already exists
|
||||
if not (model_folder.exists() and any(model_folder.iterdir())):
|
||||
repo_url = "https://github.com/unclecode/crawl4ai.git"
|
||||
branch = MODEL_REPO_BRANCH
|
||||
repo_folder = Path(home_folder) / "crawl4ai"
|
||||
|
||||
print("[LOG] ⏬ Downloading Spacy model for the first time...")
|
||||
|
||||
# Remove existing repo folder if it exists
|
||||
if repo_folder.exists():
|
||||
try:
|
||||
shutil.rmtree(repo_folder)
|
||||
if model_folder.exists():
|
||||
shutil.rmtree(model_folder)
|
||||
except PermissionError:
|
||||
print(
|
||||
"[WARNING] Unable to remove existing folders. Please manually delete the following folders and try again:"
|
||||
)
|
||||
print(f"- {repo_folder}")
|
||||
print(f"- {model_folder}")
|
||||
return None
|
||||
|
||||
try:
|
||||
# Clone the repository
|
||||
subprocess.run(
|
||||
["git", "clone", "-b", branch, repo_url, str(repo_folder)],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL,
|
||||
check=True,
|
||||
)
|
||||
|
||||
# Create the models directory if it doesn't exist
|
||||
models_folder = Path(home_folder) / "models"
|
||||
models_folder.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Copy the reuters model folder to the models directory
|
||||
source_folder = repo_folder / "models" / "reuters"
|
||||
shutil.copytree(source_folder, model_folder)
|
||||
|
||||
# Remove the cloned repository
|
||||
shutil.rmtree(repo_folder)
|
||||
|
||||
print("[LOG] ✅ Spacy Model downloaded successfully")
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"An error occurred while cloning the repository: {e}")
|
||||
return None
|
||||
except Exception as e:
|
||||
print(f"An error occurred: {e}")
|
||||
return None
|
||||
|
||||
try:
|
||||
return spacy.load(str(model_folder))
|
||||
except Exception as e:
|
||||
print(f"Error loading spacy model: {e}")
|
||||
return None
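# Usage sketch (illustrative), assuming the bundled reuters pipeline includes
# a spaCy text categorizer (doc.cats is empty if no textcat component exists):
#
#   nlp = load_spacy_model()
#   if nlp is not None:
#       doc = nlp("Crawl4AI extracts structured content from the web.")
#       print(doc.cats)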
|
||||
|
||||
|
||||
def download_all_models(remove_existing=False):
|
||||
"""Download all models required for Crawl4AI."""
|
||||
@@ -104,24 +264,33 @@ def download_all_models(remove_existing=False):
|
||||
print("[LOG] Existing models removed.")
|
||||
|
||||
# Load each model to trigger download
|
||||
print("[LOG] Downloading BERT Base Uncased...")
|
||||
load_bert_base_uncased()
|
||||
print("[LOG] Downloading BGE Small EN v1.5...")
|
||||
load_bge_small_en_v1_5()
|
||||
# print("[LOG] Downloading BERT Base Uncased...")
|
||||
# load_bert_base_uncased()
|
||||
# print("[LOG] Downloading BGE Small EN v1.5...")
|
||||
# load_bge_small_en_v1_5()
|
||||
# print("[LOG] Downloading ONNX model...")
|
||||
# load_onnx_all_MiniLM_l6_v2()
|
||||
print("[LOG] Downloading text classifier...")
|
||||
load_text_multilabel_classifier
|
||||
_, device = load_text_multilabel_classifier()
|
||||
print(f"[LOG] Text classifier loaded on {device}")
|
||||
print("[LOG] Downloading custom NLTK Punkt model...")
|
||||
load_nltk_punkt()
|
||||
print("[LOG] ✅ All models downloaded successfully.")
|
||||
|
||||
|
||||
def main():
|
||||
print("[LOG] Welcome to the Crawl4AI Model Downloader!")
|
||||
print("[LOG] This script will download all the models required for Crawl4AI.")
|
||||
parser = argparse.ArgumentParser(description="Crawl4AI Model Downloader")
|
||||
|
||||
parser.add_argument(
|
||||
"--remove-existing",
|
||||
action="store_true",
|
||||
help="Remove existing models before downloading",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
download_all_models(remove_existing=args.remove_existing)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
@@ -1,16 +1,371 @@
|
||||
|
||||
from pydantic import BaseModel, HttpUrl, PrivateAttr, Field
|
||||
from typing import List, Dict, Optional, Callable, Awaitable, Union, Any
|
||||
from typing import AsyncGenerator
|
||||
from typing import Generic, TypeVar
|
||||
from enum import Enum
|
||||
from dataclasses import dataclass, field
|
||||
from .ssl_certificate import SSLCertificate
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
|
||||
###############################
|
||||
# Dispatcher Models
|
||||
###############################
|
||||
@dataclass
|
||||
class DomainState:
|
||||
last_request_time: float = 0
|
||||
current_delay: float = 0
|
||||
fail_count: int = 0
|
||||
|
||||
|
||||
@dataclass
|
||||
class CrawlerTaskResult:
|
||||
task_id: str
|
||||
url: str
|
||||
result: "CrawlResult"
|
||||
memory_usage: float
|
||||
peak_memory: float
|
||||
start_time: Union[datetime, float]
|
||||
end_time: Union[datetime, float]
|
||||
error_message: str = ""
|
||||
retry_count: int = 0
|
||||
wait_time: float = 0.0
|
||||
|
||||
@property
|
||||
def success(self) -> bool:
|
||||
return self.result.success
|
||||
|
||||
class CrawlStatus(Enum):
|
||||
QUEUED = "QUEUED"
|
||||
IN_PROGRESS = "IN_PROGRESS"
|
||||
COMPLETED = "COMPLETED"
|
||||
FAILED = "FAILED"
|
||||
|
||||
@dataclass
|
||||
class CrawlStats:
|
||||
task_id: str
|
||||
url: str
|
||||
status: CrawlStatus
|
||||
start_time: Optional[Union[datetime, float]] = None
|
||||
end_time: Optional[Union[datetime, float]] = None
|
||||
memory_usage: float = 0.0
|
||||
peak_memory: float = 0.0
|
||||
error_message: str = ""
|
||||
wait_time: float = 0.0
|
||||
retry_count: int = 0
|
||||
counted_requeue: bool = False
|
||||
|
||||
@property
|
||||
def duration(self) -> str:
|
||||
if not self.start_time:
|
||||
return "0:00"
|
||||
|
||||
# Convert start_time to datetime if it's a float
|
||||
start = self.start_time
|
||||
if isinstance(start, float):
|
||||
start = datetime.fromtimestamp(start)
|
||||
|
||||
# Get end time or use current time
|
||||
end = self.end_time or datetime.now()
|
||||
# Convert end_time to datetime if it's a float
|
||||
if isinstance(end, float):
|
||||
end = datetime.fromtimestamp(end)
|
||||
|
||||
duration = end - start
|
||||
return str(timedelta(seconds=int(duration.total_seconds())))
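# Illustration of the float-to-datetime handling above (values are made up):
#
#   from time import time
#   stats = CrawlStats(task_id="t1", url="https://example.com",
#                      status=CrawlStatus.IN_PROGRESS, start_time=time() - 90)
#   stats.duration  # "0:01:30"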
|
||||
|
||||
class DisplayMode(Enum):
|
||||
DETAILED = "DETAILED"
|
||||
AGGREGATED = "AGGREGATED"
|
||||
|
||||
|
||||
###############################
|
||||
# Crawler Models
|
||||
###############################
|
||||
@dataclass
|
||||
class TokenUsage:
|
||||
completion_tokens: int = 0
|
||||
prompt_tokens: int = 0
|
||||
total_tokens: int = 0
|
||||
completion_tokens_details: Optional[dict] = None
|
||||
prompt_tokens_details: Optional[dict] = None
|
||||
|
||||
class UrlModel(BaseModel):
|
||||
url: HttpUrl
|
||||
forced: bool = False
|
||||
|
||||
|
||||
|
||||
@dataclass
|
||||
class TraversalStats:
|
||||
"""Statistics for the traversal process"""
|
||||
|
||||
start_time: datetime = field(default_factory=datetime.now)  # evaluated per instance, not once at import time
|
||||
urls_processed: int = 0
|
||||
urls_failed: int = 0
|
||||
urls_skipped: int = 0
|
||||
total_depth_reached: int = 0
|
||||
current_depth: int = 0
|
||||
|
||||
class DispatchResult(BaseModel):
|
||||
task_id: str
|
||||
memory_usage: float
|
||||
peak_memory: float
|
||||
start_time: Union[datetime, float]
|
||||
end_time: Union[datetime, float]
|
||||
error_message: str = ""
|
||||
|
||||
class MarkdownGenerationResult(BaseModel):
|
||||
raw_markdown: str
|
||||
markdown_with_citations: str
|
||||
references_markdown: str
|
||||
fit_markdown: Optional[str] = None
|
||||
fit_html: Optional[str] = None
|
||||
|
||||
def __str__(self):
|
||||
return self.raw_markdown
|
||||
|
||||
class CrawlResult(BaseModel):
|
||||
url: str
|
||||
html: str
|
||||
fit_html: Optional[str] = None
|
||||
success: bool
|
||||
|
||||
cleaned_html: Optional[str] = None
|
||||
media: Dict[str, List[Dict]] = {}
|
||||
links: Dict[str, List[Dict]] = {}
|
||||
downloaded_files: Optional[List[str]] = None
|
||||
js_execution_result: Optional[Dict[str, Any]] = None
|
||||
screenshot: Optional[str] = None
|
||||
pdf: Optional[bytes] = None
|
||||
mhtml: Optional[str] = None
|
||||
_markdown: Optional[MarkdownGenerationResult] = PrivateAttr(default=None)
|
||||
extracted_content: Optional[str] = None
|
||||
metadata: Optional[dict] = None
|
||||
error_message: Optional[str] = None
|
||||
session_id: Optional[str] = None
|
||||
response_headers: Optional[dict] = None
|
||||
status_code: Optional[int] = None
|
||||
ssl_certificate: Optional[SSLCertificate] = None
|
||||
dispatch_result: Optional[DispatchResult] = None
|
||||
redirected_url: Optional[str] = None
|
||||
network_requests: Optional[List[Dict[str, Any]]] = None
|
||||
console_messages: Optional[List[Dict[str, Any]]] = None
|
||||
tables: List[Dict] = Field(default_factory=list) # NEW – [{headers,rows,caption,summary}]
|
||||
|
||||
class Config:
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
# NOTE: The StringCompatibleMarkdown class, custom __init__ method, property getters/setters,
|
||||
# and model_dump override all exist to support a smooth transition from markdown as a string
|
||||
# to markdown as a MarkdownGenerationResult object, while maintaining backward compatibility.
|
||||
#
|
||||
# This allows code that expects markdown to be a string to continue working, while also
|
||||
# providing access to the full MarkdownGenerationResult object's properties.
|
||||
#
|
||||
# The markdown_v2 property is deprecated and raises an error directing users to use markdown.
|
||||
#
|
||||
# When backward compatibility is no longer needed in future versions, this entire mechanism
|
||||
# can be simplified to a standard field with no custom accessors or serialization logic.
|
||||
|
||||
def __init__(self, **data):
|
||||
markdown_result = data.pop('markdown', None)
|
||||
super().__init__(**data)
|
||||
if markdown_result is not None:
|
||||
self._markdown = (
|
||||
MarkdownGenerationResult(**markdown_result)
|
||||
if isinstance(markdown_result, dict)
|
||||
else markdown_result
|
||||
)
|
||||
|
||||
@property
|
||||
def markdown(self):
|
||||
"""
|
||||
Property that returns a StringCompatibleMarkdown object that behaves like
|
||||
a string but also provides access to MarkdownGenerationResult attributes.
|
||||
|
||||
This approach allows backward compatibility with code that expects 'markdown'
|
||||
to be a string, while providing access to the full MarkdownGenerationResult.
|
||||
"""
|
||||
if self._markdown is None:
|
||||
return None
|
||||
return StringCompatibleMarkdown(self._markdown)
|
||||
|
||||
@markdown.setter
|
||||
def markdown(self, value):
|
||||
"""
|
||||
Setter for the markdown property.
|
||||
"""
|
||||
self._markdown = value
|
||||
|
||||
@property
|
||||
def markdown_v2(self):
|
||||
"""
|
||||
Deprecated property that raises an AttributeError when accessed.
|
||||
|
||||
This property exists to inform users that 'markdown_v2' has been
|
||||
deprecated and they should use 'markdown' instead.
|
||||
"""
|
||||
raise AttributeError(
|
||||
"The 'markdown_v2' attribute is deprecated and has been removed. "
|
||||
"""Please use 'markdown' instead, which now returns a MarkdownGenerationResult, with
|
||||
following properties:
|
||||
- raw_markdown: The raw markdown string
|
||||
- markdown_with_citations: The markdown string with citations
|
||||
- references_markdown: The markdown string with references
|
||||
- fit_markdown: The markdown string with fit text
|
||||
"""
|
||||
)
|
||||
|
||||
@property
|
||||
def fit_markdown(self):
|
||||
"""
|
||||
Deprecated property that raises an AttributeError when accessed.
|
||||
"""
|
||||
raise AttributeError(
|
||||
"The 'fit_markdown' attribute is deprecated and has been removed. "
|
||||
"Please use 'markdown.fit_markdown' instead."
|
||||
)
|
||||
|
||||
@property
|
||||
def fit_html(self):
|
||||
"""
|
||||
Deprecated property that raises an AttributeError when accessed.
|
||||
"""
|
||||
raise AttributeError(
|
||||
"The 'fit_html' attribute is deprecated and has been removed. "
|
||||
"Please use 'markdown.fit_html' instead."
|
||||
)
|
||||
|
||||
def model_dump(self, *args, **kwargs):
|
||||
"""
|
||||
Override model_dump to include the _markdown private attribute in serialization.
|
||||
|
||||
This override is necessary because:
|
||||
1. PrivateAttr fields are excluded from serialization by default
|
||||
2. We need to maintain backward compatibility by including the 'markdown' field
|
||||
in the serialized output
|
||||
3. We're transitioning from 'markdown_v2' to enhancing 'markdown' to hold
|
||||
the same type of data
|
||||
|
||||
Future developers: This method ensures that the markdown content is properly
|
||||
serialized despite being stored in a private attribute. If the serialization
|
||||
requirements change, this is where you would update the logic.
|
||||
"""
|
||||
result = super().model_dump(*args, **kwargs)
|
||||
if self._markdown is not None:
|
||||
result["markdown"] = self._markdown.model_dump()
|
||||
return result
|
||||
|
||||
class StringCompatibleMarkdown(str):
|
||||
"""A string subclass that also provides access to MarkdownGenerationResult attributes"""
|
||||
def __new__(cls, markdown_result):
|
||||
return super().__new__(cls, markdown_result.raw_markdown)
|
||||
|
||||
def __init__(self, markdown_result):
|
||||
self._markdown_result = markdown_result
|
||||
|
||||
def __getattr__(self, name):
|
||||
return getattr(self._markdown_result, name)
|
||||
|
||||
CrawlResultT = TypeVar('CrawlResultT', bound=CrawlResult)
|
||||
|
||||
class CrawlResultContainer(Generic[CrawlResultT]):
|
||||
def __init__(self, results: Union[CrawlResultT, List[CrawlResultT]]):
|
||||
# Normalize to a list
|
||||
if isinstance(results, list):
|
||||
self._results = results
|
||||
else:
|
||||
self._results = [results]
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._results)
|
||||
|
||||
def __getitem__(self, index):
|
||||
return self._results[index]
|
||||
|
||||
def __len__(self):
|
||||
return len(self._results)
|
||||
|
||||
def __getattr__(self, attr):
|
||||
# Delegate attribute access to the first element.
|
||||
if self._results:
|
||||
return getattr(self._results[0], attr)
|
||||
raise AttributeError(f"{self.__class__.__name__} object has no attribute '{attr}'")
|
||||
|
||||
def __repr__(self):
|
||||
return f"{self.__class__.__name__}({self._results!r})"
|
||||
|
||||
RunManyReturn = Union[
|
||||
CrawlResultContainer[CrawlResultT],
|
||||
AsyncGenerator[CrawlResultT, None]
|
||||
]
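# Illustration of the container's delegation behavior (values are made up):
#
#   single = CrawlResultContainer(
#       CrawlResult(url="https://example.com", html="<html/>", success=True)
#   )
#   len(single)                  # 1
#   single.url                   # delegates to the first result
#   [r.success for r in single]  # iterable like a list: [True]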
|
||||
|
||||
|
||||
# END of backward compatibility code for markdown/markdown_v2.
|
||||
# When removing this code in the future, make sure to:
|
||||
# 1. Replace the private attribute and property with a standard field
|
||||
# 2. Update any serialization logic that might depend on the current behavior
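# Illustration of the backward-compatible behavior described above (field
# values are made up):
#
#   result = CrawlResult(url="https://example.com", html="<html/>", success=True)
#   result.markdown = MarkdownGenerationResult(
#       raw_markdown="# Title",
#       markdown_with_citations="# Title [1]",
#       references_markdown="[1]: https://example.com",
#   )
#   result.markdown.upper()       # behaves like a str: "# TITLE"
#   result.markdown.fit_markdown  # still exposes MarkdownGenerationResult fields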
|
||||
|
||||
class AsyncCrawlResponse(BaseModel):
|
||||
html: str
|
||||
response_headers: Dict[str, str]
|
||||
js_execution_result: Optional[Dict[str, Any]] = None
|
||||
status_code: int
|
||||
screenshot: Optional[str] = None
|
||||
pdf_data: Optional[bytes] = None
|
||||
mhtml_data: Optional[str] = None
|
||||
get_delayed_content: Optional[Callable[[Optional[float]], Awaitable[str]]] = None
|
||||
downloaded_files: Optional[List[str]] = None
|
||||
ssl_certificate: Optional[SSLCertificate] = None
|
||||
redirected_url: Optional[str] = None
|
||||
network_requests: Optional[List[Dict[str, Any]]] = None
|
||||
console_messages: Optional[List[Dict[str, Any]]] = None
|
||||
|
||||
class Config:
|
||||
arbitrary_types_allowed = True
|
||||
|
||||
###############################
|
||||
# Scraping Models
|
||||
###############################
|
||||
class MediaItem(BaseModel):
|
||||
src: Optional[str] = ""
|
||||
data: Optional[str] = ""
|
||||
alt: Optional[str] = ""
|
||||
desc: Optional[str] = ""
|
||||
score: Optional[int] = 0
|
||||
type: str = "image"
|
||||
group_id: Optional[int] = 0
|
||||
format: Optional[str] = None
|
||||
width: Optional[int] = None
|
||||
|
||||
|
||||
class Link(BaseModel):
|
||||
href: Optional[str] = ""
|
||||
text: Optional[str] = ""
|
||||
title: Optional[str] = ""
|
||||
base_domain: Optional[str] = ""
|
||||
|
||||
|
||||
class Media(BaseModel):
|
||||
images: List[MediaItem] = []
|
||||
videos: List[
|
||||
MediaItem
|
||||
] = [] # Using MediaItem model for now, can be extended with Video model if needed
|
||||
audios: List[
|
||||
MediaItem
|
||||
] = [] # Using MediaItem model for now, can be extended with Audio model if needed
|
||||
tables: List[Dict] = [] # Table data extracted from HTML tables
|
||||
|
||||
|
||||
class Links(BaseModel):
|
||||
internal: List[Link] = []
|
||||
external: List[Link] = []
|
||||
|
||||
|
||||
class ScrapingResult(BaseModel):
|
||||
cleaned_html: str
|
||||
success: bool
|
||||
media: Media = Media()
|
||||
links: Links = Links()
|
||||
metadata: Dict[str, Any] = {}
|
||||
|
||||
crawl4ai/processors/pdf/__init__.py (new file, 195 lines)
@@ -0,0 +1,195 @@
|
||||
from pathlib import Path
|
||||
import asyncio
|
||||
from dataclasses import asdict
|
||||
from crawl4ai.async_logger import AsyncLogger
|
||||
from crawl4ai.async_crawler_strategy import AsyncCrawlerStrategy
|
||||
from crawl4ai.models import AsyncCrawlResponse, ScrapingResult
|
||||
from crawl4ai.content_scraping_strategy import ContentScrapingStrategy
|
||||
from .processor import NaivePDFProcessorStrategy # Assuming your current PDF code is in pdf_processor.py
|
||||
|
||||
class PDFCrawlerStrategy(AsyncCrawlerStrategy):
|
||||
def __init__(self, logger: AsyncLogger = None):
|
||||
self.logger = logger
|
||||
|
||||
async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
|
||||
# Pass through placeholder HTML - the scraping strategy does the actual processing
return AsyncCrawlResponse(
html="Scraper will handle the real work",  # placeholder, replaced by the scraping strategy
|
||||
response_headers={"Content-Type": "application/pdf"},
|
||||
status_code=200
|
||||
)
|
||||
|
||||
async def close(self):
|
||||
pass
|
||||
|
||||
async def __aenter__(self):
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_val, exc_tb):
|
||||
await self.close()
|
||||
|
||||
class PDFContentScrapingStrategy(ContentScrapingStrategy):
|
||||
"""
|
||||
A content scraping strategy for PDF files.
|
||||
|
||||
Attributes:
|
||||
save_images_locally (bool): Whether to save images locally.
|
||||
extract_images (bool): Whether to extract images from PDF.
|
||||
image_save_dir (str): Directory to save extracted images.
|
||||
logger (AsyncLogger): Logger instance for recording events and errors.
|
||||
|
||||
Methods:
|
||||
scrap(url: str, html: str, **params) -> ScrapingResult:
|
||||
Scrap content from a PDF file.
|
||||
ascrap(url: str, html: str, **kwargs) -> ScrapingResult:
|
||||
Asynchronous version of scrap.
|
||||
|
||||
Usage:
|
||||
strategy = PDFContentScrapingStrategy(
|
||||
save_images_locally=False,
|
||||
extract_images=False,
|
||||
image_save_dir=None,
|
||||
logger=logger
|
||||
)
|
||||
|
||||
"""
|
||||
def __init__(self,
|
||||
save_images_locally : bool = False,
|
||||
extract_images : bool = False,
|
||||
image_save_dir : str = None,
|
||||
batch_size: int = 4,
|
||||
logger: AsyncLogger = None):
|
||||
self.logger = logger
|
||||
self.pdf_processor = NaivePDFProcessorStrategy(
|
||||
save_images_locally=save_images_locally,
|
||||
extract_images=extract_images,
|
||||
image_save_dir=image_save_dir,
|
||||
batch_size=batch_size
|
||||
)
|
||||
self._temp_files = [] # Track temp files for cleanup
|
||||
|
||||
def scrap(self, url: str, html: str, **params) -> ScrapingResult:
|
||||
"""
|
||||
Scrap content from a PDF file.
|
||||
|
||||
Args:
|
||||
url (str): The URL of the PDF file.
|
||||
html (str): The HTML content of the page.
|
||||
**params: Additional parameters.
|
||||
|
||||
Returns:
|
||||
ScrapingResult: The scraped content.
|
||||
"""
|
||||
# Download if URL or use local path
|
||||
pdf_path = self._get_pdf_path(url)
|
||||
try:
|
||||
# Process PDF
|
||||
# result = self.pdf_processor.process(Path(pdf_path))
|
||||
result = self.pdf_processor.process_batch(Path(pdf_path))
|
||||
|
||||
# Combine page HTML
|
||||
cleaned_html = f"""
|
||||
<html>
|
||||
<head><meta name="pdf-pages" content="{len(result.pages)}"></head>
|
||||
<body>
|
||||
{''.join(f'<div class="pdf-page" data-page="{i+1}">{page.html}</div>'
|
||||
for i, page in enumerate(result.pages))}
|
||||
</body>
|
||||
</html>
|
||||
"""
|
||||
|
||||
# Accumulate media and links with page numbers
|
||||
media = {"images": []}
|
||||
links = {"urls": []}
|
||||
|
||||
for page in result.pages:
|
||||
# Add page number to each image
|
||||
for img in page.images:
|
||||
img["page"] = page.page_number
|
||||
media["images"].append(img)
|
||||
|
||||
# Add page number to each link
|
||||
for link in page.links:
|
||||
links["urls"].append({
|
||||
"url": link,
|
||||
"page": page.page_number
|
||||
})
|
||||
|
||||
return ScrapingResult(
|
||||
cleaned_html=cleaned_html,
|
||||
success=True,
|
||||
media=media,
|
||||
links=links,
|
||||
metadata=asdict(result.metadata)
|
||||
)
|
||||
finally:
|
||||
# Cleanup temp file if downloaded
|
||||
if url.startswith(("http://", "https://")):
|
||||
try:
|
||||
Path(pdf_path).unlink(missing_ok=True)
|
||||
if pdf_path in self._temp_files:
|
||||
self._temp_files.remove(pdf_path)
|
||||
except Exception as e:
|
||||
if self.logger:
|
||||
self.logger.warning(f"Failed to cleanup temp file {pdf_path}: {e}")
|
||||
|
||||
async def ascrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
|
||||
# Offload the synchronous scrap() to a worker thread so the event loop stays responsive
return await asyncio.to_thread(self.scrap, url, html, **kwargs)
|
||||
|
||||
|
||||
def _get_pdf_path(self, url: str) -> str:
|
||||
if url.startswith(("http://", "https://")):
|
||||
import tempfile
|
||||
import requests
|
||||
|
||||
# Create temp file with .pdf extension
|
||||
temp_file = tempfile.NamedTemporaryFile(suffix='.pdf', delete=False)
|
||||
self._temp_files.append(temp_file.name)
|
||||
|
||||
try:
|
||||
if self.logger:
|
||||
self.logger.info(f"Downloading PDF from {url}...")
|
||||
|
||||
# Download PDF with streaming and timeout
|
||||
# Connection timeout: 20s, read timeout: 600s (10 minutes for large PDFs)
response = requests.get(url, stream=True, timeout=(20, 60 * 10))
|
||||
response.raise_for_status()
|
||||
|
||||
# Get file size if available
|
||||
total_size = int(response.headers.get('content-length', 0))
|
||||
downloaded = 0
|
||||
|
||||
# Write to temp file
|
||||
with open(temp_file.name, 'wb') as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
downloaded += len(chunk)
|
||||
if self.logger and total_size > 0:
|
||||
progress = (downloaded / total_size) * 100
|
||||
if progress % 10 < 0.1: # Log every 10%
|
||||
self.logger.debug(f"PDF download progress: {progress:.0f}%")
|
||||
|
||||
if self.logger:
|
||||
self.logger.info(f"PDF downloaded successfully: {temp_file.name}")
|
||||
|
||||
return temp_file.name
|
||||
|
||||
except requests.exceptions.Timeout as e:
|
||||
# Clean up temp file if download fails
|
||||
Path(temp_file.name).unlink(missing_ok=True)
|
||||
self._temp_files.remove(temp_file.name)
|
||||
raise RuntimeError(f"Timeout downloading PDF from {url}: {str(e)}")
|
||||
except Exception as e:
|
||||
# Clean up temp file if download fails
|
||||
Path(temp_file.name).unlink(missing_ok=True)
|
||||
self._temp_files.remove(temp_file.name)
|
||||
raise RuntimeError(f"Failed to download PDF from {url}: {str(e)}")
|
||||
|
||||
elif url.startswith("file://"):
|
||||
return url[7:] # Strip file:// prefix
|
||||
|
||||
return url # Assume local path
|
||||
|
||||
|
||||
__all__ = ["PDFCrawlerStrategy", "PDFContentScrapingStrategy"]
|
||||
crawl4ai/processors/pdf/processor.py (new file, 487 lines)
@@ -0,0 +1,487 @@
|
||||
import logging
|
||||
import re
|
||||
from abc import ABC, abstractmethod
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from time import time
|
||||
from dataclasses import dataclass, asdict, field
|
||||
from typing import Dict, List, Optional, Any, Union
|
||||
import base64
|
||||
import tempfile
|
||||
from .utils import *
|
||||
from .utils import (
|
||||
apply_png_predictor,
|
||||
clean_pdf_text,
|
||||
clean_pdf_text_to_html,
|
||||
)
|
||||
|
||||
# Remove direct PyPDF2 imports from the top
|
||||
# import PyPDF2
|
||||
# from PyPDF2 import PdfReader
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@dataclass
|
||||
class PDFMetadata:
|
||||
title: Optional[str] = None
|
||||
author: Optional[str] = None
|
||||
producer: Optional[str] = None
|
||||
created: Optional[datetime] = None
|
||||
modified: Optional[datetime] = None
|
||||
pages: int = 0
|
||||
encrypted: bool = False
|
||||
file_size: Optional[int] = None
|
||||
|
||||
@dataclass
|
||||
class PDFPage:
|
||||
page_number: int
|
||||
raw_text: str = ""
|
||||
markdown: str = ""
|
||||
html: str = ""
|
||||
images: List[Dict] = field(default_factory=list)
|
||||
links: List[str] = field(default_factory=list)
|
||||
layout: List[Dict] = field(default_factory=list)
|
||||
|
||||
@dataclass
|
||||
class PDFProcessResult:
|
||||
metadata: PDFMetadata
|
||||
pages: List[PDFPage]
|
||||
processing_time: float = 0.0
|
||||
version: str = "1.0"
|
||||
|
||||
class PDFProcessorStrategy(ABC):
|
||||
@abstractmethod
|
||||
def process(self, pdf_path: Path) -> PDFProcessResult:
|
||||
pass
|
||||
|
||||
class NaivePDFProcessorStrategy(PDFProcessorStrategy):
|
||||
def __init__(self, image_dpi: int = 144, image_quality: int = 85, extract_images: bool = True,
|
||||
save_images_locally: bool = False, image_save_dir: Optional[Path] = None, batch_size: int = 4):
|
||||
# Import check at initialization time
|
||||
try:
|
||||
import PyPDF2
|
||||
except ImportError:
|
||||
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
|
||||
|
||||
self.image_dpi = image_dpi
|
||||
self.image_quality = image_quality
|
||||
self.current_page_number = 0
|
||||
self.extract_images = extract_images
|
||||
self.save_images_locally = save_images_locally
|
||||
self.image_save_dir = image_save_dir
|
||||
self.batch_size = batch_size
|
||||
self._temp_dir = None
|
||||
|
||||
def process(self, pdf_path: Path) -> PDFProcessResult:
|
||||
# Import inside method to allow dependency to be optional
|
||||
try:
|
||||
from PyPDF2 import PdfReader
|
||||
except ImportError:
|
||||
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
|
||||
|
||||
start_time = time()
|
||||
result = PDFProcessResult(
|
||||
metadata=PDFMetadata(),
|
||||
pages=[],
|
||||
version="1.1"
|
||||
)
|
||||
|
||||
try:
|
||||
with pdf_path.open('rb') as file:
|
||||
reader = PdfReader(file)
|
||||
result.metadata = self._extract_metadata(pdf_path, reader)
|
||||
|
||||
# Handle image directory
|
||||
image_dir = None
|
||||
if self.extract_images and self.save_images_locally:
|
||||
if self.image_save_dir:
|
||||
image_dir = Path(self.image_save_dir)
|
||||
image_dir.mkdir(exist_ok=True, parents=True)
|
||||
else:
|
||||
self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_')
|
||||
image_dir = Path(self._temp_dir)
|
||||
|
||||
for page_num, page in enumerate(reader.pages):
|
||||
self.current_page_number = page_num + 1
|
||||
pdf_page = self._process_page(page, image_dir)
|
||||
result.pages.append(pdf_page)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to process PDF: {str(e)}")
|
||||
raise
|
||||
finally:
|
||||
# Cleanup temp directory if it was created
|
||||
if self._temp_dir and not self.image_save_dir:
|
||||
import shutil
|
||||
try:
|
||||
shutil.rmtree(self._temp_dir)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to cleanup temp directory: {str(e)}")
|
||||
|
||||
result.processing_time = time() - start_time
|
||||
return result
|
||||
|
||||
def process_batch(self, pdf_path: Path) -> PDFProcessResult:
|
||||
"""Like process() but processes PDF pages in parallel batches"""
|
||||
# Import inside method to allow dependency to be optional
|
||||
try:
|
||||
from PyPDF2 import PdfReader
|
||||
import PyPDF2 # For type checking
|
||||
except ImportError:
|
||||
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
|
||||
|
||||
import concurrent.futures
|
||||
import threading
|
||||
|
||||
# Initialize PyPDF2 thread support
|
||||
if not hasattr(threading.current_thread(), "_children"):
|
||||
threading.current_thread()._children = set()
|
||||
|
||||
start_time = time()
|
||||
result = PDFProcessResult(
|
||||
metadata=PDFMetadata(),
|
||||
pages=[],
|
||||
version="1.1"
|
||||
)
|
||||
|
||||
try:
|
||||
# Get metadata and page count from main thread
|
||||
with pdf_path.open('rb') as file:
|
||||
reader = PdfReader(file)
|
||||
result.metadata = self._extract_metadata(pdf_path, reader)
|
||||
total_pages = len(reader.pages)
|
||||
|
||||
# Handle image directory setup
|
||||
image_dir = None
|
||||
if self.extract_images and self.save_images_locally:
|
||||
if self.image_save_dir:
|
||||
image_dir = Path(self.image_save_dir)
|
||||
image_dir.mkdir(exist_ok=True, parents=True)
|
||||
else:
|
||||
self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_')
|
||||
image_dir = Path(self._temp_dir)
|
||||
|
||||
def process_page_safely(page_num: int):
|
||||
# Each thread opens its own file handle
|
||||
with pdf_path.open('rb') as file:
|
||||
thread_reader = PdfReader(file)
|
||||
page = thread_reader.pages[page_num]
|
||||
self.current_page_number = page_num + 1
|
||||
return self._process_page(page, image_dir)
|
||||
|
||||
# Process pages in parallel batches
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=self.batch_size) as executor:
|
||||
futures = []
|
||||
for page_num in range(total_pages):
|
||||
future = executor.submit(process_page_safely, page_num)
|
||||
futures.append((page_num + 1, future))
|
||||
|
||||
# Collect results in order
|
||||
result.pages = [None] * total_pages
|
||||
for page_num, future in futures:
|
||||
try:
|
||||
pdf_page = future.result()
|
||||
result.pages[page_num - 1] = pdf_page
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to process page {page_num}: {str(e)}")
|
||||
raise
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to process PDF: {str(e)}")
|
||||
raise
|
||||
finally:
|
||||
# Cleanup temp directory if it was created
|
||||
if self._temp_dir and not self.image_save_dir:
|
||||
import shutil
|
||||
try:
|
||||
shutil.rmtree(self._temp_dir)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to cleanup temp directory: {str(e)}")
|
||||
|
||||
result.processing_time = time() - start_time
|
||||
return result
|
||||
|
||||
def _process_page(self, page, image_dir: Optional[Path]) -> PDFPage:
|
||||
pdf_page = PDFPage(
|
||||
page_number=self.current_page_number,
|
||||
)
|
||||
|
||||
# Text and font extraction
|
||||
def visitor_text(text, cm, tm, font_dict, font_size):
|
||||
pdf_page.raw_text += text
|
||||
pdf_page.layout.append({
|
||||
"type": "text",
|
||||
"text": text,
|
||||
"x": tm[4],
|
||||
"y": tm[5],
|
||||
})
|
||||
|
||||
page.extract_text(visitor_text=visitor_text)
|
||||
|
||||
# Image extraction
|
||||
if self.extract_images:
|
||||
pdf_page.images = self._extract_images(page, image_dir)
|
||||
|
||||
# Link extraction
|
||||
pdf_page.links = self._extract_links(page)
|
||||
|
||||
# Add markdown content
|
||||
pdf_page.markdown = clean_pdf_text(self.current_page_number, pdf_page.raw_text)
|
||||
pdf_page.html = clean_pdf_text_to_html(self.current_page_number, pdf_page.raw_text)
|
||||
|
||||
return pdf_page
|
||||
|
||||
def _extract_images(self, page, image_dir: Optional[Path]) -> List[Dict]:
|
||||
# Import PyPDF2 for type checking only when needed
|
||||
try:
|
||||
import PyPDF2
|
||||
except ImportError:
|
||||
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
|
||||
|
||||
if not self.extract_images:
|
||||
return []
|
||||
|
||||
images = []
|
||||
try:
|
||||
resources = page.get("/Resources")
|
||||
if resources: # Check if resources exist
|
||||
resources = resources.get_object() # Resolve IndirectObject
|
||||
if '/XObject' in resources:
|
||||
xobjects = resources['/XObject'].get_object()
|
||||
img_count = 0
|
||||
for obj_name in xobjects:
|
||||
xobj = xobjects[obj_name]
|
||||
if hasattr(xobj, 'get_object') and callable(xobj.get_object):
|
||||
xobj = xobj.get_object()
|
||||
if xobj.get('/Subtype') == '/Image':
|
||||
try:
|
||||
img_count += 1
|
||||
img_filename = f"page_{self.current_page_number}_img_{img_count}"
|
||||
data = xobj.get_data()
|
||||
filters = xobj.get('/Filter', [])
|
||||
if not isinstance(filters, list):
|
||||
filters = [filters]
|
||||
|
||||
# Resolve IndirectObjects in properties
|
||||
width = xobj.get('/Width', 0)
|
||||
height = xobj.get('/Height', 0)
|
||||
color_space = xobj.get('/ColorSpace', '/DeviceRGB')
|
||||
if isinstance(color_space, PyPDF2.generic.IndirectObject):
|
||||
color_space = color_space.get_object()
|
||||
|
||||
# Handle different image encodings
|
||||
success = False
|
||||
image_format = 'bin'
|
||||
image_data = None
|
||||
|
||||
if '/FlateDecode' in filters:
|
||||
try:
|
||||
decode_parms = xobj.get('/DecodeParms', {})
|
||||
if isinstance(decode_parms, PyPDF2.generic.IndirectObject):
|
||||
decode_parms = decode_parms.get_object()
|
||||
|
||||
predictor = decode_parms.get('/Predictor', 1)
|
||||
bits = xobj.get('/BitsPerComponent', 8)
|
||||
colors = 3 if color_space == '/DeviceRGB' else 1
|
||||
|
||||
if predictor >= 10:
|
||||
data = apply_png_predictor(data, width, bits, colors)
|
||||
|
||||
# Create PIL Image
|
||||
from PIL import Image
|
||||
mode = 'RGB' if color_space == '/DeviceRGB' else 'L'
|
||||
img = Image.frombytes(mode, (width, height), data)
|
||||
|
||||
if self.save_images_locally:
|
||||
final_path = (image_dir / img_filename).with_suffix('.png')
|
||||
img.save(final_path)
|
||||
image_data = str(final_path)
|
||||
else:
|
||||
import io
|
||||
img_byte_arr = io.BytesIO()
|
||||
img.save(img_byte_arr, format='PNG')
|
||||
image_data = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')
|
||||
|
||||
success = True
|
||||
image_format = 'png'
|
||||
except Exception as e:
|
||||
logger.error(f"FlateDecode error: {str(e)}")
|
||||
|
||||
elif '/DCTDecode' in filters:
|
||||
# JPEG image
|
||||
try:
|
||||
if self.save_images_locally:
|
||||
final_path = (image_dir / img_filename).with_suffix('.jpg')
|
||||
with open(final_path, 'wb') as f:
|
||||
f.write(data)
|
||||
image_data = str(final_path)
|
||||
else:
|
||||
image_data = base64.b64encode(data).decode('utf-8')
|
||||
success = True
|
||||
image_format = 'jpeg'
|
||||
except Exception as e:
|
||||
logger.error(f"JPEG save error: {str(e)}")
|
||||
|
||||
elif '/CCITTFaxDecode' in filters:
|
||||
try:
|
||||
if data[:4] != b'II*\x00':
|
||||
# Add TIFF header if missing
|
||||
tiff_header = b'II*\x00\x08\x00\x00\x00\x0e\x00\x00\x01\x03\x00\x01\x00\x00\x00' + \
|
||||
width.to_bytes(4, 'little') + \
|
||||
b'\x01\x03\x00\x01\x00\x00\x00' + \
|
||||
height.to_bytes(4, 'little') + \
|
||||
b'\x01\x12\x00\x03\x00\x00\x00\x01\x00\x01\x00\x00\x01\x17\x00\x04\x00\x00\x00\x01\x00\x00\x00J\x01\x1B\x00\x05\x00\x00\x00\x01\x00\x00\x00R\x01\x28\x00\x03\x00\x00\x00\x01\x00\x02\x00\x00'
|
||||
data = tiff_header + data
|
||||
|
||||
if self.save_images_locally:
|
||||
final_path = (image_dir / img_filename).with_suffix('.tiff')
|
||||
with open(final_path, 'wb') as f:
|
||||
f.write(data)
|
||||
image_data = str(final_path)
|
||||
else:
|
||||
image_data = base64.b64encode(data).decode('utf-8')
|
||||
success = True
|
||||
image_format = 'tiff'
|
||||
except Exception as e:
|
||||
logger.error(f"CCITT save error: {str(e)}")
|
||||
|
||||
elif '/JPXDecode' in filters:
|
||||
# JPEG 2000
|
||||
try:
|
||||
if self.save_images_locally:
|
||||
final_path = (image_dir / img_filename).with_suffix('.jp2')
|
||||
with open(final_path, 'wb') as f:
|
||||
f.write(data)
|
||||
image_data = str(final_path)
|
||||
else:
|
||||
image_data = base64.b64encode(data).decode('utf-8')
|
||||
success = True
|
||||
image_format = 'jpeg2000'
|
||||
except Exception as e:
|
||||
logger.error(f"JPEG2000 save error: {str(e)}")
|
||||
|
||||
if success and image_data:
|
||||
image_info = {
|
||||
"format": image_format,
|
||||
"width": width,
|
||||
"height": height,
|
||||
"color_space": str(color_space),
|
||||
"bits_per_component": xobj.get('/BitsPerComponent', 1)
|
||||
}
|
||||
|
||||
if self.save_images_locally:
|
||||
image_info["path"] = image_data
|
||||
else:
|
||||
image_info["data"] = image_data
|
||||
|
||||
images.append(image_info)
|
||||
else:
|
||||
# Fallback: Save raw data
|
||||
if self.save_images_locally:
|
||||
final_path = (image_dir / img_filename).with_suffix('.bin')
|
||||
with open(final_path, 'wb') as f:
|
||||
f.write(data)
|
||||
logger.warning(f"Saved raw image data to {final_path}")
|
||||
else:
|
||||
image_data = base64.b64encode(data).decode('utf-8')
|
||||
images.append({
|
||||
"format": "bin",
|
||||
"width": width,
|
||||
"height": height,
|
||||
"color_space": str(color_space),
|
||||
"bits_per_component": xobj.get('/BitsPerComponent', 1),
|
||||
"data": image_data
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing image: {str(e)}")
|
||||
except Exception as e:
|
||||
logger.error(f"Image extraction error: {str(e)}")
|
||||
|
||||
return images
|
||||
|
||||
def _extract_links(self, page) -> List[str]:
|
||||
links = []
|
||||
if '/Annots' in page:
|
||||
try:
|
||||
for annot in page['/Annots']:
|
||||
a = annot.get_object()
|
||||
if '/A' in a and '/URI' in a['/A']:
|
||||
links.append(a['/A']['/URI'])
|
||||
except Exception as e:
|
||||
print(f"Link error: {str(e)}")
|
||||
return links
|
||||
|
||||
def _extract_metadata(self, pdf_path: Path, reader = None) -> PDFMetadata:
|
||||
# Import inside method to allow dependency to be optional
|
||||
if reader is None:
|
||||
try:
|
||||
from PyPDF2 import PdfReader
|
||||
reader = PdfReader(pdf_path)
|
||||
except ImportError:
|
||||
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
|
||||
|
||||
meta = reader.metadata or {}
|
||||
created = self._parse_pdf_date(meta.get('/CreationDate', ''))
|
||||
modified = self._parse_pdf_date(meta.get('/ModDate', ''))
|
||||
|
||||
return PDFMetadata(
|
||||
title=meta.get('/Title'),
|
||||
author=meta.get('/Author'),
|
||||
producer=meta.get('/Producer'),
|
||||
created=created,
|
||||
modified=modified,
|
||||
pages=len(reader.pages),
|
||||
encrypted=reader.is_encrypted,
|
||||
file_size=pdf_path.stat().st_size
|
||||
)
|
||||
|
||||
def _parse_pdf_date(self, date_str: str) -> Optional[datetime]:
|
||||
try:
|
||||
match = re.match(r'D:(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})', date_str)
|
||||
if not match:
|
||||
return None
|
||||
|
||||
return datetime(
|
||||
year=int(match[1]),
|
||||
month=int(match[2]),
|
||||
day=int(match[3]),
|
||||
hour=int(match[4]),
|
||||
minute=int(match[5]),
|
||||
second=int(match[6])
|
||||
)
|
||||
except Exception:
|
||||
return None
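# PDF dates use the "D:YYYYMMDDHHmmSS" layout, so for example:
#   _parse_pdf_date("D:20240131120000")  ->  datetime(2024, 1, 31, 12, 0, 0)
#   _parse_pdf_date("not a date")        ->  None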
|
||||
|
||||
# Usage example
|
||||
if __name__ == "__main__":
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
# Import PyPDF2 only when running the file directly
|
||||
import PyPDF2
|
||||
from PyPDF2 import PdfReader
|
||||
except ImportError:
|
||||
print("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
|
||||
exit(1)
|
||||
|
||||
current_dir = Path(__file__).resolve().parent
|
||||
pdf_path = f'{current_dir}/test.pdf'
|
||||
|
||||
strategy = NaivePDFProcessorStrategy()
|
||||
result = strategy.process(Path(pdf_path))
|
||||
|
||||
# Convert to JSON
|
||||
json_output = asdict(result)
|
||||
print(json.dumps(json_output, indent=2, default=str))
|
||||
|
||||
with open(f'{current_dir}/test.html', 'w') as f:
|
||||
for page in result.pages:
|
||||
# result.pages holds PDFPage dataclasses, so use attribute access
f.write(f'<h1>Page {page.page_number}</h1>')
f.write(page.html)
with open(f'{current_dir}/test.md', 'w') as f:
for page in result.pages:
f.write(f'# Page {page.page_number}\n\n')
f.write(clean_pdf_text(page.page_number, page.raw_text))
|
||||
f.write('\n\n')
|
||||
crawl4ai/processors/pdf/utils.py (new file, 350 lines)
@@ -0,0 +1,350 @@
|
||||
import re
|
||||
|
||||
def apply_png_predictor(data, width, bits, color_channels):
|
||||
"""Decode PNG predictor (PDF 1.5+ filter)"""
|
||||
bytes_per_pixel = (bits * color_channels) // 8
|
||||
if (bits * color_channels) % 8 != 0:
|
||||
bytes_per_pixel += 1
|
||||
|
||||
stride = width * bytes_per_pixel
|
||||
scanline_length = stride + 1 # +1 for filter byte
|
||||
|
||||
if len(data) % scanline_length != 0:
|
||||
raise ValueError("Invalid scanline structure")
|
||||
|
||||
num_lines = len(data) // scanline_length
|
||||
output = bytearray()
|
||||
prev_line = b'\x00' * stride
|
||||
|
||||
for i in range(num_lines):
|
||||
line = data[i*scanline_length:(i+1)*scanline_length]
|
||||
filter_type = line[0]
|
||||
filtered = line[1:]
|
||||
|
||||
if filter_type == 0: # None
|
||||
decoded = filtered
|
||||
elif filter_type == 1: # Sub
|
||||
decoded = bytearray(filtered)
|
||||
for j in range(bytes_per_pixel, len(decoded)):
|
||||
decoded[j] = (decoded[j] + decoded[j - bytes_per_pixel]) % 256
|
||||
elif filter_type == 2: # Up
|
||||
decoded = bytearray([(filtered[j] + prev_line[j]) % 256
|
||||
for j in range(len(filtered))])
|
||||
elif filter_type == 3: # Average
|
||||
decoded = bytearray(filtered)
|
||||
for j in range(len(decoded)):
|
||||
left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
|
||||
up = prev_line[j]
|
||||
avg = (left + up) // 2
|
||||
decoded[j] = (decoded[j] + avg) % 256
|
||||
elif filter_type == 4: # Paeth
|
||||
decoded = bytearray(filtered)
|
||||
for j in range(len(decoded)):
|
||||
left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
|
||||
up = prev_line[j]
|
||||
up_left = prev_line[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
|
||||
paeth = paeth_predictor(left, up, up_left)
|
||||
decoded[j] = (decoded[j] + paeth) % 256
|
||||
else:
|
||||
raise ValueError(f"Unsupported filter type: {filter_type}")
|
||||
|
||||
output.extend(decoded)
|
||||
prev_line = decoded
|
||||
|
||||
return bytes(output)
|
||||
|
||||
def paeth_predictor(a, b, c):
|
||||
p = a + b - c
|
||||
pa = abs(p - a)
|
||||
pb = abs(p - b)
|
||||
pc = abs(p - c)
|
||||
if pa <= pb and pa <= pc:
|
||||
return a
|
||||
elif pb <= pc:
|
||||
return b
|
||||
else:
|
||||
return c
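# A toy check of the Sub filter (type 1) path, assuming one byte per pixel:
# the scanline is a filter byte 0x01 plus payload [10, 5, 5], and Sub decoding
# adds the left neighbour, giving [10, 15, 20].
#
#   row = bytes([1, 10, 5, 5])
#   assert apply_png_predictor(row, width=3, bits=8, color_channels=1) == bytes([10, 15, 20])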
|
||||
|
||||
|
||||
import html
|
||||
|
||||
def clean_pdf_text_to_html(page_number, text):
|
||||
# Decode Unicode escapes and handle surrogate pairs
|
||||
try:
|
||||
decoded = text.encode('latin-1').decode('unicode-escape')
|
||||
decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
|
||||
except Exception as e:
|
||||
decoded = text # Fallback if decoding fails
|
||||
|
||||
article_title_detected = False
|
||||
# decoded = re.sub(r'\.\n', '.\n\n', decoded)
|
||||
# decoded = re.sub(r'\.\n', '<|break|>', decoded)
|
||||
lines = decoded.split('\n')
|
||||
output = []
|
||||
current_paragraph = []
|
||||
in_header = False
|
||||
email_pattern = re.compile(r'\{.*?\}')
|
||||
affiliation_pattern = re.compile(r'^†')
|
||||
quote_pattern = re.compile(r'^["“]')
|
||||
author_pattern = re.compile(
|
||||
r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
|
||||
r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
|
||||
r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
|
||||
)
|
||||
|
||||
def flush_paragraph():
|
||||
if current_paragraph:
|
||||
para = ' '.join(current_paragraph)
|
||||
para = re.sub(r'\s+', ' ', para).strip()
|
||||
if para:
|
||||
# escaped_para = html.escape(para)
|
||||
escaped_para = para
|
||||
# escaped_para = re.sub(r'\.\n', '.\n\n', escaped_para)
|
||||
# Split escaped_para by <|break|> to avoid HTML escaping
|
||||
escaped_para = escaped_para.split('.\n\n')
|
||||
# Wrap each part in <p> tag
|
||||
escaped_para = [f'<p>{part}</p>' for part in escaped_para]
|
||||
output.append(f'<div class="paragraph">{"".join(escaped_para)}</div><hr/>')
|
||||
current_paragraph.clear()
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
line = line.strip()
|
||||
|
||||
# Handle empty lines
|
||||
if not line:
|
||||
flush_paragraph()
|
||||
continue
|
||||
|
||||
# Detect article title (first line with reasonable length)
|
||||
if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and len(lines) > 1:
|
||||
flush_paragraph()
|
||||
escaped_line = html.escape(line)
|
||||
output.append(f'<h2>{escaped_line}</h2>')
|
||||
article_title_detected = True
|
||||
continue
|
||||
|
||||
# Detect numbered headers like "2.1 Background"
|
||||
numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
|
||||
if i > 0 and not lines[i-1].strip() and numbered_header:
|
||||
flush_paragraph()
|
||||
level = numbered_header.group(1).count('.') + 1
|
||||
header_text = numbered_header.group(2)
|
||||
md_level = min(level + 1, 6)
|
||||
escaped_header = html.escape(header_text)
|
||||
output.append(f'<h{md_level}>{escaped_header}</h{md_level}>')
|
||||
in_header = True
|
||||
continue
|
||||
|
||||
# Detect authors
|
||||
if page_number == 1 and author_pattern.match(line):
|
||||
authors = re.sub(r'[†â€]', '', line)
|
||||
authors = re.split(r', | and ', authors)
|
||||
formatted_authors = []
|
||||
for author in authors:
|
||||
if author.strip():
|
||||
parts = [p for p in author.strip().split() if p]
|
||||
formatted = ' '.join(parts)
|
||||
escaped_author = html.escape(formatted)
|
||||
formatted_authors.append(f'<strong>{escaped_author}</strong>')
|
||||
|
||||
if len(formatted_authors) > 1:
|
||||
joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
|
||||
else:
|
||||
joined = formatted_authors[0]
|
||||
|
||||
output.append(f'<p>{joined}</p>')
|
||||
continue
|
||||
|
||||
# Detect affiliation
|
||||
if affiliation_pattern.match(line):
|
||||
escaped_line = html.escape(line)
|
||||
output.append(f'<p><em>{escaped_line}</em></p>')
|
||||
continue
|
||||
|
||||
# Detect emails
|
||||
if email_pattern.match(line):
|
||||
escaped_line = html.escape(line)
|
||||
output.append(f'<p><code>{escaped_line}</code></p>')
|
||||
continue
|
||||
|
||||
# Detect section headers
|
||||
if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
|
||||
flush_paragraph()
|
||||
escaped_line = html.escape(line)
|
||||
output.append(f'<h2 class="section-header"><em>{escaped_line}</em></h2>')
|
||||
in_header = True
|
||||
continue
|
||||
|
||||
# Handle quotes
|
||||
if quote_pattern.match(line):
|
||||
flush_paragraph()
|
||||
escaped_line = html.escape(line)
|
||||
output.append(f'<blockquote><p>{escaped_line}</p></blockquote>')
|
||||
continue
|
||||
|
||||
# Handle hyphenated words
|
||||
if line.endswith('-'):
|
||||
current_paragraph.append(line[:-1].strip())
|
||||
else:
|
||||
current_paragraph.append(line)
|
||||
|
||||
# Handle paragraph breaks after headers
|
||||
if in_header and not line.endswith(('.', '!', '?')):
|
||||
flush_paragraph()
|
||||
in_header = False
|
||||
|
||||
flush_paragraph()
|
||||
|
||||
# Post-process HTML
|
||||
html_output = '\n'.join(output)
|
||||
|
||||
# Fix common citation patterns
|
||||
html_output = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'<cite>\1</cite>', html_output)
|
||||
|
||||
# Fix escaped characters
|
||||
html_output = html_output.replace('\\ud835', '').replace('\\u2020', '†')
|
||||
|
||||
# Remove leftover hyphens and fix spacing
|
||||
html_output = re.sub(r'\s+-\s+', '', html_output)
|
||||
html_output = re.sub(r'\s+([.,!?)])', r'\1', html_output)
|
||||
|
||||
return html_output

def clean_pdf_text(page_number, text):
    # Decode Unicode escapes and handle surrogate pairs
    try:
        decoded = text.encode('latin-1').decode('unicode-escape')
        decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
    except Exception:
        decoded = text  # Fall back to the raw text if decoding fails

    article_title_detected = False
    decoded = re.sub(r'\.\n', '.\n\n', decoded)
    lines = decoded.split('\n')
    output = []
    current_paragraph = []
    in_header = False
    email_pattern = re.compile(r'\{.*?\}')  # Brace-grouped email lines, e.g. "{alice,bob}@example.com"
    affiliation_pattern = re.compile(r'^†')
    quote_pattern = re.compile(r'^["“]')
    author_pattern = re.compile(
        r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
        r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
        r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
    )

    def flush_paragraph():
        if current_paragraph:
            para = ' '.join(current_paragraph)
            para = re.sub(r'\s+', ' ', para).strip()
            if para:
                output.append(para)
            current_paragraph.clear()

    for i, line in enumerate(lines):
        line = line.strip()

        # Handle empty lines
        if not line:
            flush_paragraph()
            continue

        # Detect the headline (first line with a reasonable word count)
        if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and len(lines) > 1:
            flush_paragraph()
            output.append(f'## {line}')
            article_title_detected = True
            continue

        # Detect numbered headers like "2.1 Background"
        numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
        if i > 0 and not lines[i-1].strip() and numbered_header:
            flush_paragraph()
            level = numbered_header.group(1).count('.') + 1  # "2.1" → level 2
            header_text = numbered_header.group(2)
            md_level = min(level + 1, 6)  # 1 → ##, 2 → ###, 3 → ####, capped at ######
            output.append(f'{"#" * md_level} {header_text}')
            in_header = True
            continue

        # Detect authors
        if page_number == 1 and author_pattern.match(line):
            # Clean and format author names
            authors = re.sub(r'[†â€]', '', line)  # Strip dagger affiliation markers, incl. mojibake forms
            authors = re.split(r', | and ', authors)
            formatted_authors = []
            for author in authors:
                if author.strip():
                    # Handle "First Last" formatting
                    parts = [p for p in author.strip().split() if p]
                    formatted = ' '.join(parts)
                    formatted_authors.append(f'**{formatted}**')

            # Join with commas and "and"
            if len(formatted_authors) > 1:
                joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
            else:
                joined = formatted_authors[0]

            output.append(joined)
            continue

        # Detect affiliation
        if affiliation_pattern.match(line):
            output.append(f'*{line}*')
            continue

        # Detect emails
        if email_pattern.match(line):
            output.append(f'`{line}`')
            continue

        # Detect section headers
        if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
            flush_paragraph()
            output.append(f'_[{line}]_')
            in_header = True
            continue

        # Handle quotes
        if quote_pattern.match(line):
            flush_paragraph()
            output.append(f'> {line}')
            continue

        # Handle hyphenated words
        if line.endswith('-'):
            current_paragraph.append(line[:-1].strip())
        else:
            current_paragraph.append(line)

        # Handle paragraph breaks after headers
        if in_header and not line.endswith(('.', '!', '?')):
            flush_paragraph()
            in_header = False

    flush_paragraph()

    # Post-processing
    markdown = '\n\n'.join(output)

    # Fix common citation patterns
    markdown = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'[\1]', markdown)

    # Fix escaped characters
    markdown = markdown.replace('\\ud835', '').replace('\\u2020', '†')

    # Remove leftover hyphens and fix spacing
    markdown = re.sub(r'\s+-\s+', '', markdown)  # Join hyphenated words
    markdown = re.sub(r'\s+([.,!?)])', r'\1', markdown)  # Fix punctuation spacing

    return markdown
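
A minimal usage sketch for clean_pdf_text, assuming the pypdf package and a hypothetical sample.pdf; page numbering starts at 1 so that author detection only fires on the first page:

from pypdf import PdfReader

reader = PdfReader("sample.pdf")  # hypothetical input file
for page_number, page in enumerate(reader.pages, start=1):
    print(clean_pdf_text(page_number, page.extract_text() or ""))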

@@ -1,4 +1,4 @@
PROMPT_EXTRACT_BLOCKS = """YHere is the URL of the webpage:
PROMPT_EXTRACT_BLOCKS = """Here is the URL of the webpage:
<url>{URL}</url>

And here is the cleaned HTML content of that webpage:
@@ -29,7 +29,7 @@ To generate the JSON objects:

5. Make sure the generated JSON is complete and parsable, with no errors or omissions.

6. Make sur to escape any special characters in the HTML content, and also single or double quote to avoid JSON parsing issues.
6. Make sure to escape any special characters in the HTML content, and also single or double quote to avoid JSON parsing issues.

Please provide your output within <blocks> tags, like this:

@@ -79,7 +79,7 @@ To generate the JSON objects:
2. For each block:
a. Assign it an index based on its order in the content.
b. Analyze the content and generate ONE semantic tag that describes what the block is about.
c. Extract the text content, EXACTLY SAME AS GIVE DATA, clean it up if needed, and store it as a list of strings in the "content" field.
c. Extract the text content, EXACTLY SAME AS THE GIVE DATA, clean it up if needed, and store it as a list of strings in the "content" field.

3. Ensure that the order of the JSON objects matches the order of the blocks as they appear in the original HTML content.

@@ -87,7 +87,7 @@ To generate the JSON objects:

5. Make sure the generated JSON is complete and parsable, with no errors or omissions.

6. Make sur to escape any special characters in the HTML content, and also single or double quote to avoid JSON parsing issues.
6. Make sure to escape any special characters in the HTML content, and also single or double quote to avoid JSON parsing issues.

7. Never alter the extracted content, just copy and paste it as it is.

@@ -142,7 +142,7 @@ To generate the JSON objects:

5. Make sure the generated JSON is complete and parsable, with no errors or omissions.

6. Make sur to escape any special characters in the HTML content, and also single or double quote to avoid JSON parsing issues.
6. Make sure to escape any special characters in the HTML content, and also single or double quote to avoid JSON parsing issues.

7. Never alter the extracted content, just copy and paste it as it is.

@@ -164,4 +164,894 @@ Please provide your output within <blocks> tags, like this:

**Make sure to follow the user instruction to extract blocks aligned with the instruction.**

Remember, the output should be a complete, parsable JSON wrapped in <blocks> tags, with no omissions or errors. The JSON objects should semantically break down the content into relevant blocks, maintaining the original order."""

PROMPT_EXTRACT_SCHEMA_WITH_INSTRUCTION = """Here is the content from the URL:
<url>{URL}</url>

<url_content>
{HTML}
</url_content>

The user has made the following request for what information to extract from the above content:

<user_request>
{REQUEST}
</user_request>

<schema_block>
{SCHEMA}
</schema_block>

Please carefully read the URL content and the user's request. If the user provided a desired JSON schema in the <schema_block> above, extract the requested information from the URL content according to that schema. If no schema was provided, infer an appropriate JSON schema based on the user's request that will best capture the key information they are looking for.

Extraction instructions:
Return the extracted information as a list of JSON objects, with each object in the list corresponding to a block of content from the URL, in the same order as it appears on the page. Wrap the entire JSON list in <blocks>...</blocks> XML tags.

Quality Reflection:
Before outputting your final answer, double-check that the JSON you are returning is complete, contains all the information requested by the user, and is valid JSON that could be parsed by json.loads() with no errors or omissions. The outputted JSON objects should fully match the schema, either provided or inferred.

Quality Score:
After reflecting, score the quality and completeness of the JSON data you are about to return on a scale of 1 to 5. Write the score inside <score> tags.

Avoid Common Mistakes:
- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors.
- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places.
- Do not omit the closing </blocks> tag at the end of the JSON output.
- Do not generate Python code showing how to do the task; your task is to extract the information and return it in JSON format.

Result:
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly."""

PROMPT_EXTRACT_INFERRED_SCHEMA = """Here is the content from the URL:
<url>{URL}</url>

<url_content>
{HTML}
</url_content>

Please carefully read the URL content and the user's request. Analyze the page structure and infer the most appropriate JSON schema based on the content and request.

Extraction Strategy:
1. First, determine if the page contains repetitive items (like multiple products, articles, etc.) or a single content item (like a single article or page).
2. For repetitive items: Identify the common pattern and extract each instance as a separate JSON object in an array.
3. For single content: Extract the key information into a comprehensive JSON object that captures the essential details.

Extraction instructions:
Return the extracted information as a list of JSON objects. For repetitive content, each object in the list should correspond to a distinct item. For single content, you may return just one detailed JSON object. Wrap the entire JSON list in <blocks>...</blocks> XML tags.

Schema Design Guidelines:
- Create meaningful property names that clearly describe the data they contain
- Use nested objects for hierarchical information
- Use arrays for lists of related items
- Include all information requested by the user
- Maintain consistency in property names and data structures
- Only include properties that are actually present in the content
- For dates, prefer ISO format (YYYY-MM-DD)
- For prices or numeric values, extract them without currency symbols when possible

Quality Reflection:
Before outputting your final answer, double-check that:
1. The inferred schema makes logical sense for the type of content
2. All requested information is included
3. The JSON is valid and could be parsed without errors
4. Property names are consistent and descriptive
5. The structure is optimal for the type of data being represented

Avoid Common Mistakes:
- Do NOT add any comments using "//" or "#" in the JSON output. It causes parsing errors.
- Make sure the JSON is properly formatted with curly braces, square brackets, and commas in the right places.
- Do not omit the closing </blocks> tag at the end of the JSON output.
- Do not generate Python code showing how to do the task; your task is to extract the information and return it in JSON format.
- Ensure consistency in property names across all objects
- Don't include empty properties or null values unless they're meaningful
- For repetitive content, ensure all objects follow the same schema

Important: If a user-specific instruction is provided, give significant weight to what the user is requesting and to any schema they describe for the end result. If the user asks for specific information, focus on that and ignore the rest of the content.
<user_request>
{REQUEST}
</user_request>

Result:
Output the final list of JSON objects, wrapped in <blocks>...</blocks> XML tags. Make sure to close the tag properly.

DO NOT ADD ANY PRE OR POST COMMENTS. JUST RETURN THE JSON OBJECTS INSIDE <blocks>...</blocks> TAGS.

CRITICAL: The content inside the <blocks> tags MUST be a direct array of JSON objects (starting with '[' and ending with ']'), not a dictionary/object containing an array. For example, use <blocks>[{...}, {...}]</blocks> instead of <blocks>{"items": [{...}, {...}]}</blocks>. This is essential for proper parsing.
"""

PROMPT_FILTER_CONTENT = """Your task is to filter and convert HTML content into clean, focused markdown that's optimized for use with LLMs and information retrieval systems.

TASK DETAILS:
1. Content Selection
- DO: Keep essential information, main content, key details
- DO: Preserve hierarchical structure using markdown headers
- DO: Keep code blocks, tables, key lists
- DON'T: Include navigation menus, ads, footers, cookie notices
- DON'T: Keep social media widgets, sidebars, related content

2. Content Transformation
- DO: Use proper markdown syntax (#, ##, **, `, etc.)
- DO: Convert tables to markdown tables
- DO: Preserve code formatting with ```language blocks
- DO: Maintain link texts but remove tracking parameters
- DON'T: Include HTML tags in output
- DON'T: Keep class names, ids, or other HTML attributes

3. Content Organization
- DO: Maintain logical flow of information
- DO: Group related content under appropriate headers
- DO: Use consistent header levels
- DON'T: Fragment related content
- DON'T: Duplicate information

IMPORTANT: If user-specific instructions are provided, prioritize those requirements over these general guidelines.

OUTPUT FORMAT:
Wrap your response in <content> tags. Use proper markdown throughout.
<content>
[Your markdown content here]
</content>

Begin filtering now.

--------------------------------------------

<|HTML_CONTENT_START|>
{HTML}
<|HTML_CONTENT_END|>

<|USER_INSTRUCTION_START|>
{REQUEST}
<|USER_INSTRUCTION_END|>
"""

JSON_SCHEMA_BUILDER = """
# HTML Schema Generation Instructions
You are a specialized model designed to analyze HTML patterns and generate extraction schemas. Your primary job is to create structured JSON schemas that can be used to extract data from HTML in a consistent and reliable way. When presented with HTML content, you must analyze its structure and generate a schema that captures all relevant data points.

## Your Core Responsibilities:
1. Analyze HTML structure to identify repeating patterns and important data points
2. Generate valid JSON schemas following the specified format
3. Create appropriate selectors that will work reliably for data extraction
4. Name fields meaningfully based on their content and purpose
5. Handle both specific user requests and autonomous pattern detection

## Available Schema Types You Can Generate:

<schema_types>
1. Basic Single-Level Schema
   - Use for simple, flat data structures
   - Example: Product cards, user profiles
   - Direct field extractions

2. Nested Object Schema
   - Use for hierarchical data
   - Example: Articles with author details
   - Contains objects within objects

3. List Schema
   - Use for repeating elements
   - Example: Comment sections, product lists
   - Handles arrays of similar items

4. Complex Nested Lists
   - Use for multi-level data
   - Example: Categories with subcategories
   - Multiple levels of nesting

5. Transformation Schema
   - Use for data requiring processing
   - Supports regex and text transformations
   - Special attribute handling
</schema_types>

<schema_structure>
Your output must always be a JSON object with this structure:
{
  "name": "Descriptive name of the pattern",
  "baseSelector": "CSS selector for the repeating element",
  "fields": [
    {
      "name": "field_name",
      "selector": "CSS selector",
      "type": "text|attribute|nested|list|regex",
      "attribute": "attribute_name", // Optional
      "transform": "transformation_type", // Optional
      "pattern": "regex_pattern", // Optional
      "fields": [] // For nested/list types
    }
  ]
}
</schema_structure>

<type_definitions>
Available field types:
- text: Direct text extraction
- attribute: HTML attribute extraction
- nested: Object containing other fields
- list: Array of similar items
- regex: Pattern-based extraction
</type_definitions>

<behavior_rules>
1. When given a specific query:
   - Focus on extracting requested data points
   - Use the most specific selectors possible
   - Include all fields mentioned in the query

2. When no query is provided:
   - Identify main content areas
   - Extract all meaningful data points
   - Use semantic structure to determine importance
   - Include prices, dates, titles, and other common data types

3. Always:
   - Use reliable CSS selectors
   - Handle dynamic class names appropriately
   - Create descriptive field names
   - Follow consistent naming conventions
</behavior_rules>

<examples>
1. Basic Product Card Example:
<html>
<div class="product-card" data-cat-id="electronics" data-subcat-id="laptops">
  <h2 class="product-title">Gaming Laptop</h2>
  <span class="price">$999.99</span>
  <img src="laptop.jpg" alt="Gaming Laptop">
</div>
</html>

Generated Schema:
{
  "name": "Product Cards",
  "baseSelector": ".product-card",
  "baseFields": [
    {"name": "data_cat_id", "type": "attribute", "attribute": "data-cat-id"},
    {"name": "data_subcat_id", "type": "attribute", "attribute": "data-subcat-id"}
  ],
  "fields": [
    {
      "name": "title",
      "selector": ".product-title",
      "type": "text"
    },
    {
      "name": "price",
      "selector": ".price",
      "type": "text"
    },
    {
      "name": "image_url",
      "selector": "img",
      "type": "attribute",
      "attribute": "src"
    }
  ]
}

2. Article with Author Details Example:
<html>
<article>
  <h1>The Future of AI</h1>
  <div class="author-info">
    <span class="author-name">Dr. Smith</span>
    <img src="author.jpg" alt="Dr. Smith">
  </div>
</article>
</html>

Generated Schema:
{
  "name": "Article Details",
  "baseSelector": "article",
  "fields": [
    {
      "name": "title",
      "selector": "h1",
      "type": "text"
    },
    {
      "name": "author",
      "type": "nested",
      "selector": ".author-info",
      "fields": [
        {
          "name": "name",
          "selector": ".author-name",
          "type": "text"
        },
        {
          "name": "avatar",
          "selector": "img",
          "type": "attribute",
          "attribute": "src"
        }
      ]
    }
  ]
}

3. Comments Section Example:
<html>
<div class="comments-container">
  <div class="comment" data-user-id="123">
    <div class="user-name">John123</div>
    <p class="comment-text">Great article!</p>
  </div>
  <div class="comment" data-user-id="456">
    <div class="user-name">Alice456</div>
    <p class="comment-text">Thanks for sharing.</p>
  </div>
</div>
</html>

Generated Schema:
{
  "name": "Comment Section",
  "baseSelector": ".comments-container",
  "fields": [
    {
      "name": "comments",
      "type": "list",
      "selector": ".comment",
      "baseFields": [
        {"name": "data_user_id", "type": "attribute", "attribute": "data-user-id"}
      ],
      "fields": [
        {
          "name": "user",
          "selector": ".user-name",
          "type": "text"
        },
        {
          "name": "content",
          "selector": ".comment-text",
          "type": "text"
        }
      ]
    }
  ]
}

4. E-commerce Categories Example:
<html>
<div class="category-section" data-category="electronics">
  <h2>Electronics</h2>
  <div class="subcategory">
    <h3>Laptops</h3>
    <div class="product">
      <span class="product-name">MacBook Pro</span>
      <span class="price">$1299</span>
    </div>
    <div class="product">
      <span class="product-name">Dell XPS</span>
      <span class="price">$999</span>
    </div>
  </div>
</div>
</html>

Generated Schema:
{
  "name": "E-commerce Categories",
  "baseSelector": ".category-section",
  "baseFields": [
    {"name": "data_category", "type": "attribute", "attribute": "data-category"}
  ],
  "fields": [
    {
      "name": "category_name",
      "selector": "h2",
      "type": "text"
    },
    {
      "name": "subcategories",
      "type": "nested_list",
      "selector": ".subcategory",
      "fields": [
        {
          "name": "name",
          "selector": "h3",
          "type": "text"
        },
        {
          "name": "products",
          "type": "list",
          "selector": ".product",
          "fields": [
            {
              "name": "name",
              "selector": ".product-name",
              "type": "text"
            },
            {
              "name": "price",
              "selector": ".price",
              "type": "text"
            }
          ]
        }
      ]
    }
  ]
}

5. Job Listings with Transformations Example:
<html>
<div class="job-post">
  <h3 class="job-title">Senior Developer</h3>
  <span class="salary-text">Salary: $120,000/year</span>
  <span class="location"> New York, NY </span>
</div>
</html>

Generated Schema:
{
  "name": "Job Listings",
  "baseSelector": ".job-post",
  "fields": [
    {
      "name": "title",
      "selector": ".job-title",
      "type": "text",
      "transform": "uppercase"
    },
    {
      "name": "salary",
      "selector": ".salary-text",
      "type": "regex",
      "pattern": "\\$([\\d,]+)"
    },
    {
      "name": "location",
      "selector": ".location",
      "type": "text",
      "transform": "strip"
    }
  ]
}

6. Skyscanner Place Card Example:
<html>
<div class="PlaceCard_descriptionContainer__M2NjN" data-testid="description-container">
  <div class="PlaceCard_nameContainer__ZjZmY" tabindex="0" role="link">
    <div class="PlaceCard_nameContent__ODUwZ">
      <span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY">Doha</span>
    </div>
    <span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY PlaceCard_subName__NTVkY">Qatar</span>
  </div>
  <span class="PlaceCard_advertLabel__YTM0N">Sunny days and the warmest welcome awaits</span>
  <a class="BpkLink_bpk-link__MmQwY PlaceCard_descriptionLink__NzYwN" href="/flights/del/doha/" data-testid="flights-link">
    <div class="PriceDescription_container__NjEzM">
      <span class="BpkText_bpk-text--heading-5__MTRjZ">₹17,559</span>
    </div>
  </a>
</div>
</html>

Generated Schema:
{
  "name": "Skyscanner Place Cards",
  "baseSelector": "div[class^='PlaceCard_descriptionContainer__']",
  "baseFields": [
    {"name": "data_testid", "type": "attribute", "attribute": "data-testid"}
  ],
  "fields": [
    {
      "name": "city_name",
      "selector": "div[class^='PlaceCard_nameContent__'] span[class*='BpkText_bpk-text--heading-4__']",
      "type": "text"
    },
    {
      "name": "country_name",
      "selector": "span[class*='PlaceCard_subName__']",
      "type": "text"
    },
    {
      "name": "description",
      "selector": "span[class*='PlaceCard_advertLabel__']",
      "type": "text"
    },
    {
      "name": "flight_price",
      "selector": "a[data-testid='flights-link'] span[class*='BpkText_bpk-text--heading-5__']",
      "type": "text"
    },
    {
      "name": "flight_url",
      "selector": "a[data-testid='flights-link']",
      "type": "attribute",
      "attribute": "href"
    }
  ]
}
</examples>

<output_requirements>
Your output must:
1. Be valid JSON only
2. Include no explanatory text
3. Follow the exact schema structure provided
4. Use appropriate field types
5. Include all required fields
6. Use valid CSS selectors
</output_requirements>

"""

JSON_SCHEMA_BUILDER_XPATH = """
# HTML Schema Generation Instructions
You are a specialized model designed to analyze HTML patterns and generate extraction schemas. Your primary job is to create structured JSON schemas that can be used to extract data from HTML in a consistent and reliable way. When presented with HTML content, you must analyze its structure and generate a schema that captures all relevant data points.

## Your Core Responsibilities:
1. Analyze HTML structure to identify repeating patterns and important data points
2. Generate valid JSON schemas following the specified format
3. Create appropriate XPath selectors that will work reliably for data extraction
4. Name fields meaningfully based on their content and purpose
5. Handle both specific user requests and autonomous pattern detection

## Available Schema Types You Can Generate:

<schema_types>
1. Basic Single-Level Schema
   - Use for simple, flat data structures
   - Example: Product cards, user profiles
   - Direct field extractions

2. Nested Object Schema
   - Use for hierarchical data
   - Example: Articles with author details
   - Contains objects within objects

3. List Schema
   - Use for repeating elements
   - Example: Comment sections, product lists
   - Handles arrays of similar items

4. Complex Nested Lists
   - Use for multi-level data
   - Example: Categories with subcategories
   - Multiple levels of nesting

5. Transformation Schema
   - Use for data requiring processing
   - Supports regex and text transformations
   - Special attribute handling
</schema_types>

<schema_structure>
Your output must always be a JSON object with this structure:
{
  "name": "Descriptive name of the pattern",
  "baseSelector": "XPath selector for the repeating element",
  "fields": [
    {
      "name": "field_name",
      "selector": "XPath selector",
      "type": "text|attribute|nested|list|regex",
      "attribute": "attribute_name", // Optional
      "transform": "transformation_type", // Optional
      "pattern": "regex_pattern", // Optional
      "fields": [] // For nested/list types
    }
  ]
}
</schema_structure>

<type_definitions>
Available field types:
- text: Direct text extraction
- attribute: HTML attribute extraction
- nested: Object containing other fields
- list: Array of similar items
- regex: Pattern-based extraction
</type_definitions>

<behavior_rules>
1. When given a specific query:
   - Focus on extracting requested data points
   - Use the most specific selectors possible
   - Include all fields mentioned in the query

2. When no query is provided:
   - Identify main content areas
   - Extract all meaningful data points
   - Use semantic structure to determine importance
   - Include prices, dates, titles, and other common data types

3. Always:
   - Use reliable XPath selectors
   - Handle dynamic element IDs appropriately
   - Create descriptive field names
   - Follow consistent naming conventions
</behavior_rules>

<examples>
1. Basic Product Card Example:
<html>
<div class="product-card" data-cat-id="electronics" data-subcat-id="laptops">
  <h2 class="product-title">Gaming Laptop</h2>
  <span class="price">$999.99</span>
  <img src="laptop.jpg" alt="Gaming Laptop">
</div>
</html>

Generated Schema:
{
  "name": "Product Cards",
  "baseSelector": "//div[@class='product-card']",
  "baseFields": [
    {"name": "data_cat_id", "type": "attribute", "attribute": "data-cat-id"},
    {"name": "data_subcat_id", "type": "attribute", "attribute": "data-subcat-id"}
  ],
  "fields": [
    {
      "name": "title",
      "selector": ".//h2[@class='product-title']",
      "type": "text"
    },
    {
      "name": "price",
      "selector": ".//span[@class='price']",
      "type": "text"
    },
    {
      "name": "image_url",
      "selector": ".//img",
      "type": "attribute",
      "attribute": "src"
    }
  ]
}

2. Article with Author Details Example:
<html>
<article>
  <h1>The Future of AI</h1>
  <div class="author-info">
    <span class="author-name">Dr. Smith</span>
    <img src="author.jpg" alt="Dr. Smith">
  </div>
</article>
</html>

Generated Schema:
{
  "name": "Article Details",
  "baseSelector": "//article",
  "fields": [
    {
      "name": "title",
      "selector": ".//h1",
      "type": "text"
    },
    {
      "name": "author",
      "type": "nested",
      "selector": ".//div[@class='author-info']",
      "fields": [
        {
          "name": "name",
          "selector": ".//span[@class='author-name']",
          "type": "text"
        },
        {
          "name": "avatar",
          "selector": ".//img",
          "type": "attribute",
          "attribute": "src"
        }
      ]
    }
  ]
}

3. Comments Section Example:
<html>
<div class="comments-container">
  <div class="comment" data-user-id="123">
    <div class="user-name">John123</div>
    <p class="comment-text">Great article!</p>
  </div>
  <div class="comment" data-user-id="456">
    <div class="user-name">Alice456</div>
    <p class="comment-text">Thanks for sharing.</p>
  </div>
</div>
</html>

Generated Schema:
{
  "name": "Comment Section",
  "baseSelector": "//div[@class='comments-container']",
  "fields": [
    {
      "name": "comments",
      "type": "list",
      "selector": ".//div[@class='comment']",
      "baseFields": [
        {"name": "data_user_id", "type": "attribute", "attribute": "data-user-id"}
      ],
      "fields": [
        {
          "name": "user",
          "selector": ".//div[@class='user-name']",
          "type": "text"
        },
        {
          "name": "content",
          "selector": ".//p[@class='comment-text']",
          "type": "text"
        }
      ]
    }
  ]
}

4. E-commerce Categories Example:
<html>
<div class="category-section" data-category="electronics">
  <h2>Electronics</h2>
  <div class="subcategory">
    <h3>Laptops</h3>
    <div class="product">
      <span class="product-name">MacBook Pro</span>
      <span class="price">$1299</span>
    </div>
    <div class="product">
      <span class="product-name">Dell XPS</span>
      <span class="price">$999</span>
    </div>
  </div>
</div>
</html>

Generated Schema:
{
  "name": "E-commerce Categories",
  "baseSelector": "//div[@class='category-section']",
  "baseFields": [
    {"name": "data_category", "type": "attribute", "attribute": "data-category"}
  ],
  "fields": [
    {
      "name": "category_name",
      "selector": ".//h2",
      "type": "text"
    },
    {
      "name": "subcategories",
      "type": "nested_list",
      "selector": ".//div[@class='subcategory']",
      "fields": [
        {
          "name": "name",
          "selector": ".//h3",
          "type": "text"
        },
        {
          "name": "products",
          "type": "list",
          "selector": ".//div[@class='product']",
          "fields": [
            {
              "name": "name",
              "selector": ".//span[@class='product-name']",
              "type": "text"
            },
            {
              "name": "price",
              "selector": ".//span[@class='price']",
              "type": "text"
            }
          ]
        }
      ]
    }
  ]
}

5. Job Listings with Transformations Example:
<html>
<div class="job-post">
  <h3 class="job-title">Senior Developer</h3>
  <span class="salary-text">Salary: $120,000/year</span>
  <span class="location"> New York, NY </span>
</div>
</html>

Generated Schema:
{
  "name": "Job Listings",
  "baseSelector": "//div[@class='job-post']",
  "fields": [
    {
      "name": "title",
      "selector": ".//h3[@class='job-title']",
      "type": "text",
      "transform": "uppercase"
    },
    {
      "name": "salary",
      "selector": ".//span[@class='salary-text']",
      "type": "regex",
      "pattern": "\\$([\\d,]+)"
    },
    {
      "name": "location",
      "selector": ".//span[@class='location']",
      "type": "text",
      "transform": "strip"
    }
  ]
}

6. Skyscanner Place Card Example:
<html>
<div class="PlaceCard_descriptionContainer__M2NjN" data-testid="description-container">
  <div class="PlaceCard_nameContainer__ZjZmY" tabindex="0" role="link">
    <div class="PlaceCard_nameContent__ODUwZ">
      <span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY">Doha</span>
    </div>
    <span class="BpkText_bpk-text__MjhhY BpkText_bpk-text--heading-4__Y2FlY PlaceCard_subName__NTVkY">Qatar</span>
  </div>
  <span class="PlaceCard_advertLabel__YTM0N">Sunny days and the warmest welcome awaits</span>
  <a class="BpkLink_bpk-link__MmQwY PlaceCard_descriptionLink__NzYwN" href="/flights/del/doha/" data-testid="flights-link">
    <div class="PriceDescription_container__NjEzM">
      <span class="BpkText_bpk-text--heading-5__MTRjZ">₹17,559</span>
    </div>
  </a>
</div>
</html>

Generated Schema:
{
  "name": "Skyscanner Place Cards",
  "baseSelector": "//div[contains(@class, 'PlaceCard_descriptionContainer__')]",
  "baseFields": [
    {"name": "data_testid", "type": "attribute", "attribute": "data-testid"}
  ],
  "fields": [
    {
      "name": "city_name",
      "selector": ".//div[contains(@class, 'PlaceCard_nameContent__')]//span[contains(@class, 'BpkText_bpk-text--heading-4__')]",
      "type": "text"
    },
    {
      "name": "country_name",
      "selector": ".//span[contains(@class, 'PlaceCard_subName__')]",
      "type": "text"
    },
    {
      "name": "description",
      "selector": ".//span[contains(@class, 'PlaceCard_advertLabel__')]",
      "type": "text"
    },
    {
      "name": "flight_price",
      "selector": ".//a[@data-testid='flights-link']//span[contains(@class, 'BpkText_bpk-text--heading-5__')]",
      "type": "text"
    },
    {
      "name": "flight_url",
      "selector": ".//a[@data-testid='flights-link']",
      "type": "attribute",
      "attribute": "href"
    }
  ]
}
</examples>

<output_requirements>
Your output must:
1. Be valid JSON only
2. Include no explanatory text
3. Follow the exact schema structure provided
4. Use appropriate field types
5. Include all required fields
6. Use valid XPath selectors
</output_requirements>
"""

crawl4ai/proxy_strategy.py (new file, 158 lines)
@@ -0,0 +1,158 @@

from typing import List, Dict, Optional
from abc import ABC, abstractmethod
from itertools import cycle
import os


########### ATTENTION PEOPLE OF EARTH ###########
# I have moved this config to async_configs.py and kept it here in case someone
# is still importing it from this module. However, be a dear and use
# `from crawl4ai import ProxyConfig` instead :)
class ProxyConfig:
    def __init__(
        self,
        server: str,
        username: Optional[str] = None,
        password: Optional[str] = None,
        ip: Optional[str] = None,
    ):
        """Configuration class for a single proxy.

        Args:
            server: Proxy server URL (e.g., "http://127.0.0.1:8080")
            username: Optional username for proxy authentication
            password: Optional password for proxy authentication
            ip: Optional IP address for verification purposes
        """
        self.server = server
        self.username = username
        self.password = password

        # Extract IP from server if not explicitly provided
        self.ip = ip or self._extract_ip_from_server()

    def _extract_ip_from_server(self) -> Optional[str]:
        """Extract IP address from server URL."""
        try:
            # Simple extraction assuming http://ip:port format
            if "://" in self.server:
                parts = self.server.split("://")[1].split(":")
                return parts[0]
            else:
                parts = self.server.split(":")
                return parts[0]
        except Exception:
            return None

    @staticmethod
    def from_string(proxy_str: str) -> "ProxyConfig":
        """Create a ProxyConfig from a string in the format 'ip:port:username:password'."""
        parts = proxy_str.split(":")
        if len(parts) == 4:  # ip:port:username:password
            ip, port, username, password = parts
            return ProxyConfig(
                server=f"http://{ip}:{port}",
                username=username,
                password=password,
                ip=ip
            )
        elif len(parts) == 2:  # ip:port only
            ip, port = parts
            return ProxyConfig(
                server=f"http://{ip}:{port}",
                ip=ip
            )
        else:
            raise ValueError(f"Invalid proxy string format: {proxy_str}")

    @staticmethod
    def from_dict(proxy_dict: Dict) -> "ProxyConfig":
        """Create a ProxyConfig from a dictionary."""
        return ProxyConfig(
            server=proxy_dict.get("server"),
            username=proxy_dict.get("username"),
            password=proxy_dict.get("password"),
            ip=proxy_dict.get("ip")
        )

    @staticmethod
    def from_env(env_var: str = "PROXIES") -> List["ProxyConfig"]:
        """Load proxies from an environment variable.

        Args:
            env_var: Name of the environment variable containing comma-separated proxy strings

        Returns:
            List of ProxyConfig objects
        """
        proxies = []
        try:
            proxy_list = os.getenv(env_var, "").split(",")
            for proxy in proxy_list:
                if not proxy:
                    continue
                proxies.append(ProxyConfig.from_string(proxy))
        except Exception as e:
            print(f"Error loading proxies from environment: {e}")
        return proxies

    def to_dict(self) -> Dict:
        """Convert to dictionary representation."""
        return {
            "server": self.server,
            "username": self.username,
            "password": self.password,
            "ip": self.ip
        }

    def clone(self, **kwargs) -> "ProxyConfig":
        """Create a copy of this configuration with updated values.

        Args:
            **kwargs: Key-value pairs of configuration options to update

        Returns:
            ProxyConfig: A new instance with the specified updates
        """
        config_dict = self.to_dict()
        config_dict.update(kwargs)
        return ProxyConfig.from_dict(config_dict)


class ProxyRotationStrategy(ABC):
    """Base abstract class for proxy rotation strategies"""

    @abstractmethod
    async def get_next_proxy(self) -> Optional[ProxyConfig]:
        """Get next proxy configuration from the strategy"""
        pass

    @abstractmethod
    def add_proxies(self, proxies: List[ProxyConfig]):
        """Add proxy configurations to the strategy"""
        pass


class RoundRobinProxyStrategy:
    """Simple round-robin proxy rotation strategy using ProxyConfig objects"""

    def __init__(self, proxies: Optional[List[ProxyConfig]] = None):
        """
        Initialize with an optional list of proxy configurations

        Args:
            proxies: List of ProxyConfig objects
        """
        self._proxies = []
        self._proxy_cycle = None
        if proxies:
            self.add_proxies(proxies)

    def add_proxies(self, proxies: List[ProxyConfig]):
        """Add new proxies to the rotation pool"""
        self._proxies.extend(proxies)
        self._proxy_cycle = cycle(self._proxies)

    async def get_next_proxy(self) -> Optional[ProxyConfig]:
        """Get the next proxy in round-robin fashion"""
        if not self._proxy_cycle:
            return None
        return next(self._proxy_cycle)
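
A usage sketch for the round-robin strategy; the proxy addresses below are placeholders from the documentation IP range:

import asyncio

async def demo():
    proxies = [
        ProxyConfig.from_string("192.0.2.10:8080:user:pass"),  # ip:port:username:password
        ProxyConfig.from_string("192.0.2.11:8080"),            # ip:port only
    ]
    strategy = RoundRobinProxyStrategy(proxies)
    for _ in range(3):  # Cycles 10 -> 11 -> 10
        proxy = await strategy.get_next_proxy()
        print(proxy.server)

asyncio.run(demo())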

crawl4ai/ssl_certificate.py (new file, 204 lines)
@@ -0,0 +1,204 @@

"""SSL Certificate class for handling certificate operations."""

import ssl
import socket
import base64
import json
from typing import Dict, Any, Optional
from urllib.parse import urlparse
import OpenSSL.crypto
from pathlib import Path


# === Inherit from dict ===
class SSLCertificate(dict):
    """
    A class representing an SSL certificate, behaving like a dictionary
    for direct JSON serialization. It stores the certificate information internally
    and provides methods for export and property access.

    Inherits from dict, so instances are directly JSON serializable.
    """

    # Use __slots__ for potential memory optimization if desired, though it is
    # less common when inheriting from dict
    # __slots__ = ("_cert_info",)  # If using slots, be careful with dict inheritance interaction

    def __init__(self, cert_info: Dict[str, Any]):
        """
        Initializes the SSLCertificate object.

        Args:
            cert_info (Dict[str, Any]): The raw certificate dictionary.
        """
        # 1. Decode the data (handle bytes -> str)
        decoded_info = self._decode_cert_data(cert_info)

        # 2. Store the decoded info internally (optional but good practice)
        # self._cert_info = decoded_info  # Keep this if methods rely on it

        # 3. Initialize the dictionary part of the object with the decoded data
        super().__init__(decoded_info)

    @staticmethod
    def _decode_cert_data(data: Any) -> Any:
        """Helper method to decode bytes in certificate data."""
        if isinstance(data, bytes):
            try:
                # Try UTF-8 first, fall back to latin-1 for arbitrary bytes
                return data.decode("utf-8")
            except UnicodeDecodeError:
                return data.decode("latin-1")  # Or handle as needed, e.g. a hex representation
        elif isinstance(data, dict):
            return {
                (
                    k.decode("utf-8") if isinstance(k, bytes) else k
                ): SSLCertificate._decode_cert_data(v)
                for k, v in data.items()
            }
        elif isinstance(data, list):
            return [SSLCertificate._decode_cert_data(item) for item in data]
        return data

    @staticmethod
    def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
        """
        Create an SSLCertificate instance from a URL. Fetches cert info and initializes.
        (The fetching logic remains the same.)
        """
        cert_info_raw = None  # Variable to hold the fetched dict
        try:
            hostname = urlparse(url).netloc
            if ":" in hostname:
                hostname = hostname.split(":")[0]

            context = ssl.create_default_context()
            # Set check_hostname to False and verify_mode to CERT_NONE temporarily
            # for potentially problematic certificates during fetch, but parse the result regardless.
            # context.check_hostname = False
            # context.verify_mode = ssl.CERT_NONE

            with socket.create_connection((hostname, 443), timeout=timeout) as sock:
                with context.wrap_socket(sock, server_hostname=hostname) as ssock:
                    cert_binary = ssock.getpeercert(binary_form=True)
                    if not cert_binary:
                        print(f"Warning: No certificate returned for {hostname}")
                        return None

                    x509 = OpenSSL.crypto.load_certificate(
                        OpenSSL.crypto.FILETYPE_ASN1, cert_binary
                    )

                    # Create the dictionary directly
                    cert_info_raw = {
                        "subject": dict(x509.get_subject().get_components()),
                        "issuer": dict(x509.get_issuer().get_components()),
                        "version": x509.get_version(),
                        "serial_number": hex(x509.get_serial_number()),
                        "not_before": x509.get_notBefore(),  # Keep as bytes initially; _decode handles it
                        "not_after": x509.get_notAfter(),  # Keep as bytes initially
                        "fingerprint": x509.digest("sha256").hex(),  # hex() is already a string
                        "signature_algorithm": x509.get_signature_algorithm(),  # Keep as bytes
                        "raw_cert": base64.b64encode(cert_binary),  # Base64 is bytes; _decode handles it
                    }

                    # Add extensions
                    extensions = []
                    for i in range(x509.get_extension_count()):
                        ext = x509.get_extension(i)
                        # get_short_name() returns bytes; str(ext) handles value conversion
                        extensions.append(
                            {"name": ext.get_short_name(), "value": str(ext)}
                        )
                    cert_info_raw["extensions"] = extensions

        except ssl.SSLCertVerificationError as e:
            print(f"SSL Verification Error for {url}: {e}")
            # Decide whether to proceed or return None based on your needs.
            # You might try fetching without verification here, but be cautious.
            return None
        except socket.gaierror:
            print(f"Could not resolve hostname: {hostname}")
            return None
        except socket.timeout:
            print(f"Connection timed out for {url}")
            return None
        except Exception as e:
            print(f"Error fetching/processing certificate for {url}: {e}")
            # Log the full error details if needed: logging.exception("Cert fetch error")
            return None

        # If successful, create the SSLCertificate instance from the dictionary
        if cert_info_raw:
            return SSLCertificate(cert_info_raw)
        else:
            return None

    # --- Properties now access the dictionary items directly via self[] ---
    @property
    def issuer(self) -> Dict[str, str]:
        return self.get("issuer", {})  # Use self.get for safety

    @property
    def subject(self) -> Dict[str, str]:
        return self.get("subject", {})

    @property
    def valid_from(self) -> str:
        return self.get("not_before", "")

    @property
    def valid_until(self) -> str:
        return self.get("not_after", "")

    @property
    def fingerprint(self) -> str:
        return self.get("fingerprint", "")

    # --- Export methods can use `self` directly as it is the dict ---
    def to_json(self, filepath: Optional[str] = None) -> Optional[str]:
        """Export certificate as JSON."""
        # `self` is already the dictionary we want to serialize
        json_str = json.dumps(self, indent=2, ensure_ascii=False)
        if filepath:
            Path(filepath).write_text(json_str, encoding="utf-8")
            return None
        return json_str

    def to_pem(self, filepath: Optional[str] = None) -> Optional[str]:
        """Export certificate as PEM."""
        try:
            # Decode the raw_cert (which should be a string thanks to _decode)
            raw_cert_bytes = base64.b64decode(self.get("raw_cert", ""))
            x509 = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_ASN1, raw_cert_bytes
            )
            pem_data = OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM, x509
            ).decode("utf-8")

            if filepath:
                Path(filepath).write_text(pem_data, encoding="utf-8")
                return None
            return pem_data
        except Exception as e:
            print(f"Error converting to PEM: {e}")
            return None

    def to_der(self, filepath: Optional[str] = None) -> Optional[bytes]:
        """Export certificate as DER."""
        try:
            # Decode the raw_cert (which should be a string thanks to _decode)
            der_data = base64.b64decode(self.get("raw_cert", ""))
            if filepath:
                Path(filepath).write_bytes(der_data)
                return None
            return der_data
        except Exception as e:
            print(f"Error converting to DER: {e}")
            return None

    # Optional: Add __repr__ for better debugging
    def __repr__(self) -> str:
        subject_cn = self.subject.get('CN', 'N/A')
        issuer_cn = self.issuer.get('CN', 'N/A')
        return f"<SSLCertificate Subject='{subject_cn}' Issuer='{issuer_cn}'>"
@@ -1,146 +0,0 @@

import spacy
from spacy.training import Example
import random
import nltk
from nltk.corpus import reuters
import torch

def save_spacy_model_as_torch(nlp, model_dir="models/reuters"):
    # Extract the TextCategorizer component
    textcat = nlp.get_pipe("textcat_multilabel")

    # Convert the weights to a PyTorch state dictionary
    state_dict = {name: torch.tensor(param.data) for name, param in textcat.model.named_parameters()}

    # Save the state dictionary
    torch.save(state_dict, f"{model_dir}/model_weights.pth")

    # Extract and save the vocabulary
    vocab = extract_vocab(nlp)
    with open(f"{model_dir}/vocab.txt", "w") as vocab_file:
        for word, idx in vocab.items():
            vocab_file.write(f"{word}\t{idx}\n")

    print(f"Model weights and vocabulary saved to: {model_dir}")

def extract_vocab(nlp):
    # Extract the vocabulary from the spaCy model
    vocab = {word: i for i, word in enumerate(nlp.vocab.strings)}
    return vocab

# Executed at module load: convert the previously trained model to torch format
nlp = spacy.load("models/reuters")
save_spacy_model_as_torch(nlp, model_dir="models")

def train_and_save_reuters_model(model_dir="models/reuters"):
    # Ensure the Reuters corpus is downloaded
    nltk.download('reuters')
    nltk.download('punkt')
    if not reuters.fileids():
        print("Reuters corpus not found.")
        return

    # Load a blank English spaCy model
    nlp = spacy.blank("en")

    # Create a TextCategorizer with the ensemble model for multi-label classification
    textcat = nlp.add_pipe("textcat_multilabel")

    # Add labels to the text classifier
    for label in reuters.categories():
        textcat.add_label(label)

    # Prepare training data
    train_examples = []
    for fileid in reuters.fileids():
        categories = reuters.categories(fileid)
        text = reuters.raw(fileid)
        cats = {label: label in categories for label in reuters.categories()}
        # Prepare spaCy Example objects
        doc = nlp.make_doc(text)
        example = Example.from_dict(doc, {'cats': cats})
        train_examples.append(example)

    # Initialize the text categorizer with the example objects
    nlp.initialize(lambda: train_examples)

    # Train the model
    random.seed(1)
    spacy.util.fix_random_seed(1)
    for i in range(5):  # Adjust iterations for better accuracy
        random.shuffle(train_examples)
        losses = {}
        # Create batches of data
        batches = spacy.util.minibatch(train_examples, size=8)
        for batch in batches:
            nlp.update(batch, drop=0.2, losses=losses)
        print(f"Losses at iteration {i}: {losses}")

    # Save the trained model
    nlp.to_disk(model_dir)
    print(f"Model saved to: {model_dir}")

def train_model(model_dir, additional_epochs=0):
    # Load the model if it exists, otherwise start with a blank model
    try:
        nlp = spacy.load(model_dir)
        print("Model loaded from disk.")
    except IOError:
        print("No existing model found. Starting with a new model.")
        nlp = spacy.blank("en")
        textcat = nlp.add_pipe("textcat_multilabel")
        for label in reuters.categories():
            textcat.add_label(label)

    # Prepare training data
    train_examples = []
    for fileid in reuters.fileids():
        categories = reuters.categories(fileid)
        text = reuters.raw(fileid)
        cats = {label: label in categories for label in reuters.categories()}
        doc = nlp.make_doc(text)
        example = Example.from_dict(doc, {'cats': cats})
        train_examples.append(example)

    # Initialize the model if it was newly created
    if 'textcat_multilabel' not in nlp.pipe_names:
        nlp.initialize(lambda: train_examples)
    else:
        print("Continuing training with the existing model.")

    # Train the model
    random.seed(1)
    spacy.util.fix_random_seed(1)
    num_epochs = 5 + additional_epochs
    for i in range(num_epochs):
        random.shuffle(train_examples)
        losses = {}
        batches = spacy.util.minibatch(train_examples, size=8)
        for batch in batches:
            nlp.update(batch, drop=0.2, losses=losses)
        print(f"Losses at iteration {i}: {losses}")

    # Save the trained model
    nlp.to_disk(model_dir)
    print(f"Model saved to: {model_dir}")

def load_model_and_predict(model_dir, text, tok_k=3):
    # Load the trained model from the specified directory
    nlp = spacy.load(model_dir)

    # Process the text with the loaded model
    doc = nlp(text)

    # Get the top k categories
    top_categories = sorted(doc.cats.items(), key=lambda x: x[1], reverse=True)[:tok_k]
    print(f"Top {tok_k} categories:")

    return top_categories

if __name__ == "__main__":
    train_and_save_reuters_model()
    train_model("models/reuters", additional_epochs=5)
    model_directory = "reuters_model_10"
    print(reuters.categories())
    example_text = "Apple Inc. is reportedly buying a startup for $1 billion"
    r = load_model_and_predict(model_directory, example_text)
    print(r)

crawl4ai/types.py (new file, 195 lines)
@@ -0,0 +1,195 @@
from typing import TYPE_CHECKING, Union

# Logger types
AsyncLoggerBase = Union['AsyncLoggerBaseType']
AsyncLogger = Union['AsyncLoggerType']

# Crawler core types
AsyncWebCrawler = Union['AsyncWebCrawlerType']
CacheMode = Union['CacheModeType']
CrawlResult = Union['CrawlResultType']
CrawlerHub = Union['CrawlerHubType']
BrowserProfiler = Union['BrowserProfilerType']
# NEW: Add AsyncUrlSeederType
AsyncUrlSeeder = Union['AsyncUrlSeederType']

# Configuration types
BrowserConfig = Union['BrowserConfigType']
CrawlerRunConfig = Union['CrawlerRunConfigType']
HTTPCrawlerConfig = Union['HTTPCrawlerConfigType']
LLMConfig = Union['LLMConfigType']
# NEW: Add SeedingConfigType
SeedingConfig = Union['SeedingConfigType']

# Content scraping types
ContentScrapingStrategy = Union['ContentScrapingStrategyType']
WebScrapingStrategy = Union['WebScrapingStrategyType']
LXMLWebScrapingStrategy = Union['LXMLWebScrapingStrategyType']

# Proxy types
ProxyRotationStrategy = Union['ProxyRotationStrategyType']
RoundRobinProxyStrategy = Union['RoundRobinProxyStrategyType']

# Extraction types
ExtractionStrategy = Union['ExtractionStrategyType']
LLMExtractionStrategy = Union['LLMExtractionStrategyType']
CosineStrategy = Union['CosineStrategyType']
JsonCssExtractionStrategy = Union['JsonCssExtractionStrategyType']
JsonXPathExtractionStrategy = Union['JsonXPathExtractionStrategyType']

# Chunking types
ChunkingStrategy = Union['ChunkingStrategyType']
RegexChunking = Union['RegexChunkingType']

# Markdown generation types
DefaultMarkdownGenerator = Union['DefaultMarkdownGeneratorType']
MarkdownGenerationResult = Union['MarkdownGenerationResultType']

# Content filter types
RelevantContentFilter = Union['RelevantContentFilterType']
PruningContentFilter = Union['PruningContentFilterType']
BM25ContentFilter = Union['BM25ContentFilterType']
LLMContentFilter = Union['LLMContentFilterType']

# Dispatcher types
BaseDispatcher = Union['BaseDispatcherType']
MemoryAdaptiveDispatcher = Union['MemoryAdaptiveDispatcherType']
SemaphoreDispatcher = Union['SemaphoreDispatcherType']
RateLimiter = Union['RateLimiterType']
CrawlerMonitor = Union['CrawlerMonitorType']
DisplayMode = Union['DisplayModeType']
RunManyReturn = Union['RunManyReturnType']

# Docker client
Crawl4aiDockerClient = Union['Crawl4aiDockerClientType']

# Deep crawling types
DeepCrawlStrategy = Union['DeepCrawlStrategyType']
BFSDeepCrawlStrategy = Union['BFSDeepCrawlStrategyType']
FilterChain = Union['FilterChainType']
ContentTypeFilter = Union['ContentTypeFilterType']
DomainFilter = Union['DomainFilterType']
URLFilter = Union['URLFilterType']
FilterStats = Union['FilterStatsType']
SEOFilter = Union['SEOFilterType']
KeywordRelevanceScorer = Union['KeywordRelevanceScorerType']
URLScorer = Union['URLScorerType']
CompositeScorer = Union['CompositeScorerType']
DomainAuthorityScorer = Union['DomainAuthorityScorerType']
FreshnessScorer = Union['FreshnessScorerType']
PathDepthScorer = Union['PathDepthScorerType']
BestFirstCrawlingStrategy = Union['BestFirstCrawlingStrategyType']
DFSDeepCrawlStrategy = Union['DFSDeepCrawlStrategyType']
DeepCrawlDecorator = Union['DeepCrawlDecoratorType']

# Only import types during type checking to avoid circular imports
if TYPE_CHECKING:
    # Logger imports
    from .async_logger import (
        AsyncLoggerBase as AsyncLoggerBaseType,
        AsyncLogger as AsyncLoggerType,
    )

    # Crawler core imports
    from .async_webcrawler import (
        AsyncWebCrawler as AsyncWebCrawlerType,
        CacheMode as CacheModeType,
    )
    from .models import CrawlResult as CrawlResultType
    from .hub import CrawlerHub as CrawlerHubType
    from .browser_profiler import BrowserProfiler as BrowserProfilerType
    # NEW: Import AsyncUrlSeeder for type checking
    from .async_url_seeder import AsyncUrlSeeder as AsyncUrlSeederType

    # Configuration imports
    from .async_configs import (
        BrowserConfig as BrowserConfigType,
        CrawlerRunConfig as CrawlerRunConfigType,
        HTTPCrawlerConfig as HTTPCrawlerConfigType,
        LLMConfig as LLMConfigType,
        # NEW: Import SeedingConfig for type checking
        SeedingConfig as SeedingConfigType,
    )

    # Content scraping imports
    from .content_scraping_strategy import (
        ContentScrapingStrategy as ContentScrapingStrategyType,
        WebScrapingStrategy as WebScrapingStrategyType,
        LXMLWebScrapingStrategy as LXMLWebScrapingStrategyType,
    )

    # Proxy imports
    from .proxy_strategy import (
        ProxyRotationStrategy as ProxyRotationStrategyType,
        RoundRobinProxyStrategy as RoundRobinProxyStrategyType,
    )

    # Extraction imports
    from .extraction_strategy import (
        ExtractionStrategy as ExtractionStrategyType,
        LLMExtractionStrategy as LLMExtractionStrategyType,
        CosineStrategy as CosineStrategyType,
        JsonCssExtractionStrategy as JsonCssExtractionStrategyType,
        JsonXPathExtractionStrategy as JsonXPathExtractionStrategyType,
    )

    # Chunking imports
    from .chunking_strategy import (
        ChunkingStrategy as ChunkingStrategyType,
        RegexChunking as RegexChunkingType,
    )

    # Markdown generation imports
    from .markdown_generation_strategy import (
        DefaultMarkdownGenerator as DefaultMarkdownGeneratorType,
    )
    from .models import MarkdownGenerationResult as MarkdownGenerationResultType

    # Content filter imports
    from .content_filter_strategy import (
        RelevantContentFilter as RelevantContentFilterType,
        PruningContentFilter as PruningContentFilterType,
        BM25ContentFilter as BM25ContentFilterType,
        LLMContentFilter as LLMContentFilterType,
    )

    # Dispatcher imports
    from .async_dispatcher import (
        BaseDispatcher as BaseDispatcherType,
        MemoryAdaptiveDispatcher as MemoryAdaptiveDispatcherType,
        SemaphoreDispatcher as SemaphoreDispatcherType,
        RateLimiter as RateLimiterType,
        CrawlerMonitor as CrawlerMonitorType,
        DisplayMode as DisplayModeType,
        RunManyReturn as RunManyReturnType,
    )

    # Docker client
    from .docker_client import Crawl4aiDockerClient as Crawl4aiDockerClientType

    # Deep crawling imports
    from .deep_crawling import (
        DeepCrawlStrategy as DeepCrawlStrategyType,
        BFSDeepCrawlStrategy as BFSDeepCrawlStrategyType,
        FilterChain as FilterChainType,
        ContentTypeFilter as ContentTypeFilterType,
        DomainFilter as DomainFilterType,
        URLFilter as URLFilterType,
        FilterStats as FilterStatsType,
        SEOFilter as SEOFilterType,
        KeywordRelevanceScorer as KeywordRelevanceScorerType,
        URLScorer as URLScorerType,
        CompositeScorer as CompositeScorerType,
        DomainAuthorityScorer as DomainAuthorityScorerType,
        FreshnessScorer as FreshnessScorerType,
        PathDepthScorer as PathDepthScorerType,
        BestFirstCrawlingStrategy as BestFirstCrawlingStrategyType,
        DFSDeepCrawlStrategy as DFSDeepCrawlStrategyType,
        DeepCrawlDecorator as DeepCrawlDecoratorType,
    )


def create_llm_config(*args, **kwargs) -> 'LLMConfigType':
    from .async_configs import LLMConfig
    return LLMConfig(*args, **kwargs)
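
The pattern above keeps runtime imports cheap while preserving editor support: at runtime each name is just a string-based `Union` alias, and the real classes are only imported under `TYPE_CHECKING`. A consumer module might use it like this minimal sketch (`process_result` is an illustrative name, not part of the library):

```python
from __future__ import annotations  # keep annotations lazy at runtime

from crawl4ai.types import CrawlResult  # alias only; no heavy import happens here

def process_result(result: CrawlResult) -> str:
    # The annotation resolves for type checkers, while at runtime
    # no circular import of crawl4ai.models is triggered.
    return result.url
```
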
428  crawl4ai/user_agent_generator.py  Normal file
@@ -0,0 +1,428 @@
import random
import re
import json
from typing import Optional, Literal, List, Dict, Tuple, Union
from abc import ABC, abstractmethod

from fake_useragent import UserAgent
import requests
from lxml import html


class UAGen(ABC):
    @abstractmethod
    def generate(self,
                 browsers: Optional[List[str]] = None,
                 os: Optional[Union[str, List[str]]] = None,
                 min_version: float = 0.0,
                 platforms: Optional[Union[str, List[str]]] = None,
                 pct_threshold: Optional[float] = None,
                 fallback: str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/116.0.0.0 Safari/537.36") -> Union[str, Dict]:
        pass

    @staticmethod
    def generate_client_hints(user_agent: str) -> str:
        """Generate Sec-CH-UA header value based on user agent string"""
        def _parse_user_agent(user_agent: str) -> Dict[str, str]:
            """Parse a user agent string to extract browser and version information"""
            browsers = {
                "chrome": r"Chrome/(\d+)",
                "edge": r"Edg/(\d+)",
                "safari": r"Version/(\d+)",
                "firefox": r"Firefox/(\d+)",
            }

            result = {}
            for browser, pattern in browsers.items():
                match = re.search(pattern, user_agent)
                if match:
                    result[browser] = match.group(1)

            return result

        browsers = _parse_user_agent(user_agent)

        # Client hints components
        hints = []

        # Handle different browser combinations
        if "chrome" in browsers:
            hints.append(f'"Chromium";v="{browsers["chrome"]}"')
            hints.append('"Not_A Brand";v="8"')

            if "edge" in browsers:
                hints.append(f'"Microsoft Edge";v="{browsers["edge"]}"')
            else:
                hints.append(f'"Google Chrome";v="{browsers["chrome"]}"')

        elif "firefox" in browsers:
            # Firefox doesn't typically send Sec-CH-UA
            return '""'

        elif "safari" in browsers:
            # Safari's format for client hints
            hints.append(f'"Safari";v="{browsers["safari"]}"')
            hints.append('"Not_A Brand";v="8"')

        return ", ".join(hints)


class ValidUAGenerator(UAGen):
    def __init__(self):
        self.ua = UserAgent()

    def generate(self,
                 browsers: Optional[List[str]] = None,
                 os: Optional[Union[str, List[str]]] = None,
                 min_version: float = 0.0,
                 platforms: Optional[Union[str, List[str]]] = None,
                 pct_threshold: Optional[float] = None,
                 fallback: str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/116.0.0.0 Safari/537.36") -> str:

        self.ua = UserAgent(
            browsers=browsers or ['Chrome', 'Firefox', 'Edge'],
            os=os or ['Windows', 'Mac OS X'],
            min_version=min_version,
            platforms=platforms or ['desktop'],
            fallback=fallback
        )
        return self.ua.random


class OnlineUAGenerator(UAGen):
    def __init__(self):
        self.agents = []
        self._fetch_agents()

    def _fetch_agents(self):
        try:
            response = requests.get(
                'https://www.useragents.me/',
                timeout=5,
                headers={'Accept': 'text/html,application/xhtml+xml'}
            )
            response.raise_for_status()

            tree = html.fromstring(response.content)
            json_text = tree.cssselect('#most-common-desktop-useragents-json-csv > div:nth-child(1) > textarea')[0].text
            self.agents = json.loads(json_text)
        except Exception as e:
            print(f"Error fetching agents: {e}")

    def generate(self,
                 browsers: Optional[List[str]] = None,
                 os: Optional[Union[str, List[str]]] = None,
                 min_version: float = 0.0,
                 platforms: Optional[Union[str, List[str]]] = None,
                 pct_threshold: Optional[float] = None,
                 fallback: str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/116.0.0.0 Safari/537.36") -> Dict:

        if not self.agents:
            self._fetch_agents()

        filtered_agents = self.agents

        if pct_threshold:
            filtered_agents = [a for a in filtered_agents if a['pct'] >= pct_threshold]

        if browsers:
            filtered_agents = [a for a in filtered_agents
                               if any(b.lower() in a['ua'].lower() for b in browsers)]

        if os:
            os_list = [os] if isinstance(os, str) else os
            filtered_agents = [a for a in filtered_agents
                               if any(o.lower() in a['ua'].lower() for o in os_list)]

        if platforms:
            platform_list = [platforms] if isinstance(platforms, str) else platforms
            filtered_agents = [a for a in filtered_agents
                               if any(p.lower() in a['ua'].lower() for p in platform_list)]

        return filtered_agents[0] if filtered_agents else {'ua': fallback, 'pct': 0}


class UserAgentGenerator:
    """
    Generate random user agents with specified constraints.

    Attributes:
        desktop_platforms (dict): Possible desktop platforms and their corresponding user agent strings.
        mobile_platforms (dict): Possible mobile platforms and their corresponding user agent strings.
        browser_combinations (dict): Possible browser combinations used to assemble user agent strings.
        rendering_engines (dict): Possible rendering engines and their corresponding tokens.
        chrome_versions (list): Possible Chrome browser versions.
        firefox_versions (list): Possible Firefox browser versions.
        edge_versions (list): Possible Edge browser versions.
        safari_versions (list): Possible Safari browser versions.

    Methods:
        generate(
            device_type: Optional[Literal["desktop", "mobile"]] = None,
            os_type: Optional[str] = None,
            device_brand: Optional[str] = None,
            browser_type: Optional[Literal["chrome", "edge", "safari", "firefox"]] = None,
            num_browsers: int = 3,
        ): Generates a random user agent string based on the specified parameters.
    """

    def __init__(self):
        # Previous platform definitions remain the same...
        self.desktop_platforms = {
            "windows": {
                "10_64": "(Windows NT 10.0; Win64; x64)",
                "10_32": "(Windows NT 10.0; WOW64)",
            },
            "macos": {
                "intel": "(Macintosh; Intel Mac OS X 10_15_7)",
                "newer": "(Macintosh; Intel Mac OS X 10.15; rv:109.0)",
            },
            "linux": {
                "generic": "(X11; Linux x86_64)",
                "ubuntu": "(X11; Ubuntu; Linux x86_64)",
                "chrome_os": "(X11; CrOS x86_64 14541.0.0)",
            },
        }

        self.mobile_platforms = {
            "android": {
                "samsung": "(Linux; Android 13; SM-S901B)",
                "pixel": "(Linux; Android 12; Pixel 6)",
                "oneplus": "(Linux; Android 13; OnePlus 9 Pro)",
                "xiaomi": "(Linux; Android 12; M2102J20SG)",
            },
            "ios": {
                "iphone": "(iPhone; CPU iPhone OS 16_5 like Mac OS X)",
                "ipad": "(iPad; CPU OS 16_5 like Mac OS X)",
            },
        }

        # Browser Combinations
        self.browser_combinations = {
            1: [["chrome"], ["firefox"], ["safari"], ["edge"]],
            2: [["gecko", "firefox"], ["chrome", "safari"], ["webkit", "safari"]],
            3: [["chrome", "safari", "edge"], ["webkit", "chrome", "safari"]],
        }

        # Rendering Engines with versions
        self.rendering_engines = {
            "chrome_webkit": "AppleWebKit/537.36",
            "safari_webkit": "AppleWebKit/605.1.15",
            "gecko": [  # Added Gecko versions
                "Gecko/20100101",
                "Gecko/20100101",  # Firefox usually uses this constant version
                "Gecko/20100101",
            ],
        }

        # Browser Versions
        self.chrome_versions = [
            "Chrome/119.0.6045.199",
            "Chrome/118.0.5993.117",
            "Chrome/117.0.5938.149",
            "Chrome/116.0.5845.187",
            "Chrome/115.0.5790.171",
        ]

        self.edge_versions = [
            "Edg/119.0.2151.97",
            "Edg/118.0.2088.76",
            "Edg/117.0.2045.47",
            "Edg/116.0.1938.81",
            "Edg/115.0.1901.203",
        ]

        self.safari_versions = [
            "Safari/537.36",  # For Chrome-based
            "Safari/605.1.15",
            "Safari/604.1",
            "Safari/602.1",
            "Safari/601.5.17",
        ]

        # Added Firefox versions
        self.firefox_versions = [
            "Firefox/119.0",
            "Firefox/118.0.2",
            "Firefox/117.0.1",
            "Firefox/116.0",
            "Firefox/115.0.3",
            "Firefox/114.0.2",
            "Firefox/113.0.1",
            "Firefox/112.0",
            "Firefox/111.0.1",
            "Firefox/110.0",
        ]

    def get_browser_stack(self, num_browsers: int = 1) -> List[str]:
        """
        Get a valid combination of browser versions.

        How it works:
        1. Check if the number of browsers is supported.
        2. Randomly choose a combination of browsers.
        3. Iterate through the combination and add browser versions.
        4. Return the browser stack.

        Args:
            num_browsers: Number of browser specifications (1-3)

        Returns:
            List[str]: A list of browser versions.
        """
        if num_browsers not in self.browser_combinations:
            raise ValueError(f"Unsupported number of browsers: {num_browsers}")

        combination = random.choice(self.browser_combinations[num_browsers])
        browser_stack = []

        for browser in combination:
            if browser == "chrome":
                browser_stack.append(random.choice(self.chrome_versions))
            elif browser == "firefox":
                browser_stack.append(random.choice(self.firefox_versions))
            elif browser == "safari":
                browser_stack.append(random.choice(self.safari_versions))
            elif browser == "edge":
                browser_stack.append(random.choice(self.edge_versions))
            elif browser == "gecko":
                browser_stack.append(random.choice(self.rendering_engines["gecko"]))
            elif browser == "webkit":
                browser_stack.append(self.rendering_engines["chrome_webkit"])

        return browser_stack

    def generate(
        self,
        device_type: Optional[Literal["desktop", "mobile"]] = None,
        os_type: Optional[str] = None,
        device_brand: Optional[str] = None,
        browser_type: Optional[Literal["chrome", "edge", "safari", "firefox"]] = None,
        num_browsers: int = 3,
    ) -> str:
        """
        Generate a random user agent with specified constraints.

        Args:
            device_type: 'desktop' or 'mobile'
            os_type: 'windows', 'macos', 'linux', 'android', 'ios'
            device_brand: Specific device brand
            browser_type: 'chrome', 'edge', 'safari', or 'firefox'
            num_browsers: Number of browser specifications (1-3)
        """
        # Get platform string
        platform = self.get_random_platform(device_type, os_type, device_brand)

        # Start with Mozilla
        components = ["Mozilla/5.0", platform]

        # Add browser stack
        browser_stack = self.get_browser_stack(num_browsers)

        # Add appropriate legacy token based on browser stack
        if "Firefox" in str(browser_stack) or browser_type == "firefox":
            components.append(random.choice(self.rendering_engines["gecko"]))
        elif "Chrome" in str(browser_stack) or "Safari" in str(browser_stack) or browser_type == "chrome":
            components.append(self.rendering_engines["chrome_webkit"])
            components.append("(KHTML, like Gecko)")
        elif "Edg/" in str(browser_stack) or browser_type == "edge":
            # Edge version tokens look like "Edg/119...", so match on the "Edg/" prefix
            components.append(self.rendering_engines["safari_webkit"])
            components.append("(KHTML, like Gecko)")
        elif "Safari" in str(browser_stack) or browser_type == "safari":
            components.append(self.rendering_engines["chrome_webkit"])
            components.append("(KHTML, like Gecko)")

        # Add browser versions
        components.extend(browser_stack)

        return " ".join(components)

    def generate_with_client_hints(self, **kwargs) -> Tuple[str, str]:
        """Generate both user agent and matching client hints"""
        user_agent = self.generate(**kwargs)
        client_hints = self.generate_client_hints(user_agent)
        return user_agent, client_hints

    def get_random_platform(self, device_type, os_type, device_brand):
        """Helper method to get random platform based on constraints"""
        platforms = (
            self.desktop_platforms
            if device_type == "desktop"
            else self.mobile_platforms
            if device_type == "mobile"
            else {**self.desktop_platforms, **self.mobile_platforms}
        )

        if os_type:
            for platform_group in [self.desktop_platforms, self.mobile_platforms]:
                if os_type in platform_group:
                    platforms = {os_type: platform_group[os_type]}
                    break

        os_key = random.choice(list(platforms.keys()))
        if device_brand and device_brand in platforms[os_key]:
            return platforms[os_key][device_brand]
        return random.choice(list(platforms[os_key].values()))

    def parse_user_agent(self, user_agent: str) -> Dict[str, str]:
        """Parse a user agent string to extract browser and version information"""
        browsers = {
            "chrome": r"Chrome/(\d+)",
            "edge": r"Edg/(\d+)",
            "safari": r"Version/(\d+)",
            "firefox": r"Firefox/(\d+)",
        }

        result = {}
        for browser, pattern in browsers.items():
            match = re.search(pattern, user_agent)
            if match:
                result[browser] = match.group(1)

        return result

    def generate_client_hints(self, user_agent: str) -> str:
        """Generate Sec-CH-UA header value based on user agent string"""
        browsers = self.parse_user_agent(user_agent)

        # Client hints components
        hints = []

        # Handle different browser combinations
        if "chrome" in browsers:
            hints.append(f'"Chromium";v="{browsers["chrome"]}"')
            hints.append('"Not_A Brand";v="8"')

            if "edge" in browsers:
                hints.append(f'"Microsoft Edge";v="{browsers["edge"]}"')
            else:
                hints.append(f'"Google Chrome";v="{browsers["chrome"]}"')

        elif "firefox" in browsers:
            # Firefox doesn't typically send Sec-CH-UA
            return '""'

        elif "safari" in browsers:
            # Safari's format for client hints
            hints.append(f'"Safari";v="{browsers["safari"]}"')
            hints.append('"Not_A Brand";v="8"')

        return ", ".join(hints)


# Example usage:
if __name__ == "__main__":

    # Usage example:
    generator = ValidUAGenerator()
    ua = generator.generate()
    print(ua)

    generator = OnlineUAGenerator()
    ua = generator.generate()
    print(ua)
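
For orientation, here is how the offline generator above can be exercised end to end; this is a minimal sketch using only methods defined in the file:

```python
from crawl4ai.user_agent_generator import UserAgentGenerator

generator = UserAgentGenerator()

# Constrain generation to a desktop Chrome-style agent
ua, hints = generator.generate_with_client_hints(
    device_type="desktop",
    browser_type="chrome",
    num_browsers=3,
)
print("User-Agent:", ua)
print("Sec-CH-UA:", hints)

# Recover the browser versions that were baked into the string
print(generator.parse_user_agent(ua))
```
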
2785  crawl4ai/utils.py
File diff suppressed because it is too large.

@@ -1,209 +0,0 @@
import os, time
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from pathlib import Path

from .models import UrlModel, CrawlResult
from .database import init_db, get_cached_url, cache_url, DB_PATH, flush_db
from .utils import *
from .chunking_strategy import *
from .extraction_strategy import *
from .crawler_strategy import *
from typing import List
from concurrent.futures import ThreadPoolExecutor
from .config import *


class WebCrawler:
    def __init__(
        self,
        # db_path: str = None,
        crawler_strategy: CrawlerStrategy = None,
        always_by_pass_cache: bool = False,
    ):
        # self.db_path = db_path
        self.crawler_strategy = crawler_strategy or LocalSeleniumCrawlerStrategy()
        self.always_by_pass_cache = always_by_pass_cache

        # Create the .crawl4ai folder in the user's home directory if it doesn't exist
        self.crawl4ai_folder = os.path.join(Path.home(), ".crawl4ai")
        os.makedirs(self.crawl4ai_folder, exist_ok=True)
        os.makedirs(f"{self.crawl4ai_folder}/cache", exist_ok=True)

        # If db_path is not provided, use the default path
        # if not db_path:
        #     self.db_path = f"{self.crawl4ai_folder}/crawl4ai.db"

        # flush_db()
        init_db()

        self.ready = False

    def warmup(self):
        print("[LOG] 🌤️ Warming up the WebCrawler")
        result = self.run(
            url='https://crawl4ai.uccode.io/',
            word_count_threshold=5,
            extraction_strategy=NoExtractionStrategy(),
            bypass_cache=False,
            verbose=False
        )
        self.ready = True
        print("[LOG] 🌞 WebCrawler is ready to crawl")

    def fetch_page(
        self,
        url_model: UrlModel,
        provider: str = DEFAULT_PROVIDER,
        api_token: str = None,
        extract_blocks_flag: bool = True,
        word_count_threshold=MIN_WORD_THRESHOLD,
        use_cached_html: bool = False,
        extraction_strategy: ExtractionStrategy = None,
        chunking_strategy: ChunkingStrategy = RegexChunking(),
        **kwargs,
    ) -> CrawlResult:
        return self.run(
            url_model.url,
            word_count_threshold,
            extraction_strategy or NoExtractionStrategy(),
            chunking_strategy,
            bypass_cache=url_model.forced,
            **kwargs,
        )

    def run(
        self,
        url: str,
        word_count_threshold=MIN_WORD_THRESHOLD,
        extraction_strategy: ExtractionStrategy = None,
        chunking_strategy: ChunkingStrategy = RegexChunking(),
        bypass_cache: bool = False,
        css_selector: str = None,
        verbose=True,
        **kwargs,
    ) -> CrawlResult:
        extraction_strategy = extraction_strategy or NoExtractionStrategy()
        extraction_strategy.verbose = verbose
        # Check that the extraction strategy is an instance of ExtractionStrategy; if not, raise an error
        if not isinstance(extraction_strategy, ExtractionStrategy):
            raise ValueError("Unsupported extraction strategy")
        if not isinstance(chunking_strategy, ChunkingStrategy):
            raise ValueError("Unsupported chunking strategy")

        # Make sure word_count_threshold is not less than MIN_WORD_THRESHOLD
        if word_count_threshold < MIN_WORD_THRESHOLD:
            word_count_threshold = MIN_WORD_THRESHOLD

        # Check cache first
        if not bypass_cache and not self.always_by_pass_cache:
            cached = get_cached_url(url)
            if cached:
                return CrawlResult(
                    **{
                        "url": cached[0],
                        "html": cached[1],
                        "cleaned_html": cached[2],
                        "markdown": cached[3],
                        "extracted_content": cached[4],
                        "success": cached[5],
                        "error_message": "",
                    }
                )

        # Initialize WebDriver for crawling
        t = time.time()
        html = self.crawler_strategy.crawl(url)
        success = True
        error_message = ""
        # Extract content from HTML
        try:
            result = get_content_of_website(html, word_count_threshold, css_selector=css_selector)
            if result is None:
                raise ValueError(f"Failed to extract content from the website: {url}")
        except InvalidCSSSelectorError as e:
            raise ValueError(str(e))

        cleaned_html = result.get("cleaned_html", html)
        markdown = result.get("markdown", "")

        # Print a professional LOG-style message showing the time taken and that crawling is done
        if verbose:
            print(
                f"[LOG] 🚀 Crawling done for {url}, success: {success}, time taken: {time.time() - t} seconds"
            )

        extracted_content = []
        if verbose:
            print(f"[LOG] 🔥 Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}")
        t = time.time()
        # Split markdown into sections
        sections = chunking_strategy.chunk(markdown)
        # sections = merge_chunks_based_on_token_threshold(sections, CHUNK_TOKEN_THRESHOLD)

        extracted_content = extraction_strategy.run(
            url, sections,
        )
        extracted_content = json.dumps(extracted_content)

        if verbose:
            print(
                f"[LOG] 🚀 Extraction done for {url}, time taken: {time.time() - t} seconds."
            )

        # Cache the result
        cleaned_html = beautify_html(cleaned_html)
        cache_url(
            url,
            html,
            cleaned_html,
            markdown,
            extracted_content,
            success,
        )

        return CrawlResult(
            url=url,
            html=html,
            cleaned_html=cleaned_html,
            markdown=markdown,
            extracted_content=extracted_content,
            success=success,
            error_message=error_message,
        )

    def fetch_pages(
        self,
        url_models: List[UrlModel],
        provider: str = DEFAULT_PROVIDER,
        api_token: str = None,
        extract_blocks_flag: bool = True,
        word_count_threshold=MIN_WORD_THRESHOLD,
        use_cached_html: bool = False,
        extraction_strategy: ExtractionStrategy = None,
        chunking_strategy: ChunkingStrategy = RegexChunking(),
        **kwargs,
    ) -> List[CrawlResult]:
        extraction_strategy = extraction_strategy or NoExtractionStrategy()

        def fetch_page_wrapper(url_model, *args, **kwargs):
            return self.fetch_page(url_model, *args, **kwargs)

        with ThreadPoolExecutor() as executor:
            results = list(
                executor.map(
                    fetch_page_wrapper,
                    url_models,
                    [provider] * len(url_models),
                    [api_token] * len(url_models),
                    [extract_blocks_flag] * len(url_models),
                    [word_count_threshold] * len(url_models),
                    [use_cached_html] * len(url_models),
                    [extraction_strategy] * len(url_models),
                    [chunking_strategy] * len(url_models),
                    *[kwargs] * len(url_models),
                )
            )

        return results
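
For context, this removed module was the original synchronous entry point. A typical legacy call looked roughly like the following sketch; the import path is assumed for illustration, since the module is deleted in this change:

```python
from crawl4ai.web_crawler import WebCrawler  # module path assumed; removed in this diff

crawler = WebCrawler()
crawler.warmup()  # primes the Selenium-based strategy and the local cache
result = crawler.run(url="https://example.com", word_count_threshold=10)
print(result.markdown[:200])
```
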
31  deploy/docker/.dockerignore  Normal file
@@ -0,0 +1,31 @@
# .dockerignore
*

# Allow specific files and directories when using local installation
!crawl4ai/
!docs/
!deploy/docker/
!setup.py
!pyproject.toml
!README.md
!LICENSE
!MANIFEST.in
!setup.cfg
!mkdocs.yml

.git/
__pycache__/
*.pyc
*.pyo
*.pyd
.DS_Store
.env
.venv
venv/
tests/
coverage.xml
*.log
*.swp
*.egg-info/
dist/
build/
8  deploy/docker/.llm.env.example  Normal file
@@ -0,0 +1,8 @@
# LLM Provider Keys
OPENAI_API_KEY=your_openai_key_here
DEEPSEEK_API_KEY=your_deepseek_key_here
ANTHROPIC_API_KEY=your_anthropic_key_here
GROQ_API_KEY=your_groq_key_here
TOGETHER_API_KEY=your_together_key_here
MISTRAL_API_KEY=your_mistral_key_here
GEMINI_API_TOKEN=your_gemini_key_here
821  deploy/docker/README.md  Normal file
@@ -0,0 +1,821 @@
# Crawl4AI Docker Guide 🐳

## Table of Contents
- [Prerequisites](#prerequisites)
- [Installation](#installation)
  - [Option 1: Using Pre-built Docker Hub Images (Recommended)](#option-1-using-pre-built-docker-hub-images-recommended)
  - [Option 2: Using Docker Compose](#option-2-using-docker-compose)
  - [Option 3: Manual Local Build & Run](#option-3-manual-local-build--run)
- [Dockerfile Parameters](#dockerfile-parameters)
- [Using the API](#using-the-api)
  - [Playground Interface](#playground-interface)
  - [Python SDK](#python-sdk)
  - [Understanding Request Schema](#understanding-request-schema)
  - [REST API Examples](#rest-api-examples)
- [Additional API Endpoints](#additional-api-endpoints)
  - [HTML Extraction Endpoint](#html-extraction-endpoint)
  - [Screenshot Endpoint](#screenshot-endpoint)
  - [PDF Export Endpoint](#pdf-export-endpoint)
  - [JavaScript Execution Endpoint](#javascript-execution-endpoint)
  - [Library Context Endpoint](#library-context-endpoint)
- [MCP (Model Context Protocol) Support](#mcp-model-context-protocol-support)
  - [What is MCP?](#what-is-mcp)
  - [Connecting via MCP](#connecting-via-mcp)
  - [Using with Claude Code](#using-with-claude-code)
  - [Available MCP Tools](#available-mcp-tools)
  - [Testing MCP Connections](#testing-mcp-connections)
  - [MCP Schemas](#mcp-schemas)
- [Metrics & Monitoring](#metrics--monitoring)
- [Deployment Scenarios](#deployment-scenarios)
- [Complete Examples](#complete-examples)
- [Server Configuration](#server-configuration)
  - [Understanding config.yml](#understanding-configyml)
  - [JWT Authentication](#jwt-authentication)
  - [Configuration Tips and Best Practices](#configuration-tips-and-best-practices)
  - [Customizing Your Configuration](#customizing-your-configuration)
  - [Configuration Recommendations](#configuration-recommendations)
- [Getting Help](#getting-help)
- [Summary](#summary)

## Prerequisites

Before we dive in, make sure you have:
- Docker installed and running (version 20.10.0 or higher), including `docker compose` (usually bundled with Docker Desktop).
- `git` for cloning the repository.
- At least 4GB of RAM available for the container (more recommended for heavy use).
- Python 3.10+ (if using the Python SDK).
- Node.js 16+ (if using the Node.js examples).

> 💡 **Pro tip**: Run `docker info` to check your Docker installation and available resources.

## Installation

We offer several ways to get the Crawl4AI server running. The quickest way is to use our pre-built Docker Hub images.

### Option 1: Using Pre-built Docker Hub Images (Recommended)

Pull and run images directly from Docker Hub without building locally.

#### 1. Pull the Image

Our latest release candidate is `0.6.0-r1`. Images are built with multi-arch manifests, so Docker automatically pulls the correct version for your system.

```bash
# Pull the release candidate (recommended for latest features)
docker pull unclecode/crawl4ai:0.6.0-rN  # Use your favorite revision number

# Or pull the latest stable version
docker pull unclecode/crawl4ai:latest
```

#### 2. Setup Environment (API Keys)

If you plan to use LLMs, create a `.llm.env` file in your working directory:

```bash
# Create a .llm.env file with your API keys
cat > .llm.env << EOL
# OpenAI
OPENAI_API_KEY=sk-your-key

# Anthropic
ANTHROPIC_API_KEY=your-anthropic-key

# Other providers as needed
# DEEPSEEK_API_KEY=your-deepseek-key
# GROQ_API_KEY=your-groq-key
# TOGETHER_API_KEY=your-together-key
# MISTRAL_API_KEY=your-mistral-key
# GEMINI_API_TOKEN=your-gemini-token
EOL
```
> 🔑 **Note**: Keep your API keys secure! Never commit `.llm.env` to version control.

#### 3. Run the Container

* **Basic run:**
  ```bash
  docker run -d \
    -p 11235:11235 \
    --name crawl4ai \
    --shm-size=1g \
    unclecode/crawl4ai:0.6.0-rN  # Use your favorite revision number
  ```

* **With LLM support:**
  ```bash
  # Make sure .llm.env is in the current directory
  docker run -d \
    -p 11235:11235 \
    --name crawl4ai \
    --env-file .llm.env \
    --shm-size=1g \
    unclecode/crawl4ai:0.6.0-rN  # Use your favorite revision number
  ```

> The server will be available at `http://localhost:11235`. Visit `/playground` to access the interactive testing interface.

#### 4. Stopping the Container

```bash
docker stop crawl4ai && docker rm crawl4ai
```

#### Docker Hub Versioning Explained

* **Image Name:** `unclecode/crawl4ai`
* **Tag Format:** `LIBRARY_VERSION[-SUFFIX]` (e.g., `0.6.0-r1`)
  * `LIBRARY_VERSION`: The semantic version of the core `crawl4ai` Python library
  * `SUFFIX`: Optional tag for release candidates and revisions (e.g., `r1`)
* **`latest` Tag:** Points to the most recent stable version
* **Multi-Architecture Support:** All images support both `linux/amd64` and `linux/arm64` architectures through a single tag

### Option 2: Using Docker Compose

Docker Compose simplifies building and running the service, especially for local development and testing.

#### 1. Clone Repository

```bash
git clone https://github.com/unclecode/crawl4ai.git
cd crawl4ai
```

#### 2. Environment Setup (API Keys)

If you plan to use LLMs, copy the example environment file and add your API keys. This file should be in the **project root directory**.

```bash
# Make sure you are in the 'crawl4ai' root directory
cp deploy/docker/.llm.env.example .llm.env

# Now edit .llm.env and add your API keys
```

#### 3. Build and Run with Compose

The `docker-compose.yml` file in the project root provides a simplified approach that automatically handles architecture detection using buildx.

* **Run Pre-built Image from Docker Hub:**
  ```bash
  # Pulls and runs the release candidate from Docker Hub
  # Automatically selects the correct architecture
  # Use your favorite revision number in place of rN
  IMAGE=unclecode/crawl4ai:0.6.0-rN docker compose up -d
  ```

* **Build and Run Locally:**
  ```bash
  # Builds the image locally using Dockerfile and runs it
  # Automatically uses the correct architecture for your machine
  docker compose up --build -d
  ```

* **Customize the Build:**
  ```bash
  # Build with all features (includes torch and transformers)
  INSTALL_TYPE=all docker compose up --build -d

  # Build with GPU support (for AMD64 platforms)
  ENABLE_GPU=true docker compose up --build -d
  ```

> The server will be available at `http://localhost:11235`.

#### 4. Stopping the Service

```bash
# Stop the service
docker compose down
```

### Option 3: Manual Local Build & Run

If you prefer not to use Docker Compose and want direct control over the build and run process, follow the steps below.

#### 1. Clone Repository & Setup Environment

Follow steps 1 and 2 from the Docker Compose section above (clone repo, `cd crawl4ai`, create `.llm.env` in the root).

#### 2. Build the Image (Multi-Arch)

Use `docker buildx` to build the image. Crawl4AI now uses buildx to handle multi-architecture builds automatically.

```bash
# Make sure you are in the 'crawl4ai' root directory
# Build for the current architecture and load it into Docker
docker buildx build -t crawl4ai-local:latest --load .

# Or build for multiple architectures (useful for publishing;
# use --push to a registry, since --load only supports a single platform)
docker buildx build --platform linux/amd64,linux/arm64 -t crawl4ai-local:latest --push .

# Build with additional options
docker buildx build \
  --build-arg INSTALL_TYPE=all \
  --build-arg ENABLE_GPU=false \
  -t crawl4ai-local:latest --load .
```

#### 3. Run the Container

* **Basic run (no LLM support):**
  ```bash
  docker run -d \
    -p 11235:11235 \
    --name crawl4ai-standalone \
    --shm-size=1g \
    crawl4ai-local:latest
  ```

* **With LLM support:**
  ```bash
  # Make sure .llm.env is in the current directory (project root)
  docker run -d \
    -p 11235:11235 \
    --name crawl4ai-standalone \
    --env-file .llm.env \
    --shm-size=1g \
    crawl4ai-local:latest
  ```

> The server will be available at `http://localhost:11235`.

#### 4. Stopping the Manual Container

```bash
docker stop crawl4ai-standalone && docker rm crawl4ai-standalone
```

---

## MCP (Model Context Protocol) Support

Crawl4AI server includes support for the Model Context Protocol (MCP), allowing you to connect the server's capabilities directly to MCP-compatible clients like Claude Code.

### What is MCP?

MCP is an open protocol that standardizes how applications provide context to LLMs. It allows AI models to access external tools, data sources, and services through a standardized interface.

### Connecting via MCP

The Crawl4AI server exposes two MCP endpoints:

- **Server-Sent Events (SSE)**: `http://localhost:11235/mcp/sse`
- **WebSocket**: `ws://localhost:11235/mcp/ws`
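
To sanity-check connectivity from Python before wiring up a client, a streaming GET against the SSE endpoint can be sketched as follows; this assumes the server is running locally on the default port and simply prints raw event lines:

```python
import requests

# Stream raw Server-Sent Events from the MCP endpoint
with requests.get("http://localhost:11235/mcp/sse", stream=True, timeout=30) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines():
        if line:
            print(line.decode("utf-8"))
```
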

### Using with Claude Code

You can add Crawl4AI as an MCP tool provider in Claude Code with a simple command:

```bash
# Add the Crawl4AI server as an MCP provider
claude mcp add --transport sse c4ai-sse http://localhost:11235/mcp/sse

# List all MCP providers to verify it was added
claude mcp list
```

Once connected, Claude Code can directly use Crawl4AI's capabilities like screenshot capture, PDF generation, and HTML processing without having to make separate API calls.

### Available MCP Tools

When connected via MCP, the following tools are available:

- `md` - Generate markdown from web content
- `html` - Extract preprocessed HTML
- `screenshot` - Capture webpage screenshots
- `pdf` - Generate PDF documents
- `execute_js` - Run JavaScript on web pages
- `crawl` - Perform multi-URL crawling
- `ask` - Query the Crawl4AI library context

### Testing MCP Connections

You can test the MCP WebSocket connection using the test file included in the repository:

```bash
# From the repository root
python tests/mcp/test_mcp_socket.py
```

### MCP Schemas

Access the MCP tool schemas at `http://localhost:11235/mcp/schema` for detailed information on each tool's parameters and capabilities.

---

## Additional API Endpoints

In addition to the core `/crawl` and `/crawl/stream` endpoints, the server provides several specialized endpoints:

### HTML Extraction Endpoint

```
POST /html
```

Crawls the URL and returns preprocessed HTML optimized for schema extraction.

```json
{
  "url": "https://example.com"
}
```
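
As a concrete reference, the request above can be sent from Python like this minimal sketch (default port assumed):

```python
import requests

resp = requests.post(
    "http://localhost:11235/html",
    json={"url": "https://example.com"},
    timeout=60,
)
resp.raise_for_status()
print(resp.json())  # response payload containing the preprocessed HTML
```
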

### Screenshot Endpoint

```
POST /screenshot
```

Captures a full-page PNG screenshot of the specified URL.

```json
{
  "url": "https://example.com",
  "screenshot_wait_for": 2,
  "output_path": "/path/to/save/screenshot.png"
}
```

- `screenshot_wait_for`: Optional delay in seconds before capture (default: 2)
- `output_path`: Optional path to save the screenshot (recommended)
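
A matching Python call might look like this sketch (default port assumed; note that `output_path` refers to the server's filesystem, and the path used here is illustrative):

```python
import requests

resp = requests.post(
    "http://localhost:11235/screenshot",
    json={
        "url": "https://example.com",
        "screenshot_wait_for": 2,
        "output_path": "/tmp/example.png",  # illustrative path on the server
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json())
```
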

### PDF Export Endpoint

```
POST /pdf
```

Generates a PDF document of the specified URL.

```json
{
  "url": "https://example.com",
  "output_path": "/path/to/save/document.pdf"
}
```

- `output_path`: Optional path to save the PDF (recommended)

### JavaScript Execution Endpoint

```
POST /execute_js
```

Executes JavaScript snippets on the specified URL and returns the full crawl result.

```json
{
  "url": "https://example.com",
  "scripts": [
    "return document.title",
    "return Array.from(document.querySelectorAll('a')).map(a => a.href)"
  ]
}
```

- `scripts`: List of JavaScript snippets to execute sequentially
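
In Python, the same call is a short `requests` snippet; a minimal sketch (default port assumed):

```python
import requests

resp = requests.post(
    "http://localhost:11235/execute_js",
    json={
        "url": "https://example.com",
        "scripts": ["return document.title"],
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json())  # full crawl result for the page
```
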
|
||||
---
|
||||
|
||||
## Dockerfile Parameters
|
||||
|
||||
You can customize the image build process using build arguments (`--build-arg`). These are typically used via `docker buildx build` or within the `docker-compose.yml` file.
|
||||
|
||||
```bash
|
||||
# Example: Build with 'all' features using buildx
|
||||
docker buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--build-arg INSTALL_TYPE=all \
|
||||
-t yourname/crawl4ai-all:latest \
|
||||
--load \
|
||||
. # Build from root context
|
||||
```
|
||||
|
||||
### Build Arguments Explained
|
||||
|
||||
| Argument | Description | Default | Options |
|
||||
| :----------- | :--------------------------------------- | :-------- | :--------------------------------- |
|
||||
| INSTALL_TYPE | Feature set | `default` | `default`, `all`, `torch`, `transformer` |
|
||||
| ENABLE_GPU | GPU support (CUDA for AMD64) | `false` | `true`, `false` |
|
||||
| APP_HOME | Install path inside container (advanced) | `/app` | any valid path |
|
||||
| USE_LOCAL | Install library from local source | `true` | `true`, `false` |
|
||||
| GITHUB_REPO | Git repo to clone if USE_LOCAL=false | *(see Dockerfile)* | any git URL |
|
||||
| GITHUB_BRANCH| Git branch to clone if USE_LOCAL=false | `main` | any branch name |
|
||||
|
||||
*(Note: PYTHON_VERSION is fixed by the `FROM` instruction in the Dockerfile)*
|
||||
|
||||
### Build Best Practices
|
||||
|
||||
1. **Choose the Right Install Type**
|
||||
* `default`: Basic installation, smallest image size. Suitable for most standard web scraping and markdown generation.
|
||||
* `all`: Full features including `torch` and `transformers` for advanced extraction strategies (e.g., CosineStrategy, certain LLM filters). Significantly larger image. Ensure you need these extras.
|
||||
2. **Platform Considerations**
|
||||
* Use `buildx` for building multi-architecture images, especially for pushing to registries.
|
||||
* Use `docker compose` profiles (`local-amd64`, `local-arm64`) for easy platform-specific local builds.
|
||||
3. **Performance Optimization**
|
||||
* The image automatically includes platform-specific optimizations (OpenMP for AMD64, OpenBLAS for ARM64).
|
||||
|
||||
---
|
||||
|
||||
## Using the API
|
||||
|
||||
Communicate with the running Docker server via its REST API (defaulting to `http://localhost:11235`). You can use the Python SDK or make direct HTTP requests.
|
||||
|
||||
### Playground Interface
|
||||
|
||||
A built-in web playground is available at `http://localhost:11235/playground` for testing and generating API requests. The playground allows you to:
|
||||
|
||||
1. Configure `CrawlerRunConfig` and `BrowserConfig` using the main library's Python syntax
|
||||
2. Test crawling operations directly from the interface
|
||||
3. Generate corresponding JSON for REST API requests based on your configuration
|
||||
|
||||
This is the easiest way to translate Python configuration to JSON requests when building integrations.
|
||||
|
||||
### Python SDK
|
||||
|
||||
Install the SDK: `pip install crawl4ai`
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crawl4ai.docker_client import Crawl4aiDockerClient
|
||||
from crawl4ai import BrowserConfig, CrawlerRunConfig, CacheMode # Assuming you have crawl4ai installed
|
||||
|
||||
async def main():
|
||||
# Point to the correct server port
|
||||
async with Crawl4aiDockerClient(base_url="http://localhost:11235", verbose=True) as client:
|
||||
# If JWT is enabled on the server, authenticate first:
|
||||
# await client.authenticate("user@example.com") # See Server Configuration section
|
||||
|
||||
# Example Non-streaming crawl
|
||||
print("--- Running Non-Streaming Crawl ---")
|
||||
results = await client.crawl(
|
||||
["https://httpbin.org/html"],
|
||||
browser_config=BrowserConfig(headless=True), # Use library classes for config aid
|
||||
crawler_config=CrawlerRunConfig(cache_mode=CacheMode.BYPASS)
|
||||
)
|
||||
if results: # client.crawl returns None on failure
|
||||
print(f"Non-streaming results success: {results.success}")
|
||||
if results.success:
|
||||
for result in results: # Iterate through the CrawlResultContainer
|
||||
print(f"URL: {result.url}, Success: {result.success}")
|
||||
else:
|
||||
print("Non-streaming crawl failed.")
|
||||
|
||||
|
||||
# Example Streaming crawl
|
||||
print("\n--- Running Streaming Crawl ---")
|
||||
stream_config = CrawlerRunConfig(stream=True, cache_mode=CacheMode.BYPASS)
|
||||
try:
|
||||
async for result in await client.crawl( # client.crawl returns an async generator for streaming
|
||||
["https://httpbin.org/html", "https://httpbin.org/links/5/0"],
|
||||
browser_config=BrowserConfig(headless=True),
|
||||
crawler_config=stream_config
|
||||
):
|
||||
print(f"Streamed result: URL: {result.url}, Success: {result.success}")
|
||||
except Exception as e:
|
||||
print(f"Streaming crawl failed: {e}")
|
||||
|
||||
|
||||
# Example Get schema
|
||||
print("\n--- Getting Schema ---")
|
||||
schema = await client.get_schema()
|
||||
print(f"Schema received: {bool(schema)}") # Print whether schema was received
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
```
|
||||
|
||||
*(SDK parameters like timeout, verify_ssl etc. remain the same)*
|
||||
|
||||
### Second Approach: Direct API Calls
|
||||
|
||||
Crucially, when sending configurations directly via JSON, they **must** follow the `{"type": "ClassName", "params": {...}}` structure for any non-primitive value (like config objects or strategies). Dictionaries must be wrapped as `{"type": "dict", "value": {...}}`.
|
||||
|
||||
*(Keep the detailed explanation of Configuration Structure, Basic Pattern, Simple vs Complex, Strategy Pattern, Complex Nested Example, Quick Grammar Overview, Important Rules, Pro Tip)*
|
||||
|
||||
#### More Examples *(Ensure Schema example uses type/value wrapper)*
|
||||
|
||||
**Advanced Crawler Configuration**
|
||||
*(Keep example, ensure cache_mode uses valid enum value like "bypass")*
|
||||
|
||||
**Extraction Strategy**
|
||||
```json
|
||||
{
|
||||
"crawler_config": {
|
||||
"type": "CrawlerRunConfig",
|
||||
"params": {
|
||||
"extraction_strategy": {
|
||||
"type": "JsonCssExtractionStrategy",
|
||||
"params": {
|
||||
"schema": {
|
||||
"type": "dict",
|
||||
"value": {
|
||||
"baseSelector": "article.post",
|
||||
"fields": [
|
||||
{"name": "title", "selector": "h1", "type": "text"},
|
||||
{"name": "content", "selector": ".content", "type": "html"}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**LLM Extraction Strategy** *(Keep example, ensure schema uses type/value wrapper)*
|
||||
*(Keep Deep Crawler Example)*
|
||||
|
||||
### REST API Examples
|
||||
|
||||
Update URLs to use port `11235`.
|
||||
|
||||
#### Simple Crawl
|
||||
|
||||
```python
|
||||
import requests
|
||||
|
||||
# Configuration objects converted to the required JSON structure
|
||||
browser_config_payload = {
|
||||
"type": "BrowserConfig",
|
||||
"params": {"headless": True}
|
||||
}
|
||||
crawler_config_payload = {
|
||||
"type": "CrawlerRunConfig",
|
||||
"params": {"stream": False, "cache_mode": "bypass"} # Use string value of enum
|
||||
}
|
||||
|
||||
crawl_payload = {
|
||||
"urls": ["https://httpbin.org/html"],
|
||||
"browser_config": browser_config_payload,
|
||||
"crawler_config": crawler_config_payload
|
||||
}
|
||||
response = requests.post(
|
||||
"http://localhost:11235/crawl", # Updated port
|
||||
# headers={"Authorization": f"Bearer {token}"}, # If JWT is enabled
|
||||
json=crawl_payload
|
||||
)
|
||||
print(f"Status Code: {response.status_code}")
|
||||
if response.ok:
|
||||
print(response.json())
|
||||
else:
|
||||
print(f"Error: {response.text}")
|
||||
|
||||
```
|
||||
|
||||
#### Streaming Results
|
||||
|
||||
```python
|
||||
import json
|
||||
import httpx # Use httpx for async streaming example
|
||||
|
||||
async def test_stream_crawl(token: str = None): # Made token optional
|
||||
"""Test the /crawl/stream endpoint with multiple URLs."""
|
||||
url = "http://localhost:11235/crawl/stream" # Updated port
|
||||
payload = {
|
||||
"urls": [
|
||||
"https://httpbin.org/html",
|
||||
"https://httpbin.org/links/5/0",
|
||||
],
|
||||
"browser_config": {
|
||||
"type": "BrowserConfig",
|
||||
"params": {"headless": True, "viewport": {"type": "dict", "value": {"width": 1200, "height": 800}}} # Viewport needs type:dict
|
||||
},
|
||||
"crawler_config": {
|
||||
"type": "CrawlerRunConfig",
|
||||
"params": {"stream": True, "cache_mode": "bypass"}
|
||||
}
|
||||
}
|
||||
|
||||
headers = {}
|
||||
# if token:
|
||||
# headers = {"Authorization": f"Bearer {token}"} # If JWT is enabled
|
||||
|
||||
try:
|
||||
async with httpx.AsyncClient() as client:
|
||||
async with client.stream("POST", url, json=payload, headers=headers, timeout=120.0) as response:
|
||||
print(f"Status: {response.status_code} (Expected: 200)")
|
||||
response.raise_for_status() # Raise exception for bad status codes
|
||||
|
||||
# Read streaming response line-by-line (NDJSON)
|
||||
async for line in response.aiter_lines():
|
||||
if line:
|
||||
try:
|
||||
data = json.loads(line)
|
||||
# Check for completion marker
|
||||
if data.get("status") == "completed":
|
||||
print("Stream completed.")
|
||||
break
|
||||
print(f"Streamed Result: {json.dumps(data, indent=2)}")
|
||||
except json.JSONDecodeError:
|
||||
print(f"Warning: Could not decode JSON line: {line}")
|
||||
|
||||
except httpx.HTTPStatusError as e:
|
||||
print(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
|
||||
except Exception as e:
|
||||
print(f"Error in streaming crawl test: {str(e)}")
|
||||
|
||||
# To run this example:
|
||||
# import asyncio
|
||||
# asyncio.run(test_stream_crawl())
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Metrics & Monitoring
|
||||
|
||||
Keep an eye on your crawler with these endpoints:
|
||||
|
||||
- `/health` - Quick health check
|
||||
- `/metrics` - Detailed Prometheus metrics
|
||||
- `/schema` - Full API schema
|
||||
|
||||
Example health check:
|
||||
```bash
|
||||
curl http://localhost:11235/health
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
*(Deployment Scenarios and Complete Examples sections remain the same, maybe update links if examples moved)*
|
||||
|
||||
---
|
||||
|
||||
## Server Configuration
|
||||
|
||||
The server's behavior can be customized through the `config.yml` file.
|
||||
|
||||
### Understanding config.yml
|
||||
|
||||
The configuration file is loaded from `/app/config.yml` inside the container. By default, the file from `deploy/docker/config.yml` in the repository is copied there during the build.
|
||||
|
||||
Here's a detailed breakdown of the configuration options (using defaults from `deploy/docker/config.yml`):
|
||||
|
||||
```yaml
# Application Configuration
app:
  title: "Crawl4AI API"
  version: "1.0.0" # Consider setting this to match the library version, e.g., "0.5.1"
  host: "0.0.0.0"
  port: 8020 # NOTE: This port is used ONLY when running server.py directly. Gunicorn overrides it (see supervisord.conf).
  reload: False # Default is False - suitable for production
  timeout_keep_alive: 300

# Default LLM Configuration
llm:
  provider: "openai/gpt-4o-mini"
  api_key_env: "OPENAI_API_KEY"
  # api_key: sk-...  # If you pass the API key directly, api_key_env is ignored

# Redis Configuration (used by the internal Redis server managed by supervisord)
redis:
  host: "localhost"
  port: 6379
  db: 0
  password: ""
  # ... other redis options ...

# Rate Limiting Configuration
rate_limiting:
  enabled: True
  default_limit: "1000/minute"
  trusted_proxies: []
  storage_uri: "memory://" # Use "redis://localhost:6379" if you need persistent/shared limits

# Security Configuration
security:
  enabled: false # Master toggle for security features
  jwt_enabled: false # Enable JWT authentication (requires security.enabled=true)
  https_redirect: false # Force HTTPS (requires security.enabled=true)
  trusted_hosts: ["*"] # Allowed hosts (use specific domains in production)
  headers: # Security headers (applied if security.enabled=true)
    x_content_type_options: "nosniff"
    x_frame_options: "DENY"
    content_security_policy: "default-src 'self'"
    strict_transport_security: "max-age=63072000; includeSubDomains"

# Crawler Configuration
crawler:
  memory_threshold_percent: 95.0
  rate_limiter:
    base_delay: [1.0, 2.0] # Min/max delay between requests in seconds for the dispatcher
  timeouts:
    stream_init: 30.0 # Timeout for stream initialization
    batch_process: 300.0 # Timeout for non-streaming /crawl processing

# Logging Configuration
logging:
  level: "INFO"
  format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

# Observability Configuration
observability:
  prometheus:
    enabled: True
    endpoint: "/metrics"
  health_check:
    endpoint: "/health"
```

*(The JWT Authentication section remains the same; just note that the default port for requests is now 11235.)*
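
As a quick illustration, here is a hedged sketch of an authenticated request flow. It assumes `security.enabled` and `jwt_enabled` are both true, and that the server exposes a `/token` endpoint accepting an email (matching `TokenRequest` in `deploy/docker/auth.py`); the endpoint path and the `access_token` response field are assumptions, so check the JWT Authentication section for the exact contract:

```python
# Hedged sketch of a JWT-authenticated crawl (default port 11235).
# Assumptions: a /token endpoint that accepts {"email": ...} and returns
# an "access_token" field - verify both against the JWT Authentication docs.
import httpx

BASE_URL = "http://localhost:11235"

def crawl_with_jwt(email: str, target_url: str) -> dict:
    # Exchange an email for a bearer token (endpoint path is an assumption)
    token_resp = httpx.post(f"{BASE_URL}/token", json={"email": email}, timeout=30.0)
    token_resp.raise_for_status()
    token = token_resp.json()["access_token"]  # assumed response field

    payload = {
        "urls": [target_url],
        "browser_config": {"type": "BrowserConfig", "params": {"headless": True}},
        "crawler_config": {"type": "CrawlerRunConfig", "params": {"cache_mode": "bypass"}},
    }
    resp = httpx.post(
        f"{BASE_URL}/crawl",
        json=payload,
        headers={"Authorization": f"Bearer {token}"},
        timeout=120.0,
    )
    resp.raise_for_status()
    return resp.json()

if __name__ == "__main__":
    print(crawl_with_jwt("user@example.com", "https://httpbin.org/html")["success"])
```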

*(The Configuration Tips and Best Practices sections remain the same.)*

### Customizing Your Configuration

You can override the default `config.yml`.

#### Method 1: Modify Before Build

1. Edit the `deploy/docker/config.yml` file in your local repository clone.
2. Build the image using `docker buildx` or `docker compose --profile local-... up --build`. The modified file will be copied into the image.

#### Method 2: Runtime Mount (Recommended for Custom Deploys)

1. Create your custom configuration file, e.g., `my-custom-config.yml`, locally. Ensure it contains all necessary sections.
2. Mount it when running the container:

    * **Using `docker run`:**
      ```bash
      # Assumes my-custom-config.yml is in the current directory
      docker run -d -p 11235:11235 \
        --name crawl4ai-custom-config \
        --env-file .llm.env \
        --shm-size=1g \
        -v $(pwd)/my-custom-config.yml:/app/config.yml \
        unclecode/crawl4ai:latest # Or your specific tag
      ```

    * **Using `docker-compose.yml`:** Add a `volumes` section to the service definition:
      ```yaml
      services:
        crawl4ai-hub-amd64: # Or your chosen service
          image: unclecode/crawl4ai:latest
          profiles: ["hub-amd64"]
          <<: *base-config
          volumes:
            # Mount the local custom config over the default one in the container
            - ./my-custom-config.yml:/app/config.yml
            # Keep the shared memory volume from base-config
            - /dev/shm:/dev/shm
      ```

      *(Note: Ensure `my-custom-config.yml` is in the same directory as `docker-compose.yml`.)*

> 💡 When mounting, your custom file *completely replaces* the default one. Ensure it's a valid and complete configuration.
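
A quick local sanity check can catch a missing section before the container even starts. The sketch below is illustrative only: it assumes PyYAML is installed, and the required-key list simply mirrors the top-level sections of the default `deploy/docker/config.yml`:

```python
# Illustrative pre-flight check for a custom config.yml (assumes PyYAML).
# The required sections mirror the top-level keys of the default config.
import sys
import yaml

REQUIRED_SECTIONS = {
    "app", "llm", "redis", "rate_limiting",
    "security", "crawler", "logging", "observability",
}

def check_config(path: str) -> None:
    with open(path) as f:
        cfg = yaml.safe_load(f) or {}
    missing = REQUIRED_SECTIONS - cfg.keys()
    if missing:
        sys.exit(f"{path} is missing sections: {', '.join(sorted(missing))}")
    print(f"{path} looks complete.")

if __name__ == "__main__":
    check_config(sys.argv[1] if len(sys.argv) > 1 else "my-custom-config.yml")
```

Run it as `python check_config.py my-custom-config.yml` before mounting the file.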
### Configuration Recommendations

1. **Security First** 🔒
   - Always enable security in production
   - Use specific `trusted_hosts` instead of wildcards
   - Set up proper rate limiting to protect your server
   - Consider your environment before enabling the HTTPS redirect

2. **Resource Management** 💻
   - Adjust `memory_threshold_percent` based on available RAM
   - Set timeouts according to your content size and network conditions
   - Use Redis for rate limiting in multi-container setups (set `storage_uri: "redis://localhost:6379"`)

3. **Monitoring** 📊
   - Enable Prometheus if you need metrics
   - Use DEBUG logging in development and INFO in production
   - Monitor the health check endpoint regularly

4. **Performance Tuning** ⚡
   - Start with conservative rate limiter delays
   - Increase the `batch_process` timeout for large content
   - Adjust the `stream_init` timeout based on initial response times

## Getting Help

We're here to help you succeed with Crawl4AI! Here's how to get support:

- 📖 Check our [full documentation](https://docs.crawl4ai.com)
- 🐛 Found a bug? [Open an issue](https://github.com/unclecode/crawl4ai/issues)
- 💬 Join our [Discord community](https://discord.gg/crawl4ai)
- ⭐ Star us on GitHub to show support!

## Summary

In this guide, we've covered everything you need to get started with Crawl4AI's Docker deployment:

- Building and running the Docker container
- Configuring the environment
- Using the interactive playground for testing
- Making API requests with proper typing
- Using the Python SDK
- Leveraging specialized endpoints for screenshots, PDFs, and JavaScript execution
- Connecting via the Model Context Protocol (MCP)
- Monitoring your deployment

The new playground interface at `http://localhost:11235/playground` makes it much easier to test configurations and generate the corresponding JSON for API requests.

For AI application developers, the MCP integration allows tools like Claude Code to access Crawl4AI's capabilities directly, without complex API handling.

Remember, the examples in the `examples` folder are your friends - they show real-world usage patterns that you can adapt for your needs.

Keep exploring, and don't hesitate to reach out if you need help! We're building something amazing together. 🚀

Happy crawling! 🕷️
571 deploy/docker/api.py Normal file
@@ -0,0 +1,571 @@
import os
import json
import asyncio
from typing import List, Tuple, Dict
from functools import partial
from uuid import uuid4
from datetime import datetime

import logging
from typing import Optional, AsyncGenerator
from urllib.parse import unquote
from fastapi import HTTPException, Request, status
from fastapi.background import BackgroundTasks
from fastapi.responses import JSONResponse
from redis import asyncio as aioredis

from crawl4ai import (
    AsyncWebCrawler,
    CrawlerRunConfig,
    LLMExtractionStrategy,
    CacheMode,
    BrowserConfig,
    MemoryAdaptiveDispatcher,
    RateLimiter,
    LLMConfig
)
from crawl4ai.utils import perform_completion_with_backoff
from crawl4ai.content_filter_strategy import (
    PruningContentFilter,
    BM25ContentFilter,
    LLMContentFilter
)
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
from crawl4ai.content_scraping_strategy import LXMLWebScrapingStrategy

from utils import (
    TaskStatus,
    FilterType,
    get_base_url,
    is_task_id,
    should_cleanup_task,
    decode_redis_hash
)

import psutil
import time

logger = logging.getLogger(__name__)

# --- Helper to get current process memory in MB ---
def _get_memory_mb():
    try:
        return psutil.Process().memory_info().rss / (1024 * 1024)
    except Exception as e:
        logger.warning(f"Could not get memory info: {e}")
        return None

async def handle_llm_qa(
    url: str,
    query: str,
    config: dict
) -> str:
    """Process QA using LLM with crawled content as context."""
    try:
        if not url.startswith(('http://', 'https://')):
            url = 'https://' + url
        # Extract base URL by finding the last '?q=' occurrence
        last_q_index = url.rfind('?q=')
        if last_q_index != -1:
            url = url[:last_q_index]

        # Get markdown content
        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(url)
            if not result.success:
                raise HTTPException(
                    status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                    detail=result.error_message
                )
            content = result.markdown.fit_markdown or result.markdown.raw_markdown

        # Create the prompt and get the LLM response
        prompt = f"""Use the following content as context to answer the question.
Content:
{content}

Question: {query}

Answer:"""

        response = perform_completion_with_backoff(
            provider=config["llm"]["provider"],
            prompt_with_variables=prompt,
            api_token=os.environ.get(config["llm"].get("api_key_env", ""))
        )

        return response.choices[0].message.content
    except Exception as e:
        logger.error(f"QA processing error: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )

async def process_llm_extraction(
    redis: aioredis.Redis,
    config: dict,
    task_id: str,
    url: str,
    instruction: str,
    schema: Optional[str] = None,
    cache: str = "0"
) -> None:
    """Process LLM extraction in the background."""
    try:
        # If config['llm'] has api_key, ignore api_key_env
        if "api_key" in config["llm"]:
            api_key = config["llm"]["api_key"]
        else:
            api_key = os.environ.get(config["llm"].get("api_key_env", ""), "")
        llm_strategy = LLMExtractionStrategy(
            llm_config=LLMConfig(
                provider=config["llm"]["provider"],
                api_token=api_key
            ),
            instruction=instruction,
            schema=json.loads(schema) if schema else None,
        )

        cache_mode = CacheMode.ENABLED if cache == "1" else CacheMode.WRITE_ONLY

        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(
                url=url,
                config=CrawlerRunConfig(
                    extraction_strategy=llm_strategy,
                    scraping_strategy=LXMLWebScrapingStrategy(),
                    cache_mode=cache_mode
                )
            )

        if not result.success:
            await redis.hset(f"task:{task_id}", mapping={
                "status": TaskStatus.FAILED,
                "error": result.error_message
            })
            return

        try:
            content = json.loads(result.extracted_content)
        except json.JSONDecodeError:
            content = result.extracted_content
        await redis.hset(f"task:{task_id}", mapping={
            "status": TaskStatus.COMPLETED,
            "result": json.dumps(content)
        })

    except Exception as e:
        logger.error(f"LLM extraction error: {str(e)}", exc_info=True)
        await redis.hset(f"task:{task_id}", mapping={
            "status": TaskStatus.FAILED,
            "error": str(e)
        })

async def handle_markdown_request(
    url: str,
    filter_type: FilterType,
    query: Optional[str] = None,
    cache: str = "0",
    config: Optional[dict] = None
) -> str:
    """Handle markdown generation requests."""
    try:
        decoded_url = unquote(url)
        if not decoded_url.startswith(('http://', 'https://')):
            decoded_url = 'https://' + decoded_url

        if filter_type == FilterType.RAW:
            md_generator = DefaultMarkdownGenerator()
        else:
            content_filter = {
                FilterType.FIT: PruningContentFilter(),
                FilterType.BM25: BM25ContentFilter(user_query=query or ""),
                FilterType.LLM: LLMContentFilter(
                    llm_config=LLMConfig(
                        provider=config["llm"]["provider"],
                        api_token=os.environ.get(config["llm"].get("api_key_env", ""), ""),
                    ),
                    instruction=query or "Extract main content"
                )
            }[filter_type]
            md_generator = DefaultMarkdownGenerator(content_filter=content_filter)

        cache_mode = CacheMode.ENABLED if cache == "1" else CacheMode.WRITE_ONLY

        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(
                url=decoded_url,
                config=CrawlerRunConfig(
                    markdown_generator=md_generator,
                    scraping_strategy=LXMLWebScrapingStrategy(),
                    cache_mode=cache_mode
                )
            )

        if not result.success:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=result.error_message
            )

        return (result.markdown.raw_markdown
                if filter_type == FilterType.RAW
                else result.markdown.fit_markdown)

    except Exception as e:
        logger.error(f"Markdown error: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )

async def handle_llm_request(
    redis: aioredis.Redis,
    background_tasks: BackgroundTasks,
    request: Request,
    input_path: str,
    query: Optional[str] = None,
    schema: Optional[str] = None,
    cache: str = "0",
    config: Optional[dict] = None
) -> JSONResponse:
    """Handle LLM extraction requests."""
    base_url = get_base_url(request)

    try:
        if is_task_id(input_path):
            return await handle_task_status(
                redis, input_path, base_url
            )

        if not query:
            return JSONResponse({
                "message": "Please provide an instruction",
                "_links": {
                    "example": {
                        "href": f"{base_url}/llm/{input_path}?q=Extract+main+content",
                        "title": "Try this example"
                    }
                }
            })

        return await create_new_task(
            redis,
            background_tasks,
            input_path,
            query,
            schema,
            cache,
            base_url,
            config
        )

    except Exception as e:
        logger.error(f"LLM endpoint error: {str(e)}", exc_info=True)
        return JSONResponse({
            "error": str(e),
            "_links": {
                "retry": {"href": str(request.url)}
            }
        }, status_code=status.HTTP_500_INTERNAL_SERVER_ERROR)

async def handle_task_status(
    redis: aioredis.Redis,
    task_id: str,
    base_url: str,
    *,
    keep: bool = False
) -> JSONResponse:
    """Handle task status check requests."""
    task = await redis.hgetall(f"task:{task_id}")
    if not task:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Task not found"
        )

    task = decode_redis_hash(task)
    response = create_task_response(task, task_id, base_url)

    if task["status"] in [TaskStatus.COMPLETED, TaskStatus.FAILED]:
        if not keep and should_cleanup_task(task["created_at"]):
            await redis.delete(f"task:{task_id}")

    return JSONResponse(response)

async def create_new_task(
    redis: aioredis.Redis,
    background_tasks: BackgroundTasks,
    input_path: str,
    query: str,
    schema: Optional[str],
    cache: str,
    base_url: str,
    config: dict
) -> JSONResponse:
    """Create and initialize a new task."""
    decoded_url = unquote(input_path)
    if not decoded_url.startswith(('http://', 'https://')):
        decoded_url = 'https://' + decoded_url

    task_id = f"llm_{int(datetime.now().timestamp())}_{id(background_tasks)}"

    await redis.hset(f"task:{task_id}", mapping={
        "status": TaskStatus.PROCESSING,
        "created_at": datetime.now().isoformat(),
        "url": decoded_url
    })

    background_tasks.add_task(
        process_llm_extraction,
        redis,
        config,
        task_id,
        decoded_url,
        query,
        schema,
        cache
    )

    return JSONResponse({
        "task_id": task_id,
        "status": TaskStatus.PROCESSING,
        "url": decoded_url,
        "_links": {
            "self": {"href": f"{base_url}/llm/{task_id}"},
            "status": {"href": f"{base_url}/llm/{task_id}"}
        }
    })

def create_task_response(task: dict, task_id: str, base_url: str) -> dict:
    """Create the response for a task status check."""
    response = {
        "task_id": task_id,
        "status": task["status"],
        "created_at": task["created_at"],
        "url": task["url"],
        "_links": {
            "self": {"href": f"{base_url}/llm/{task_id}"},
            "refresh": {"href": f"{base_url}/llm/{task_id}"}
        }
    }

    if task["status"] == TaskStatus.COMPLETED:
        response["result"] = json.loads(task["result"])
    elif task["status"] == TaskStatus.FAILED:
        response["error"] = task["error"]

    return response

async def stream_results(crawler: AsyncWebCrawler, results_gen: AsyncGenerator) -> AsyncGenerator[bytes, None]:
    """Stream results with heartbeats and completion markers."""
    from utils import datetime_handler

    try:
        async for result in results_gen:
            try:
                server_memory_mb = _get_memory_mb()
                result_dict = result.model_dump()
                result_dict['server_memory_mb'] = server_memory_mb
                logger.info(f"Streaming result for {result_dict.get('url', 'unknown')}")
                data = json.dumps(result_dict, default=datetime_handler) + "\n"
                yield data.encode('utf-8')
            except Exception as e:
                logger.error(f"Serialization error: {e}")
                error_response = {"error": str(e), "url": getattr(result, 'url', 'unknown')}
                yield (json.dumps(error_response) + "\n").encode('utf-8')

        # Trailing newline keeps the completion marker a valid NDJSON record
        yield (json.dumps({"status": "completed"}) + "\n").encode('utf-8')

    except asyncio.CancelledError:
        logger.warning("Client disconnected during streaming")
    finally:
        # The crawler comes from the shared pool, so no explicit close here;
        # the pool janitor handles cleanup of idle instances.
        pass

async def handle_crawl_request(
    urls: List[str],
    browser_config: dict,
    crawler_config: dict,
    config: dict
) -> dict:
    """Handle non-streaming crawl requests."""
    start_mem_mb = _get_memory_mb()  # Memory before the crawl
    start_time = time.time()
    mem_delta_mb = None
    peak_mem_mb = start_mem_mb

    try:
        urls = [('https://' + url) if not url.startswith(('http://', 'https://')) else url for url in urls]
        browser_config = BrowserConfig.load(browser_config)
        crawler_config = CrawlerRunConfig.load(crawler_config)

        dispatcher = MemoryAdaptiveDispatcher(
            memory_threshold_percent=config["crawler"]["memory_threshold_percent"],
            rate_limiter=RateLimiter(
                base_delay=tuple(config["crawler"]["rate_limiter"]["base_delay"])
            ) if config["crawler"]["rate_limiter"]["enabled"] else None
        )

        from crawler_pool import get_crawler
        crawler = await get_crawler(browser_config)

        # Apply global overrides from config.yml's crawler.base_config,
        # using hasattr to set only attributes the run config actually has
        base_config = config["crawler"]["base_config"]
        for key, value in base_config.items():
            if hasattr(crawler_config, key):
                setattr(crawler_config, key, value)

        func = getattr(crawler, "arun" if len(urls) == 1 else "arun_many")
        partial_func = partial(func,
                               urls[0] if len(urls) == 1 else urls,
                               config=crawler_config,
                               dispatcher=dispatcher)
        results = await partial_func()

        end_mem_mb = _get_memory_mb()  # Memory after the crawl
        end_time = time.time()

        if start_mem_mb is not None and end_mem_mb is not None:
            mem_delta_mb = end_mem_mb - start_mem_mb
            peak_mem_mb = max(peak_mem_mb if peak_mem_mb else 0, end_mem_mb)
        logger.info(f"Memory usage: Start: {start_mem_mb} MB, End: {end_mem_mb} MB, Delta: {mem_delta_mb} MB, Peak: {peak_mem_mb} MB")

        return {
            "success": True,
            "results": [result.model_dump() for result in results],
            "server_processing_time_s": end_time - start_time,
            "server_memory_delta_mb": mem_delta_mb,
            "server_peak_memory_mb": peak_mem_mb
        }

    except Exception as e:
        logger.error(f"Crawl error: {str(e)}", exc_info=True)
        # The pooled crawler is intentionally left open for reuse;
        # the pool janitor handles cleanup.

        # Measure memory even on error if possible
        end_mem_mb_error = _get_memory_mb()
        if start_mem_mb is not None and end_mem_mb_error is not None:
            mem_delta_mb = end_mem_mb_error - start_mem_mb

        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=json.dumps({  # Send a structured error payload
                "error": str(e),
                "server_memory_delta_mb": mem_delta_mb,
                "server_peak_memory_mb": max(peak_mem_mb if peak_mem_mb else 0, end_mem_mb_error or 0)
            })
        )

async def handle_stream_crawl_request(
    urls: List[str],
    browser_config: dict,
    crawler_config: dict,
    config: dict
) -> Tuple[AsyncWebCrawler, AsyncGenerator]:
    """Handle streaming crawl requests."""
    try:
        browser_config = BrowserConfig.load(browser_config)
        browser_config.verbose = False  # Keep quiet for production / stress testing
        crawler_config = CrawlerRunConfig.load(crawler_config)
        crawler_config.scraping_strategy = LXMLWebScrapingStrategy()
        crawler_config.stream = True

        dispatcher = MemoryAdaptiveDispatcher(
            memory_threshold_percent=config["crawler"]["memory_threshold_percent"],
            rate_limiter=RateLimiter(
                base_delay=tuple(config["crawler"]["rate_limiter"]["base_delay"])
            )
        )

        from crawler_pool import get_crawler
        crawler = await get_crawler(browser_config)

        results_gen = await crawler.arun_many(
            urls=urls,
            config=crawler_config,
            dispatcher=dispatcher
        )

        return crawler, results_gen

    except Exception as e:
        # The pooled crawler (if any) stays open; the pool janitor handles cleanup.
        logger.error(f"Stream crawl error: {str(e)}", exc_info=True)
        # Raising an HTTPException here prevents a streaming response from starting
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=str(e)
        )

async def handle_crawl_job(
    redis,
    background_tasks: BackgroundTasks,
    urls: List[str],
    browser_config: Dict,
    crawler_config: Dict,
    config: Dict,
) -> Dict:
    """
    Fire-and-forget version of handle_crawl_request.
    Creates a task in Redis, runs the heavy work in a background task,
    and lets /crawl/job/{task_id} polling fetch the result.
    """
    task_id = f"crawl_{uuid4().hex[:8]}"
    await redis.hset(f"task:{task_id}", mapping={
        "status": TaskStatus.PROCESSING,  # keep enum values consistent
        "created_at": datetime.utcnow().isoformat(),
        "url": json.dumps(urls),  # store the list as a JSON string
        "result": "",
        "error": "",
    })

    async def _runner():
        try:
            result = await handle_crawl_request(
                urls=urls,
                browser_config=browser_config,
                crawler_config=crawler_config,
                config=config,
            )
            await redis.hset(f"task:{task_id}", mapping={
                "status": TaskStatus.COMPLETED,
                "result": json.dumps(result),
            })
            await asyncio.sleep(5)  # Give Redis time to process the update
        except Exception as exc:
            await redis.hset(f"task:{task_id}", mapping={
                "status": TaskStatus.FAILED,
                "error": str(exc),
            })

    background_tasks.add_task(_runner)
    return {"task_id": task_id}
55 deploy/docker/auth.py Normal file
@@ -0,0 +1,55 @@
import os
import base64
from datetime import datetime, timedelta, timezone
from typing import Dict, Optional

from jwt import JWT, jwk_from_dict
from jwt.utils import get_int_from_datetime
from fastapi import Depends, HTTPException
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
from pydantic import BaseModel, EmailStr

instance = JWT()
security = HTTPBearer(auto_error=False)
SECRET_KEY = os.environ.get("SECRET_KEY", "mysecret")
ACCESS_TOKEN_EXPIRE_MINUTES = 60

def get_jwk_from_secret(secret: str):
    """Convert a secret string into a JWK object."""
    secret_bytes = secret.encode('utf-8')
    b64_secret = base64.urlsafe_b64encode(secret_bytes).rstrip(b'=').decode('utf-8')
    return jwk_from_dict({"kty": "oct", "k": b64_secret})

def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
    """Create a JWT access token with an expiration."""
    to_encode = data.copy()
    expire = datetime.now(timezone.utc) + (expires_delta or timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES))
    to_encode.update({"exp": get_int_from_datetime(expire)})
    signing_key = get_jwk_from_secret(SECRET_KEY)
    return instance.encode(to_encode, signing_key, alg='HS256')

def verify_token(credentials: HTTPAuthorizationCredentials = Depends(security)) -> Dict:
    """Verify the JWT token from the Authorization header."""
    if credentials is None:
        return None
    token = credentials.credentials
    verifying_key = get_jwk_from_secret(SECRET_KEY)
    try:
        payload = instance.decode(token, verifying_key, do_time_check=True, algorithms='HS256')
        return payload
    except Exception:
        raise HTTPException(status_code=401, detail="Invalid or expired token")


def get_token_dependency(config: Dict):
    """Return the token dependency if JWT is enabled, else a function that returns None."""
    if config.get("security", {}).get("jwt_enabled", False):
        return verify_token
    else:
        return lambda: None


class TokenRequest(BaseModel):
    email: EmailStr
11631 deploy/docker/c4ai-code-context.md Normal file (file diff suppressed because it is too large)
8899 deploy/docker/c4ai-doc-context.md Normal file (file diff suppressed because it is too large)
91 deploy/docker/config.yml Normal file
@@ -0,0 +1,91 @@
# Application Configuration
app:
  title: "Crawl4AI API"
  version: "1.0.0"
  host: "0.0.0.0"
  port: 11234
  reload: False
  workers: 1
  timeout_keep_alive: 300

# Default LLM Configuration
llm:
  provider: "openai/gpt-4o-mini"
  api_key_env: "OPENAI_API_KEY"
  # api_key: sk-...  # If you pass the API key directly, api_key_env is ignored

# Redis Configuration
redis:
  host: "localhost"
  port: 6379
  db: 0
  password: ""
  ssl: False
  ssl_cert_reqs: None
  ssl_ca_certs: None
  ssl_certfile: None
  ssl_keyfile: None

# Rate Limiting Configuration
rate_limiting:
  enabled: True
  default_limit: "1000/minute"
  trusted_proxies: []
  storage_uri: "memory://" # Use "redis://localhost:6379" for production

# Security Configuration
security:
  enabled: false
  jwt_enabled: false
  https_redirect: false
  trusted_hosts: ["*"]
  headers:
    x_content_type_options: "nosniff"
    x_frame_options: "DENY"
    content_security_policy: "default-src 'self'"
    strict_transport_security: "max-age=63072000; includeSubDomains"

# Crawler Configuration
crawler:
  base_config:
    simulate_user: true
  memory_threshold_percent: 95.0
  rate_limiter:
    enabled: true
    base_delay: [1.0, 2.0]
  timeouts:
    stream_init: 30.0 # Timeout for stream initialization
    batch_process: 300.0 # Timeout for batch processing
  pool:
    max_pages: 40 # ← GLOBAL_SEM permits
    idle_ttl_sec: 1800 # ← 30 min janitor cutoff
  browser:
    kwargs:
      headless: true
      text_mode: true
    extra_args:
      # - "--single-process"
      - "--no-sandbox"
      - "--disable-dev-shm-usage"
      - "--disable-gpu"
      - "--disable-software-rasterizer"
      - "--disable-web-security"
      - "--allow-insecure-localhost"
      - "--ignore-certificate-errors"

# Logging Configuration
logging:
  level: "INFO"
  format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

# Observability Configuration
observability:
  prometheus:
    enabled: True
    endpoint: "/metrics"
  health_check:
    endpoint: "/health"
60 deploy/docker/crawler_pool.py Normal file
@@ -0,0 +1,60 @@
# crawler_pool.py (new file)
import asyncio
import hashlib
import json
import time
from contextlib import suppress
from typing import Dict

import psutil

from crawl4ai import AsyncWebCrawler, BrowserConfig
from utils import load_config

CONFIG = load_config()

POOL: Dict[str, AsyncWebCrawler] = {}
LAST_USED: Dict[str, float] = {}
LOCK = asyncio.Lock()

MEM_LIMIT = CONFIG.get("crawler", {}).get("memory_threshold_percent", 95.0)  # % RAM – refuse new browsers above this
IDLE_TTL = CONFIG.get("crawler", {}).get("pool", {}).get("idle_ttl_sec", 1800)  # close if unused for 30 min

def _sig(cfg: BrowserConfig) -> str:
    """Stable signature for a browser config, used as the pool key."""
    payload = json.dumps(cfg.to_dict(), sort_keys=True, separators=(",", ":"))
    return hashlib.sha1(payload.encode()).hexdigest()

async def get_crawler(cfg: BrowserConfig) -> AsyncWebCrawler:
    sig = _sig(cfg)  # computed before the try block so the finally clause can always reference it
    try:
        async with LOCK:
            if sig in POOL:
                LAST_USED[sig] = time.time()
                return POOL[sig]
            if psutil.virtual_memory().percent >= MEM_LIMIT:
                raise MemoryError("RAM pressure – new browser denied")
            crawler = AsyncWebCrawler(config=cfg, thread_safe=False)
            await crawler.start()
            POOL[sig] = crawler
            LAST_USED[sig] = time.time()
            return crawler
    except MemoryError as e:
        raise MemoryError(f"RAM pressure – new browser denied: {e}")
    except Exception as e:
        raise RuntimeError(f"Failed to start browser: {e}")
    finally:
        if sig in POOL:
            LAST_USED[sig] = time.time()
        else:
            # If the browser failed to start, remove any stale entry from the pool
            POOL.pop(sig, None)
            LAST_USED.pop(sig, None)

async def close_all():
    async with LOCK:
        await asyncio.gather(*(c.close() for c in POOL.values()), return_exceptions=True)
        POOL.clear()
        LAST_USED.clear()

async def janitor():
    while True:
        await asyncio.sleep(60)
        now = time.time()
        async with LOCK:
            for sig, crawler in list(POOL.items()):
                if now - LAST_USED[sig] > IDLE_TTL:
                    with suppress(Exception):
                        await crawler.close()
                    POOL.pop(sig, None)
                    LAST_USED.pop(sig, None)
Some files were not shown because too many files have changed in this diff.