Compare commits

654 commits: v0.4.243...fix/async-

(Commit table — Author | SHA1 | Date: abbreviated SHAs only, a30548a98f through 3e769a9c6c; the author, date, and message columns are empty in this capture.)
.claude/settings.local.json (new file, 28 lines)

@@ -0,0 +1,28 @@
{
  "permissions": {
    "allow": [
      "Bash(cd:*)",
      "Bash(python3:*)",
      "Bash(python:*)",
      "Bash(grep:*)",
      "Bash(mkdir:*)",
      "Bash(cp:*)",
      "Bash(rm:*)",
      "Bash(true)",
      "Bash(./package-extension.sh:*)",
      "Bash(find:*)",
      "Bash(chmod:*)",
      "Bash(rg:*)",
      "Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -A 5 -B 5 \"Script Builder\" docs/md_v2/apps/crawl4ai-assistant/)",
      "Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg -A 30 \"generateCode\\(events, format\\)\" docs/md_v2/apps/crawl4ai-assistant/content/content.js)",
      "Bash(/Users/unclecode/.npm-global/lib/node_modules/@anthropic-ai/claude-code/vendor/ripgrep/arm64-darwin/rg \"<style>\" docs/md_v2/apps/crawl4ai-assistant/index.html -A 5)",
      "Bash(git checkout:*)",
      "Bash(docker logs:*)",
      "Bash(curl:*)",
      "Bash(docker compose:*)",
      "Bash(./test-final-integration.sh:*)",
      "Bash(mv:*)"
    ]
  },
  "enableAllProjectMcpServers": false
}
.gitattributes (new file, 12 lines, vendored)

@@ -0,0 +1,12 @@
# Documentation
*.html linguist-documentation
docs/* linguist-documentation
docs/examples/* linguist-documentation
docs/md_v2/* linguist-documentation

# Explicitly mark Python as the main language
*.py linguist-detectable=true
*.py linguist-language=Python

# Exclude HTML from language statistics
*.html linguist-detectable=false
.github/DISCUSSION_TEMPLATE/feature-requests.yml (new file, 59 lines, vendored)

@@ -0,0 +1,59 @@
title: "[Feature Request]: "
labels: ["⚙️ New"]
body:
  - type: markdown
    attributes:
      value: |
        Thank you for your interest in suggesting a new feature! Before you submit, please take a moment to check if it already exists
        in this discussions category to avoid duplicates. 😊

  - type: textarea
    id: needs_to_be_done
    attributes:
      label: What needs to be done?
      description: Please describe the feature or functionality you'd like to see.
      placeholder: "e.g., Return alt text along with images scraped from a webpage in Result"
    validations:
      required: true

  - type: textarea
    id: problem_to_solve
    attributes:
      label: What problem does this solve?
      description: Explain the pain point or issue this feature will help address.
      placeholder: "e.g., Bypass Captchas added by Cloudflare"
    validations:
      required: true

  - type: textarea
    id: target_users
    attributes:
      label: Target users/beneficiaries
      description: Who would benefit from this feature? (e.g., specific teams, developers, users, etc.)
      placeholder: "e.g., Marketing teams, developers"
    validations:
      required: false

  - type: textarea
    id: current_workarounds
    attributes:
      label: Current alternatives/workarounds
      description: Are there any existing solutions or workarounds? How does this feature improve upon them?
      placeholder: "e.g., Users manually select the CSS classes mapped to data fields to extract them"
    validations:
      required: false

  - type: markdown
    attributes:
      value: |
        ### 💡 Implementation Ideas

  - type: textarea
    id: proposed_approach
    attributes:
      label: Proposed approach
      description: Share any ideas you have for how this feature could be implemented. Point out any challenges you foresee
        and the success metrics for this feature.
      placeholder: "e.g., Implement a breadth-first traversal algorithm for the scraper"
    validations:
      required: false
.github/FUNDING.yml (new file, 7 lines, vendored)

@@ -0,0 +1,7 @@
# These are supported funding model platforms

# GitHub Sponsors
github: unclecode

# Custom links for enterprise inquiries (uncomment when ready)
# custom: ["https://crawl4ai.com/enterprise"]
.github/ISSUE_TEMPLATE/bug_report.yml (new file, 127 lines, vendored)

@@ -0,0 +1,127 @@
name: Bug Report
description: Report a bug with Crawl4AI.
title: "[Bug]: "
labels: ["🐞 Bug", "🩺 Needs Triage"]
body:
  - type: input
    id: crawl4ai_version
    attributes:
      label: crawl4ai version
      description: Specify the version of crawl4ai you are using.
      placeholder: "e.g., 2.0.0"
    validations:
      required: true

  - type: textarea
    id: expected_behavior
    attributes:
      label: Expected Behavior
      description: Describe what you expected to happen.
      placeholder: "Provide a detailed explanation of the expected outcome."
    validations:
      required: true

  - type: textarea
    id: current_behavior
    attributes:
      label: Current Behavior
      description: Describe what is happening instead of the expected behavior.
      placeholder: "Describe the actual result or issue you encountered."
    validations:
      required: true

  - type: dropdown
    id: reproducible
    attributes:
      label: Is this reproducible?
      description: Indicate whether this bug can be reproduced consistently.
      options:
        - "Yes"
        - "No"
    validations:
      required: true

  - type: textarea
    id: inputs
    attributes:
      label: Inputs Causing the Bug
      description: Provide details about the inputs causing the issue.
      placeholder: |
        - URL(s):
        - Settings used:
        - Input data (if applicable):
      render: bash

  - type: textarea
    id: steps_to_reproduce
    attributes:
      label: Steps to Reproduce
      description: Provide step-by-step instructions to reproduce the issue.
      placeholder: |
        1. Go to...
        2. Click on...
        3. Observe the issue...
      render: bash

  - type: textarea
    id: code_snippets
    attributes:
      label: Code snippets
      description: Provide code snippets (if any). Add comments as necessary.
      placeholder: print("Hello world")
      render: python

  # Header Section with Title
  - type: markdown
    attributes:
      value: |
        ## Supporting Information
        Please provide the following details to help us understand and resolve your issue. This will assist us in reproducing and diagnosing the problem.

  - type: input
    id: os
    attributes:
      label: OS
      description: Please provide the operating system & distro where the issue occurs.
      placeholder: "e.g., Windows, macOS, Linux"
    validations:
      required: true

  - type: input
    id: python_version
    attributes:
      label: Python version
      description: Specify the Python version being used.
      placeholder: "e.g., 3.8.5"
    validations:
      required: true

  # Browser Field
  - type: input
    id: browser
    attributes:
      label: Browser
      description: Provide the name of the browser you are using.
      placeholder: "e.g., Chrome, Firefox, Safari"
    validations:
      required: false

  # Browser Version Field
  - type: input
    id: browser_version
    attributes:
      label: Browser version
      description: Provide the version of the browser you are using.
      placeholder: "e.g., 91.0.4472.124"
    validations:
      required: false

  # Error Logs Field (Text Area)
  - type: textarea
    id: error_logs
    attributes:
      label: Error logs & Screenshots (if applicable)
      description: If you encountered any errors, please provide the error logs. Attach any relevant screenshots to help us understand the issue.
      placeholder: "Paste error logs here and attach your screenshots"
    validations:
      required: false
.github/ISSUE_TEMPLATE/config.yml (new file, 8 lines, vendored)

@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: Feature Requests
    url: https://github.com/unclecode/crawl4ai/discussions/categories/feature-requests
    about: "Suggest new features or enhancements for Crawl4AI"
  - name: Forums - Q&A
    url: https://github.com/unclecode/crawl4ai/discussions/categories/forums-q-a
    about: "Ask questions or engage in general discussions about Crawl4AI"
.github/pull_request_template.md (new file, 19 lines, vendored)

@@ -0,0 +1,19 @@
## Summary
Please include a summary of the change and/or which issues are fixed.

e.g., `Fixes #123` (tag GitHub issue numbers in this format so they are automatically linked to your PR)

## List of files changed and why
e.g., quickstart.py - to update the example as per the new changes

## How Has This Been Tested?
Please describe the tests that you ran to verify your changes.

## Checklist:

- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my own code
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have made corresponding changes to the documentation
- [ ] I have added/updated unit tests that prove my fix is effective or that my feature works
- [ ] New and existing unit tests pass locally with my changes
.github/workflows/docker-release.yml (new file, 81 lines, vendored)

@@ -0,0 +1,81 @@
name: Docker Release
on:
  release:
    types: [published]
  push:
    tags:
      - 'docker-rebuild-v*'  # Allow manual Docker rebuilds via tags

jobs:
  docker:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Extract version from release or tag
        id: get_version
        run: |
          if [ "${{ github.event_name }}" == "release" ]; then
            # Triggered by release event
            VERSION="${{ github.event.release.tag_name }}"
            VERSION=${VERSION#v}  # Remove 'v' prefix
          else
            # Triggered by docker-rebuild-v* tag
            VERSION=${GITHUB_REF#refs/tags/docker-rebuild-v}
          fi
          echo "VERSION=$VERSION" >> $GITHUB_OUTPUT
          echo "Building Docker images for version: $VERSION"

      - name: Extract major and minor versions
        id: versions
        run: |
          VERSION=${{ steps.get_version.outputs.VERSION }}
          MAJOR=$(echo $VERSION | cut -d. -f1)
          MINOR=$(echo $VERSION | cut -d. -f1-2)
          echo "MAJOR=$MAJOR" >> $GITHUB_OUTPUT
          echo "MINOR=$MINOR" >> $GITHUB_OUTPUT
          echo "Semantic versions - Major: $MAJOR, Minor: $MINOR"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_TOKEN }}

      - name: Build and push Docker images
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: |
            unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
            unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}
            unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}
            unclecode/crawl4ai:latest
          platforms: linux/amd64,linux/arm64
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Summary
        run: |
          echo "## 🐳 Docker Release Complete!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Published Images" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:latest\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Platforms" >> $GITHUB_STEP_SUMMARY
          echo "- linux/amd64" >> $GITHUB_STEP_SUMMARY
          echo "- linux/arm64" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 🚀 Pull Command" >> $GITHUB_STEP_SUMMARY
          echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
          echo "docker pull unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
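The `get_version` and `versions` steps above do the version arithmetic with shell parameter expansion and `cut`. As a quick cross-check outside CI, here is a minimal Python sketch of the same parsing (the `split_semver` helper is illustrative, not part of the repo):

```python
def split_semver(tag: str) -> tuple:
    """Mirror the workflow: strip a leading 'v', then take the first one
    and two dot-separated components as MAJOR and MINOR."""
    version = tag.removeprefix("v")   # VERSION=${VERSION#v}
    parts = version.split(".")
    major = parts[0]                  # MAJOR=$(echo $VERSION | cut -d. -f1)
    minor = ".".join(parts[:2])       # MINOR=$(echo $VERSION | cut -d. -f1-2)
    return version, major, minor

assert split_semver("v1.2.3") == ("1.2.3", "1", "1.2")
```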
.github/workflows/docs/ARCHITECTURE.md (new file, 917 lines, vendored)

@@ -0,0 +1,917 @@
# Workflow Architecture Documentation

## Overview

This document describes the technical architecture of the split release pipeline for Crawl4AI.

---

## Architecture Diagram

```
Developer
    git tag v1.2.3
    git push --tags
        │
        ▼
GitHub Repository
    Tag Event: v1.2.3
        │
        ▼
    release.yml (Release Pipeline)
      1. Extract Version         v1.2.3 → 1.2.3
      2. Validate Version        Tag == __version__.py
      3. Build Python Package    - Source dist (.tar.gz)
                                 - Wheel (.whl)
      4. Upload to PyPI          - Authenticate with token
                                 - Upload dist/*
      5. Create GitHub Release   - Tag: v1.2.3
                                 - Body: Install instructions
                                 - Status: Published
        │
        ▼
    Release Event: published (v1.2.3)
        │
        ▼
    docker-release.yml (Docker Pipeline)
      1. Extract Version from Release
           github.event.release.tag_name → 1.2.3
      2. Parse Semantic Versions
           1.2.3 → Major: 1, Minor: 1.2
      3. Setup Multi-Arch Build
           - Docker Buildx
           - QEMU emulation
      4. Authenticate Docker Hub
           - Username: DOCKER_USERNAME
           - Token: DOCKER_TOKEN
      5. Build Multi-Arch Images
           linux/amd64 + linux/arm64
           Cache: GitHub Actions (type=gha)
      6. Push to Docker Hub
           - unclecode/crawl4ai:1.2.3
           - unclecode/crawl4ai:1.2
           - unclecode/crawl4ai:1
           - unclecode/crawl4ai:latest
        │
        ▼
External Services
    ├─ PyPI:        crawl4ai 1.2.3
    ├─ Docker Hub:  unclecode/crawl4ai
    └─ GitHub:      Releases v1.2.3
```

---

## Component Details

### 1. Release Pipeline (release.yml)

#### Purpose
Fast publication of the Python package and GitHub release.

#### Input
- **Trigger**: Git tag matching `v*` (excluding `test-v*`)
- **Example**: `v1.2.3`

#### Processing Stages

##### Stage 1: Version Extraction
```bash
Input:  refs/tags/v1.2.3
Output: VERSION=1.2.3
```

**Implementation**:
```bash
TAG_VERSION=${GITHUB_REF#refs/tags/v}  # Remove 'refs/tags/v' prefix
echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
```

##### Stage 2: Version Validation
```bash
Input:  TAG_VERSION=1.2.3
Check:  crawl4ai/__version__.py contains __version__ = "1.2.3"
Output: Pass/Fail
```

**Implementation**:
```bash
PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")
if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
  exit 1
fi
```

##### Stage 3: Package Build
```bash
Input:  Source code + pyproject.toml
Output: dist/crawl4ai-1.2.3.tar.gz
        dist/crawl4ai-1.2.3-py3-none-any.whl
```

**Implementation**:
```bash
python -m build
# Uses build backend defined in pyproject.toml
```

##### Stage 4: PyPI Upload
```bash
Input:  dist/*.{tar.gz,whl}
Auth:   PYPI_TOKEN
Output: Package published to PyPI
```

**Implementation**:
```bash
twine upload dist/*
# Environment:
#   TWINE_USERNAME: __token__
#   TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
```

##### Stage 5: GitHub Release Creation
```bash
Input:  Tag: v1.2.3
        Body: Markdown content
Output: Published GitHub release
```

**Implementation**:
```yaml
uses: softprops/action-gh-release@v2
with:
  tag_name: v1.2.3
  name: Release v1.2.3
  body: |
    Installation instructions and changelog
  draft: false
  prerelease: false
```

#### Output
- **PyPI Package**: https://pypi.org/project/crawl4ai/1.2.3/
- **GitHub Release**: Published release on repository
- **Event**: `release.published` (triggers Docker workflow)

#### Timeline
```
0:00 - Tag pushed
0:01 - Checkout + Python setup
0:02 - Version validation
0:03 - Package build
0:04 - PyPI upload starts
0:06 - PyPI upload complete
0:07 - GitHub release created
0:08 - Workflow complete
```

---

### 2. Docker Release Pipeline (docker-release.yml)

#### Purpose
Build and publish multi-architecture Docker images.

#### Inputs

##### Input 1: Release Event (Automatic)
```yaml
Event: release.published
Data:  github.event.release.tag_name = "v1.2.3"
```

##### Input 2: Docker Rebuild Tag (Manual)
```yaml
Tag: docker-rebuild-v1.2.3
```

#### Processing Stages

##### Stage 1: Version Detection
```bash
# From release event:
VERSION = github.event.release.tag_name.strip("v")
# Result: "1.2.3"

# From rebuild tag:
VERSION = GITHUB_REF.replace("refs/tags/docker-rebuild-v", "")
# Result: "1.2.3"
```

##### Stage 2: Semantic Version Parsing
```bash
Input:  VERSION=1.2.3
Output: MAJOR=1
        MINOR=1.2
        PATCH=3 (implicit)
```

**Implementation**:
```bash
MAJOR=$(echo $VERSION | cut -d. -f1)    # Extract first component
MINOR=$(echo $VERSION | cut -d. -f1-2)  # Extract first two components
```

##### Stage 3: Multi-Architecture Setup
```yaml
Setup:
  - Docker Buildx (multi-platform builder)
  - QEMU (ARM emulation on x86)

Platforms:
  - linux/amd64 (x86_64)
  - linux/arm64 (aarch64)
```

**Architecture**:
```
GitHub Runner (linux/amd64)
├─ Buildx Builder
│  ├─ Native: Build linux/amd64 image
│  └─ QEMU: Emulate ARM to build linux/arm64 image
└─ Generate manifest list (points to both images)
```

##### Stage 4: Docker Hub Authentication
```bash
Input:  DOCKER_USERNAME
        DOCKER_TOKEN
Output: Authenticated Docker client
```

##### Stage 5: Build with Cache
```yaml
Cache Configuration:
  cache-from: type=gha         # Read from GitHub Actions cache
  cache-to: type=gha,mode=max  # Write all layers

Cache Key Components:
  - Workflow file path
  - Branch name
  - Architecture (amd64/arm64)
```

**Cache Hierarchy**:
```
Cache Entry: main/docker-release.yml/linux-amd64
├─ Layer: sha256:abc123... (FROM python:3.12)
├─ Layer: sha256:def456... (RUN apt-get update)
├─ Layer: sha256:ghi789... (COPY requirements.txt)
├─ Layer: sha256:jkl012... (RUN pip install)
└─ Layer: sha256:mno345... (COPY . /app)

Cache Hit/Miss Logic:
- If layer input unchanged → cache hit → skip build
- If layer input changed → cache miss → rebuild + all subsequent layers
```
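To make the hit/miss rule concrete, here is a small Python model (a sketch for illustration, not part of the pipeline) showing that the first changed layer invalidates every layer after it:

```python
def layers_to_rebuild(layer_inputs, changed):
    """Docker layer caching is sequential: the first layer whose input
    changed, and every layer after it, must be rebuilt."""
    for i, inp in enumerate(layer_inputs):
        if inp in changed:
            return list(range(i, len(layer_inputs)))
    return []  # full cache hit

layers = ["base-image", "apt-get", "requirements.txt", "pip-install", "app-code"]
assert layers_to_rebuild(layers, {"app-code"}) == [4]                # code-only change
assert layers_to_rebuild(layers, {"requirements.txt"}) == [2, 3, 4]  # deps change
```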

##### Stage 6: Tag Generation
```bash
Input: VERSION=1.2.3, MAJOR=1, MINOR=1.2

Output Tags:
- unclecode/crawl4ai:1.2.3   (exact version)
- unclecode/crawl4ai:1.2     (minor version)
- unclecode/crawl4ai:1       (major version)
- unclecode/crawl4ai:latest  (latest stable)
```

**Tag Strategy** (see the sketch after this list):
- All tags point to the same image SHA
- Users can pin to the desired stability level
- Pushing a new version updates `1`, `1.2`, and `latest` automatically
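A minimal sketch of this tag fan-out (illustrative only; in practice the tags are listed inline in docker-release.yml):

```python
def docker_tags(version: str, repo: str = "unclecode/crawl4ai") -> list:
    """Expand one semantic version into the four tags the workflow pushes."""
    parts = version.split(".")
    return [
        f"{repo}:{version}",              # exact:  1.2.3
        f"{repo}:{'.'.join(parts[:2])}",  # minor:  1.2
        f"{repo}:{parts[0]}",             # major:  1
        f"{repo}:latest",
    ]

assert docker_tags("1.2.3") == [
    "unclecode/crawl4ai:1.2.3",
    "unclecode/crawl4ai:1.2",
    "unclecode/crawl4ai:1",
    "unclecode/crawl4ai:latest",
]
```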
##### Stage 7: Push to Registry
```bash
For each tag:
  For each platform (amd64, arm64):
    Push image to Docker Hub

Create manifest list:
  Manifest: unclecode/crawl4ai:1.2.3
  ├─ linux/amd64: sha256:abc...
  └─ linux/arm64: sha256:def...

Docker CLI automatically selects the correct platform on pull
```

#### Output
- **Docker Images**: 4 tags × 2 platforms = 8 image variants + 4 manifests
- **Docker Hub**: https://hub.docker.com/r/unclecode/crawl4ai/tags

#### Timeline

**Cold Cache (First Build)**:
```
0:00 - Release event received
0:01 - Checkout + Buildx setup
0:02 - Docker Hub auth
0:03 - Start build (amd64)
0:08 - Complete amd64 build
0:09 - Start build (arm64)
0:14 - Complete arm64 build
0:15 - Generate manifests
0:16 - Push all tags
0:17 - Workflow complete
```

**Warm Cache (Code Change Only)**:
```
0:00 - Release event received
0:01 - Checkout + Buildx setup
0:02 - Docker Hub auth
0:03 - Start build (amd64) - cache hit for layers 1-4
0:04 - Complete amd64 build (only layer 5 rebuilt)
0:05 - Start build (arm64) - cache hit for layers 1-4
0:06 - Complete arm64 build (only layer 5 rebuilt)
0:07 - Generate manifests
0:08 - Push all tags
0:09 - Workflow complete
```

---

## Data Flow

### Version Information Flow

```
Developer
    │
    ▼
crawl4ai/__version__.py
    __version__ = "1.2.3"
    │
    ├─► Git Tag
    │     v1.2.3
    │       │
    │       ▼
    │     release.yml
    │       │
    │       ├─► Validation
    │       │     ✓ Match
    │       │
    │       ├─► PyPI Package
    │       │     crawl4ai==1.2.3
    │       │
    │       └─► GitHub Release
    │             v1.2.3
    │               │
    │               ▼
    │             docker-release.yml
    │               │
    │               └─► Docker Tags
    │                     1.2.3, 1.2, 1, latest
    │
    └─► Package Metadata
          pyproject.toml
          version = "1.2.3"
```

### Secrets Flow

```
GitHub Secrets (Encrypted at Rest)
    │
    ├─► PYPI_TOKEN
    │     │
    │     ▼
    │   release.yml
    │     │
    │     ▼
    │   TWINE_PASSWORD env var (masked in logs)
    │     │
    │     ▼
    │   PyPI API (HTTPS)
    │
    ├─► DOCKER_USERNAME
    │     │
    │     ▼
    │   docker-release.yml
    │     │
    │     ▼
    │   docker/login-action (masked in logs)
    │     │
    │     ▼
    │   Docker Hub API (HTTPS)
    │
    └─► DOCKER_TOKEN
          │
          ▼
        docker-release.yml
          │
          ▼
        docker/login-action (masked in logs)
          │
          ▼
        Docker Hub API (HTTPS)
```

### Artifact Flow

```
Source Code
    │
    ├─► release.yml
    │     │
    │     ▼
    │   python -m build
    │     │
    │     ├─► crawl4ai-1.2.3.tar.gz
    │     │     │
    │     │     ▼
    │     │   PyPI Storage
    │     │     │
    │     │     ▼
    │     │   pip install crawl4ai
    │     │
    │     └─► crawl4ai-1.2.3-py3-none-any.whl
    │           │
    │           ▼
    │         PyPI Storage
    │           │
    │           ▼
    │         pip install crawl4ai
    │
    └─► docker-release.yml
          │
          ▼
        docker build
          │
          ├─► Image: linux/amd64
          │     │
          │     └─► Docker Hub
          │           unclecode/crawl4ai:1.2.3-amd64
          │
          └─► Image: linux/arm64
                │
                └─► Docker Hub
                      unclecode/crawl4ai:1.2.3-arm64
```

---

## State Machines

### Release Pipeline State Machine

```
┌──────────────┐
│    START     │
└──────┬───────┘
       │
       ▼
┌──────────────┐
│   Extract    │
│   Version    │
└──────┬───────┘
       │
       ▼
┌──────────────┐  No   ┌──────────┐
│   Validate   │──────►│  FAILED  │
│   Version    │       │ (Exit 1) │
└──────┬───────┘       └──────────┘
       │ Yes
       ▼
┌──────────────┐
│    Build     │
│   Package    │
└──────┬───────┘
       │
       ▼
┌──────────────┐ Error ┌──────────┐
│   Upload     │──────►│  FAILED  │
│   to PyPI    │       │ (Exit 1) │
└──────┬───────┘       └──────────┘
       │ Success
       ▼
┌──────────────┐
│   Create     │
│  GH Release  │
└──────┬───────┘
       │
       ▼
┌──────────────┐
│   SUCCESS    │
│ (Emit Event) │
└──────────────┘
```

### Docker Pipeline State Machine

```
┌──────────────┐
│    START     │
│   (Event)    │
└──────┬───────┘
       │
       ▼
┌──────────────┐
│   Detect     │
│   Version    │
│   Source     │
└──────┬───────┘
       │
       ▼
┌──────────────┐
│    Parse     │
│   Semantic   │
│   Versions   │
└──────┬───────┘
       │
       ▼
┌──────────────┐ Error ┌──────────┐
│ Authenticate │──────►│  FAILED  │
│  Docker Hub  │       │ (Exit 1) │
└──────┬───────┘       └──────────┘
       │ Success
       ▼
┌──────────────┐
│    Build     │
│    amd64     │
└──────┬───────┘
       │
       ▼
┌──────────────┐ Error ┌──────────┐
│    Build     │──────►│  FAILED  │
│    arm64     │       │ (Exit 1) │
└──────┬───────┘       └──────────┘
       │ Success
       ▼
┌──────────────┐
│   Push All   │
│     Tags     │
└──────┬───────┘
       │
       ▼
┌──────────────┐
│   SUCCESS    │
└──────────────┘
```

---

## Security Architecture

### Threat Model

#### Threats Mitigated

1. **Secret Exposure**
   - Mitigation: GitHub Actions secret masking
   - Evidence: Secrets never appear in logs

2. **Unauthorized Package Upload**
   - Mitigation: Scoped PyPI tokens
   - Evidence: Token limited to `crawl4ai` project

3. **Man-in-the-Middle**
   - Mitigation: HTTPS for all API calls
   - Evidence: PyPI, Docker Hub, GitHub all use TLS

4. **Supply Chain Tampering**
   - Mitigation: Immutable artifacts, content checksums
   - Evidence: PyPI stores SHA256, Docker uses content-addressable storage

#### Trust Boundaries

```
┌───────────────────────────────────┐
│           Trusted Zone            │
│  ┌─────────────────────────────┐  │
│  │  GitHub Actions Runner      │  │
│  │  - Ephemeral VM             │  │
│  │  - Isolated environment     │  │
│  │  - Access to secrets        │  │
│  └─────────────────────────────┘  │
│                 │                 │
│                 │ HTTPS (TLS 1.2+)│
│                 ▼                 │
└─────────────────┬─────────────────┘
                  │
      ┌───────────┼───────────┐
      ▼           ▼           ▼
 ┌─────────┐ ┌─────────┐ ┌─────────┐
 │  PyPI   │ │ Docker  │ │ GitHub  │
 │  API    │ │  Hub    │ │  API    │
 └─────────┘ └─────────┘ └─────────┘
  External    External    External
  Service     Service     Service
```

### Secret Management

#### Secret Lifecycle

```
Creation (Developer)
    │
    ├─► PyPI: Create API token (scoped to project)
    ├─► Docker Hub: Create access token (read/write)
    │
    ▼
Storage (GitHub)
    │
    ├─► Encrypted at rest (AES-256)
    ├─► Access controlled (repo-scoped)
    │
    ▼
Usage (Workflow)
    │
    ├─► Injected as env vars
    ├─► Masked in logs (GitHub redacts on output)
    ├─► Never persisted to disk (in-memory only)
    │
    ▼
Transmission (API Call)
    │
    ├─► HTTPS only
    ├─► TLS 1.2+ with strong ciphers
    │
    ▼
Rotation (Manual)
    │
    └─► Regenerate on PyPI/Docker Hub
        Update GitHub secret
```

---

## Performance Characteristics

### Release Pipeline Performance

| Metric | Value | Notes |
|--------|-------|-------|
| Cold start | ~2-3 min | First run on new runner |
| Warm start | ~2-3 min | Minimal caching benefit |
| PyPI upload | ~30-60 sec | Network-bound |
| Package build | ~30 sec | CPU-bound |
| Parallelization | None | Sequential by design |

### Docker Pipeline Performance

| Metric | Cold Cache | Warm Cache (code) | Warm Cache (deps) |
|--------|-----------|-------------------|-------------------|
| Total time | 10-15 min | 1-2 min | 3-5 min |
| amd64 build | 5-7 min | 30-60 sec | 1-2 min |
| arm64 build | 5-7 min | 30-60 sec | 1-2 min |
| Push time | 1-2 min | 30 sec | 30 sec |
| Cache hit rate | 0% | 85% | 60% |

### Cache Performance Model

```python
def estimate_build_time(changes):
    base_time = 60  # seconds (setup + push)

    if "Dockerfile" in changes:
        return base_time + (10 * 60)  # Full rebuild: ~11 min
    elif "requirements.txt" in changes:
        return base_time + (3 * 60)   # Deps rebuild: ~4 min
    elif any(f.endswith(".py") for f in changes):
        return base_time + 60         # Code only: ~2 min
    else:
        return base_time              # No changes: ~1 min
```
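Worked example: a release touching only Python source falls into the `.py` branch, so `estimate_build_time(["crawl4ai/core.py"])` returns 60 + 60 = 120 seconds (~2 minutes), consistent with the warm-cache timeline above (the file name is illustrative).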

---

## Scalability Considerations

### Current Limits

| Resource | Limit | Impact |
|----------|-------|--------|
| Workflow concurrency | 20 (default) | Max 20 releases in parallel |
| Artifact storage | 500 MB/artifact | PyPI packages small (<10 MB) |
| Cache storage | 10 GB/repo | Docker layers fit comfortably |
| Workflow run time | 6 hours | Plenty of headroom |

### Scaling Strategies

#### Horizontal Scaling (Multiple Repos)
```
crawl4ai (main)
├─ release.yml
└─ docker-release.yml

crawl4ai-plugins (separate)
├─ release.yml
└─ docker-release.yml

Each repo has independent:
- Secrets
- Cache (10 GB each)
- Concurrency limits (20 each)
```

#### Vertical Scaling (Larger Runners)
```yaml
jobs:
  docker:
    runs-on: ubuntu-latest-8-cores  # GitHub-hosted larger runner
    # 4x faster builds for CPU-bound layers
```

---

## Disaster Recovery

### Failure Scenarios

#### Scenario 1: Release Pipeline Fails

**Failure Point**: PyPI upload fails (network error)

**State**:
- ✓ Version validated
- ✓ Package built
- ✗ PyPI upload
- ✗ GitHub release

**Recovery**:
```bash
# Manual upload
twine upload dist/*

# Retry workflow (re-run from GitHub Actions UI)
```

**Prevention**: Add retry logic to the PyPI upload; a sketch follows.
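A hedged sketch of that retry logic (the `upload_with_retry` helper is hypothetical, not currently in the workflow; it wraps the same `twine upload` the pipeline already runs, and twine's `--skip-existing` flag keeps a retry safe after a partial upload):

```python
import glob
import subprocess
import time

def upload_with_retry(attempts: int = 3, delay: float = 30.0) -> None:
    """Retry `twine upload dist/*` with a fixed delay between attempts."""
    files = glob.glob("dist/*")
    for attempt in range(1, attempts + 1):
        # --skip-existing makes re-runs safe if some files already uploaded
        result = subprocess.run(["twine", "upload", "--skip-existing", *files])
        if result.returncode == 0:
            return
        if attempt < attempts:
            time.sleep(delay)
    raise RuntimeError(f"twine upload failed after {attempts} attempts")

if __name__ == "__main__":
    upload_with_retry()
```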

#### Scenario 2: Docker Pipeline Fails

**Failure Point**: ARM build fails (dependency issue)

**State**:
- ✓ PyPI published
- ✓ GitHub release created
- ✓ amd64 image built
- ✗ arm64 image build

**Recovery**:
```bash
# Fix Dockerfile
git commit -am "fix: ARM build dependency"

# Trigger rebuild
git tag docker-rebuild-v1.2.3
git push origin docker-rebuild-v1.2.3
```

**Impact**: PyPI package available; only Docker ARM users affected.

#### Scenario 3: Partial Release

**Failure Point**: GitHub release creation fails

**State**:
- ✓ PyPI published
- ✗ GitHub release
- ✗ Docker images

**Recovery**:
```bash
# Create release manually
gh release create v1.2.3 \
  --title "Release v1.2.3" \
  --notes "..."

# This triggers docker-release.yml automatically
```

---

## Monitoring and Observability

### Metrics to Track

#### Release Pipeline
- Success rate (target: >99%)
- Duration (target: <3 min)
- PyPI upload time (target: <60 sec)

#### Docker Pipeline
- Success rate (target: >95%)
- Duration (target: <15 min cold, <2 min warm)
- Cache hit rate (target: >80% for code changes)

### Alerting

**Critical Alerts**:
- Release pipeline failure (blocks release)
- PyPI authentication failure (expired token)

**Warning Alerts**:
- Docker build >15 min (performance degradation)
- Cache hit rate <50% (cache issue)

### Logging

**GitHub Actions Logs**:
- Retention: 90 days
- Downloadable: Yes
- Searchable: Limited

**Recommended External Logging**:
```yaml
- name: Send logs to external service
  if: failure()
  run: |
    curl -X POST https://logs.example.com/api/v1/logs \
      -H "Content-Type: application/json" \
      -d "{\"workflow\": \"${{ github.workflow }}\", \"status\": \"failed\"}"
```

---

## Future Enhancements

### Planned Improvements

1. **Automated Changelog Generation**
   - Use conventional commits
   - Generate CHANGELOG.md automatically

2. **Pre-release Testing**
   - Test builds on `test-v*` tags
   - Upload to TestPyPI

3. **Notification System**
   - Slack/Discord notifications on release
   - Email on failure

4. **Performance Optimization**
   - Parallel Docker builds (amd64 + arm64 simultaneously)
   - Persistent runners for better caching

5. **Enhanced Validation**
   - Smoke tests after PyPI upload
   - Container security scanning

---

## References

- [GitHub Actions Architecture](https://docs.github.com/en/actions/learn-github-actions/understanding-github-actions)
- [Docker Build Cache](https://docs.docker.com/build/cache/)
- [PyPI API Documentation](https://warehouse.pypa.io/api-reference/)

---

**Last Updated**: 2025-01-21
**Version**: 2.0
.github/workflows/docs/README.md (new file, 1029 lines, vendored)

File diff suppressed because it is too large.
287
.github/workflows/docs/WORKFLOW_REFERENCE.md
vendored
Normal file
287
.github/workflows/docs/WORKFLOW_REFERENCE.md
vendored
Normal file
@@ -0,0 +1,287 @@
|
||||
# Workflow Quick Reference
|
||||
|
||||
## Quick Commands
|
||||
|
||||
### Standard Release
|
||||
```bash
|
||||
# 1. Update version
|
||||
vim crawl4ai/__version__.py # Set to "1.2.3"
|
||||
|
||||
# 2. Commit and tag
|
||||
git add crawl4ai/__version__.py
|
||||
git commit -m "chore: bump version to 1.2.3"
|
||||
git tag v1.2.3
|
||||
git push origin main
|
||||
git push origin v1.2.3
|
||||
|
||||
# 3. Monitor
|
||||
# - PyPI: ~2-3 minutes
|
||||
# - Docker: ~1-15 minutes
|
||||
```
|
||||
|
||||
### Docker Rebuild Only
|
||||
```bash
|
||||
git tag docker-rebuild-v1.2.3
|
||||
git push origin docker-rebuild-v1.2.3
|
||||
```
|
||||
|
||||
### Delete Tag (Undo Release)
|
||||
```bash
|
||||
# Local
|
||||
git tag -d v1.2.3
|
||||
|
||||
# Remote
|
||||
git push --delete origin v1.2.3
|
||||
|
||||
# GitHub Release
|
||||
gh release delete v1.2.3
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Workflow Triggers
|
||||
|
||||
### release.yml
|
||||
| Event | Pattern | Example |
|
||||
|-------|---------|---------|
|
||||
| Tag push | `v*` | `v1.2.3` |
|
||||
| Excludes | `test-v*` | `test-v1.2.3` |
|
||||
|
||||
### docker-release.yml
|
||||
| Event | Pattern | Example |
|
||||
|-------|---------|---------|
|
||||
| Release published | `release.published` | Automatic |
|
||||
| Tag push | `docker-rebuild-v*` | `docker-rebuild-v1.2.3` |
|
||||
|
||||
---
|
||||
|
||||
## Environment Variables
|
||||
|
||||
### release.yml
|
||||
| Variable | Source | Example |
|
||||
|----------|--------|---------|
|
||||
| `VERSION` | Git tag | `1.2.3` |
|
||||
| `TWINE_USERNAME` | Static | `__token__` |
|
||||
| `TWINE_PASSWORD` | Secret | `pypi-Ag...` |
|
||||
| `GITHUB_TOKEN` | Auto | `ghp_...` |
|
||||
|
||||
### docker-release.yml
|
||||
| Variable | Source | Example |
|
||||
|----------|--------|---------|
|
||||
| `VERSION` | Release/Tag | `1.2.3` |
|
||||
| `MAJOR` | Computed | `1` |
|
||||
| `MINOR` | Computed | `1.2` |
|
||||
| `DOCKER_USERNAME` | Secret | `unclecode` |
|
||||
| `DOCKER_TOKEN` | Secret | `dckr_pat_...` |
|
||||
|
||||
---
|
||||
|
||||
## Docker Tags Generated
|
||||
|
||||
| Version | Tags Created |
|
||||
|---------|-------------|
|
||||
| v1.0.0 | `1.0.0`, `1.0`, `1`, `latest` |
|
||||
| v1.1.0 | `1.1.0`, `1.1`, `1`, `latest` |
|
||||
| v1.2.3 | `1.2.3`, `1.2`, `1`, `latest` |
|
||||
| v2.0.0 | `2.0.0`, `2.0`, `2`, `latest` |
|
||||
|
||||
---
|
||||
|
||||
## Workflow Outputs
|
||||
|
||||
### release.yml
|
||||
| Output | Location | Time |
|
||||
|--------|----------|------|
|
||||
| PyPI Package | https://pypi.org/project/crawl4ai/ | ~2-3 min |
|
||||
| GitHub Release | Repository → Releases | ~2-3 min |
|
||||
| Workflow Summary | Actions → Run → Summary | Immediate |
|
||||
|
||||
### docker-release.yml
|
||||
| Output | Location | Time |
|
||||
|--------|----------|------|
|
||||
| Docker Images | https://hub.docker.com/r/unclecode/crawl4ai | ~1-15 min |
|
||||
| Workflow Summary | Actions → Run → Summary | Immediate |
|
||||
|
||||
---
|
||||
|
||||
## Common Issues
|
||||
|
||||
| Issue | Solution |
|
||||
|-------|----------|
|
||||
| Version mismatch | Update `crawl4ai/__version__.py` to match tag |
|
||||
| PyPI 403 Forbidden | Check `PYPI_TOKEN` secret |
|
||||
| PyPI 400 File exists | Version already published, increment version |
|
||||
| Docker auth failed | Regenerate `DOCKER_TOKEN` |
|
||||
| Docker build timeout | Check Dockerfile, review build logs |
|
||||
| Cache not working | First build on branch always cold |
|
||||
|
||||
---
|
||||
|
||||
## Secrets Checklist
|
||||
|
||||
- [ ] `PYPI_TOKEN` - PyPI API token (project or account scope)
|
||||
- [ ] `DOCKER_USERNAME` - Docker Hub username
|
||||
- [ ] `DOCKER_TOKEN` - Docker Hub access token (read/write)
|
||||
- [ ] `GITHUB_TOKEN` - Auto-provided (no action needed)
|
||||
|
||||
---
|
||||
|
||||
## Workflow Dependencies
|
||||
|
||||
### release.yml Dependencies
|
||||
```yaml
|
||||
Python: 3.12
|
||||
Actions:
|
||||
- actions/checkout@v4
|
||||
- actions/setup-python@v5
|
||||
- softprops/action-gh-release@v2
|
||||
PyPI Packages:
|
||||
- build
|
||||
- twine
|
||||
```
|
||||
|
||||

### docker-release.yml Dependencies
```yaml
Actions:
  - actions/checkout@v4
  - docker/setup-buildx-action@v3
  - docker/login-action@v3
  - docker/build-push-action@v5
Docker:
  - Buildx
  - QEMU (for multi-arch)
```

---

## Cache Information

### Type
- GitHub Actions Cache (`type=gha`)

### Storage
- **Limit**: 10GB per repository
- **Retention**: 7 days for unused entries
- **Cleanup**: Automatic LRU eviction

### Performance

| Scenario | Cache Hit | Build Time |
|----------|-----------|------------|
| First build | 0% | 10-15 min |
| Code change only | 85% | 1-2 min |
| Dependency update | 60% | 3-5 min |
| No changes | 100% | 30-60 sec |
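
For reference, the hit rates above come from Buildx layer caching; in plain CLI terms the build corresponds to roughly the following (the `gha` cache backend only resolves inside a GitHub Actions runner, so this is illustrative rather than something to run locally):

```bash
docker buildx build \
  --cache-from type=gha \
  --cache-to type=gha,mode=max \
  -t unclecode/crawl4ai:1.2.3 .
```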

---

## Build Platforms

| Platform | Architecture | Devices |
|----------|--------------|---------|
| linux/amd64 | x86_64 | Intel/AMD servers, AWS EC2, GCP |
| linux/arm64 | aarch64 | Apple Silicon, AWS Graviton, Raspberry Pi |
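
Both platforms are produced in a single Buildx invocation (QEMU emulates the non-native one); a minimal local sketch, with the `dev` tag as a placeholder:

```bash
docker buildx create --use             # one-time: multi-platform builder
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -t unclecode/crawl4ai:dev --push .   # pushes one multi-arch manifest
```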

---

## Version Validation

### Pre-Tag Checklist
```bash
# Check current version
python -c "from crawl4ai.__version__ import __version__; print(__version__)"

# Verify it matches the intended tag
# If the tag is v1.2.3, the version should be "1.2.3"
```

### Post-Release Verification
```bash
# PyPI
pip install crawl4ai==1.2.3
python -c "import crawl4ai; print(crawl4ai.__version__)"

# Docker
docker pull unclecode/crawl4ai:1.2.3
docker run unclecode/crawl4ai:1.2.3 python -c "import crawl4ai; print(crawl4ai.__version__)"
```

---

## Monitoring URLs

| Service | URL |
|---------|-----|
| GitHub Actions | `https://github.com/{owner}/{repo}/actions` |
| PyPI Project | `https://pypi.org/project/crawl4ai/` |
| Docker Hub | `https://hub.docker.com/r/unclecode/crawl4ai` |
| GitHub Releases | `https://github.com/{owner}/{repo}/releases` |
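
The same runs can be followed from the terminal with the `gh` CLI, e.g.:

```bash
gh run list --workflow=release.yml          # recent release pipeline runs
gh run list --workflow=docker-release.yml   # recent Docker build runs
gh run watch                                # pick a run and stream its progress
```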

---

## Rollback Strategy

### PyPI (Cannot Delete)

A published version number can never be reused on PyPI (a bad release can be yanked from the web UI, but the number itself stays burned), so the only way forward is a new patch release:

```bash
# Increment patch version
git tag v1.2.4
git push origin v1.2.4
```

### Docker (Can Overwrite)
```bash
# Rebuild with fix
git tag docker-rebuild-v1.2.3
git push origin docker-rebuild-v1.2.3
```

### GitHub Release
```bash
# Delete release
gh release delete v1.2.3

# Delete tag
git push --delete origin v1.2.3
```

---

## Status Badge Markdown

```markdown
[![Release Pipeline](https://github.com/{owner}/{repo}/actions/workflows/release.yml/badge.svg)](https://github.com/{owner}/{repo}/actions/workflows/release.yml)

[![Docker Release](https://github.com/{owner}/{repo}/actions/workflows/docker-release.yml/badge.svg)](https://github.com/{owner}/{repo}/actions/workflows/docker-release.yml)
```

---

## Timeline Example

```
0:00 - Push tag v1.2.3
0:01 - release.yml starts
0:02 - Version validation passes
0:03 - Package built
0:04 - PyPI upload starts
0:06 - PyPI upload complete ✓
0:07 - GitHub release created ✓
0:08 - release.yml complete
0:08 - docker-release.yml triggered
0:10 - Docker build starts
0:12 - amd64 image built (cache hit)
0:14 - arm64 image built (cache hit)
0:15 - Images pushed to Docker Hub ✓
0:16 - docker-release.yml complete

Total: ~16 minutes
Critical path (PyPI + GitHub): ~8 minutes
```

---

## Contact

For workflow issues:
1. Check Actions tab for logs
2. Review this reference
3. See [README.md](./README.md) for detailed docs

.github/workflows/main.yml (new file, vendored, 46 lines)
@@ -0,0 +1,46 @@
name: Discord GitHub Notifications

on:
  issues:
    types: [opened]
  issue_comment:
    types: [created]
  pull_request:
    types: [opened]
  discussion:
    types: [created]
  watch:
    types: [started]

jobs:
  notify-discord:
    runs-on: ubuntu-latest
    steps:
      - name: Send to Google Apps Script (Stars only)
        if: github.event_name == 'watch'
        run: |
          curl -fSs -X POST "${{ secrets.GOOGLE_SCRIPT_ENDPOINT }}" \
            -H 'Content-Type: application/json' \
            -d '{"url":"${{ github.event.sender.html_url }}"}'
      - name: Set webhook based on event type
        id: set-webhook
        run: |
          if [ "${{ github.event_name }}" == "discussion" ]; then
            echo "webhook=${{ secrets.DISCORD_DISCUSSIONS_WEBHOOK }}" >> $GITHUB_OUTPUT
          elif [ "${{ github.event_name }}" == "watch" ]; then
            echo "webhook=${{ secrets.DISCORD_STAR_GAZERS }}" >> $GITHUB_OUTPUT
          else
            echo "webhook=${{ secrets.DISCORD_WEBHOOK }}" >> $GITHUB_OUTPUT
          fi

      - name: Discord Notification
        uses: Ilshidur/action-discord@master
        env:
          DISCORD_WEBHOOK: ${{ steps.set-webhook.outputs.webhook }}
        with:
          args: |
            ${{ github.event_name == 'issues' && format('📣 New issue created: **{0}** by {1} - {2}', github.event.issue.title, github.event.issue.user.login, github.event.issue.html_url) ||
            github.event_name == 'issue_comment' && format('💬 New comment on issue **{0}** by {1} - {2}', github.event.issue.title, github.event.comment.user.login, github.event.comment.html_url) ||
            github.event_name == 'pull_request' && format('🔄 New PR opened: **{0}** by {1} - {2}', github.event.pull_request.title, github.event.pull_request.user.login, github.event.pull_request.html_url) ||
            github.event_name == 'watch' && format('⭐ {0} starred Crawl4AI 🥳! Check out their profile: {1}', github.event.sender.login, github.event.sender.html_url) ||
            format('💬 New discussion started: **{0}** by {1} - {2}', github.event.discussion.title, github.event.discussion.user.login, github.event.discussion.html_url) }}

.github/workflows/release.yml (new file, vendored, 113 lines)
@@ -0,0 +1,113 @@
name: Release Pipeline
on:
  push:
    tags:
      - 'v*'
      - '!test-v*'  # Exclude test tags

jobs:
  release:
    runs-on: ubuntu-latest
    permissions:
      contents: write  # Required for creating releases

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      - name: Extract version from tag
        id: get_version
        run: |
          TAG_VERSION=${GITHUB_REF#refs/tags/v}
          echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
          echo "Releasing version: $TAG_VERSION"

      - name: Install package dependencies
        run: |
          pip install -e .

      - name: Check version consistency
        run: |
          TAG_VERSION=${{ steps.get_version.outputs.VERSION }}
          PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")

          echo "Tag version: $TAG_VERSION"
          echo "Package version: $PACKAGE_VERSION"

          if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
            echo "❌ Version mismatch! Tag: $TAG_VERSION, Package: $PACKAGE_VERSION"
            echo "Please update crawl4ai/__version__.py to match the tag version"
            exit 1
          fi
          echo "✅ Version check passed: $TAG_VERSION"

      - name: Install build dependencies
        run: |
          python -m pip install --upgrade pip
          pip install build twine

      - name: Build package
        run: python -m build

      - name: Check package
        run: twine check dist/*

      - name: Upload to PyPI
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
        run: |
          echo "📦 Uploading to PyPI..."
          twine upload dist/*
          echo "✅ Package uploaded to https://pypi.org/project/crawl4ai/"

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v2
        with:
          tag_name: v${{ steps.get_version.outputs.VERSION }}
          name: Release v${{ steps.get_version.outputs.VERSION }}
          body: |
            ## 🎉 Crawl4AI v${{ steps.get_version.outputs.VERSION }} Released!

            ### 📦 Installation

            **PyPI:**
            ```bash
            pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}
            ```

            **Docker:**
            ```bash
            docker pull unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
            docker pull unclecode/crawl4ai:latest
            ```

            **Note:** Docker images are being built and will be available shortly.
            Check the [Docker Release workflow](https://github.com/${{ github.repository }}/actions/workflows/docker-release.yml) for build status.

            ### 📝 What's Changed
            See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md) for details.
          draft: false
          prerelease: false
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Summary
        run: |
          echo "## 🚀 Release Complete!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📦 PyPI Package" >> $GITHUB_STEP_SUMMARY
          echo "- Version: ${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- URL: https://pypi.org/project/crawl4ai/" >> $GITHUB_STEP_SUMMARY
          echo "- Install: \`pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📋 GitHub Release" >> $GITHUB_STEP_SUMMARY
          echo "- https://github.com/${{ github.repository }}/releases/tag/v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 🐳 Docker Images" >> $GITHUB_STEP_SUMMARY
          echo "Docker images are being built in a separate workflow." >> $GITHUB_STEP_SUMMARY
          echo "Check: https://github.com/${{ github.repository }}/actions/workflows/docker-release.yml" >> $GITHUB_STEP_SUMMARY

.github/workflows/release.yml.backup (new file, vendored, 142 lines)
@@ -0,0 +1,142 @@
name: Release Pipeline
on:
  push:
    tags:
      - 'v*'
      - '!test-v*'  # Exclude test tags

jobs:
  release:
    runs-on: ubuntu-latest
    permissions:
      contents: write  # Required for creating releases

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      - name: Extract version from tag
        id: get_version
        run: |
          TAG_VERSION=${GITHUB_REF#refs/tags/v}
          echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
          echo "Releasing version: $TAG_VERSION"

      - name: Install package dependencies
        run: |
          pip install -e .

      - name: Check version consistency
        run: |
          TAG_VERSION=${{ steps.get_version.outputs.VERSION }}
          PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")

          echo "Tag version: $TAG_VERSION"
          echo "Package version: $PACKAGE_VERSION"

          if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
            echo "❌ Version mismatch! Tag: $TAG_VERSION, Package: $PACKAGE_VERSION"
            echo "Please update crawl4ai/__version__.py to match the tag version"
            exit 1
          fi
          echo "✅ Version check passed: $TAG_VERSION"

      - name: Install build dependencies
        run: |
          python -m pip install --upgrade pip
          pip install build twine

      - name: Build package
        run: python -m build

      - name: Check package
        run: twine check dist/*

      - name: Upload to PyPI
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
        run: |
          echo "📦 Uploading to PyPI..."
          twine upload dist/*
          echo "✅ Package uploaded to https://pypi.org/project/crawl4ai/"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_TOKEN }}

      - name: Extract major and minor versions
        id: versions
        run: |
          VERSION=${{ steps.get_version.outputs.VERSION }}
          MAJOR=$(echo $VERSION | cut -d. -f1)
          MINOR=$(echo $VERSION | cut -d. -f1-2)
          echo "MAJOR=$MAJOR" >> $GITHUB_OUTPUT
          echo "MINOR=$MINOR" >> $GITHUB_OUTPUT

      - name: Build and push Docker images
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: |
            unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
            unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}
            unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}
            unclecode/crawl4ai:latest
          platforms: linux/amd64,linux/arm64

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v2
        with:
          tag_name: v${{ steps.get_version.outputs.VERSION }}
          name: Release v${{ steps.get_version.outputs.VERSION }}
          body: |
            ## 🎉 Crawl4AI v${{ steps.get_version.outputs.VERSION }} Released!

            ### 📦 Installation

            **PyPI:**
            ```bash
            pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}
            ```

            **Docker:**
            ```bash
            docker pull unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}
            docker pull unclecode/crawl4ai:latest
            ```

            ### 📝 What's Changed
            See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md) for details.
          draft: false
          prerelease: false
          token: ${{ secrets.GITHUB_TOKEN }}

      - name: Summary
        run: |
          echo "## 🚀 Release Complete!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📦 PyPI Package" >> $GITHUB_STEP_SUMMARY
          echo "- Version: ${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- URL: https://pypi.org/project/crawl4ai/" >> $GITHUB_STEP_SUMMARY
          echo "- Install: \`pip install crawl4ai==${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 🐳 Docker Images" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MINOR }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:${{ steps.versions.outputs.MAJOR }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:latest\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📋 GitHub Release" >> $GITHUB_STEP_SUMMARY
          echo "https://github.com/${{ github.repository }}/releases/tag/v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY

.github/workflows/test-release.yml.disabled (new file, vendored, 116 lines)
@@ -0,0 +1,116 @@
name: Test Release Pipeline
on:
  push:
    tags:
      - 'test-v*'

jobs:
  test-release:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.12'

      - name: Extract version from tag
        id: get_version
        run: |
          TAG_VERSION=${GITHUB_REF#refs/tags/test-v}
          echo "VERSION=$TAG_VERSION" >> $GITHUB_OUTPUT
          echo "Testing with version: $TAG_VERSION"

      - name: Install package dependencies
        run: |
          pip install -e .

      - name: Check version consistency
        run: |
          TAG_VERSION=${{ steps.get_version.outputs.VERSION }}
          PACKAGE_VERSION=$(python -c "from crawl4ai.__version__ import __version__; print(__version__)")

          echo "Tag version: $TAG_VERSION"
          echo "Package version: $PACKAGE_VERSION"

          if [ "$TAG_VERSION" != "$PACKAGE_VERSION" ]; then
            echo "❌ Version mismatch! Tag: $TAG_VERSION, Package: $PACKAGE_VERSION"
            echo "Please update crawl4ai/__version__.py to match the tag version"
            exit 1
          fi
          echo "✅ Version check passed: $TAG_VERSION"

      - name: Install build dependencies
        run: |
          python -m pip install --upgrade pip
          pip install build twine

      - name: Build package
        run: python -m build

      - name: Check package
        run: twine check dist/*

      - name: Upload to Test PyPI
        env:
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.TEST_PYPI_TOKEN }}
        run: |
          echo "📦 Uploading to Test PyPI..."
          twine upload --repository testpypi dist/* || {
            if [ $? -eq 1 ]; then
              echo "⚠️ Upload failed - likely version already exists on Test PyPI"
              echo "Continuing anyway for test purposes..."
            else
              exit 1
            fi
          }
          echo "✅ Test PyPI step complete"

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_TOKEN }}

      - name: Build and push Docker test images
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: |
            unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}
            unclecode/crawl4ai:test-latest
          platforms: linux/amd64,linux/arm64
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Summary
        run: |
          echo "## 🎉 Test Release Complete!" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 📦 Test PyPI Package" >> $GITHUB_STEP_SUMMARY
          echo "- Version: ${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "- URL: https://test.pypi.org/project/crawl4ai/" >> $GITHUB_STEP_SUMMARY
          echo "- Install: \`pip install -i https://test.pypi.org/simple/ crawl4ai==${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 🐳 Docker Test Images" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}\`" >> $GITHUB_STEP_SUMMARY
          echo "- \`unclecode/crawl4ai:test-latest\`" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### 🧹 Cleanup Commands" >> $GITHUB_STEP_SUMMARY
          echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
          echo "# Remove test tag" >> $GITHUB_STEP_SUMMARY
          echo "git tag -d test-v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "git push origin :test-v${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "# Remove Docker test images" >> $GITHUB_STEP_SUMMARY
          echo "docker rmi unclecode/crawl4ai:test-${{ steps.get_version.outputs.VERSION }}" >> $GITHUB_STEP_SUMMARY
          echo "docker rmi unclecode/crawl4ai:test-latest" >> $GITHUB_STEP_SUMMARY
          echo "\`\`\`" >> $GITHUB_STEP_SUMMARY

.gitignore (vendored, 57 changed lines)
@@ -1,3 +1,13 @@
# Scripts folder (private tools)
.scripts/

# Database files
*.db

# Environment files
.env
.env.local

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
@@ -225,3 +235,50 @@ tree.md
.scripts
.local
.do
/plans
plans/

# Codeium
.codeiumignore
todo/

# Continue development files
.continue/
.continuerc.json
continue.lock
continue_core.log
contextProviders/
continue_workspace/
.continue-cache/
continue_config.json

# Continue temporary files
.continue-temp/
.continue-logs/
.continue-downloads/

# Continue VS Code specific
.vscode-continue/
.vscode-continue-cache/

.prompts/

.llm.env
.private/

.claude/

CLAUDE_MONITOR.md
CLAUDE.md

tests/**/test_site
tests/**/reports
tests/**/benchmark_reports
test_scripts/
docs/**/data
.codecat/

docs/apps/linkdin/debug*/
docs/apps/linkdin/samples/insights/*

scripts/

CHANGELOG.md (554 changed lines)
@@ -5,6 +5,554 @@ All notable changes to Crawl4AI will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

### Added
- **🔒 HTTPS Preservation for Internal Links**: New `preserve_https_for_internal_links` configuration flag
  - Maintains HTTPS scheme for internal links even when servers redirect to HTTP
  - Prevents security downgrades during deep crawling
  - Useful for security-conscious crawling and sites supporting both protocols
  - Fully backward compatible with opt-in flag (default: `False`)
  - Fixes issue #1410 where HTTPS URLs were being downgraded to HTTP

## [0.7.3] - 2025-08-09

### Added
- **🕵️ Undetected Browser Support**: New browser adapter pattern with stealth capabilities
  - `browser_adapter.py` with undetected Chrome integration
  - Bypass sophisticated bot detection systems (Cloudflare, Akamai, custom solutions)
  - Support for headless stealth mode with anti-detection techniques
  - Human-like behavior simulation with random mouse movements and scrolling
  - Comprehensive examples for anti-bot strategies and stealth crawling
  - Full documentation guide for undetected browser usage

- **🎨 Multi-URL Configuration System**: URL-specific crawler configurations for batch processing
  - Different crawling strategies for different URL patterns in a single batch
  - Support for string patterns with wildcards (`"*.pdf"`, `"*/blog/*"`)
  - Lambda function matchers for complex URL logic
  - Mixed matchers combining strings and functions with AND/OR logic
  - Fallback configuration support when no patterns match
  - First-match-wins configuration selection with optional fallback

- **🧠 Memory Monitoring & Optimization**: Comprehensive memory usage tracking
  - New `memory_utils.py` module for memory monitoring and optimization
  - Real-time memory usage tracking during crawl sessions
  - Memory leak detection and reporting
  - Performance optimization recommendations
  - Peak memory usage analysis and efficiency metrics
  - Automatic cleanup suggestions for memory-intensive operations

- **📊 Enhanced Table Extraction**: Improved table access and DataFrame conversion
  - Direct `result.tables` interface replacing generic `result.media` approach
  - Instant pandas DataFrame conversion with `pd.DataFrame(table['data'])`
  - Enhanced table detection algorithms for better accuracy
  - Table metadata including source XPath and headers
  - Improved table structure preservation during extraction

- **💰 GitHub Sponsors Integration**: 4-tier sponsorship system
  - Supporter ($5/month): Community support + early feature previews
  - Professional ($25/month): Priority support + beta access
  - Business ($100/month): Direct consultation + custom integrations
  - Enterprise ($500/month): Dedicated support + feature development
  - Custom arrangement options for larger organizations

- **🐳 Docker LLM Provider Flexibility**: Environment-based LLM configuration
  - `LLM_PROVIDER` environment variable support for dynamic provider switching
  - `.llm.env` file support for secure configuration management
  - Per-request provider override capabilities in API endpoints
  - Support for OpenAI, Groq, and other providers without rebuilding images
  - Enhanced Docker documentation with deployment examples

### Fixed
- **URL Matcher Fallback**: Resolved edge cases in URL pattern matching logic
- **Memory Management**: Fixed memory leaks in long-running crawl sessions
- **Sitemap Processing**: Improved redirect handling in sitemap fetching
- **Table Extraction**: Enhanced table detection and extraction accuracy
- **Error Handling**: Better error messages and recovery from network failures

### Changed
- **Architecture Refactoring**: Major cleanup and optimization
  - Moved 2,450+ lines from main `async_crawler_strategy.py` to backup
  - Cleaner separation of concerns in crawler architecture
  - Better maintainability and code organization
  - Preserved backward compatibility while improving performance

### Documentation
- **Comprehensive Examples**: Added real-world URLs and practical use cases
- **API Documentation**: Complete CrawlResult field documentation with all available fields
- **Migration Guides**: Updated table extraction patterns from `result.media` to `result.tables`
- **Undetected Browser Guide**: Full documentation for stealth mode and anti-bot strategies
- **Multi-Config Examples**: Detailed examples for URL-specific configurations
- **Docker Deployment**: Enhanced Docker documentation with LLM provider configuration

## [0.7.x] - 2025-06-29

### Added
- **Virtual Scroll Support**: New `VirtualScrollConfig` for handling virtualized scrolling on modern websites
  - Automatically detects and handles three scrolling scenarios:
    - Content unchanged (continue scrolling)
    - Content appended (traditional infinite scroll)
    - Content replaced (true virtual scroll - Twitter/Instagram style)
  - Captures ALL content from pages that replace DOM elements during scroll
  - Intelligent deduplication based on normalized text content
  - Configurable scroll amount, count, and wait times
  - Seamless integration with existing extraction strategies
  - Comprehensive examples including Twitter timeline, Instagram grid, and mixed content scenarios

## [Unreleased]

### Added
- **Flexible LLM Provider Configuration** (Docker):
  - Support for `LLM_PROVIDER` environment variable to override default provider
  - Per-request provider override via optional `provider` parameter in API endpoints
  - Automatic provider validation with clear error messages
  - Updated Docker documentation and examples

### Changed
- **WebScrapingStrategy Refactoring**: Simplified content scraping architecture
  - `WebScrapingStrategy` is now an alias for `LXMLWebScrapingStrategy` for backward compatibility
  - Removed redundant BeautifulSoup-based implementation (~1000 lines of code)
  - `LXMLWebScrapingStrategy` now inherits directly from `ContentScrapingStrategy`
  - All existing code using `WebScrapingStrategy` continues to work without modification
  - Default scraping strategy remains `LXMLWebScrapingStrategy` for optimal performance

### Added
- **AsyncUrlSeeder**: High-performance URL discovery system for intelligent crawling at scale
  - Discover URLs from sitemaps and Common Crawl index
  - Extract and analyze page metadata without full crawling
  - BM25 relevance scoring for query-based URL filtering
  - Multi-domain parallel discovery with `many_urls()` method
  - Automatic caching with TTL for discovered URLs
  - Rate limiting and concurrent request management
  - Live URL validation with HEAD requests
  - JSON-LD and Open Graph metadata extraction
- **SeedingConfig**: Configuration class for URL seeding operations
  - Support for multiple discovery sources (`sitemap`, `cc`, `sitemap+cc`)
  - Pattern-based URL filtering with wildcards
  - Configurable concurrency and rate limiting
  - Query-based relevance scoring with BM25
  - Score threshold filtering for quality control
- Comprehensive documentation for URL seeding feature
  - Detailed comparison with deep crawling approaches
  - Complete API reference with examples
  - Integration guide with AsyncWebCrawler
  - Performance benchmarks and best practices
- Example scripts demonstrating URL seeding:
  - `url_seeder_demo.py`: Interactive Rich-based demonstration
  - `url_seeder_quick_demo.py`: Screenshot-friendly examples
- Test suite for URL seeding with BM25 scoring

### Changed
- Updated `__init__.py` to export AsyncUrlSeeder and SeedingConfig
- Enhanced documentation with URL seeding integration examples

### Fixed
- Corrected examples to properly extract URLs from seeder results before passing to `arun_many()`
- Fixed logger color compatibility issue (changed `lightblack` to `bright_black`)

## [0.6.2] - 2025-05-02

### Added
- New `RegexExtractionStrategy` for fast pattern-based extraction without requiring LLM
  - Built-in patterns for emails, URLs, phone numbers, dates, and more
  - Support for custom regex patterns
  - `generate_pattern` utility for LLM-assisted pattern creation (one-time use)
- Added `fit_html` as a top-level field in `CrawlResult` for optimized HTML extraction
- Added support for network response body capture in network request tracking

### Changed
- Updated documentation for no-LLM extraction strategies
- Enhanced API reference to include RegexExtractionStrategy examples and usage
- Improved HTML preprocessing with optimized performance for extraction strategies

## [0.6.1] - 2025-04-24

### Added
- New dedicated `tables` field in `CrawlResult` model for better table extraction handling
- Updated crypto_analysis_example.py to use the new tables field with backward compatibility

### Changed
- Improved playground UI in Docker deployment with better endpoint handling and UI feedback

## [0.6.0] ‑ 2025‑04‑22

### Added
- Browser pooling with page pre‑warming and fine‑grained **geolocation, locale, and timezone** controls
- Crawler pool manager (SDK + Docker API) for smarter resource allocation
- Network & console log capture plus MHTML snapshot export
- **Table extractor**: turn HTML `<table>`s into DataFrames or CSV with one flag
- High‑volume stress‑test framework in `tests/memory` and API load scripts
- MCP protocol endpoints with socket & SSE support; playground UI scaffold
- Docs v2 revamp: TOC, GitHub badge, copy‑code buttons, Docker API demo
- “Ask AI” helper button *(work‑in‑progress, shipping soon)*
- New examples: geo‑location usage, network/console capture, Docker API, markdown source selection, crypto analysis
- Expanded automated test suites for browser, Docker, MCP and memory benchmarks

### Changed
- Consolidated and renamed browser strategies; legacy docker strategy modules removed
- `ProxyConfig` moved to `async_configs`
- Server migrated to pool‑based crawler management
- FastAPI validators replace custom query validation
- Docker build now uses Chromium base image
- Large‑scale repo tidy‑up (≈36 k insertions, ≈5 k deletions)

### Fixed
- Async crawler session leak, duplicate‑visit handling, URL normalisation
- Target‑element regressions in scraping strategies
- Logged‑URL readability, encoded‑URL decoding, middle truncation for long URLs
- Closed issues: #701, #733, #756, #774, #804, #822, #839, #841, #842, #843, #867, #902, #911

### Removed
- Obsolete modules under `crawl4ai/browser/*` superseded by the new pooled browser layer

### Deprecated
- Old markdown generator names now alias `DefaultMarkdownGenerator` and emit warnings

---

#### Upgrade notes
1. Update any direct imports from `crawl4ai/browser/*` to the new pooled browser modules
2. If you override `AsyncPlaywrightCrawlerStrategy.get_page`, adopt the new signature
3. Rebuild Docker images to pull the new Chromium layer
4. Switch to `DefaultMarkdownGenerator` (or silence the deprecation warning)

---

`121 files changed, ≈36 223 insertions, ≈4 975 deletions`

### [Feature] 2025-04-21
- Implemented MCP protocol for machine-to-machine communication
- Added WebSocket and SSE transport for MCP server
- Exposed server endpoints via MCP protocol
- Created tests for MCP socket and SSE communication
- Enhanced Docker server with file handling and intelligent search
- Added PDF and screenshot endpoints with file saving capability
- Added JavaScript execution endpoint for page interaction
- Implemented advanced context search with BM25 and code chunking
- Added file path output support for generated assets
- Improved server endpoints and API surface
- Added intelligent context search with query filtering
- Added syntax-aware code function chunking
- Implemented efficient HTML processing pipeline
- Added support for controlling browser geolocation via new GeolocationConfig class
- Added locale and timezone configuration options to CrawlerRunConfig
- Added example script demonstrating geolocation and locale usage
- Added documentation for location-based identity features

### [Refactor] 2025-04-20
- Replaced crawler_manager.py with simpler crawler_pool.py implementation
- Added global page semaphore for hard concurrency cap
- Implemented browser pool with idle cleanup
- Added playground UI for testing and stress testing
- Updated API handlers to use pooled crawlers
- Enhanced logging levels and symbols
- Added memory tests and stress test utilities

### [Added] 2025-04-17
- Added content source selection feature for markdown generation
  - New `content_source` parameter allows choosing between `cleaned_html`, `raw_html`, and `fit_html`
  - Provides flexibility in how HTML content is processed before markdown conversion
  - Added examples and documentation for the new feature
  - Includes backward compatibility with default `cleaned_html` behavior

## Version 0.5.0.post5 (2025-03-14)

### Added

- *(crawler)* Add experimental parameters dictionary to CrawlerRunConfig to support beta features
- *(tables)* Add comprehensive table detection and extraction functionality with scoring system
- *(monitor)* Add real-time crawler monitoring system with memory management
- *(content)* Add target_elements parameter for selective content extraction
- *(browser)* Add standalone CDP browser launch capability
- *(schema)* Add preprocess_html_for_schema utility for better HTML cleaning
- *(api)* Add special handling for single URL requests in Docker API

### Changed

- *(filters)* Add reverse option to URLPatternFilter for inverting filter logic
- *(browser)* Make CSP nonce headers optional via experimental config
- *(browser)* Remove default cookie injection from page initialization
- *(crawler)* Optimize response handling for single-URL processing
- *(api)* Refactor crawl request handling to streamline processing
- *(config)* Update default provider to gpt-4o
- *(cache)* Change default cache_mode from aggressive to bypass in examples

### Fixed

- *(browser)* Clean up browser context creation code
- *(api)* Improve code formatting in API handler

### Breaking Changes

- WebScrapingStrategy no longer returns 'scraped_html' in its output dictionary
- Table extraction logic has been modified to better handle thead/tbody structures
- Default cookie injection has been removed from page initialization

## Version 0.5.0 (2025-03-02)

### Added

- *(profiles)* Add BrowserProfiler class for dedicated browser profile management
- *(cli)* Add interactive profile management to CLI with rich UI
- *(profiles)* Add ability to crawl directly from profile management interface
- *(browser)* Support identity-based browsing with persistent profiles
- *(deep-crawling)* Add max_pages parameter to limit the number of pages crawled in all deep crawling strategies
- *(deep-crawling)* Add score_threshold parameter to BFS and DFS strategies to filter URLs by score

### Changed

- *(browser)* Refactor profile management from ManagedBrowser to BrowserProfiler class
- *(cli)* Enhance CLI with profile selection and status display for crawling
- *(examples)* Update identity-based browsing example to use BrowserProfiler class
- *(docs)* Update identity-based crawling documentation
- *(docs)* Update deep crawling documentation with max_pages and score_threshold parameters
- *(examples)* Add example demonstrating the use of max_pages and score_threshold parameters

### Fixed

- *(browser)* Fix profile detection and management on different platforms
- *(cli)* Fix CLI command structure for better user experience
- *(deep-crawling)* Improve BFS and DFS strategies to handle page count limits more efficiently

## Version 0.5.0 (2025-02-21)

### Added

- *(crawler)* [**breaking**] Add memory-adaptive dispatcher with rate limiting
- *(scraping)* [**breaking**] Add LXML-based scraping mode for improved performance
- *(content-filter)* Add LLMContentFilter for intelligent markdown generation
- *(dispatcher)* [**breaking**] Add streaming support for URL processing
- *(browser)* [**breaking**] Improve browser context management and add shared data support
- *(config)* [**breaking**] Add streaming support and config cloning
- *(crawler)* Add URL redirection tracking
- *(extraction)* Add LLM-powered schema generation utility
- *(proxy)* Add proxy configuration support to CrawlerRunConfig
- *(robots)* Add robots.txt compliance support
- *(release)* [**breaking**] Prepare v0.4.3 beta release
- *(proxy)* Add proxy rotation support and documentation
- *(browser)* Add CDP URL configuration support
- *(demo)* Uncomment feature demos and add fake-useragent dependency
- *(pdf)* Add PDF processing capabilities
- *(crawler)* [**breaking**] Enhance JavaScript execution and PDF processing
- *(docker)* Add Docker deployment configuration and API server
- *(docker)* Add Docker service integration and config serialization
- *(docker)* [**breaking**] Enhance Docker deployment setup and configuration
- *(api)* Improve cache handling and add API tests
- *(crawler)* [**breaking**] Add deep crawling capabilities with BFS strategy
- *(proxy)* [**breaking**] Add proxy rotation strategy
- *(deep-crawling)* Add DFS strategy and update exports; refactor CLI entry point
- *(cli)* Add command line interface with comprehensive features
- *(config)* Enhance serialization and add deep crawling exports
- *(crawler)* Add HTTP crawler strategy for lightweight web scraping
- *(docker)* [**breaking**] Implement supervisor and secure API endpoints
- *(docker)* [**breaking**] Add JWT authentication and improve server architecture

### Changed

- *(browser)* Update browser channel default to 'chromium' in BrowserConfig.from_args method
- *(crawler)* Optimize response handling and default settings
- *(crawler)* Update hello_world example with proper content filtering
- Update hello_world.py example
- *(docs)* [**breaking**] Reorganize documentation structure and update styles
- *(dispatcher)* [**breaking**] Migrate to modular dispatcher system with enhanced monitoring
- *(scraping)* [**breaking**] Replace ScrapingMode enum with strategy pattern
- *(browser)* Improve browser path management
- *(models)* Rename final_url to redirected_url for consistency
- *(core)* [**breaking**] Improve type hints and remove unused file
- *(docs)* Improve code formatting in features demo
- *(user-agent)* Improve user agent generation system
- *(core)* [**breaking**] Reorganize project structure and remove legacy code
- *(docker)* Clean up import statements in server.py
- *(docker)* Remove unused models and utilities for cleaner codebase
- *(docker)* [**breaking**] Improve server architecture and configuration
- *(deep-crawl)* [**breaking**] Reorganize deep crawling functionality into dedicated module
- *(deep-crawling)* [**breaking**] Reorganize deep crawling strategies and add new implementations
- *(crawling)* [**breaking**] Improve type hints and code cleanup
- *(crawler)* [**breaking**] Improve HTML handling and cleanup codebase
- *(crawler)* [**breaking**] Remove content filter functionality
- *(examples)* Update API usage in features demo
- *(config)* [**breaking**] Enhance serialization and config handling

### Docs

- Add Code of Conduct for the project (#410)

### Documentation

- *(extraction)* Add clarifying comments for CSS selector behavior
- *(readme)* Update personal story and project vision
- *(urls)* [**breaking**] Update documentation URLs to new domain
- *(api)* Add streaming mode documentation and examples
- *(readme)* Update version and feature announcements for v0.4.3b1
- *(examples)* Update demo scripts and fix output formats
- *(examples)* Update v0.4.3 features demo to v0.4.3b2
- *(readme)* Update version references and fix links
- *(multi-url)* [**breaking**] Improve documentation clarity and update examples
- *(examples)* Update proxy rotation demo and disable other demos
- *(api)* Improve formatting and readability of API documentation
- *(examples)* Add SERP API project example
- *(urls)* Update documentation URLs to new domain
- *(readme)* Resolve merge conflict and update version info

### Fixed

- *(browser)* Update default browser channel to chromium and simplify channel selection logic
- *(browser)* [**breaking**] Default to Chromium channel for new headless mode (#387)
- *(browser)* Resolve merge conflicts in browser channel configuration
- Prevent memory leaks by ensuring proper closure of Playwright pages
- Not working long page screenshot (#403)
- *(extraction)* JsonCss selector and crawler improvements
- *(models)* [**breaking**] Make model fields optional with default values
- *(dispatcher)* Adjust memory threshold and fix dispatcher initialization
- *(install)* Ensure proper exit after running doctor command

### Miscellaneous Tasks

- *(cleanup)* Remove unused files and improve type hints
- Add .gitattributes file

## License Update

Crawl4AI v0.5.0 updates the license to Apache 2.0 *with a required attribution clause*. This means you are free to use, modify, and distribute Crawl4AI (even commercially), but you *must* clearly attribute the project in any public use or distribution. See the updated `LICENSE` file for the full legal text and specific requirements.

---

## Version 0.4.3b2 (2025-01-21)

This release introduces several powerful new features, including robots.txt compliance, dynamic proxy support, LLM-powered schema generation, and improved documentation.

### Features

- **Robots.txt Compliance:**
  - Added robots.txt compliance support with efficient SQLite-based caching.
  - New `check_robots_txt` parameter in `CrawlerRunConfig` to enable robots.txt checking before crawling a URL.
  - Automated robots.txt checking is now integrated into `AsyncWebCrawler` with 403 status codes for blocked URLs.

- **Proxy Configuration:**
  - Added proxy configuration support to `CrawlerRunConfig`, allowing dynamic proxy settings per crawl request.
  - Updated documentation with examples for using proxy configuration in crawl operations.

- **LLM-Powered Schema Generation:**
  - Introduced a new utility for automatic CSS and XPath schema generation using OpenAI or Ollama models.
  - Added comprehensive documentation and examples for schema generation.
  - New prompt templates optimized for HTML schema analysis.

- **URL Redirection Tracking:**
  - Added URL redirection tracking to capture the final URL after any redirects.
  - The final URL is now available in the `redirected_url` field of the `AsyncCrawlResponse` object.

- **Enhanced Streamlined Documentation:**
  - Refactored and improved the documentation structure for clarity and ease of use.
  - Added detailed explanations of new features and updated examples.

- **Improved Browser Context Management:**
  - Enhanced the management of browser contexts and added shared data support.
  - Introduced the `shared_data` parameter in `CrawlerRunConfig` to pass data between hooks.

- **Memory Dispatcher System:**
  - Migrated to a memory dispatcher system with enhanced monitoring capabilities.
  - Introduced `MemoryAdaptiveDispatcher` and `SemaphoreDispatcher` for improved resource management.
  - Added `RateLimiter` for rate limiting support.
  - New `CrawlerMonitor` for real-time monitoring of crawler operations.

- **Streaming Support:**
  - Added streaming support for processing crawled URLs as they are processed.
  - Enabled streaming mode with the `stream` parameter in `CrawlerRunConfig`.

- **Content Scraping Strategy:**
  - Introduced a new `LXMLWebScrapingStrategy` for faster content scraping.
  - Added support for selecting the scraping strategy via the `scraping_strategy` parameter in `CrawlerRunConfig`.

### Bug Fixes

- **Browser Path Management:**
  - Improved browser path management for consistent behavior across different environments.

- **Memory Threshold:**
  - Adjusted the default memory threshold to improve resource utilization.

- **Pydantic Model Fields:**
  - Made several model fields optional with default values to improve flexibility.

### Refactor

- **Documentation Structure:**
  - Reorganized documentation structure to improve navigation and readability.
  - Updated styles and added new sections for advanced features.

- **Scraping Mode:**
  - Replaced the `ScrapingMode` enum with a strategy pattern for more flexible content scraping.

- **Version Update:**
  - Updated the version to `0.4.248`.

- **Code Cleanup:**
  - Removed unused files and improved type hints.
  - Applied Ruff corrections for code quality.

- **Updated dependencies:**
  - Updated dependencies to their latest versions to ensure compatibility and security.

- **Ignored certain patterns and directories:**
  - Updated `.gitignore` and `.codeiumignore` to ignore additional patterns and directories, streamlining the development environment.

- **Simplified Personal Story in README:**
  - Streamlined the personal story and project vision in the `README.md` for clarity.

- **Removed Deprecated Files:**
  - Deleted several deprecated files and examples that are no longer relevant.

---

**Previous Releases:**

### 0.4.24x (2024-12-31)
- **Enhanced SSL & Security**: New SSL certificate handling with custom paths and validation options for secure crawling.
- **Smart Content Filtering**: Advanced filtering system with regex support and efficient chunking strategies.
- **Improved JSON Extraction**: Support for complex JSONPath, JSON-CSS, and Microdata extraction.
- **New Field Types**: Added `computed`, `conditional`, `aggregate`, and `template` field types.
- **Performance Boost**: Optimized caching, parallel processing, and memory management.
- **Better Error Handling**: Enhanced debugging capabilities with detailed error tracking.
- **Security Features**: Improved input validation and safe expression evaluation.

### 0.4.247 (2025-01-06)

#### Added
- **Windows Event Loop Configuration**: Introduced a utility function `configure_windows_event_loop` to resolve `NotImplementedError` for asyncio subprocesses on Windows. ([#utils.py](crawl4ai/utils.py), [#tutorials/async-webcrawler-basics.md](docs/md_v3/tutorials/async-webcrawler-basics.md))
- **`page_need_scroll` Method**: Added a method to determine if a page requires scrolling before taking actions in `AsyncPlaywrightCrawlerStrategy`. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))

#### Changed
- **Version Bump**: Updated the version from `0.4.246` to `0.4.247`. ([#__version__.py](crawl4ai/__version__.py))
- **Improved Scrolling Logic**: Enhanced scrolling methods in `AsyncPlaywrightCrawlerStrategy` by adding a `scroll_delay` parameter for better control. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))
- **Markdown Generation Example**: Updated the `hello_world.py` example to reflect the latest API changes and better illustrate features. ([#examples/hello_world.py](docs/examples/hello_world.py))
- **Documentation Update**:
  - Added Windows-specific instructions for handling asyncio event loops. ([#async-webcrawler-basics.md](docs/md_v3/tutorials/async-webcrawler-basics.md))

#### Removed
- **Legacy Markdown Generation Code**: Removed outdated and unused code for markdown generation in `content_scraping_strategy.py`. ([#content_scraping_strategy.py](crawl4ai/content_scraping_strategy.py))

#### Fixed
- **Page Closing to Prevent Memory Leaks**:
  - **Description**: Added a `finally` block to ensure pages are closed when no `session_id` is provided.
  - **Impact**: Prevents memory leaks caused by lingering pages after a crawl.
  - **File**: [`async_crawler_strategy.py`](crawl4ai/async_crawler_strategy.py)
  - **Code**:
    ```python
    finally:
        # If no session_id is given we should close the page
        if not config.session_id:
            await page.close()
    ```
- **Multiple Element Selection**: Modified `_get_elements` in `JsonCssExtractionStrategy` to return all matching elements instead of just the first one, ensuring comprehensive extraction. ([#extraction_strategy.py](crawl4ai/extraction_strategy.py))
- **Error Handling in Scrolling**: Added robust error handling to ensure scrolling proceeds safely even if a configuration is missing. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))

## [0.4.267] - 2025-01-06

### Added
- **Windows Event Loop Configuration**: Introduced a utility function `configure_windows_event_loop` to resolve `NotImplementedError` for asyncio subprocesses on Windows. ([#utils.py](crawl4ai/utils.py), [#tutorials/async-webcrawler-basics.md](docs/md_v3/tutorials/async-webcrawler-basics.md))
- **`page_need_scroll` Method**: Added a method to determine if a page requires scrolling before taking actions in `AsyncPlaywrightCrawlerStrategy`. ([#async_crawler_strategy.py](crawl4ai/async_crawler_strategy.py))

## [0.4.24] - 2024-12-31

### Added
@@ -147,12 +695,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Fixed potential viewport mismatches by ensuring consistent use of `self.viewport_width` and `self.viewport_height` throughout the code.
- Improved robustness of dynamic content loading to avoid timeouts and failed evaluations.

## [0.3.75] December 1, 2024

### PruningContentFilter

CODE_OF_CONDUCT.md (new file, 131 lines)
@@ -0,0 +1,131 @@
|
||||
# Crawl4AI Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at unclecode@crawl4ai.com. All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC].

For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations].

[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
@@ -6,7 +6,7 @@ We would like to thank the following people for their contributions to Crawl4AI:

- [Unclecode](https://github.com/unclecode) - Project Creator and Main Developer
- [Nasrin](https://github.com/ntohidi) - Project Manager and Developer
- [Aravind Karnam](https://github.com/aravindkarnam) - Developer
- [Aravind Karnam](https://github.com/aravindkarnam) - Head of Community and Product

## Community Contributors

@@ -24,6 +24,14 @@ We would like to thank the following people for their contributions to Crawl4AI:

- [NanmiCoder](https://github.com/NanmiCoder) - fix: crawler strategy exception handling and fixes [#271](https://github.com/unclecode/crawl4ai/pull/271)
- [paulokuong](https://github.com/paulokuong) - fix: CRAWL4_AI_BASE_DIRECTORY should be Path object instead of string [#298](https://github.com/unclecode/crawl4ai/pull/298)

#### Feb-Alpha-1
- [sufianuddin](https://github.com/sufianuddin) - fix: [Documentation for JsonCssExtractionStrategy](https://github.com/unclecode/crawl4ai/issues/651)
- [tautikAg](https://github.com/tautikAg) - fix: [Markdown output has incorrect spacing](https://github.com/unclecode/crawl4ai/issues/599)
- [cardit1](https://github.com/cardit1) - fix: ['AsyncPlaywrightCrawlerStrategy' object has no attribute 'downloads_path'](https://github.com/unclecode/crawl4ai/issues/585)
- [dmurat](https://github.com/dmurat) - fix: [Incorrect rendering of inline code inside of links](https://github.com/unclecode/crawl4ai/issues/583)
- [Sparshsing](https://github.com/Sparshsing) - fix: [Relative URLs in the webpage not extracted properly](https://github.com/unclecode/crawl4ai/issues/570)

## Other Contributors

@@ -31,6 +39,11 @@ We would like to thank the following people for their contributions to Crawl4AI:

- [Shiv Kumar](https://github.com/shivkumar0757)
- [QIN2DIM](https://github.com/QIN2DIM)

#### Typo fixes
- [ssoydan](https://github.com/ssoydan)
- [Darshan](https://github.com/Darshan2104)
- [tuhinmallick](https://github.com/tuhinmallick)

## Acknowledgements

We also want to thank all the users who have reported bugs, suggested features, or helped in any other way to make Crawl4AI better.
174 Dockerfile
@@ -1,32 +1,36 @@

# syntax=docker/dockerfile:1.4
FROM python:3.12-slim-bookworm AS build

ARG TARGETPLATFORM
ARG BUILDPLATFORM
# C4ai version
ARG C4AI_VER=0.7.6
ENV C4AI_VERSION=$C4AI_VER
LABEL c4ai.version=$C4AI_VER

# Other build arguments
ARG PYTHON_VERSION=3.10
# Set build arguments
ARG APP_HOME=/app
ARG GITHUB_REPO=https://github.com/unclecode/crawl4ai.git
ARG GITHUB_BRANCH=main
ARG USE_LOCAL=true

# Base stage with system dependencies
FROM python:${PYTHON_VERSION}-slim as base
ENV PYTHONFAULTHANDLER=1 \
    PYTHONHASHSEED=random \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    PIP_DEFAULT_TIMEOUT=100 \
    DEBIAN_FRONTEND=noninteractive \
    REDIS_HOST=localhost \
    REDIS_PORT=6379

# Declare ARG variables again within the build stage
ARG INSTALL_TYPE=all
ARG PYTHON_VERSION=3.12
ARG INSTALL_TYPE=default
ARG ENABLE_GPU=false
ARG TARGETARCH

# Platform-specific labels
LABEL maintainer="unclecode"
LABEL description="🔥🕷️ Crawl4AI: Open-source LLM Friendly Web Crawler & scraper"
LABEL version="1.0"

# Environment setup
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    PIP_DEFAULT_TIMEOUT=100 \
    DEBIAN_FRONTEND=noninteractive

# Install system dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    curl \
@@ -37,10 +41,11 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    pkg-config \
    python3-dev \
    libjpeg-dev \
    libpng-dev \
    redis-server \
    supervisor \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# Playwright system dependencies for Linux
RUN apt-get update && apt-get install -y --no-install-recommends \
    libglib2.0-0 \
    libnss3 \
@@ -63,30 +68,66 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    libcairo2 \
    libasound2 \
    libatspi2.0-0 \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# GPU support if enabled and architecture is supported
RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETPLATFORM" = "linux/amd64" ] ; then \
RUN apt-get update && apt-get dist-upgrade -y \
    && rm -rf /var/lib/apt/lists/*

RUN if [ "$ENABLE_GPU" = "true" ] && [ "$TARGETARCH" = "amd64" ] ; then \
    apt-get update && apt-get install -y --no-install-recommends \
    nvidia-cuda-toolkit \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* ; \
else \
    echo "Skipping NVIDIA CUDA Toolkit installation (unsupported platform or GPU disabled)"; \
fi

# Create and set working directory
WORKDIR /app
RUN if [ "$TARGETARCH" = "arm64" ]; then \
    echo "🦾 Installing ARM-specific optimizations"; \
    apt-get update && apt-get install -y --no-install-recommends \
    libopenblas-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*; \
elif [ "$TARGETARCH" = "amd64" ]; then \
    echo "🖥️ Installing AMD64-specific optimizations"; \
    apt-get update && apt-get install -y --no-install-recommends \
    libomp-dev \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*; \
else \
    echo "Skipping platform-specific optimizations (unsupported platform)"; \
fi

# Copy the entire project
COPY . .
# Create a non-root user and group
RUN groupadd -r appuser && useradd --no-log-init -r -g appuser appuser

# Install base requirements
# Create and set permissions for appuser home directory
RUN mkdir -p /home/appuser && chown -R appuser:appuser /home/appuser

WORKDIR ${APP_HOME}

RUN echo '#!/bin/bash\n\
if [ "$USE_LOCAL" = "true" ]; then\n\
    echo "📦 Installing from local source..."\n\
    pip install --no-cache-dir /tmp/project/\n\
else\n\
    echo "🌐 Installing from GitHub..."\n\
    for i in {1..3}; do \n\
        git clone --branch ${GITHUB_BRANCH} ${GITHUB_REPO} /tmp/crawl4ai && break || \n\
        { echo "Attempt $i/3 failed! Taking a short break... ☕"; sleep 5; }; \n\
    done\n\
    pip install --no-cache-dir /tmp/crawl4ai\n\
fi' > /tmp/install.sh && chmod +x /tmp/install.sh

COPY . /tmp/project/

# Copy supervisor config first (might need root later, but okay for now)
COPY deploy/docker/supervisord.conf .

COPY deploy/docker/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Install required libraries for FastAPI
RUN pip install fastapi uvicorn psutil

# Install ML dependencies first for better layer caching
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
    pip install --no-cache-dir \
    torch \
@@ -99,38 +140,61 @@ RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
    python -m nltk.downloader punkt stopwords ; \
fi

# Install the package
RUN if [ "$INSTALL_TYPE" = "all" ] ; then \
    pip install ".[all]" && \
    pip install "/tmp/project/[all]" && \
    python -m crawl4ai.model_loader ; \
elif [ "$INSTALL_TYPE" = "torch" ] ; then \
    pip install ".[torch]" ; \
    pip install "/tmp/project/[torch]" ; \
elif [ "$INSTALL_TYPE" = "transformer" ] ; then \
    pip install ".[transformer]" && \
    pip install "/tmp/project/[transformer]" && \
    python -m crawl4ai.model_loader ; \
else \
    pip install "." ; \
    pip install "/tmp/project" ; \
fi

# Install MkDocs and required plugins
RUN pip install --no-cache-dir \
    mkdocs \
    mkdocs-material \
    mkdocs-terminal \
    pymdown-extensions
RUN pip install --no-cache-dir --upgrade pip && \
    /tmp/install.sh && \
    python -c "import crawl4ai; print('✅ crawl4ai is ready to rock!')" && \
    python -c "from playwright.sync_api import sync_playwright; print('✅ Playwright is feeling dramatic!')"

# Build MkDocs documentation
RUN mkdocs build
RUN crawl4ai-setup

# Install Playwright and browsers
RUN if [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
    playwright install chromium; \
elif [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
    playwright install chromium; \
fi
RUN playwright install --with-deps

# Expose port
EXPOSE 8000 11235 9222 8080
RUN mkdir -p /home/appuser/.cache/ms-playwright \
    && cp -r /root/.cache/ms-playwright/chromium-* /home/appuser/.cache/ms-playwright/ \
    && chown -R appuser:appuser /home/appuser/.cache/ms-playwright

# Start the FastAPI server
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "11235"]
RUN crawl4ai-doctor

# Copy application code
COPY deploy/docker/* ${APP_HOME}/

# copy the playground + any future static assets
COPY deploy/docker/static ${APP_HOME}/static

# Change ownership of the application directory to the non-root user
RUN chown -R appuser:appuser ${APP_HOME}

# give permissions to redis persistence dirs if used
RUN mkdir -p /var/lib/redis /var/log/redis && chown -R appuser:appuser /var/lib/redis /var/log/redis

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD bash -c '\
    MEM=$(free -m | awk "/^Mem:/{print \$2}"); \
    if [ $MEM -lt 2048 ]; then \
        echo "⚠️ Warning: Less than 2GB RAM available! Your container might need a memory boost! 🚀"; \
        exit 1; \
    fi && \
    redis-cli ping > /dev/null && \
    curl -f http://localhost:11235/health || exit 1'

EXPOSE 6379
# Switch to the non-root user before starting the application
USER appuser

# Set environment variables to production
ENV PYTHON_ENV=production

# Start the application using supervisord
CMD ["supervisord", "-c", "supervisord.conf"]
339 JOURNAL.md Normal file
@@ -0,0 +1,339 @@

# Development Journal

This journal tracks significant feature additions, bug fixes, and architectural decisions in the crawl4ai project. It serves as both documentation and a historical record of the project's evolution.

## [2025-04-17] Added Content Source Selection for Markdown Generation

**Feature:** Configurable content source for markdown generation

**Changes Made:**
1. Added `content_source: str = "cleaned_html"` parameter to the `MarkdownGenerationStrategy` class
2. Updated `DefaultMarkdownGenerator` to accept and pass the content source parameter
3. Renamed the `cleaned_html` parameter to `input_html` in the `generate_markdown` method
4. Modified `AsyncWebCrawler.aprocess_html` to select the appropriate HTML source based on the generator's config
5. Added the `preprocess_html_for_schema` import in `async_webcrawler.py`

**Implementation Details:**
- Added a new `content_source` parameter to specify which HTML input to use for markdown generation
- Options include: "cleaned_html" (default), "raw_html", and "fit_html"
- Used a dictionary dispatch pattern in `aprocess_html` to select the appropriate HTML source
- Added proper error handling with fallback to cleaned_html if content source selection fails
- Ensured backward compatibility by defaulting to the "cleaned_html" option

**Files Modified:**
- `crawl4ai/markdown_generation_strategy.py`: Added the content_source parameter and updated the method signature
- `crawl4ai/async_webcrawler.py`: Added HTML source selection logic and updated imports

**Examples:**
- Created `docs/examples/content_source_example.py` demonstrating how to use the new parameter

**Challenges:**
- Maintaining backward compatibility while reorganizing the parameter flow
- Ensuring proper error handling for all content source options
- Making the change with minimal code modifications

**Why This Feature:**
The content source selection feature allows users to choose which HTML content to use as input for markdown generation:
1. "cleaned_html" - Uses the post-processed HTML after the scraping strategy (original behavior)
2. "raw_html" - Uses the original raw HTML directly from the web page
3. "fit_html" - Uses the preprocessed HTML optimized for schema extraction

This feature provides greater flexibility in how users generate markdown, enabling them to:
- Capture more detailed content from the original HTML when needed
- Use schema-optimized HTML when working with structured data
- Choose the approach that best suits their specific use case
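To illustrate, a minimal usage sketch of this option, assuming the package-level exports `AsyncWebCrawler`, `CrawlerRunConfig`, and `DefaultMarkdownGenerator` (the URL is illustrative; see `docs/examples/content_source_example.py` for the maintained example):

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, DefaultMarkdownGenerator

async def main():
    # Generate markdown from the raw page HTML instead of the cleaned HTML
    md_generator = DefaultMarkdownGenerator(content_source="raw_html")
    config = CrawlerRunConfig(markdown_generator=md_generator)
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        print(result.markdown)

asyncio.run(main())
```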
## [2025-04-17] Implemented High Volume Stress Testing Solution for SDK

**Feature:** Comprehensive stress testing framework using `arun_many` and the dispatcher system to evaluate performance and concurrency handling, and to identify potential issues under high-volume crawling scenarios.

**Changes Made:**
1. Created a dedicated stress testing framework in the `benchmarking/` (or similar) directory.
2. Implemented local test site generation (`SiteGenerator`) with configurable heavy HTML pages.
3. Added basic memory usage tracking (`SimpleMemoryTracker`) using platform-specific commands (avoiding a `psutil` dependency for this specific test).
4. Utilized `CrawlerMonitor` from `crawl4ai` for a rich terminal UI and real-time monitoring of test progress and dispatcher activity.
5. Implemented detailed result summary saving (JSON) and memory sample logging (CSV).
6. Developed `run_benchmark.py` to orchestrate tests with predefined configurations.
7. Created `run_all.sh` as a simple wrapper for `run_benchmark.py`.

**Implementation Details:**
- Generates a local test site with configurable pages containing heavy text and image content.
- Uses Python's built-in `http.server` for local serving, minimizing network variance.
- Leverages `crawl4ai`'s `arun_many` method for processing URLs.
- Utilizes `MemoryAdaptiveDispatcher` to manage concurrency via the `max_sessions` parameter (note: the memory adaptation features require `psutil`, which `SimpleMemoryTracker` does not use).
- Tracks memory usage via `SimpleMemoryTracker`, recording samples throughout test execution to a CSV file.
- Uses `CrawlerMonitor` (which uses the `rich` library) for clear terminal visualization and progress reporting directly from the dispatcher.
- Stores detailed final metrics in a JSON summary file.

**Files Created/Updated:**
- `stress_test_sdk.py`: Main stress testing implementation using `arun_many`.
- `benchmark_report.py`: (Assumed) Report generator for comparing test results.
- `run_benchmark.py`: Test runner script with predefined configurations.
- `run_all.sh`: Simple bash script wrapper for `run_benchmark.py`.
- `USAGE.md`: Comprehensive documentation on usage and interpretation (updated).

**Testing Approach:**
- Creates a controlled, reproducible test environment with a local HTTP server.
- Processes URLs using `arun_many`, allowing the dispatcher to manage concurrency up to `max_sessions`.
- Optionally logs per-batch summaries (when not in streaming mode) after processing chunks.
- Supports different test sizes via `run_benchmark.py` configurations.
- Records memory samples via platform commands for basic trend analysis.
- Includes cleanup functionality for the test environment.

**Challenges:**
- Ensuring proper cleanup of HTTP server processes.
- Getting reliable memory tracking across platforms without adding heavy dependencies (`psutil`) to this specific test script.
- Designing `run_benchmark.py` to correctly pass arguments to `stress_test_sdk.py`.

**Why This Feature:**
The high volume stress testing solution addresses critical needs for ensuring the reliability of Crawl4AI's `arun_many`:
1. Provides a reproducible way to evaluate performance under concurrent load.
2. Allows testing the dispatcher's concurrency control (`max_session_permit`) and queue management.
3. Enables performance tuning by observing throughput (URLs/sec) under different `max_sessions` settings.
4. Creates a controlled environment for testing `arun_many` behavior.
5. Supports continuous integration by providing deterministic test conditions for `arun_many`.

**Design Decisions:**
- Chose local site generation for reproducibility and isolation from network issues.
- Utilized the built-in `CrawlerMonitor` for real-time feedback, leveraging its `rich` integration.
- Implemented optional per-batch logging in `stress_test_sdk.py` (when not streaming) to provide chunk-level summaries alongside the continuous monitor.
- Adopted `arun_many` with a `MemoryAdaptiveDispatcher` as the core mechanism for parallel execution, reflecting the intended SDK usage.
- Created `run_benchmark.py` to simplify running standard test configurations.
- Used `SimpleMemoryTracker` to provide basic memory insights without requiring `psutil` for this particular test runner.

**Future Enhancements to Consider:**
- Create a separate test variant that *does* use `psutil` to specifically stress the memory-adaptive features of the dispatcher.
- Add support for generated JavaScript content.
- Add support for Docker-based testing with explicit memory limits.
- Enhance `benchmark_report.py` to provide more sophisticated analysis of performance and memory trends from the generated JSON/CSV files.
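To make the core mechanism concrete, a minimal sketch of the `arun_many` pattern this framework exercises, assuming the package-level exports `MemoryAdaptiveDispatcher` and `CacheMode` (the URL list and session cap are illustrative, not the framework's actual configuration):

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, CacheMode, MemoryAdaptiveDispatcher

async def stress_run(urls, max_sessions=16):
    # The dispatcher caps concurrency at max_session_permit parallel sessions
    dispatcher = MemoryAdaptiveDispatcher(max_session_permit=max_sessions)
    config = CrawlerRunConfig(cache_mode=CacheMode.BYPASS)  # force fresh fetches
    async with AsyncWebCrawler() as crawler:
        results = await crawler.arun_many(urls, config=config, dispatcher=dispatcher)
        ok = sum(1 for r in results if r.success)
        print(f"{ok}/{len(urls)} pages crawled successfully")

# Local test site served by http.server, as described above (URLs illustrative)
asyncio.run(stress_run([f"http://localhost:8000/page_{i}.html" for i in range(100)]))
```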
---

## [2025-04-17] Refined Stress Testing System Parameters and Execution

**Changes Made:**
1. Corrected `run_benchmark.py` and `stress_test_sdk.py` to use `--max-sessions` instead of the incorrect `--workers` parameter, accurately reflecting dispatcher configuration.
2. Updated `run_benchmark.py` argument handling to correctly pass all relevant custom parameters (including `--stream`, `--monitor-mode`, etc.) to `stress_test_sdk.py`.
3. (Assuming changes in `benchmark_report.py`) Applied a dark theme to benchmark reports for better readability.
4. (Assuming changes in `benchmark_report.py`) Improved visualization code to eliminate matplotlib warnings.
5. Updated `run_benchmark.py` to provide clickable `file://` links to generated reports in the terminal output.
6. Updated `USAGE.md` with comprehensive parameter descriptions reflecting the final script arguments.
7. Updated the `run_all.sh` wrapper to correctly invoke `run_benchmark.py` with flexible arguments.

**Details of Changes:**

1. **Parameter Correction (`--max-sessions`)**:
   * Identified the fundamental misunderstanding where `--workers` was used incorrectly.
   * Refactored `stress_test_sdk.py` to accept `--max-sessions` and configure the `MemoryAdaptiveDispatcher`'s `max_session_permit` accordingly.
   * Updated `run_benchmark.py` argument parsing and command construction to use `--max-sessions`.
   * Updated `TEST_CONFIGS` in `run_benchmark.py` to use `max_sessions`.

2. **Argument Handling (`run_benchmark.py`)**:
   * Improved logic to collect all command-line arguments provided to `run_benchmark.py`.
   * Ensured all relevant arguments (like `--stream`, `--monitor-mode`, `--port`, `--use-rate-limiter`, etc.) are correctly forwarded when calling `stress_test_sdk.py` as a subprocess.

3. **Dark Theme & Visualization Fixes (assumed in `benchmark_report.py`)**:
   * (Describes changes assumed to be made in the separate reporting script.)

4. **Clickable Links (`run_benchmark.py`)**:
   * Added logic to find the latest HTML report and PNG chart in the `benchmark_reports` directory after `benchmark_report.py` runs.
   * Used `pathlib` to generate correct `file://` URLs for terminal output.

5. **Documentation Improvements (`USAGE.md`)**:
   * Rewrote sections to explain `arun_many`, dispatchers, and `--max-sessions`.
   * Updated parameter tables for all scripts (`stress_test_sdk.py`, `run_benchmark.py`).
   * Clarified the difference between batch and streaming modes and their effect on logging.
   * Updated examples to use correct arguments.

**Files Modified:**
- `stress_test_sdk.py`: Changed `--workers` to `--max-sessions`, added new arguments, used `arun_many`.
- `run_benchmark.py`: Changed argument handling, updated configs, calls `stress_test_sdk.py`.
- `run_all.sh`: Updated to call `run_benchmark.py` correctly.
- `USAGE.md`: Updated documentation extensively.
- `benchmark_report.py`: (Assumed modifications for the dark theme and visualization fixes.)

**Testing:**
- Verified that `--max-sessions` correctly limits concurrency via the `CrawlerMonitor` output.
- Confirmed that custom arguments passed to `run_benchmark.py` are forwarded to `stress_test_sdk.py`.
- Validated that clickable links work in supporting terminals.
- Ensured documentation matches the final script parameters and behavior.

**Why These Changes:**
These refinements correct the fundamental approach of the stress test to align with `crawl4ai`'s actual architecture and intended usage:
1. Ensures the test evaluates the correct components (`arun_many`, `MemoryAdaptiveDispatcher`).
2. Makes test configurations more accurate and flexible.
3. Improves the usability of the testing framework through better argument handling and documentation.

**Future Enhancements to Consider:**
- Add support for generated JavaScript content to test JS rendering performance
- Implement more sophisticated memory analysis, such as generational garbage collection tracking
- Add support for Docker-based testing with memory limits to force OOM conditions
- Create visualization tools for analyzing memory usage patterns across test runs
- Add benchmark comparisons between different crawler versions or configurations

## [2025-04-17] Fixed Issues in Stress Testing System

**Changes Made:**
1. Fixed custom parameter handling in `run_benchmark.py`
2. Applied a dark theme to benchmark reports for better readability
3. Improved visualization code to eliminate matplotlib warnings
4. Added clickable links to generated reports in terminal output
5. Enhanced documentation with comprehensive parameter descriptions

**Details of Changes:**

1. **Custom Parameter Handling Fix**
   - Identified a bug where the custom URL count was being ignored in `run_benchmark.py`
   - Rewrote argument handling to use a custom args dictionary
   - Properly passed parameters to the `test_simple_stress.py` command
   - Added better UI indication of the custom parameters in use

2. **Dark Theme Implementation**
   - Added a complete dark theme to HTML benchmark reports
   - Applied dark styling to all visualization components
   - Used a Nord-inspired color palette for charts and graphs
   - Improved contrast and readability for data visualization
   - Updated text colors and backgrounds for better eye comfort

3. **Matplotlib Warning Fixes**
   - Resolved warnings related to improper use of `set_xticklabels()`
   - Implemented correct x-axis positioning for bar charts
   - Ensured proper alignment of bar labels and data points
   - Updated plotting code to use modern matplotlib practices

4. **Documentation Improvements**
   - Created a comprehensive `USAGE.md` with detailed instructions
   - Added parameter documentation for all scripts
   - Included examples for all common use cases
   - Provided detailed explanations for interpreting results
   - Added a troubleshooting guide for common issues

**Files Modified:**
- `tests/memory/run_benchmark.py`: Fixed custom parameter handling
- `tests/memory/benchmark_report.py`: Added the dark theme and fixed visualization warnings
- `tests/memory/run_all.sh`: Added clickable links to reports
- `tests/memory/USAGE.md`: Created comprehensive documentation

**Testing:**
- Verified that custom URL counts are now correctly used
- Confirmed the dark theme is properly applied to all report elements
- Checked that matplotlib warnings no longer appear
- Validated that clickable links to reports work in terminals that support them

**Why These Changes:**
These improvements address several usability issues with the stress testing system:
1. Better parameter handling ensures test configurations work as expected
2. The dark theme reduces eye strain during extended test review sessions
3. Fixing visualization warnings improves code quality and output clarity
4. Enhanced documentation makes the system more accessible for future use

**Future Enhancements:**
- Add additional visualization options for different types of analysis
- Implement a theme toggle to support both light and dark preferences
- Add export options for embedding reports in other documentation
- Create dedicated CI/CD integration templates for automated testing
## [2025-04-09] Added MHTML Capture Feature

**Feature:** MHTML snapshot capture of crawled pages

**Changes Made:**
1. Added `capture_mhtml: bool = False` parameter to the `CrawlerRunConfig` class
2. Added `mhtml: Optional[str] = None` field to the `CrawlResult` model
3. Added `mhtml_data: Optional[str] = None` field to the `AsyncCrawlResponse` class
4. Implemented a `capture_mhtml()` method in the `AsyncPlaywrightCrawlerStrategy` class to capture MHTML via CDP
5. Modified the crawler to capture MHTML when enabled and pass it to the result

**Implementation Details:**
- MHTML capture uses the Chrome DevTools Protocol (CDP) via Playwright's CDP session API
- The implementation waits for the page to fully load before capturing MHTML content
- Enhanced waiting for JavaScript content with `requestAnimationFrame` for better JS content capture
- All browser resources are properly cleaned up after capture

**Files Modified:**
- `crawl4ai/models.py`: Added the mhtml field to CrawlResult
- `crawl4ai/async_configs.py`: Added the capture_mhtml parameter to CrawlerRunConfig
- `crawl4ai/async_crawler_strategy.py`: Implemented the MHTML capture logic
- `crawl4ai/async_webcrawler.py`: Added mapping from AsyncCrawlResponse.mhtml_data to CrawlResult.mhtml

**Testing:**
- Created comprehensive tests in `tests/20241401/test_mhtml.py` covering:
  - Capturing MHTML when enabled
  - Ensuring mhtml is None when disabled explicitly
  - Ensuring mhtml is None by default
  - Capturing MHTML on JavaScript-enabled pages

**Challenges:**
- Had to improve page loading detection to ensure JavaScript content was fully rendered
- Tests needed to be run independently due to Playwright browser instance management
- Modified the tests' expected content to match actual MHTML output

**Why This Feature:**
The MHTML capture feature allows users to capture complete web pages, including all resources (CSS, images, etc.), in a single file. This is valuable for:
1. Offline viewing of captured pages
2. Creating permanent snapshots of web content for archival
3. Ensuring consistent content for later analysis, even if the original site changes

**Future Enhancements to Consider:**
- Add an option to save MHTML to a file
- Support for filtering which resources get included in the MHTML
- Add support for specifying MHTML capture options
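A minimal usage sketch of this feature as described above (the URL and output filename are illustrative):

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    config = CrawlerRunConfig(capture_mhtml=True)  # capture is off by default
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        if result.mhtml:  # None when capture is disabled or fails
            with open("snapshot.mhtml", "w", encoding="utf-8") as f:
                f.write(result.mhtml)

asyncio.run(main())
```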
## [2025-04-10] Added Network Request and Console Message Capturing

**Feature:** Comprehensive capturing of network requests/responses and browser console messages during crawling

**Changes Made:**
1. Added `capture_network_requests: bool = False` and `capture_console_messages: bool = False` parameters to the `CrawlerRunConfig` class
2. Added `network_requests: Optional[List[Dict[str, Any]]] = None` and `console_messages: Optional[List[Dict[str, Any]]] = None` fields to both the `AsyncCrawlResponse` and `CrawlResult` models
3. Implemented event listeners in `AsyncPlaywrightCrawlerStrategy._crawl_web()` to capture browser network events and console messages
4. Added proper event listener cleanup in the finally block to prevent resource leaks
5. Modified the crawler flow to pass captured data from AsyncCrawlResponse to CrawlResult

**Implementation Details:**
- Network capture uses Playwright event listeners (`request`, `response`, and `requestfailed`) to record all network activity
- Console capture uses Playwright event listeners (`console` and `pageerror`) to record console messages and errors
- Each network event includes metadata like URL, headers, status, and timing information
- Each console message includes type, text content, and source location when available
- All captured events include timestamps for chronological analysis
- Error handling ensures that even failed capture attempts won't crash the main crawling process

**Files Modified:**
- `crawl4ai/models.py`: Added the new fields to AsyncCrawlResponse and CrawlResult
- `crawl4ai/async_configs.py`: Added the new configuration parameters to CrawlerRunConfig
- `crawl4ai/async_crawler_strategy.py`: Implemented the capture logic using event listeners
- `crawl4ai/async_webcrawler.py`: Added data transfer from AsyncCrawlResponse to CrawlResult

**Documentation:**
- Created detailed documentation in `docs/md_v2/advanced/network-console-capture.md`
- Added the feature to the site navigation in `mkdocs.yml`
- Updated the CrawlResult documentation in `docs/md_v2/api/crawl-result.md`
- Created a comprehensive example in `docs/examples/network_console_capture_example.py`

**Testing:**
- Created `tests/general/test_network_console_capture.py` with tests for:
  - Verifying capture is disabled by default
  - Testing network request capturing
  - Testing console message capturing
  - Ensuring both capture types can be enabled simultaneously
  - Checking that the correct content is captured in the expected formats

**Challenges:**
- The initial implementation had synchronous/asynchronous mismatches in event handlers
- Needed to fix the handling of property access vs. method calls in the handlers
- Required careful cleanup of event listeners to prevent memory leaks

**Why This Feature:**
The network and console capture feature provides deep visibility into web page activity, enabling:
1. Debugging complex web applications by seeing all network requests and errors
2. Security analysis to detect unexpected third-party requests and data flows
3. Performance profiling to identify slow-loading resources
4. API discovery in single-page applications
5. Comprehensive analysis of web application behavior

**Future Enhancements to Consider:**
- Option to filter captured events by type, domain, or content
- Support for capturing response bodies (with size limits)
- Aggregate statistics calculation for performance metrics
- Integration with visualization tools for network waterfall analysis
- Exporting captures in HAR format for use with external tools
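A minimal usage sketch of both capture flags (the dict keys accessed here, such as `url`, `status`, `type`, and `text`, reflect the metadata described above but are illustrative rather than a guaranteed schema):

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    config = CrawlerRunConfig(
        capture_network_requests=True,
        capture_console_messages=True,
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        # Each captured entry is a dict of event metadata with a timestamp
        for event in result.network_requests or []:
            print(event.get("url"), event.get("status"))
        for msg in result.console_messages or []:
            print(msg.get("type"), msg.get("text"))

asyncio.run(main())
```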
20 LICENSE
@@ -48,4 +48,22 @@ You may add Your own copyright statement to Your modifications and may provide a

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

---
Attribution Requirement

All distributions, publications, or public uses of this software, or derivative works based on this software, must include the following attribution:

"This product includes software developed by UncleCode (https://x.com/unclecode) as part of the Crawl4AI project (https://github.com/unclecode/crawl4ai)."

This attribution must be displayed in a prominent and easily accessible location, such as:

- For software distributions: In a NOTICE file, README file, or equivalent documentation.
- For publications (research papers, articles, blog posts): In the acknowledgments section or a footnote.
- For websites/web applications: In an "About" or "Credits" section.
- For command-line tools: In the help/usage output.

This requirement ensures proper credit is given for the use of Crawl4AI and helps promote the project.

---
320 PROGRESSIVE_CRAWLING.md Normal file
@@ -0,0 +1,320 @@

# Progressive Web Crawling with Adaptive Information Foraging

## Abstract

This paper presents a novel approach to web crawling that adaptively determines when sufficient information has been gathered to answer a given query. Unlike traditional exhaustive crawling methods, our Progressive Information Sufficiency (PIS) framework uses statistical measures to balance information completeness against crawling efficiency. We introduce a multi-strategy architecture supporting pure statistical, embedding-enhanced, and LLM-assisted approaches, with theoretical guarantees on convergence and practical evaluation methods using synthetic datasets.

## 1. Introduction

Traditional web crawling approaches follow predetermined patterns (breadth-first, depth-first) without consideration for information sufficiency. This work addresses the fundamental question: *"When do we have enough information to answer a query and similar queries in its domain?"*

We formalize this as an optimal stopping problem in information foraging, introducing metrics for coverage, consistency, and saturation that enable crawlers to make intelligent decisions about when to stop crawling and which links to follow.

## 2. Problem Formulation

### 2.1 Definitions

Let:
- **K** = {d₁, d₂, ..., dₙ} be the current knowledge base (crawled documents)
- **Q** be the user query
- **L** = {l₁, l₂, ..., lₘ} be available links with preview metadata
- **θ** be the confidence threshold for information sufficiency

### 2.2 Objectives

1. **Minimize** |K| (number of crawled pages)
2. **Maximize** P(answers(Q) | K) (probability of answering Q given K)
3. **Ensure** coverage of Q's domain (similar queries)

## 3. Mathematical Framework

### 3.1 Information Sufficiency Metric

We define Information Sufficiency as:

```
IS(K, Q) = min(Coverage(K, Q), Consistency(K, Q), 1 - Redundancy(K)) × DomainCoverage(K, Q)
```

### 3.2 Coverage Score

Coverage measures how well current knowledge covers query terms and related concepts:

```
Coverage(K, Q) = Σ(t ∈ Q) log(df(t, K) + 1) × idf(t) / |Q|
```

Where:
- df(t, K) = document frequency of term t in knowledge base K
- idf(t) = inverse document frequency weight

### 3.3 Consistency Score

Consistency measures information coherence across documents:

```
Consistency(K, Q) = 1 - Var(answers from random subsets of K)
```

This captures the principle that sufficient knowledge should provide stable answers regardless of the document subset.

### 3.4 Saturation Score

Saturation detects diminishing returns:

```
Saturation(K) = 1 - (ΔInfo(Kₙ) / ΔInfo(K₁))
```

Where ΔInfo represents the marginal information gain from the nth crawl.

### 3.5 Link Value Prediction

Expected information gain from uncrawled links:

```
ExpectedGain(l) = Relevance(l, Q) × Novelty(l, K) × Authority(l)
```

Components:
- **Relevance**: BM25(preview_text, Q)
- **Novelty**: 1 - max_similarity(preview, K)
- **Authority**: f(url_structure, domain_metrics)

## 4. Algorithmic Approach

### 4.1 Progressive Crawling Algorithm

```
Algorithm: ProgressiveCrawl(start_url, query, θ)
    K ← ∅
    crawled ← {start_url}
    pending ← extract_links(crawl(start_url))

    while IS(K, Q) < θ and |crawled| < max_pages:
        candidates ← rank_by_expected_gain(pending, Q, K)
        if max(ExpectedGain(candidates)) < min_gain:
            break  // Diminishing returns

        to_crawl ← top_k(candidates)
        new_docs ← parallel_crawl(to_crawl)
        K ← K ∪ new_docs
        crawled ← crawled ∪ to_crawl
        pending ← extract_new_links(new_docs) - crawled

    return K
```

### 4.2 Stopping Criteria

Crawling terminates when any of the following holds (a small sketch follows this list):
1. IS(K, Q) ≥ θ (sufficient information)
2. d(IS)/d(crawls) < ε (plateau reached)
3. |crawled| ≥ max_pages (resource limit)
4. max(ExpectedGain) < min_gain (no promising links)
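A minimal sketch of this stopping check (the threshold values for θ, ε, max_pages, and min_gain are illustrative defaults, not prescribed by the framework):

```python
def should_stop(is_history, crawled_count, best_expected_gain,
                theta=0.8, epsilon=0.01, max_pages=50, min_gain=0.05):
    """Evaluate the four ProgressiveCrawl stopping criteria."""
    is_score = is_history[-1] if is_history else 0.0
    if is_score >= theta:                  # 1. sufficient information
        return True
    if len(is_history) >= 2 and is_history[-1] - is_history[-2] < epsilon:
        return True                        # 2. IS plateau reached
    if crawled_count >= max_pages:         # 3. resource limit
        return True
    if best_expected_gain < min_gain:      # 4. no promising links
        return True
    return False
```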
## 5. Multi-Strategy Architecture

### 5.1 Strategy Pattern Design

```
AbstractStrategy
├── StatisticalStrategy (no LLM, no embeddings)
├── EmbeddingStrategy (with semantic similarity)
└── LLMStrategy (with language model assistance)
```

### 5.2 Statistical Strategy

A pure statistical approach using:
- BM25 for relevance scoring
- Term frequency analysis for coverage
- Graph structure for authority
- No external models required

**Advantages**: Fast, no API costs, works offline
**Best for**: Technical documentation, specific terminology

### 5.3 Embedding Strategy (Implemented)

Semantic understanding through embeddings:
- Query expansion into semantic variations
- Coverage mapping in embedding space
- Gap-driven link selection
- Validation-based stopping criteria

**Mathematical Framework**:
```
Coverage(K, Q) = mean(max_similarity(q, K) for q in Q_expanded)
Gap(q) = 1 - max_similarity(q, K)
LinkScore(l) = Σ(Gap(q) × relevance(l, q)) × (1 - redundancy(l, K))
```

**Key Parameters**:
- `embedding_k_exp`: Exponential decay factor for distance-to-score mapping
- `embedding_coverage_radius`: Distance threshold for query coverage
- `embedding_min_confidence_threshold`: Minimum relevance threshold

**Advantages**: Semantic understanding, handles ambiguity, detects irrelevance
**Best for**: Research queries, conceptual topics, diverse content
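A short sketch of the coverage and gap computation from the framework above (pure NumPy; the embeddings are assumed to be L2-normalized so that dot products equal cosine similarities, and the shapes are illustrative):

```python
import numpy as np

def coverage_and_gaps(query_embs: np.ndarray, kb_embs: np.ndarray):
    """Compute Coverage(K, Q) and Gap(q) over the expanded queries.

    query_embs: (n_queries, d) L2-normalized embeddings of Q_expanded
    kb_embs:    (n_docs, d) L2-normalized embeddings of the knowledge base K
    """
    sims = query_embs @ kb_embs.T         # cosine similarity matrix
    max_sims = sims.max(axis=1)           # max_similarity(q, K) per query
    coverage = float(max_sims.mean())     # Coverage(K, Q)
    gaps = 1.0 - max_sims                 # Gap(q), drives link selection
    return coverage, gaps
```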
### 5.4 Progressive Enhancement Path

1. **Level 0**: Statistical only (implemented)
2. **Level 1**: + Embeddings for semantic similarity (implemented)
3. **Level 2**: + LLM for query understanding (future)

## 6. Evaluation Methodology

### 6.1 Synthetic Dataset Generation

Using an LLM to create evaluation data:

```python
def generate_synthetic_dataset(domain_url):
    # 1. Fully crawl the domain
    full_knowledge = exhaustive_crawl(domain_url)

    # 2. Generate answerable queries
    queries = llm_generate_queries(full_knowledge)

    # 3. Create variations for every query (synonyms, sub/super queries)
    variations = {q: generate_variations(q) for q in queries}

    return queries, variations, full_knowledge
```

### 6.2 Evaluation Metrics

1. **Efficiency**: Information gained / Pages crawled
2. **Completeness**: Answerable queries / Total queries
3. **Redundancy**: 1 - (Unique information / Total information)
4. **Convergence Rate**: Pages to 95% completeness

### 6.3 Ablation Studies

- Impact of each score component (coverage, consistency, saturation)
- Sensitivity to threshold parameters
- Performance across different domain types

## 7. Theoretical Properties

### 7.1 Convergence Guarantee

**Theorem**: For finite websites, ProgressiveCrawl converges to IS(K, Q) ≥ θ or exhausts all reachable pages.

**Proof sketch**: IS(K, Q) is monotonically non-decreasing with each crawl and bounded above by 1, so either it crosses θ or the finite set of reachable pages is exhausted.

### 7.2 Optimality

Under certain assumptions about link preview accuracy:
- Expected crawls ≤ 2 × optimal_crawls
- The approximation ratio improves with preview quality

## 8. Implementation Design

### 8.1 Core Components

1. **CrawlState**: Maintains crawl history and metrics
2. **AdaptiveConfig**: Configuration parameters
3. **CrawlStrategy**: Pluggable strategy interface
4. **AdaptiveCrawler**: Main orchestrator

### 8.2 Integration with Crawl4AI

- Wraps the existing AsyncWebCrawler
- Leverages the link preview functionality
- Maintains backward compatibility

### 8.3 Persistence

Knowledge base serialization for:
- Resumable crawls
- Knowledge sharing
- Offline analysis

## 9. Future Directions

### 9.1 Advanced Scoring

- Temporal information value
- Multi-query optimization
- Active learning from user feedback

### 9.2 Distributed Crawling

- Collaborative knowledge building
- Federated information sufficiency

### 9.3 Domain Adaptation

- Transfer learning across domains
- Meta-learning for threshold selection

## 10. Conclusion

Progressive crawling with adaptive information foraging provides a principled approach to efficient web information extraction. By combining coverage, consistency, and saturation metrics, we can determine information sufficiency without ground truth labels. The multi-strategy architecture allows graceful enhancement from pure statistical to LLM-assisted approaches based on requirements and resources.

## References

1. Manning, C. D., Raghavan, P., & Schütze, H. (2008). Introduction to Information Retrieval. Cambridge University Press.

2. Robertson, S., & Zaragoza, H. (2009). The Probabilistic Relevance Framework: BM25 and Beyond. Foundations and Trends in Information Retrieval.

3. Pirolli, P., & Card, S. (1999). Information Foraging. Psychological Review, 106(4), 643-675.

4. Dasgupta, S. (2005). Analysis of a greedy active learning strategy. Advances in Neural Information Processing Systems.

## Appendix A: Implementation Pseudocode

```python
from math import log
from statistics import mean

class StatisticalStrategy:
    def calculate_confidence(self, state):
        coverage = self.calculate_coverage(state)
        consistency = self.calculate_consistency(state)
        saturation = self.calculate_saturation(state)
        return min(coverage, consistency, saturation)

    def calculate_coverage(self, state):
        # BM25-style term coverage, normalized by the maximum possible score
        term_scores = []
        for term in state.query.split():
            df = state.document_frequencies.get(term, 0)
            idf = self.idf_cache.get(term, 1.0)
            term_scores.append(log(df + 1) * idf)
        return mean(term_scores) / self.max_possible_score

    def rank_links(self, state):
        # ExpectedGain(l) = Relevance × Novelty × Authority (Section 3.5)
        scored_links = []
        for link in state.pending_links:
            relevance = self.bm25_score(link.preview_text, state.query)
            novelty = self.calculate_novelty(link, state.knowledge_base)
            authority = self.url_authority(link.href)
            score = relevance * novelty * authority
            scored_links.append((link, score))
        return sorted(scored_links, key=lambda x: x[1], reverse=True)
```

## Appendix B: Evaluation Protocol

1. **Dataset Creation**:
   - Select diverse domains (documentation, blogs, e-commerce)
   - Generate 100 queries per domain using an LLM
   - Create query variations (5-10 per query)

2. **Baseline Comparisons**:
   - BFS crawler (depth-limited)
   - DFS crawler (depth-limited)
   - Random crawler
   - Oracle (knows relevant pages)

3. **Metrics Collection**:
   - Pages crawled vs. query answerability
   - Time to sufficient confidence
   - False positive/negative rates

4. **Statistical Analysis**:
   - ANOVA for strategy comparison
   - Regression for parameter sensitivity
   - Bootstrap for confidence intervals
809
README-first.md
Normal file
809
README-first.md
Normal file
@@ -0,0 +1,809 @@
|
||||
# 🚀🤖 Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper.
|
||||
|
||||
<div align="center">
|
||||
|
||||
<a href="https://trendshift.io/repositories/11716" target="_blank"><img src="https://trendshift.io/api/badge/repositories/11716" alt="unclecode%2Fcrawl4ai | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
|
||||
|
||||
[](https://github.com/unclecode/crawl4ai/stargazers)
|
||||
[](https://github.com/unclecode/crawl4ai/network/members)
|
||||
|
||||
[](https://badge.fury.io/py/crawl4ai)
|
||||
[](https://pypi.org/project/crawl4ai/)
|
||||
[](https://pepy.tech/project/crawl4ai)
|
||||
[](https://github.com/sponsors/unclecode)
|
||||
|
||||
<p align="center">
|
||||
<a href="https://x.com/crawl4ai">
|
||||
<img src="https://img.shields.io/badge/Follow%20on%20X-000000?style=for-the-badge&logo=x&logoColor=white" alt="Follow on X" />
|
||||
</a>
|
||||
<a href="https://www.linkedin.com/company/crawl4ai">
|
||||
<img src="https://img.shields.io/badge/Follow%20on%20LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white" alt="Follow on LinkedIn" />
|
||||
</a>
|
||||
<a href="https://discord.gg/jP8KfhDhyN">
|
||||
<img src="https://img.shields.io/badge/Join%20our%20Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white" alt="Join our Discord" />
|
||||
</a>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for LLMs, AI agents, and data pipelines. Open source, flexible, and built for real-time performance, Crawl4AI empowers developers with unmatched speed, precision, and deployment ease.
|
||||
|
||||
[✨ Check out latest update v0.7.0](#-recent-updates)
|
||||
|
||||
🎉 **Version 0.7.0 is now available!** The Adaptive Intelligence Update introduces groundbreaking features: Adaptive Crawling that learns website patterns, Virtual Scroll support for infinite pages, intelligent Link Preview with 3-layer scoring, Async URL Seeder for massive discovery, and significant performance improvements. [Read the release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.0.md)
|
||||
|
||||
<details>
|
||||
<summary>🤓 <strong>My Personal Story</strong></summary>
|
||||
|
||||
My journey with computers started in childhood when my dad, a computer scientist, introduced me to an Amstrad computer. Those early days sparked a fascination with technology, leading me to pursue computer science and specialize in NLP during my postgraduate studies. It was during this time that I first delved into web crawling, building tools to help researchers organize papers and extract information from publications a challenging yet rewarding experience that honed my skills in data extraction.
|
||||
|
||||
Fast forward to 2023, I was working on a tool for a project and needed a crawler to convert a webpage into markdown. While exploring solutions, I found one that claimed to be open-source but required creating an account and generating an API token. Worse, it turned out to be a SaaS model charging $16, and its quality didn’t meet my standards. Frustrated, I realized this was a deeper problem. That frustration turned into turbo anger mode, and I decided to build my own solution. In just a few days, I created Crawl4AI. To my surprise, it went viral, earning thousands of GitHub stars and resonating with a global community.
|
||||
|
||||
I made Crawl4AI open-source for two reasons. First, it’s my way of giving back to the open-source community that has supported me throughout my career. Second, I believe data should be accessible to everyone, not locked behind paywalls or monopolized by a few. Open access to data lays the foundation for the democratization of AI, a vision where individuals can train their own models and take ownership of their information. This library is the first step in a larger journey to create the best open-source data extraction and generation tool the world has ever seen, built collaboratively by a passionate community.
|
||||
|
||||
Thank you to everyone who has supported this project, used it, and shared feedback. Your encouragement motivates me to dream even bigger. Join us, file issues, submit PRs, or spread the word. Together, we can build a tool that truly empowers people to access their own data and reshape the future of AI.
|
||||
</details>
|
||||
|
||||
## 🧐 Why Crawl4AI?
|
||||
|
||||
1. **Built for LLMs**: Creates smart, concise Markdown optimized for RAG and fine-tuning applications.
|
||||
2. **Lightning Fast**: Delivers results faster with real-time, cost-efficient performance.
|
||||
3. **Flexible Browser Control**: Offers session management, proxies, and custom hooks for seamless data access.
|
||||
4. **Heuristic Intelligence**: Uses advanced algorithms for efficient extraction, reducing reliance on costly models.
|
||||
5. **Open Source & Deployable**: Fully open-source with no API keys—ready for Docker and cloud integration.
|
||||
6. **Thriving Community**: Actively maintained by a vibrant community and the #1 trending GitHub repository.
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
1. Install Crawl4AI:
|
||||
```bash
|
||||
# Install the package
|
||||
pip install -U crawl4ai
|
||||
|
||||
# For pre release versions
|
||||
pip install crawl4ai --pre
|
||||
|
||||
# Run post-installation setup
|
||||
crawl4ai-setup
|
||||
|
||||
# Verify your installation
|
||||
crawl4ai-doctor
|
||||
```
|
||||
|
||||
If you encounter any browser-related issues, you can install them manually:
|
||||
```bash
|
||||
python -m playwright install --with-deps chromium
|
||||
```
|
||||
|
||||
2. Run a simple web crawl with Python:

```python
import asyncio
from crawl4ai import *

async def main():
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url="https://www.nbcnews.com/business",
        )
        print(result.markdown)

if __name__ == "__main__":
    asyncio.run(main())
```

3. Or use the new command-line interface:

```bash
# Basic crawl with markdown output
crwl https://www.nbcnews.com/business -o markdown

# Deep crawl with BFS strategy, max 10 pages
crwl https://docs.crawl4ai.com --deep-crawl bfs --max-pages 10

# Use LLM extraction with a specific question
crwl https://www.example.com/products -q "Extract all product prices"
```

## ✨ Features

<details>
<summary>📝 <strong>Markdown Generation</strong></summary>

- 🧹 **Clean Markdown**: Generates clean, structured Markdown with accurate formatting.
- 🎯 **Fit Markdown**: Heuristic-based filtering to remove noise and irrelevant parts for AI-friendly processing.
- 🔗 **Citations and References**: Converts page links into a numbered reference list with clean citations.
- 🛠️ **Custom Strategies**: Users can create their own Markdown generation strategies tailored to specific needs.
- 📚 **BM25 Algorithm**: Employs BM25-based filtering for extracting core information and removing irrelevant content.

</details>
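
For instance, here is a minimal sketch of query-focused Fit Markdown generation, using the `BM25ContentFilter` and `DefaultMarkdownGenerator` classes shown in the examples further below; the URL and query are placeholders:

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.content_filter_strategy import BM25ContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

async def main():
    # Keep only content that scores well against the query (BM25-based filtering)
    config = CrawlerRunConfig(
        markdown_generator=DefaultMarkdownGenerator(
            content_filter=BM25ContentFilter(user_query="pricing plans", bm25_threshold=1.0)
        )
    )
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com", config=config)
        print(result.markdown.raw_markdown)  # full markdown
        print(result.markdown.fit_markdown)  # filtered, query-focused markdown

if __name__ == "__main__":
    asyncio.run(main())
```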

<details>
<summary>📊 <strong>Structured Data Extraction</strong></summary>

- 🤖 **LLM-Driven Extraction**: Supports all LLMs (open-source and proprietary) for structured data extraction.
- 🧱 **Chunking Strategies**: Implements chunking (topic-based, regex, sentence-level) for targeted content processing.
- 🌌 **Cosine Similarity**: Finds relevant content chunks based on user queries for semantic extraction.
- 🔎 **CSS-Based Extraction**: Fast schema-based data extraction using XPath and CSS selectors.
- 🔧 **Schema Definition**: Define custom schemas for extracting structured JSON from repetitive patterns.

</details>
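
As a taste of schema-based extraction (a full example appears under Advanced Usage below), here is a minimal sketch; the selectors and URL are illustrative placeholders:

```python
import asyncio
import json
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig, JsonCssExtractionStrategy

async def main():
    # A repetitive page pattern is described once in a schema, then extracted as JSON
    schema = {
        "name": "Articles",
        "baseSelector": "article.post",  # one JSON entry per matched element
        "fields": [
            {"name": "title", "selector": "h2", "type": "text"},
            {"name": "link", "selector": "a", "type": "attribute", "attribute": "href"},
        ],
    }
    config = CrawlerRunConfig(extraction_strategy=JsonCssExtractionStrategy(schema))
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url="https://example.com/blog", config=config)
        print(json.dumps(json.loads(result.extracted_content), indent=2))

if __name__ == "__main__":
    asyncio.run(main())
```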

<details>
<summary>🌐 <strong>Browser Integration</strong></summary>

- 🖥️ **Managed Browser**: Use user-owned browsers with full control, avoiding bot detection.
- 🔄 **Remote Browser Control**: Connect to Chrome Developer Tools Protocol for remote, large-scale data extraction.
- 👤 **Browser Profiler**: Create and manage persistent profiles with saved authentication states, cookies, and settings.
- 🔒 **Session Management**: Preserve browser states and reuse them for multi-step crawling.
- 🧩 **Proxy Support**: Seamlessly connect to proxies with authentication for secure access.
- ⚙️ **Full Browser Control**: Modify headers, cookies, user agents, and more for tailored crawling setups.
- 🌍 **Multi-Browser Support**: Compatible with Chromium, Firefox, and WebKit.
- 📐 **Dynamic Viewport Adjustment**: Automatically adjusts the browser viewport to match page content, ensuring complete rendering and capturing of all elements.

</details>
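
A rough sketch of a tailored browser setup follows. Note that `user_agent` and `proxy_config` are assumed parameter names here; verify them against the `BrowserConfig` API reference before relying on them:

```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig

# Sketch only: `user_agent` and `proxy_config` are assumptions, not confirmed API
browser_config = BrowserConfig(
    headless=True,
    user_agent="Mozilla/5.0 (compatible; MyCrawler/1.0)",  # custom user agent
    proxy_config={                                          # authenticated proxy (placeholder values)
        "server": "http://proxy.example.com:8080",
        "username": "user",
        "password": "pass",
    },
)

async def main():
    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(url="https://example.com")
        print(result.markdown)

if __name__ == "__main__":
    asyncio.run(main())
```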

<details>
<summary>🔎 <strong>Crawling & Scraping</strong></summary>

- 🖼️ **Media Support**: Extract images, audio, videos, and responsive image formats like `srcset` and `picture`.
- 🚀 **Dynamic Crawling**: Execute JavaScript and wait on async or sync conditions to extract dynamic content.
- 📸 **Screenshots**: Capture page screenshots during crawling for debugging or analysis.
- 📂 **Raw Data Crawling**: Directly process raw HTML (`raw:`) or local files (`file://`).
- 🔗 **Comprehensive Link Extraction**: Extracts internal and external links, plus embedded iframe content.
- 🛠️ **Customizable Hooks**: Define hooks at every step to customize crawling behavior.
- 💾 **Caching**: Cache data for improved speed and to avoid redundant fetches.
- 📄 **Metadata Extraction**: Retrieve structured metadata from web pages.
- 📡 **IFrame Content Extraction**: Seamless extraction from embedded iframe content.
- 🕵️ **Lazy Load Handling**: Waits for images to fully load, ensuring no content is missed due to lazy loading.
- 🔄 **Full-Page Scanning**: Simulates scrolling to load and capture all dynamic content, perfect for infinite scroll pages.

</details>
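
For example, the `raw:` and `file://` prefixes mentioned above let you run the same pipeline over HTML you already have; a small sketch (the file path is a placeholder):

```python
import asyncio
from crawl4ai import AsyncWebCrawler

async def main():
    async with AsyncWebCrawler() as crawler:
        # Process an in-memory HTML snippet via the raw: prefix
        result = await crawler.arun(url="raw:<html><body><h1>Hello</h1></body></html>")
        print(result.markdown)

        # Process a local file via file://
        result = await crawler.arun(url="file:///tmp/page.html")
        print(result.markdown)

if __name__ == "__main__":
    asyncio.run(main())
```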

<details>
<summary>🚀 <strong>Deployment</strong></summary>

- 🐳 **Dockerized Setup**: Optimized Docker image with FastAPI server for easy deployment.
- 🔑 **Secure Authentication**: Built-in JWT token authentication for API security.
- 🔄 **API Gateway**: One-click deployment with secure token authentication for API-based workflows.
- 🌐 **Scalable Architecture**: Designed for mass-scale production and optimized server performance.
- ☁️ **Cloud Deployment**: Ready-to-deploy configurations for major cloud platforms.

</details>

<details>
<summary>🎯 <strong>Additional Features</strong></summary>

- 🕶️ **Stealth Mode**: Avoid bot detection by mimicking real users.
- 🏷️ **Tag-Based Content Extraction**: Refine crawling based on custom tags, headers, or metadata.
- 🔗 **Link Analysis**: Extract and analyze all links for detailed data exploration.
- 🛡️ **Error Handling**: Robust error management for seamless execution.
- 🔐 **CORS & Static Serving**: Supports filesystem-based caching and cross-origin requests.
- 📖 **Clear Documentation**: Simplified and updated guides for onboarding and advanced usage.
- 🙌 **Community Recognition**: Acknowledges contributors and pull requests for transparency.

</details>
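
The stealth behavior is exposed through the `magic` flag used in the user-profile example further below; a minimal sketch:

```python
import asyncio
from crawl4ai import AsyncWebCrawler

async def main():
    async with AsyncWebCrawler() as crawler:
        # magic=True enables the built-in anti-bot-detection heuristics
        result = await crawler.arun(url="https://example.com", magic=True)
        print(result.markdown)

if __name__ == "__main__":
    asyncio.run(main())
```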

## Try it Now!

✨ Play around with this [Colab notebook](https://colab.research.google.com/drive/1SgRPrByQLzjRfwoRNq1wSGE9nYY_EE8C?usp=sharing)

✨ Visit our [Documentation Website](https://docs.crawl4ai.com/)

## Installation 🛠️

Crawl4AI offers flexible installation options to suit various use cases. You can install it as a Python package or use Docker.

<details>
<summary>🐍 <strong>Using pip</strong></summary>

Choose the installation option that best fits your needs:

### Basic Installation

For basic web crawling and scraping tasks:

```bash
pip install crawl4ai
crawl4ai-setup # Setup the browser
```

By default, this will install the asynchronous version of Crawl4AI, using Playwright for web crawling.

👉 **Note**: When you install Crawl4AI, the `crawl4ai-setup` should automatically install and set up Playwright. However, if you encounter any Playwright-related errors, you can manually install it using one of these methods:

1. Through the command line:

```bash
playwright install
```

2. If the above doesn't work, try this more specific command:

```bash
python -m playwright install chromium
```

This second method has proven to be more reliable in some cases.

---

### Installation with Synchronous Version

The sync version is deprecated and will be removed in future versions. If you need the synchronous version using Selenium:

```bash
pip install crawl4ai[sync]
```

---

### Development Installation

For contributors who plan to modify the source code:

```bash
git clone https://github.com/unclecode/crawl4ai.git
cd crawl4ai
pip install -e .                    # Basic installation in editable mode
```

Install optional features:

```bash
pip install -e ".[torch]"           # With PyTorch features
pip install -e ".[transformer]"     # With Transformer features
pip install -e ".[cosine]"          # With cosine similarity features
pip install -e ".[sync]"            # With synchronous crawling (Selenium)
pip install -e ".[all]"             # Install all optional features
```

</details>

<details>
<summary>🐳 <strong>Docker Deployment</strong></summary>

> 🚀 **Now Available!** Our completely redesigned Docker implementation is here! This new solution makes deployment more efficient and seamless than ever.

### New Docker Features

The new Docker implementation includes:

- **Browser pooling** with page pre-warming for faster response times
- **Interactive playground** to test and generate request code
- **MCP integration** for direct connection to AI tools like Claude Code
- **Comprehensive API endpoints** including HTML extraction, screenshots, PDF generation, and JavaScript execution
- **Multi-architecture support** with automatic detection (AMD64/ARM64)
- **Optimized resources** with improved memory management

### Getting Started

```bash
# Pull and run the latest release candidate
docker pull unclecode/crawl4ai:0.7.0
docker run -d -p 11235:11235 --name crawl4ai --shm-size=1g unclecode/crawl4ai:0.7.0

# Visit the playground at http://localhost:11235/playground
```

For complete documentation, see our [Docker Deployment Guide](https://docs.crawl4ai.com/core/docker-deployment/).
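
If you prefer Python over raw HTTP, the repository also ships a `Crawl4aiDockerClient`. Here is a minimal sketch, assuming the server above is running on port 11235 with authentication disabled; see the deployment guide for the exact client options:

```python
import asyncio
from crawl4ai.docker_client import Crawl4aiDockerClient

async def main():
    # Point the client at the Dockerized Crawl4AI server started above
    client = Crawl4aiDockerClient(base_url="http://localhost:11235")
    results = await client.crawl(urls=["https://example.com"])
    print(results)

if __name__ == "__main__":
    asyncio.run(main())
```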

</details>

---

### Quick Test

Run a quick test against the running Docker server:

```python
import requests

# Submit a crawl job
response = requests.post(
    "http://localhost:11235/crawl",
    json={"urls": ["https://example.com"], "priority": 10}
)
if response.status_code == 200:
    print("Crawl job submitted successfully.")

if "results" in response.json():
    results = response.json()["results"]
    print("Crawl job completed. Results:")
    for result in results:
        print(result)
else:
    task_id = response.json()["task_id"]
    print(f"Crawl job submitted. Task ID: {task_id}")
    # Poll the task endpoint until the job reports completion
    result = requests.get(f"http://localhost:11235/task/{task_id}")
```

For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, environment variables, and usage examples, see our [Docker Deployment Guide](https://docs.crawl4ai.com/basic/docker-deployment/).

## 🔬 Advanced Usage Examples 🔬

You can find a variety of examples in the [docs/examples](https://github.com/unclecode/crawl4ai/tree/main/docs/examples) directory; some popular ones are shared here.

<details>
<summary>📝 <strong>Heuristic Markdown Generation with Clean and Fit Markdown</strong></summary>

```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.content_filter_strategy import PruningContentFilter, BM25ContentFilter
from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator

async def main():
    browser_config = BrowserConfig(
        headless=True,
        verbose=True,
    )
    run_config = CrawlerRunConfig(
        cache_mode=CacheMode.ENABLED,
        markdown_generator=DefaultMarkdownGenerator(
            content_filter=PruningContentFilter(threshold=0.48, threshold_type="fixed", min_word_threshold=0)
        ),
        # markdown_generator=DefaultMarkdownGenerator(
        #     content_filter=BM25ContentFilter(user_query="WHEN_WE_FOCUS_BASED_ON_A_USER_QUERY", bm25_threshold=1.0)
        # ),
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://docs.micronaut.io/4.7.6/guide/",
            config=run_config
        )
        print(len(result.markdown.raw_markdown))
        print(len(result.markdown.fit_markdown))

if __name__ == "__main__":
    asyncio.run(main())
```

</details>

<details>
<summary>🖥️ <strong>Executing JavaScript & Extracting Structured Data without LLMs</strong></summary>

```python
import asyncio
import json
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai import JsonCssExtractionStrategy

async def main():
    schema = {
        "name": "KidoCode Courses",
        "baseSelector": "section.charge-methodology .w-tab-content > div",
        "fields": [
            {
                "name": "section_title",
                "selector": "h3.heading-50",
                "type": "text",
            },
            {
                "name": "section_description",
                "selector": ".charge-content",
                "type": "text",
            },
            {
                "name": "course_name",
                "selector": ".text-block-93",
                "type": "text",
            },
            {
                "name": "course_description",
                "selector": ".course-content-text",
                "type": "text",
            },
            {
                "name": "course_icon",
                "selector": ".image-92",
                "type": "attribute",
                "attribute": "src"
            }
        ]
    }

    extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)

    browser_config = BrowserConfig(
        headless=False,
        verbose=True
    )
    run_config = CrawlerRunConfig(
        extraction_strategy=extraction_strategy,
        js_code=["""(async () => {const tabs = document.querySelectorAll("section.charge-methodology .tabs-menu-3 > div");for(let tab of tabs) {tab.scrollIntoView();tab.click();await new Promise(r => setTimeout(r, 500));}})();"""],
        cache_mode=CacheMode.BYPASS
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://www.kidocode.com/degrees/technology",
            config=run_config
        )

        companies = json.loads(result.extracted_content)
        print(f"Successfully extracted {len(companies)} companies")
        print(json.dumps(companies[0], indent=2))

if __name__ == "__main__":
    asyncio.run(main())
```

</details>

<details>
<summary>📚 <strong>Extracting Structured Data with LLMs</strong></summary>

```python
import os
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LLMConfig
from crawl4ai import LLMExtractionStrategy
from pydantic import BaseModel, Field

class OpenAIModelFee(BaseModel):
    model_name: str = Field(..., description="Name of the OpenAI model.")
    input_fee: str = Field(..., description="Fee for input token for the OpenAI model.")
    output_fee: str = Field(..., description="Fee for output token for the OpenAI model.")

async def main():
    browser_config = BrowserConfig(verbose=True)
    run_config = CrawlerRunConfig(
        word_count_threshold=1,
        extraction_strategy=LLMExtractionStrategy(
            # Here you can use any provider that the LiteLLM library supports, for instance:
            # llm_config=LLMConfig(provider="ollama/qwen2", api_token="no-token"),
            llm_config=LLMConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
            schema=OpenAIModelFee.schema(),
            extraction_type="schema",
            instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
            Do not miss any models in the entire content. One extracted model JSON format should look like this:
            {"model_name": "GPT-4", "input_fee": "US$10.00 / 1M tokens", "output_fee": "US$30.00 / 1M tokens"}."""
        ),
        cache_mode=CacheMode.BYPASS,
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url='https://openai.com/api/pricing/',
            config=run_config
        )
        print(result.extracted_content)

if __name__ == "__main__":
    asyncio.run(main())
```

</details>

<details>
<summary>🤖 <strong>Using Your Own Browser with a Custom User Profile</strong></summary>

```python
import os
import asyncio
from pathlib import Path
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode

async def test_news_crawl():
    # Create a persistent user data directory
    user_data_dir = os.path.join(Path.home(), ".crawl4ai", "browser_profile")
    os.makedirs(user_data_dir, exist_ok=True)

    browser_config = BrowserConfig(
        verbose=True,
        headless=True,
        user_data_dir=user_data_dir,
        use_persistent_context=True,
    )
    run_config = CrawlerRunConfig(
        cache_mode=CacheMode.BYPASS
    )

    async with AsyncWebCrawler(config=browser_config) as crawler:
        url = "ADDRESS_OF_A_CHALLENGING_WEBSITE"

        result = await crawler.arun(
            url,
            config=run_config,
            magic=True,
        )

        print(f"Successfully crawled {url}")
        print(f"Content length: {len(result.markdown)}")

if __name__ == "__main__":
    asyncio.run(test_news_crawl())
```

</details>

## ✨ Recent Updates

### Version 0.7.0 Release Highlights - The Adaptive Intelligence Update

- **🧠 Adaptive Crawling**: Your crawler now learns and adapts to website patterns automatically:

```python
config = AdaptiveConfig(
    confidence_threshold=0.7,    # Min confidence to stop crawling
    max_depth=5,                 # Maximum crawl depth
    max_pages=20,                # Maximum number of pages to crawl
    strategy="statistical"
)

async with AsyncWebCrawler() as crawler:
    adaptive_crawler = AdaptiveCrawler(crawler, config)
    state = await adaptive_crawler.digest(
        start_url="https://news.example.com",
        query="latest news content"
    )
    # Crawler learns patterns and improves extraction over time
```

- **🌊 Virtual Scroll Support**: Complete content extraction from infinite scroll pages:

```python
scroll_config = VirtualScrollConfig(
    container_selector="[data-testid='feed']",
    scroll_count=20,
    scroll_by="container_height",
    wait_after_scroll=1.0
)

result = await crawler.arun(url, config=CrawlerRunConfig(
    virtual_scroll_config=scroll_config
))
```

- **🔗 Intelligent Link Analysis**: 3-layer scoring system for smart link prioritization:

```python
link_config = LinkPreviewConfig(
    query="machine learning tutorials",
    score_threshold=0.3,
    concurrent_requests=10
)

result = await crawler.arun(url, config=CrawlerRunConfig(
    link_preview_config=link_config,
    score_links=True
))
# Links ranked by relevance and quality
```

- **🎣 Async URL Seeder**: Discover thousands of URLs in seconds:

```python
seeder = AsyncUrlSeeder(SeedingConfig(
    source="sitemap+cc",
    pattern="*/blog/*",
    query="python tutorials",
    score_threshold=0.4
))

urls = await seeder.discover("https://example.com")
```

- **⚡ Performance Boost**: Up to 3x faster with optimized resource handling and memory efficiency

Read the full details in our [0.7.0 Release Notes](https://docs.crawl4ai.com/blog/release-v0.7.0) or check the [CHANGELOG](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).

## Version Numbering in Crawl4AI

Crawl4AI follows standard Python version numbering conventions (PEP 440) to help users understand the stability and features of each release.

### Version Numbers Explained

Our version numbers follow this pattern: `MAJOR.MINOR.PATCH` (e.g., 0.4.3)

#### Pre-release Versions
We use different suffixes to indicate development stages:

- `dev` (0.4.3dev1): Development versions, unstable
- `a` (0.4.3a1): Alpha releases, experimental features
- `b` (0.4.3b1): Beta releases, feature complete but needs testing
- `rc` (0.4.3rc1): Release candidates, potential final version

#### Installation
- Regular installation (stable version):

```bash
pip install -U crawl4ai
```

- Install pre-release versions:

```bash
pip install crawl4ai --pre
```

- Install a specific version:

```bash
pip install crawl4ai==0.4.3b1
```

#### Why Pre-releases?
We use pre-releases to:
- Test new features in real-world scenarios
- Gather feedback before final releases
- Ensure stability for production users
- Allow early adopters to try new features

For production environments, we recommend using the stable version. For testing new features, you can opt in to pre-releases using the `--pre` flag.

## 📖 Documentation & Roadmap

> 🚨 **Documentation Update Alert**: We're undertaking a major documentation overhaul next week to reflect recent updates and improvements. Stay tuned for a more comprehensive and up-to-date guide!

For current documentation, including installation instructions, advanced features, and API reference, visit our [Documentation Website](https://docs.crawl4ai.com/).

To check our development plans and upcoming features, visit our [Roadmap](https://github.com/unclecode/crawl4ai/blob/main/ROADMAP.md).

<details>
<summary>📈 <strong>Development TODOs</strong></summary>

- [x] 0. Graph Crawler: Smart website traversal using graph search algorithms for comprehensive nested page extraction
- [ ] 1. Question-Based Crawler: Natural language driven web discovery and content extraction
- [ ] 2. Knowledge-Optimal Crawler: Smart crawling that maximizes knowledge while minimizing data extraction
- [ ] 3. Agentic Crawler: Autonomous system for complex multi-step crawling operations
- [ ] 4. Automated Schema Generator: Convert natural language to extraction schemas
- [ ] 5. Domain-Specific Scrapers: Pre-configured extractors for common platforms (academic, e-commerce)
- [ ] 6. Web Embedding Index: Semantic search infrastructure for crawled content
- [ ] 7. Interactive Playground: Web UI for testing, comparing strategies with AI assistance
- [ ] 8. Performance Monitor: Real-time insights into crawler operations
- [ ] 9. Cloud Integration: One-click deployment solutions across cloud providers
- [ ] 10. Sponsorship Program: Structured support system with tiered benefits
- [ ] 11. Educational Content: "How to Crawl" video series and interactive tutorials

</details>

## 🤝 Contributing

We welcome contributions from the open-source community. Check out our [contribution guidelines](https://github.com/unclecode/crawl4ai/blob/main/CONTRIBUTORS.md) for more information.

## 📄 License & Attribution

This project is licensed under the Apache License 2.0, and attribution via one of the methods below is required. See the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE) file for details.

### Attribution Requirements
When using Crawl4AI, you must include one of the following attribution methods:

#### 1. Badge Attribution (Recommended)
Add one of these badges to your README, documentation, or website:

| Theme | Badge |
|-------|-------|
| **Disco Theme (Animated)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Night Theme (Dark with Neon)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Dark Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Light Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/></a> |

HTML code for adding the badges:

```html
<!-- Disco Theme (Animated) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Night Theme (Dark with Neon) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Dark Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Light Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Simple Shield Badge -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://img.shields.io/badge/Powered%20by-Crawl4AI-blue?style=flat-square" alt="Powered by Crawl4AI"/>
</a>
```

#### 2. Text Attribution
Add this line to your documentation:

```
This project uses Crawl4AI (https://github.com/unclecode/crawl4ai) for web data extraction.
```

## 📚 Citation

If you use Crawl4AI in your research or project, please cite:

```bibtex
@software{crawl4ai2024,
  author = {UncleCode},
  title = {Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper},
  year = {2024},
  publisher = {GitHub},
  journal = {GitHub Repository},
  howpublished = {\url{https://github.com/unclecode/crawl4ai}},
  commit = {Please use the commit hash you're working with}
}
```

Text citation format:

```
UncleCode. (2024). Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper [Computer software].
GitHub. https://github.com/unclecode/crawl4ai
```

## 📧 Contact

For questions, suggestions, or feedback, feel free to reach out:

- GitHub: [unclecode](https://github.com/unclecode)
- Twitter: [@unclecode](https://twitter.com/unclecode)
- Website: [crawl4ai.com](https://crawl4ai.com)

Happy Crawling! 🕸️🚀

## 💖 Support Crawl4AI

> 🎉 **Sponsorship Program Just Launched!** Be among the first 50 **Founding Sponsors** and get permanent recognition in our Hall of Fame!

Crawl4AI is the #1 trending open-source web crawler with 51K+ stars. Your support ensures we stay independent, innovative, and free forever.

<div align="center">

[Become a Sponsor](https://github.com/sponsors/unclecode)

</div>

### 🤝 Sponsorship Tiers

- **🌱 Believer ($5/mo)**: Join the movement for data democratization
- **🚀 Builder ($50/mo)**: Get priority support and early feature access
- **💼 Growing Team ($500/mo)**: Bi-weekly syncs and optimization help
- **🏢 Data Infrastructure Partner ($2000/mo)**: Full partnership with dedicated support

**Why sponsor?** Every tier includes real benefits. No more rate-limited APIs. Own your data pipeline. Build data sovereignty together.

[View All Tiers & Benefits →](https://github.com/sponsors/unclecode)

### 🏆 Our Sponsors

#### 👑 Founding Sponsors (First 50)
*Be part of history - [Become a Founding Sponsor](https://github.com/sponsors/unclecode)*

<!-- Founding sponsors will be permanently recognized here -->

#### Current Sponsors
Thank you to all our sponsors who make this project possible!

<!-- Sponsors will be automatically added here -->

## 🗾 Mission

Our mission is to unlock the value of personal and enterprise data by transforming digital footprints into structured, tradeable assets. Crawl4AI empowers individuals and organizations with open-source tools to extract and structure data, fostering a shared data economy.

We envision a future where AI is powered by real human knowledge, ensuring data creators directly benefit from their contributions. By democratizing data and enabling ethical sharing, we are laying the foundation for authentic AI advancement.

<details>
<summary>🔑 <strong>Key Opportunities</strong></summary>

- **Data Capitalization**: Transform digital footprints into measurable, valuable assets.
- **Authentic AI Data**: Provide AI systems with real human insights.
- **Shared Economy**: Create a fair data marketplace that benefits data creators.

</details>

<details>
<summary>🚀 <strong>Development Pathway</strong></summary>

1. **Open-Source Tools**: Community-driven platforms for transparent data extraction.
2. **Digital Asset Structuring**: Tools to organize and value digital knowledge.
3. **Ethical Data Marketplace**: A secure, fair platform for exchanging structured data.

For more details, see our [full mission statement](./MISSION.md).
</details>

## Star History

[Star History Chart](https://star-history.com/#unclecode/crawl4ai&Date)

---

### README.md (600 changed lines)

@@ -10,28 +10,52 @@

[PyPI version](https://badge.fury.io/py/crawl4ai)
[Python versions](https://pypi.org/project/crawl4ai/)
[Downloads](https://pepy.tech/project/crawl4ai)
[GitHub Sponsors](https://github.com/sponsors/unclecode)

[Documentation](https://crawl4ai.readthedocs.io/)
[License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE)
[Code style: black](https://github.com/psf/black)
[Security: bandit](https://github.com/PyCQA/bandit)

<p align="center">
    <a href="https://x.com/crawl4ai">
        <img src="https://img.shields.io/badge/Follow%20on%20X-000000?style=for-the-badge&logo=x&logoColor=white" alt="Follow on X" />
    </a>
    <a href="https://www.linkedin.com/company/crawl4ai">
        <img src="https://img.shields.io/badge/Follow%20on%20LinkedIn-0077B5?style=for-the-badge&logo=linkedin&logoColor=white" alt="Follow on LinkedIn" />
    </a>
    <a href="https://discord.gg/jP8KfhDhyN">
        <img src="https://img.shields.io/badge/Join%20our%20Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white" alt="Join our Discord" />
    </a>
</p>
</div>

Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant community. It delivers blazing-fast, AI-ready web crawling tailored for LLMs, AI agents, and data pipelines. Open source, flexible, and built for real-time performance, Crawl4AI empowers developers with unmatched speed, precision, and deployment ease.
Crawl4AI turns the web into clean, LLM ready Markdown for RAG, agents, and data pipelines. Fast, controllable, battle tested by a 50k+ star community.

[✨ Check out latest update v0.4.24x](#-recent-updates)
[✨ Check out latest update v0.7.6](#-recent-updates)

🎉 **Version 0.4.24x is out!** Major improvements in extraction strategies with enhanced JSON handling, SSL security, and Amazon product extraction. Plus, a completely revamped content filtering system! [Read the release notes →](https://crawl4ai.com/mkdocs/blog)
✨ **New in v0.7.6**: Complete Webhook Infrastructure for Docker Job Queue API! Real-time notifications for both `/crawl/job` and `/llm/job` endpoints with exponential backoff retry, custom headers, and flexible delivery modes. No more polling! [Release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.6.md)

## 🧐 Why Crawl4AI?
✨ Recent v0.7.5: Docker Hooks System with function-based API for pipeline customization, Enhanced LLM Integration with custom providers, HTTPS Preservation, and multiple community-reported bug fixes. [Release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.5.md)

✨ Previous v0.7.4: Revolutionary LLM Table Extraction with intelligent chunking, enhanced concurrency fixes, memory management refactor, and critical stability improvements. [Release notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.4.md)

<details>
<summary>🤓 <strong>My Personal Story</strong></summary>

I grew up on an Amstrad, thanks to my dad, and never stopped building. In grad school I specialized in NLP and built crawlers for research. That’s where I learned how much extraction matters.

In 2023, I needed web-to-Markdown. The “open source” option wanted an account, API token, and $16, and still under-delivered. I went turbo anger mode, built Crawl4AI in days, and it went viral. Now it’s the most-starred crawler on GitHub.

I made it open source for **availability**, anyone can use it without a gate. Now I’m building the platform for **affordability**, anyone can run serious crawls without breaking the bank. If that resonates, join in, send feedback, or just crawl something amazing.
</details>

<details>
<summary>Why developers pick Crawl4AI</summary>

- **LLM ready output**, smart Markdown with headings, tables, code, citation hints
- **Fast in practice**, async browser pool, caching, minimal hops
- **Full control**, sessions, proxies, cookies, user scripts, hooks
- **Adaptive intelligence**, learns site patterns, explores only what matters
- **Deploy anywhere**, zero keys, CLI and Docker, cloud friendly
</details>

1. **Built for LLMs**: Creates smart, concise Markdown optimized for RAG and fine-tuning applications.
2. **Lightning Fast**: Delivers results 6x faster with real-time, cost-efficient performance.
3. **Flexible Browser Control**: Offers session management, proxies, and custom hooks for seamless data access.
4. **Heuristic Intelligence**: Uses advanced algorithms for efficient extraction, reducing reliance on costly models.
5. **Open Source & Deployable**: Fully open-source with no API keys—ready for Docker and cloud integration.
6. **Thriving Community**: Actively maintained by a vibrant community and the #1 trending GitHub repository.

## 🚀 Quick Start

@@ -40,6 +64,9 @@ Crawl4AI is the #1 trending GitHub repository, actively maintained by a vibrant

# Install the package
pip install -U crawl4ai

# For pre release versions
pip install crawl4ai --pre

# Run post-installation setup
crawl4ai-setup

@@ -52,7 +79,7 @@ If you encounter any browser-related issues, you can install them manually:

python -m playwright install --with-deps chromium
```

2. Run a simple web crawl:
2. Run a simple web crawl with Python:
```python
import asyncio
from crawl4ai import *

@@ -68,6 +95,45 @@ if __name__ == "__main__":
    asyncio.run(main())
```

3. Or use the new command-line interface:
```bash
# Basic crawl with markdown output
crwl https://www.nbcnews.com/business -o markdown

# Deep crawl with BFS strategy, max 10 pages
crwl https://docs.crawl4ai.com --deep-crawl bfs --max-pages 10

# Use LLM extraction with a specific question
crwl https://www.example.com/products -q "Extract all product prices"
```

## 💖 Support Crawl4AI

> 🎉 **Sponsorship Program Now Open!** After powering 51K+ developers and 1 year of growth, Crawl4AI is launching dedicated support for **startups** and **enterprises**. Be among the first 50 **Founding Sponsors** for permanent recognition in our Hall of Fame.

Crawl4AI is the #1 trending open-source web crawler on GitHub. Your support keeps it independent, innovative, and free for the community — while giving you direct access to premium benefits.

<div align="">

[Sponsor](https://github.com/sponsors/unclecode)
[Sponsor](https://github.com/sponsors/unclecode)

</div>

### 🤝 Sponsorship Tiers

- **🌱 Believer ($5/mo)** — Join the movement for data democratization
- **🚀 Builder ($50/mo)** — Priority support & early access to features
- **💼 Growing Team ($500/mo)** — Bi-weekly syncs & optimization help
- **🏢 Data Infrastructure Partner ($2000/mo)** — Full partnership with dedicated support
*Custom arrangements available - see [SPONSORS.md](SPONSORS.md) for details & contact*

**Why sponsor?**
No rate-limited APIs. No lock-in. Build and own your data pipeline with direct guidance from the creator of Crawl4AI.

[See All Tiers & Benefits →](https://github.com/sponsors/unclecode)

## ✨ Features

<details>
@@ -96,6 +162,7 @@ if __name__ == "__main__":

- 🖥️ **Managed Browser**: Use user-owned browsers with full control, avoiding bot detection.
- 🔄 **Remote Browser Control**: Connect to Chrome Developer Tools Protocol for remote, large-scale data extraction.
- 👤 **Browser Profiler**: Create and manage persistent profiles with saved authentication states, cookies, and settings.
- 🔒 **Session Management**: Preserve browser states and reuse them for multi-step crawling.
- 🧩 **Proxy Support**: Seamlessly connect to proxies with authentication for secure access.
- ⚙️ **Full Browser Control**: Modify headers, cookies, user agents, and more for tailored crawling setups.
@@ -112,7 +179,7 @@ if __name__ == "__main__":
- 📸 **Screenshots**: Capture page screenshots during crawling for debugging or analysis.
- 📂 **Raw Data Crawling**: Directly process raw HTML (`raw:`) or local files (`file://`).
- 🔗 **Comprehensive Link Extraction**: Extracts internal, external links, and embedded iframe content.
- 🛠️ **Customizable Hooks**: Define hooks at every step to customize crawling behavior.
- 🛠️ **Customizable Hooks**: Define hooks at every step to customize crawling behavior (supports both string and function-based APIs).
- 💾 **Caching**: Cache data for improved speed and to avoid redundant fetches.
- 📄 **Metadata Extraction**: Retrieve structured metadata from web pages.
- 📡 **IFrame Content Extraction**: Seamless extraction from embedded iframe content.
@@ -124,10 +191,11 @@ if __name__ == "__main__":
<details>
<summary>🚀 <strong>Deployment</strong></summary>

- 🐳 **Dockerized Setup**: Optimized Docker image with API server for easy deployment.
- 🐳 **Dockerized Setup**: Optimized Docker image with FastAPI server for easy deployment.
- 🔑 **Secure Authentication**: Built-in JWT token authentication for API security.
- 🔄 **API Gateway**: One-click deployment with secure token authentication for API-based workflows.
- 🌐 **Scalable Architecture**: Designed for mass-scale production and optimized server performance.
- ⚙️ **DigitalOcean Deployment**: Ready-to-deploy configurations for DigitalOcean and similar platforms.
- ☁️ **Cloud Deployment**: Ready-to-deploy configurations for major cloud platforms.

</details>

@@ -148,7 +216,7 @@ if __name__ == "__main__":

✨ Play around with this [Colab notebook](https://colab.research.google.com/drive/1SgRPrByQLzjRfwoRNq1wSGE9nYY_EE8C?usp=sharing)

✨ Visit our [Documentation Website](https://crawl4ai.com/mkdocs/)
✨ Visit our [Documentation Website](https://docs.crawl4ai.com/)

## Installation 🛠️

@@ -223,28 +291,27 @@ pip install -e ".[all]" # Install all optional features
<details>
<summary>🐳 <strong>Docker Deployment</strong></summary>

> 🚀 **Major Changes Coming!** We're developing a completely new Docker implementation that will make deployment even more efficient and seamless. The current Docker setup is being deprecated in favor of this new solution.
> 🚀 **Now Available!** Our completely redesigned Docker implementation is here! This new solution makes deployment more efficient and seamless than ever.

### Current Docker Support
### New Docker Features

The existing Docker implementation is being deprecated and will be replaced soon. If you still need to use Docker with the current version:
The new Docker implementation includes:
- **Browser pooling** with page pre-warming for faster response times
- **Interactive playground** to test and generate request code
- **MCP integration** for direct connection to AI tools like Claude Code
- **Comprehensive API endpoints** including HTML extraction, screenshots, PDF generation, and JavaScript execution
- **Multi-architecture support** with automatic detection (AMD64/ARM64)
- **Optimized resources** with improved memory management

- 📚 [Deprecated Docker Setup](./docs/deprecated/docker-deployment.md) - Instructions for the current Docker implementation
- ⚠️ Note: This setup will be replaced in the next major release
### Getting Started

### What's Coming Next?
```bash
# Pull and run the latest release
docker pull unclecode/crawl4ai:latest
docker run -d -p 11235:11235 --name crawl4ai --shm-size=1g unclecode/crawl4ai:latest

Our new Docker implementation will bring:
- Improved performance and resource efficiency
- Streamlined deployment process
- Better integration with Crawl4AI features
- Enhanced scalability options

Stay connected with our [GitHub repository](https://github.com/unclecode/crawl4ai) for updates!

</details>

---
# Visit the playground at http://localhost:11235/playground
```

### Quick Test

@@ -256,22 +323,31 @@ import requests
# Submit a crawl job
response = requests.post(
    "http://localhost:11235/crawl",
    json={"urls": "https://example.com", "priority": 10}
    json={"urls": ["https://example.com"], "priority": 10}
)
task_id = response.json()["task_id"]

# Continue polling until the task is complete (status="completed")
result = requests.get(f"http://localhost:11235/task/{task_id}")
if response.status_code == 200:
    print("Crawl job submitted successfully.")

if "results" in response.json():
    results = response.json()["results"]
    print("Crawl job completed. Results:")
    for result in results:
        print(result)
else:
    task_id = response.json()["task_id"]
    print(f"Crawl job submitted. Task ID:: {task_id}")
    result = requests.get(f"http://localhost:11235/task/{task_id}")
```

For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, environment variables, and usage examples, see our [Docker Deployment Guide](https://crawl4ai.com/mkdocs/basic/docker-deployment/).
For more examples, see our [Docker Examples](https://github.com/unclecode/crawl4ai/blob/main/docs/examples/docker_example.py). For advanced configuration, environment variables, and usage examples, see our [Docker Deployment Guide](https://docs.crawl4ai.com/basic/docker-deployment/).

</details>

---

## 🔬 Advanced Usage Examples 🔬

You can check the project structure in the directory [https://github.com/unclecode/crawl4ai/docs/examples](docs/examples). Over there, you can find a variety of examples; here, some popular examples are shared.
You can check the project structure in the directory [docs/examples](https://github.com/unclecode/crawl4ai/tree/main/docs/examples). Over there, you can find a variety of examples; here, some popular examples are shared.

<details>
<summary>📝 <strong>Heuristic Markdown Generation with Clean and Fit Markdown</strong></summary>
@@ -299,12 +375,11 @@ async def main():

    async with AsyncWebCrawler(config=browser_config) as crawler:
        result = await crawler.arun(
            url="https://docs.micronaut.io/4.7.6/guide/",
            url="https://docs.micronaut.io/4.9.9/guide/",
            config=run_config
        )
        print(len(result.markdown))
        print(len(result.fit_markdown))
        print(len(result.markdown_v2.fit_markdown))
        print(len(result.markdown.raw_markdown))
        print(len(result.markdown.fit_markdown))

if __name__ == "__main__":
    asyncio.run(main())
@@ -318,7 +393,7 @@ if __name__ == "__main__":
```python
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.extraction_strategy import JsonCssExtractionStrategy
from crawl4ai import JsonCssExtractionStrategy
import json

async def main():
@@ -352,7 +427,7 @@ async def main():
            "type": "attribute",
            "attribute": "src"
        }
    }
    ]
}

extraction_strategy = JsonCssExtractionStrategy(schema, verbose=True)
@@ -391,8 +466,8 @@ if __name__ == "__main__":
```python
import os
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
from crawl4ai.extraction_strategy import LLMExtractionStrategy
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode, LLMConfig
from crawl4ai import LLMExtractionStrategy
from pydantic import BaseModel, Field

class OpenAIModelFee(BaseModel):
@@ -407,7 +482,7 @@ async def main():
        extraction_strategy=LLMExtractionStrategy(
            # Here you can use any provider that Litellm library supports, for instance: ollama/qwen2
            # provider="ollama/qwen2", api_token="no-token",
            provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY'),
            llm_config = LLMConfig(provider="openai/gpt-4o", api_token=os.getenv('OPENAI_API_KEY')),
            schema=OpenAIModelFee.schema(),
            extraction_type="schema",
            instruction="""From the crawled content, extract all mentioned model names along with their fees for input and output tokens.
@@ -431,7 +506,7 @@ if __name__ == "__main__":
</details>

<details>
<summary>🤖 <strong>Using You own Browswer with Custome User Profile</strong></summary>
<summary>🤖 <strong>Using Your own Browser with Custom User Profile</strong></summary>

```python
import os, sys
@@ -469,24 +544,288 @@ async def test_news_crawl():

</details>

## ✨ Recent Updates
<details>
<summary><strong>Version 0.7.5 Release Highlights - The Docker Hooks & Security Update</strong></summary>

- 🔒 **Enhanced SSL & Security**: New SSL certificate handling with custom paths and validation options for secure crawling
- 🔍 **Smart Content Filtering**: Advanced filtering system with regex support and efficient chunking strategies
- 📦 **Improved JSON Extraction**: Support for complex JSONPath, JSON-CSS, and Microdata extraction
- 🏗️ **New Field Types**: Added `computed`, `conditional`, `aggregate`, and `template` field types
- ⚡ **Performance Boost**: Optimized caching, parallel processing, and memory management
- 🐛 **Better Error Handling**: Enhanced debugging capabilities with detailed error tracking
- 🔐 **Security Features**: Improved input validation and safe expression evaluation
- **🔧 Docker Hooks System**: Complete pipeline customization with user-provided Python functions at 8 key points
- **✨ Function-Based Hooks API (NEW)**: Write hooks as regular Python functions with full IDE support:

```python
from crawl4ai import hooks_to_string
from crawl4ai.docker_client import Crawl4aiDockerClient

Read the full details of this release in our [0.4.24 Release Notes](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).
# Define hooks as regular Python functions
async def on_page_context_created(page, context, **kwargs):
    """Block images to speed up crawling"""
    await context.route("**/*.{png,jpg,jpeg,gif,webp}", lambda route: route.abort())
    await page.set_viewport_size({"width": 1920, "height": 1080})
    return page

async def before_goto(page, context, url, **kwargs):
    """Add custom headers"""
    await page.set_extra_http_headers({'X-Crawl4AI': 'v0.7.5'})
    return page

# Option 1: Use hooks_to_string() utility for REST API
hooks_code = hooks_to_string({
    "on_page_context_created": on_page_context_created,
    "before_goto": before_goto
})

# Option 2: Docker client with automatic conversion (Recommended)
client = Crawl4aiDockerClient(base_url="http://localhost:11235")
results = await client.crawl(
    urls=["https://httpbin.org/html"],
    hooks={
        "on_page_context_created": on_page_context_created,
        "before_goto": before_goto
    }
)
# ✓ Full IDE support, type checking, and reusability!
```

- **🤖 Enhanced LLM Integration**: Custom providers with temperature control and base_url configuration
- **🔒 HTTPS Preservation**: Secure internal link handling with `preserve_https_for_internal_links=True`
- **🐍 Python 3.10+ Support**: Modern language features and enhanced performance
- **🛠️ Bug Fixes**: Resolved multiple community-reported issues including URL processing, JWT authentication, and proxy configuration

[Full v0.7.5 Release Notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.5.md)

</details>

<details>
<summary><strong>Version 0.7.4 Release Highlights - The Intelligent Table Extraction & Performance Update</strong></summary>

- **🚀 LLMTableExtraction**: Revolutionary table extraction with intelligent chunking for massive tables:

```python
from crawl4ai import LLMTableExtraction, LLMConfig

# Configure intelligent table extraction
table_strategy = LLMTableExtraction(
    llm_config=LLMConfig(provider="openai/gpt-4.1-mini"),
    enable_chunking=True,           # Handle massive tables
    chunk_token_threshold=5000,     # Smart chunking threshold
    overlap_threshold=100,          # Maintain context between chunks
    extraction_type="structured"    # Get structured data output
)

config = CrawlerRunConfig(table_extraction_strategy=table_strategy)
result = await crawler.arun("https://complex-tables-site.com", config=config)

# Tables are automatically chunked, processed, and merged
for table in result.tables:
    print(f"Extracted table: {len(table['data'])} rows")
```

- **⚡ Dispatcher Bug Fix**: Fixed sequential processing bottleneck in arun_many for fast-completing tasks
- **🧹 Memory Management Refactor**: Consolidated memory utilities into main utils module for cleaner architecture
- **🔧 Browser Manager Fixes**: Resolved race conditions in concurrent page creation with thread-safe locking
- **🔗 Advanced URL Processing**: Better handling of raw:// URLs and base tag link resolution
- **🛡️ Enhanced Proxy Support**: Flexible proxy configuration supporting both dict and string formats

[Full v0.7.4 Release Notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.4.md)

</details>

<details>
<summary><strong>Version 0.7.3 Release Highlights - The Multi-Config Intelligence Update</strong></summary>

- **🕵️ Undetected Browser Support**: Bypass sophisticated bot detection systems:

```python
from crawl4ai import AsyncWebCrawler, BrowserConfig

browser_config = BrowserConfig(
    browser_type="undetected",   # Use undetected Chrome
    headless=True,               # Can run headless with stealth
    extra_args=[
        "--disable-blink-features=AutomationControlled",
        "--disable-web-security"
    ]
)

async with AsyncWebCrawler(config=browser_config) as crawler:
    result = await crawler.arun("https://protected-site.com")
    # Successfully bypass Cloudflare, Akamai, and custom bot detection
```

- **🎨 Multi-URL Configuration**: Different strategies for different URL patterns in one batch:

```python
from crawl4ai import CrawlerRunConfig, MatchMode

configs = [
    # Documentation sites - aggressive caching
    CrawlerRunConfig(
        url_matcher=["*docs*", "*documentation*"],
        cache_mode="write",
        markdown_generator_options={"include_links": True}
    ),

    # News/blog sites - fresh content
    CrawlerRunConfig(
        url_matcher=lambda url: 'blog' in url or 'news' in url,
        cache_mode="bypass"
    ),

    # Fallback for everything else
    CrawlerRunConfig()
]

results = await crawler.arun_many(urls, config=configs)
# Each URL gets the perfect configuration automatically
```

- **🧠 Memory Monitoring**: Track and optimize memory usage during crawling:

```python
from crawl4ai.memory_utils import MemoryMonitor

monitor = MemoryMonitor()
monitor.start_monitoring()

results = await crawler.arun_many(large_url_list)

report = monitor.get_report()
print(f"Peak memory: {report['peak_mb']:.1f} MB")
print(f"Efficiency: {report['efficiency']:.1f}%")
# Get optimization recommendations
```

- **📊 Enhanced Table Extraction**: Direct DataFrame conversion from web tables:

```python
result = await crawler.arun("https://site-with-tables.com")

# New way - direct table access
if result.tables:
    import pandas as pd
    for table in result.tables:
        df = pd.DataFrame(table['data'])
        print(f"Table: {df.shape[0]} rows × {df.shape[1]} columns")
```

- **💰 GitHub Sponsors**: 4-tier sponsorship system for project sustainability
- **🐳 Docker LLM Flexibility**: Configure providers via environment variables

[Full v0.7.3 Release Notes →](https://github.com/unclecode/crawl4ai/blob/main/docs/blog/release-v0.7.3.md)

</details>

<details>
|
||||
<summary><strong>Version 0.7.0 Release Highlights - The Adaptive Intelligence Update</strong></summary>
|
||||
|
||||
- **🧠 Adaptive Crawling**: Your crawler now learns and adapts to website patterns automatically:
  ```python
  config = AdaptiveConfig(
      confidence_threshold=0.7,  # Min confidence to stop crawling
      max_depth=5,               # Maximum crawl depth
      max_pages=20,              # Maximum number of pages to crawl
      strategy="statistical"
  )

  async with AsyncWebCrawler() as crawler:
      adaptive_crawler = AdaptiveCrawler(crawler, config)
      state = await adaptive_crawler.digest(
          start_url="https://news.example.com",
          query="latest news content"
      )
      # Crawler learns patterns and improves extraction over time
  ```

- **🌊 Virtual Scroll Support**: Complete content extraction from infinite scroll pages:
  ```python
  scroll_config = VirtualScrollConfig(
      container_selector="[data-testid='feed']",
      scroll_count=20,
      scroll_by="container_height",
      wait_after_scroll=1.0
  )

  result = await crawler.arun(url, config=CrawlerRunConfig(
      virtual_scroll_config=scroll_config
  ))
  ```

- **🔗 Intelligent Link Analysis**: 3-layer scoring system for smart link prioritization:
  ```python
  link_config = LinkPreviewConfig(
      query="machine learning tutorials",
      score_threshold=0.3,
      concurrent_requests=10
  )

  result = await crawler.arun(url, config=CrawlerRunConfig(
      link_preview_config=link_config,
      score_links=True
  ))
  # Links ranked by relevance and quality
  ```

- **🎣 Async URL Seeder**: Discover thousands of URLs in seconds:
  ```python
  seeder = AsyncUrlSeeder(SeedingConfig(
      source="sitemap+cc",
      pattern="*/blog/*",
      query="python tutorials",
      score_threshold=0.4
  ))

  urls = await seeder.discover("https://example.com")
  ```

- **⚡ Performance Boost**: Up to 3x faster with optimized resource handling and memory efficiency

Read the full details in our [0.7.0 Release Notes](https://docs.crawl4ai.com/blog/release-v0.7.0) or check the [CHANGELOG](https://github.com/unclecode/crawl4ai/blob/main/CHANGELOG.md).

</details>

## Version Numbering in Crawl4AI

Crawl4AI follows standard Python version numbering conventions (PEP 440) to help users understand the stability and features of each release.

<details>
<summary>📈 <strong>Version Numbers Explained</strong></summary>

Our version numbers follow this pattern: `MAJOR.MINOR.PATCH` (e.g., 0.4.3)

#### Pre-release Versions
We use different suffixes to indicate development stages (see the ordering sketch below):

- `dev` (0.4.3.dev1): Development versions, unstable
- `a` (0.4.3a1): Alpha releases, experimental features
- `b` (0.4.3b1): Beta releases, feature complete but needs testing
- `rc` (0.4.3rc1): Release candidates, potential final version
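
A quick way to sanity-check how these suffixes sort is the `packaging` library (a minimal sketch; assumes `packaging` is installed):

```python
from packaging.version import Version

# PEP 440 ordering: dev < alpha < beta < release candidate < final
versions = ["0.4.3.dev1", "0.4.3a1", "0.4.3b1", "0.4.3rc1", "0.4.3"]
assert sorted(versions, key=Version) == versions  # already in release order
```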

#### Installation
- Regular installation (stable version):
  ```bash
  pip install -U crawl4ai
  ```

- Install pre-release versions:
  ```bash
  pip install crawl4ai --pre
  ```

- Install a specific version:
  ```bash
  pip install crawl4ai==0.4.3b1
  ```

#### Why Pre-releases?
We use pre-releases to:
- Test new features in real-world scenarios
- Gather feedback before final releases
- Ensure stability for production users
- Allow early adopters to try new features

For production environments, we recommend using the stable version. For testing new features, you can opt in to pre-releases using the `--pre` flag.

</details>

## 📖 Documentation & Roadmap

> 🚨 **Documentation Update Alert**: We're undertaking a major documentation overhaul next week to reflect recent updates and improvements. Stay tuned for a more comprehensive and up-to-date guide!

For current documentation, including installation instructions, advanced features, and API reference, visit our [Documentation Website](https://docs.crawl4ai.com/).

To check our development plans and upcoming features, visit our [Roadmap](https://github.com/unclecode/crawl4ai/blob/main/ROADMAP.md).

@@ -494,27 +833,106 @@ To check our development plans and upcoming features, visit our [Roadmap](https:

<summary>📈 <strong>Development TODOs</strong></summary>

- [x] 0. Graph Crawler: Smart website traversal using graph search algorithms for comprehensive nested page extraction
- [x] 1. Question-Based Crawler: Natural language driven web discovery and content extraction
- [x] 2. Knowledge-Optimal Crawler: Smart crawling that maximizes knowledge while minimizing data extraction
- [x] 3. Agentic Crawler: Autonomous system for complex multi-step crawling operations
- [x] 4. Automated Schema Generator: Convert natural language to extraction schemas
- [x] 5. Domain-Specific Scrapers: Pre-configured extractors for common platforms (academic, e-commerce)
- [x] 6. Web Embedding Index: Semantic search infrastructure for crawled content
- [x] 7. Interactive Playground: Web UI for testing, comparing strategies with AI assistance
- [x] 8. Performance Monitor: Real-time insights into crawler operations
- [ ] 9. Cloud Integration: One-click deployment solutions across cloud providers
- [x] 10. Sponsorship Program: Structured support system with tiered benefits
- [ ] 11. Educational Content: "How to Crawl" video series and interactive tutorials

</details>

## 🤝 Contributing

We welcome contributions from the open-source community. Check out our [contribution guidelines](https://github.com/unclecode/crawl4ai/blob/main/CONTRIBUTORS.md) for more information.

## 📄 License & Attribution

This project is licensed under the Apache License 2.0 with a required attribution clause. See the [Apache 2.0 License](https://github.com/unclecode/crawl4ai/blob/main/LICENSE) file for details.

### Attribution Requirements
When using Crawl4AI, you must include one of the following attribution methods:

<details>
<summary>📈 <strong>1. Badge Attribution (Recommended)</strong></summary>
Add one of these badges to your README, documentation, or website:

| Theme | Badge |
|-------|-------|
| **Disco Theme (Animated)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Night Theme (Dark with Neon)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Dark Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/></a> |
| **Light Theme (Classic)** | <a href="https://github.com/unclecode/crawl4ai"><img src="./docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/></a> |

HTML code for adding the badges:
```html
<!-- Disco Theme (Animated) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-disco.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Night Theme (Dark with Neon) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-night.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Dark Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-dark.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Light Theme (Classic) -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://raw.githubusercontent.com/unclecode/crawl4ai/main/docs/assets/powered-by-light.svg" alt="Powered by Crawl4AI" width="200"/>
</a>

<!-- Simple Shield Badge -->
<a href="https://github.com/unclecode/crawl4ai">
  <img src="https://img.shields.io/badge/Powered%20by-Crawl4AI-blue?style=flat-square" alt="Powered by Crawl4AI"/>
</a>
```

</details>

<details>
<summary>📖 <strong>2. Text Attribution</strong></summary>
Add this line to your documentation:

```
This project uses Crawl4AI (https://github.com/unclecode/crawl4ai) for web data extraction.
```
</details>

## 📚 Citation

If you use Crawl4AI in your research or project, please cite:

```bibtex
@software{crawl4ai2024,
  author = {UncleCode},
  title = {Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper},
  year = {2024},
  publisher = {GitHub},
  journal = {GitHub Repository},
  howpublished = {\url{https://github.com/unclecode/crawl4ai}},
  commit = {Please use the commit hash you're working with}
}
```

Text citation format:
```
UncleCode. (2024). Crawl4AI: Open-source LLM Friendly Web Crawler & Scraper [Computer software].
GitHub. https://github.com/unclecode/crawl4ai
```

## 📧 Contact

@@ -551,6 +969,36 @@ We envision a future where AI is powered by real human knowledge, ensuring data

For more details, see our [full mission statement](./MISSION.md).
</details>

## 🌟 Current Sponsors

### 🏢 Enterprise Sponsors & Partners

Our enterprise sponsors and technology partners help scale Crawl4AI to power production-grade data pipelines.

| Company | About | Sponsorship Tier |
|---------|-------|------------------|
| <a href="https://dashboard.capsolver.com/passport/register?inviteCode=ESVSECTX5Q23" target="_blank"><picture><source width="120" media="(prefers-color-scheme: dark)" srcset="https://docs.crawl4ai.com/uploads/sponsors/20251013045338_72a71fa4ee4d2f40.png"><source width="120" media="(prefers-color-scheme: light)" srcset="https://www.capsolver.com/assets/images/logo-text.png"><img alt="Capsolver" src="https://www.capsolver.com/assets/images/logo-text.png"></picture></a> | AI-powered Captcha solving service. Supports all major Captcha types, including reCAPTCHA, Cloudflare, and more. | 🥈 Silver |
| <a href="https://kipo.ai" target="_blank"><img src="https://docs.crawl4ai.com/uploads/sponsors/20251013045751_2d54f57f117c651e.png" alt="Kipo" width="120"/></a> | Helps engineers and buyers find, compare, and source electronic & industrial parts in seconds, with specs, pricing, lead times & alternatives. | 🥇 Gold |
| <a href="https://www.kidocode.com/" target="_blank"><img src="https://docs.crawl4ai.com/uploads/sponsors/20251013045045_bb8dace3f0440d65.svg" alt="KidoCode" width="120"/><p align="center">KidoCode</p></a> | KidoCode is a hybrid technology and entrepreneurship school for kids aged 5–18, offering both online and on-campus education. | 🥇 Gold |
| <a href="https://www.alephnull.sg/" target="_blank"><img src="https://docs.crawl4ai.com/uploads/sponsors/20251013050323_a9e8e8c4c3650421.svg" alt="Aleph Null" width="120"/></a> | Singapore-based Aleph Null is Asia's leading edtech hub, dedicated to student-centric, AI-driven education, empowering learners with the tools to thrive in a fast-changing world. | 🥇 Gold |

### 🧑‍🤝‍🧑 Individual Sponsors

A heartfelt thanks to our individual supporters! Every contribution helps us keep our open-source mission alive and thriving!

<p align="left">
  <a href="https://github.com/hafezparast"><img src="https://avatars.githubusercontent.com/u/14273305?s=60&v=4" style="border-radius:50%;" width="64px;"/></a>
  <a href="https://github.com/ntohidi"><img src="https://avatars.githubusercontent.com/u/17140097?s=60&v=4" style="border-radius:50%;" width="64px;"/></a>
  <a href="https://github.com/Sjoeborg"><img src="https://avatars.githubusercontent.com/u/17451310?s=60&v=4" style="border-radius:50%;" width="64px;"/></a>
  <a href="https://github.com/romek-rozen"><img src="https://avatars.githubusercontent.com/u/30595969?s=60&v=4" style="border-radius:50%;" width="64px;"/></a>
  <a href="https://github.com/Kourosh-Kiyani"><img src="https://avatars.githubusercontent.com/u/34105600?s=60&v=4" style="border-radius:50%;" width="64px;"/></a>
  <a href="https://github.com/Etherdrake"><img src="https://avatars.githubusercontent.com/u/67021215?s=60&v=4" style="border-radius:50%;" width="64px;"/></a>
  <a href="https://github.com/shaman247"><img src="https://avatars.githubusercontent.com/u/211010067?s=60&v=4" style="border-radius:50%;" width="64px;"/></a>
  <a href="https://github.com/work-flow-manager"><img src="https://avatars.githubusercontent.com/u/217665461?s=60&v=4" style="border-radius:50%;" width="64px;"/></a>
</p>

> Want to join them? [Sponsor Crawl4AI →](https://github.com/sponsors/unclecode)

## Star History

[![Star History Chart](https://api.star-history.com/svg?repos=unclecode/crawl4ai&type=Date)](https://star-history.com/#unclecode/crawl4ai&Date)

65 SPONSORS.md Normal file
@@ -0,0 +1,65 @@

# 💖 Sponsors & Supporters

Thank you to everyone supporting Crawl4AI! Your sponsorship helps keep this project open-source and actively maintained.

## 👑 Founding Sponsors
*The first 50 sponsors who believed in our vision - permanently recognized*

<!-- Founding sponsors will be listed here with special recognition -->
🎉 **Become a Founding Sponsor!** Only [X/50] spots remaining! [Join now →](https://github.com/sponsors/unclecode)

---

## 🏢 Data Infrastructure Partners ($2000/month)
*These organizations are building their data sovereignty with Crawl4AI at the core*

<!-- Data Infrastructure Partners will be listed here -->
*Be the first Data Infrastructure Partner! [Join us →](https://github.com/sponsors/unclecode)*

---

## 💼 Growing Teams ($500/month)
*Teams scaling their data extraction with Crawl4AI*

<!-- Growing Teams will be listed here -->
*Your team could be here! [Become a sponsor →](https://github.com/sponsors/unclecode)*

---

## 🚀 Builders ($50/month)
*Developers and entrepreneurs building with Crawl4AI*

<!-- Builders will be listed here -->
*Join the builders! [Start sponsoring →](https://github.com/sponsors/unclecode)*

---

## 🌱 Believers ($5/month)
*The community supporting data democratization*

<!-- Believers will be listed here -->
*Thank you to all our community believers!*

---

## 🤝 Want to Sponsor?

Crawl4AI is the #1 trending open-source web crawler. We're building the future of data extraction - where organizations own their data pipelines instead of relying on rate-limited APIs.

### Available Sponsorship Tiers:
- **🌱 Believer** ($5/mo) - Support the movement
- **🚀 Builder** ($50/mo) - Priority support & early access
- **💼 Growing Team** ($500/mo) - Bi-weekly syncs & optimization
- **🏢 Data Infrastructure Partner** ($2000/mo) - Full partnership & dedicated support

[View all tiers and benefits →](https://github.com/sponsors/unclecode)

### Enterprise & Custom Partnerships

Building data extraction at scale? Need dedicated support or infrastructure? Let's talk about a custom partnership.

📧 Contact: [hello@crawl4ai.com](mailto:hello@crawl4ai.com) | 📅 [Schedule a call](https://calendar.app.google/rEpvi2UBgUQjWHfJ9)

---

*This list is updated regularly. Sponsors at $50+ tiers can submit their logos via [hello@crawl4ai.com](mailto:hello@crawl4ai.com).*
24 cliff.toml Normal file
@@ -0,0 +1,24 @@

[changelog]
# Template format
header = """
# Changelog\n
All notable changes to this project will be documented in this file.\n
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n
"""

# Organize commits by type
[git]
conventional_commits = true
filter_unconventional = true
commit_parsers = [
  { message = "^feat", group = "Added" },
  { message = "^fix", group = "Fixed" },
  { message = "^doc", group = "Documentation" },
  { message = "^perf", group = "Performance" },
  { message = "^refactor", group = "Changed" },
  { message = "^style", group = "Changed" },
  { message = "^test", group = "Testing" },
  { message = "^chore\\(release\\): prepare for", skip = true },
  { message = "^chore", group = "Miscellaneous Tasks" },
]
@@ -1,46 +1,228 @@

# __init__.py
import warnings

from .async_webcrawler import AsyncWebCrawler, CacheMode
# MODIFIED: Add SeedingConfig and VirtualScrollConfig here
from .async_configs import BrowserConfig, CrawlerRunConfig, HTTPCrawlerConfig, LLMConfig, ProxyConfig, GeolocationConfig, SeedingConfig, VirtualScrollConfig, LinkPreviewConfig, MatchMode

from .content_scraping_strategy import (
    ContentScrapingStrategy,
    LXMLWebScrapingStrategy,
    WebScrapingStrategy,  # Backward compatibility alias
)
from .async_logger import (
    AsyncLoggerBase,
    AsyncLogger,
)
from .proxy_strategy import (
    ProxyRotationStrategy,
    RoundRobinProxyStrategy,
)
from .extraction_strategy import (
    ExtractionStrategy,
    LLMExtractionStrategy,
    CosineStrategy,
    JsonCssExtractionStrategy,
    JsonXPathExtractionStrategy,
    JsonLxmlExtractionStrategy,
    RegexExtractionStrategy
)
from .chunking_strategy import ChunkingStrategy, RegexChunking
from .markdown_generation_strategy import DefaultMarkdownGenerator
from .__version__ import __version__
from .table_extraction import (
    TableExtractionStrategy,
    DefaultTableExtraction,
    NoTableExtraction,
    LLMTableExtraction,
)
from .content_filter_strategy import (
    PruningContentFilter,
    BM25ContentFilter,
    LLMContentFilter,
    RelevantContentFilter,
)
from .models import CrawlResult, MarkdownGenerationResult, DisplayMode
from .components.crawler_monitor import CrawlerMonitor
from .link_preview import LinkPreview
from .async_dispatcher import (
    MemoryAdaptiveDispatcher,
    SemaphoreDispatcher,
    RateLimiter,
    BaseDispatcher,
)
from .docker_client import Crawl4aiDockerClient
from .hub import CrawlerHub
from .browser_profiler import BrowserProfiler
from .deep_crawling import (
    DeepCrawlStrategy,
    BFSDeepCrawlStrategy,
    FilterChain,
    URLPatternFilter,
    DomainFilter,
    ContentTypeFilter,
    URLFilter,
    FilterStats,
    SEOFilter,
    KeywordRelevanceScorer,
    URLScorer,
    CompositeScorer,
    DomainAuthorityScorer,
    FreshnessScorer,
    PathDepthScorer,
    BestFirstCrawlingStrategy,
    DFSDeepCrawlStrategy,
    DeepCrawlDecorator,
)
# NEW: Import AsyncUrlSeeder
from .async_url_seeder import AsyncUrlSeeder
# Adaptive Crawler
from .adaptive_crawler import (
    AdaptiveCrawler,
    AdaptiveConfig,
    CrawlState,
    CrawlStrategy,
    StatisticalStrategy
)
# C4A Script Language Support
from .script import (
    compile as c4a_compile,
    validate as c4a_validate,
    compile_file as c4a_compile_file,
    CompilationResult,
    ValidationResult,
    ErrorDetail
)

# Browser Adapters
from .browser_adapter import (
    BrowserAdapter,
    PlaywrightAdapter,
    UndetectedAdapter
)

from .utils import (
    start_colab_display_server,
    setup_colab_environment,
    hooks_to_string
)
__all__ = [
    "AsyncLoggerBase",
    "AsyncLogger",
    "AsyncWebCrawler",
    "BrowserProfiler",
    "LLMConfig",
    "GeolocationConfig",
    # NEW: Add SeedingConfig and VirtualScrollConfig
    "SeedingConfig",
    "VirtualScrollConfig",
    # NEW: Add AsyncUrlSeeder
    "AsyncUrlSeeder",
    # Adaptive Crawler
    "AdaptiveCrawler",
    "AdaptiveConfig",
    "CrawlState",
    "CrawlStrategy",
    "StatisticalStrategy",
    "DeepCrawlStrategy",
    "BFSDeepCrawlStrategy",
    "BestFirstCrawlingStrategy",
    "DFSDeepCrawlStrategy",
    "FilterChain",
    "URLPatternFilter",
    "ContentTypeFilter",
    "DomainFilter",
    "FilterStats",
    "URLFilter",
    "SEOFilter",
    "KeywordRelevanceScorer",
    "URLScorer",
    "CompositeScorer",
    "DomainAuthorityScorer",
    "FreshnessScorer",
    "PathDepthScorer",
    "DeepCrawlDecorator",
    "CrawlResult",
    "CrawlerHub",
    "CacheMode",
    "MatchMode",
    "ContentScrapingStrategy",
    "WebScrapingStrategy",
    "LXMLWebScrapingStrategy",
    "BrowserConfig",
    "CrawlerRunConfig",
    "HTTPCrawlerConfig",
    "ExtractionStrategy",
    "LLMExtractionStrategy",
    "CosineStrategy",
    "JsonCssExtractionStrategy",
    "JsonXPathExtractionStrategy",
    "JsonLxmlExtractionStrategy",
    "RegexExtractionStrategy",
    "ChunkingStrategy",
    "RegexChunking",
    "DefaultMarkdownGenerator",
    "TableExtractionStrategy",
    "DefaultTableExtraction",
    "NoTableExtraction",
    "RelevantContentFilter",
    "PruningContentFilter",
    "BM25ContentFilter",
    "LLMContentFilter",
    "BaseDispatcher",
    "MemoryAdaptiveDispatcher",
    "SemaphoreDispatcher",
    "RateLimiter",
    "CrawlerMonitor",
    "LinkPreview",
    "DisplayMode",
    "MarkdownGenerationResult",
    "Crawl4aiDockerClient",
    "ProxyRotationStrategy",
    "RoundRobinProxyStrategy",
    "ProxyConfig",
    "start_colab_display_server",
    "setup_colab_environment",
    "hooks_to_string",
    # C4A Script additions
    "c4a_compile",
    "c4a_validate",
    "c4a_compile_file",
    "CompilationResult",
    "ValidationResult",
    "ErrorDetail",
    # Browser Adapters
    "BrowserAdapter",
    "PlaywrightAdapter",
    "UndetectedAdapter",
    "LinkPreviewConfig"
]

def is_sync_version_installed():
    try:
        import selenium  # noqa
        return True
    except ImportError:
        return False


if is_sync_version_installed():
    try:
        from .web_crawler import WebCrawler
        __all__.append("WebCrawler")
    except ImportError:
        print("Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies.")
else:
    WebCrawler = None
    # import warnings
    # print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")

# def is_sync_version_installed():
#     try:
#         import selenium  # noqa

#         return True
#     except ImportError:
#         return False


# if is_sync_version_installed():
#     try:
#         from .web_crawler import WebCrawler

#         __all__.append("WebCrawler")
#     except ImportError:
#         print(
#             "Warning: Failed to import WebCrawler even though selenium is installed. This might be due to other missing dependencies."
#         )
# else:
#     WebCrawler = None
#     # import warnings
#     # print("Warning: Synchronous WebCrawler is not available. Install crawl4ai[sync] for synchronous support. However, please note that the synchronous version will be deprecated soon.")

# Disable all Pydantic warnings
warnings.filterwarnings("ignore", module="pydantic")
# pydantic_warnings.filter_warnings()
@@ -1,2 +1,8 @@

# crawl4ai/__version__.py

# This is the version that will be used for stable releases
__version__ = "0.7.6"

# For nightly builds, this gets set during the build process
__nightly_version__ = None

1847 crawl4ai/adaptive_crawler copy.py Normal file
File diff suppressed because it is too large
1903 crawl4ai/adaptive_crawler.py Normal file
File diff suppressed because it is too large
File diff suppressed because it is too large
2450 crawl4ai/async_crawler_strategy.back.py Normal file
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,27 +1,25 @@

import os
from pathlib import Path
import aiosqlite
import asyncio
from typing import Optional, Dict
from contextlib import asynccontextmanager
import logging
import xxhash
import json
from .models import CrawlResult, MarkdownGenerationResult, StringCompatibleMarkdown
import aiofiles
from .config import NEED_MIGRATION
from .version_manager import VersionManager
from .async_logger import AsyncLogger

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

from .utils import ensure_content_dirs, generate_content_hash
from .utils import get_error_context, create_box_message

base_directory = DB_PATH = os.path.join(
    os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
)
os.makedirs(DB_PATH, exist_ok=True)
DB_PATH = os.path.join(base_directory, "crawl4ai.db")


class AsyncDatabaseManager:
    def __init__(self, pool_size: int = 10, max_retries: int = 3):
        self.db_path = DB_PATH
@@ -32,28 +30,27 @@ class AsyncDatabaseManager:
        self.pool_lock = asyncio.Lock()
        self.init_lock = asyncio.Lock()
        self.connection_semaphore = asyncio.Semaphore(pool_size)
        self._initialized = False
        self.version_manager = VersionManager()
        self.logger = AsyncLogger(
            log_file=os.path.join(base_directory, ".crawl4ai", "crawler_db.log"),
            verbose=False,
            tag_width=10,
        )

    async def initialize(self):
        """Initialize the database and connection pool"""
        try:
            self.logger.info("Initializing database", tag="INIT")
            # Ensure the database file exists
            os.makedirs(os.path.dirname(self.db_path), exist_ok=True)

            # Check if version update is needed
            needs_update = self.version_manager.needs_update()

            # Always ensure base table exists
            await self.ainit_db()

            # Verify the table exists
            async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
                async with db.execute(
@@ -62,33 +59,37 @@ class AsyncDatabaseManager:
                    result = await cursor.fetchone()
                    if not result:
                        raise Exception("crawled_data table was not created")

            # If version changed or fresh install, run updates
            if needs_update:
                self.logger.info("New version detected, running updates", tag="INIT")
                await self.update_db_schema()
                from .migrations import (
                    run_migration,
                )  # Import here to avoid circular imports

                await run_migration()
                self.version_manager.update_version()  # Update stored version after successful migration
                self.logger.success(
                    "Version update completed successfully", tag="COMPLETE"
                )
            else:
                self.logger.success(
                    "Database initialization completed successfully", tag="COMPLETE"
                )

        except Exception as e:
            self.logger.error(
                message="Database initialization error: {error}",
                tag="ERROR",
                params={"error": str(e)},
            )
            self.logger.info(
                message="Database will be initialized on first use", tag="INIT"
            )

            raise

    async def cleanup(self):
        """Cleanup connections when shutting down"""
        async with self.pool_lock:
@@ -107,6 +108,7 @@ class AsyncDatabaseManager:
            self._initialized = True
        except Exception as e:
            import sys

            error_context = get_error_context(sys.exc_info())
            self.logger.error(
                message="Database initialization failed:\n{error}\n\nContext:\n{context}\n\nTraceback:\n{traceback}",
@@ -115,41 +117,52 @@ class AsyncDatabaseManager:
                params={
                    "error": str(e),
                    "context": error_context["code_context"],
                    "traceback": error_context["full_traceback"],
                },
            )
            raise

        await self.connection_semaphore.acquire()
        task_id = id(asyncio.current_task())

        try:
            async with self.pool_lock:
                if task_id not in self.connection_pool:
                    try:
                        conn = await aiosqlite.connect(self.db_path, timeout=30.0)
                        await conn.execute("PRAGMA journal_mode = WAL")
                        await conn.execute("PRAGMA busy_timeout = 5000")

                        # Verify database structure
                        async with conn.execute(
                            "PRAGMA table_info(crawled_data)"
                        ) as cursor:
                            columns = await cursor.fetchall()
                            column_names = [col[1] for col in columns]
                            expected_columns = {
                                "url",
                                "html",
                                "cleaned_html",
                                "markdown",
                                "extracted_content",
                                "success",
                                "media",
                                "links",
                                "metadata",
                                "screenshot",
                                "response_headers",
                                "downloaded_files",
                            }
                            missing_columns = expected_columns - set(column_names)
                            if missing_columns:
                                raise ValueError(
                                    f"Database missing columns: {missing_columns}"
                                )

                        self.connection_pool[task_id] = conn
                    except Exception as e:
                        import sys

                        error_context = get_error_context(sys.exc_info())
                        error_message = (
                            f"Unexpected error in db get_connection at line {error_context['line_no']} "
@@ -158,7 +171,10 @@ class AsyncDatabaseManager:
                            f"Code context:\n{error_context['code_context']}"
                        )
                        self.logger.error(
                            message="{error}",
                            tag="ERROR",
                            params={"error": str(error_message)},
                            boxes=["error"],
                        )

                        raise
@@ -167,6 +183,7 @@ class AsyncDatabaseManager:

        except Exception as e:
            import sys

            error_context = get_error_context(sys.exc_info())
            error_message = (
                f"Unexpected error in db get_connection at line {error_context['line_no']} "
@@ -175,7 +192,10 @@ class AsyncDatabaseManager:
                f"Code context:\n{error_context['code_context']}"
            )
            self.logger.error(
                message="{error}",
                tag="ERROR",
                params={"error": str(error_message)},
                boxes=["error"],
            )
            raise
        finally:
@@ -185,7 +205,6 @@ class AsyncDatabaseManager:
                    del self.connection_pool[task_id]
                    self.connection_semaphore.release()

    async def execute_with_retry(self, operation, *args):
        """Execute database operations with retry logic"""
        for attempt in range(self.max_retries):
@@ -200,18 +219,16 @@ class AsyncDatabaseManager:
                        message="Operation failed after {retries} attempts: {error}",
                        tag="ERROR",
                        force_verbose=True,
                        params={"retries": self.max_retries, "error": str(e)},
                    )
                    raise
                await asyncio.sleep(1 * (attempt + 1))  # Linear backoff between attempts
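
    # --- Illustrative usage sketch (not part of the diff): callers pass an async
    # operation that (presumably) receives a pooled connection; failures are retried
    # up to max_retries times with a linearly growing sleep between attempts.
    #
    #   async def _op(db):
    #       await db.execute("DELETE FROM crawled_data WHERE url = ?", (url,))
    #   await async_db_manager.execute_with_retry(_op)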

    async def ainit_db(self):
        """Initialize database schema"""
        async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
            await db.execute(
                """
                CREATE TABLE IF NOT EXISTS crawled_data (
                    url TEXT PRIMARY KEY,
                    html TEXT,
@@ -226,21 +243,27 @@ class AsyncDatabaseManager:
                    response_headers TEXT DEFAULT "{}",
                    downloaded_files TEXT DEFAULT "{}" -- New column added
                )
                """
            )
            await db.commit()

    async def update_db_schema(self):
        """Update database schema if needed"""
        async with aiosqlite.connect(self.db_path, timeout=30.0) as db:
            cursor = await db.execute("PRAGMA table_info(crawled_data)")
            columns = await cursor.fetchall()
            column_names = [column[1] for column in columns]

            # List of new columns to add
            new_columns = [
                "media",
                "links",
                "metadata",
                "screenshot",
                "response_headers",
                "downloaded_files",
            ]

            for column in new_columns:
                if column not in column_names:
                    await self.aalter_db_add_column(column, db)
@@ -248,75 +271,100 @@ class AsyncDatabaseManager:

    async def aalter_db_add_column(self, new_column: str, db):
        """Add a new column to the database"""
        if new_column == "response_headers":
            await db.execute(
                f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT "{{}}"'
            )
        else:
            await db.execute(
                f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""'
            )
        self.logger.info(
            message="Added column '{column}' to the database",
            tag="INIT",
            params={"column": new_column},
        )

    async def aget_cached_url(self, url: str) -> Optional[CrawlResult]:
        """Retrieve cached URL data as a CrawlResult"""

        async def _get(db):
            async with db.execute(
                "SELECT * FROM crawled_data WHERE url = ?", (url,)
            ) as cursor:
                row = await cursor.fetchone()
                if not row:
                    return None

                # Get column names
                columns = [description[0] for description in cursor.description]
                # Create a dict from the row data
                row_dict = dict(zip(columns, row))

                # Load content from files using the stored hashes
                content_fields = {
                    "html": row_dict["html"],
                    "cleaned_html": row_dict["cleaned_html"],
                    "markdown": row_dict["markdown"],
                    "extracted_content": row_dict["extracted_content"],
                    "screenshot": row_dict["screenshot"],
                    "screenshots": row_dict["screenshot"],
                }

                for field, hash_value in content_fields.items():
                    if hash_value:
                        content = await self._load_content(
                            hash_value,
                            field.split("_")[0],  # Get content type from field name
                        )
                        row_dict[field] = content or ""
                    else:
                        row_dict[field] = ""

                # Parse JSON fields
                json_fields = [
                    "media",
                    "links",
                    "metadata",
                    "response_headers",
                    "markdown",
                ]
                for field in json_fields:
                    try:
                        row_dict[field] = (
                            json.loads(row_dict[field]) if row_dict[field] else {}
                        )
                    except json.JSONDecodeError:
                        # Very UGLY, never mention it to me please
                        if field == "markdown" and isinstance(row_dict[field], str):
                            row_dict[field] = MarkdownGenerationResult(
                                raw_markdown=row_dict[field] or "",
                                markdown_with_citations="",
                                references_markdown="",
                                fit_markdown="",
                                fit_html="",
                            )
                        else:
                            row_dict[field] = {}

                if isinstance(row_dict["markdown"], Dict):
                    if row_dict["markdown"].get("raw_markdown"):
                        row_dict["markdown"] = row_dict["markdown"]["raw_markdown"]

                # Parse downloaded_files
                try:
                    row_dict["downloaded_files"] = (
                        json.loads(row_dict["downloaded_files"])
                        if row_dict["downloaded_files"]
                        else []
                    )
                except json.JSONDecodeError:
                    row_dict["downloaded_files"] = []

                # Remove any fields not in the CrawlResult model
                valid_fields = CrawlResult.__annotations__.keys()
                filtered_dict = {k: v for k, v in row_dict.items() if k in valid_fields}
                filtered_dict["markdown"] = row_dict["markdown"]
                return CrawlResult(**filtered_dict)

        try:
@@ -326,7 +374,7 @@ class AsyncDatabaseManager:
                message="Error retrieving cached URL: {error}",
                tag="ERROR",
                force_verbose=True,
                params={"error": str(e)},
            )
            return None

@@ -334,37 +382,52 @@ class AsyncDatabaseManager:
        """Cache CrawlResult data"""
        # Store content files and get hashes
        content_map = {
            "html": (result.html, "html"),
            "cleaned_html": (result.cleaned_html or "", "cleaned"),
            "markdown": None,
            "extracted_content": (result.extracted_content or "", "extracted"),
            "screenshot": (result.screenshot or "", "screenshots"),
        }

        try:
            if isinstance(result.markdown, StringCompatibleMarkdown):
                content_map["markdown"] = (
                    result.markdown,
                    "markdown",
                )
            elif isinstance(result.markdown, MarkdownGenerationResult):
                content_map["markdown"] = (
                    result.markdown.model_dump_json(),
                    "markdown",
                )
            elif isinstance(result.markdown, str):
                markdown_result = MarkdownGenerationResult(raw_markdown=result.markdown)
                content_map["markdown"] = (
                    markdown_result.model_dump_json(),
                    "markdown",
                )
            else:
                content_map["markdown"] = (
                    MarkdownGenerationResult().model_dump_json(),
                    "markdown",
                )
        except Exception as e:
            self.logger.warning(
                message=f"Error processing markdown content: {str(e)}", tag="WARNING"
            )
            # Fall back to an empty markdown result
            content_map["markdown"] = (
                MarkdownGenerationResult().model_dump_json(),
                "markdown",
            )

        content_hashes = {}
        for field, (content, content_type) in content_map.items():
            content_hashes[field] = await self._store_content(content, content_type)

        async def _cache(db):
            await db.execute(
                """
                INSERT INTO crawled_data (
                    url, html, cleaned_html, markdown,
                    extracted_content, success, media, links, metadata,
@@ -383,20 +446,22 @@ class AsyncDatabaseManager:
                    screenshot = excluded.screenshot,
                    response_headers = excluded.response_headers,
                    downloaded_files = excluded.downloaded_files
                """,
                (
                    result.url,
                    content_hashes["html"],
                    content_hashes["cleaned_html"],
                    content_hashes["markdown"],
                    content_hashes["extracted_content"],
                    result.success,
                    json.dumps(result.media),
                    json.dumps(result.links),
                    json.dumps(result.metadata or {}),
                    content_hashes["screenshot"],
                    json.dumps(result.response_headers or {}),
                    json.dumps(result.downloaded_files or []),
                ),
            )

        try:
            await self.execute_with_retry(_cache)
@@ -405,14 +470,14 @@ class AsyncDatabaseManager:
                message="Error caching URL: {error}",
                tag="ERROR",
                force_verbose=True,
                params={"error": str(e)},
            )

    async def aget_total_count(self) -> int:
        """Get the total number of cached URLs"""

        async def _count(db):
            async with db.execute("SELECT COUNT(*) FROM crawled_data") as cursor:
                result = await cursor.fetchone()
                return result[0] if result else 0

@@ -423,14 +488,15 @@ class AsyncDatabaseManager:
                message="Error getting total count: {error}",
                tag="ERROR",
                force_verbose=True,
                params={"error": str(e)},
            )
            return 0

    async def aclear_db(self):
        """Clear all data from the database"""

        async def _clear(db):
            await db.execute("DELETE FROM crawled_data")

        try:
            await self.execute_with_retry(_clear)
@@ -439,13 +505,14 @@ class AsyncDatabaseManager:
                message="Error clearing database: {error}",
                tag="ERROR",
                force_verbose=True,
                params={"error": str(e)},
            )

    async def aflush_db(self):
        """Drop the entire table"""

        async def _flush(db):
            await db.execute("DROP TABLE IF EXISTS crawled_data")

        try:
            await self.execute_with_retry(_flush)
@@ -454,42 +521,44 @@ class AsyncDatabaseManager:
                message="Error flushing database: {error}",
                tag="ERROR",
                force_verbose=True,
                params={"error": str(e)},
            )

    async def _store_content(self, content: str, content_type: str) -> str:
        """Store content in the filesystem and return its hash"""
        if not content:
            return ""

        content_hash = generate_content_hash(content)
        file_path = os.path.join(self.content_paths[content_type], content_hash)

        # Only write if the file doesn't exist yet
        if not os.path.exists(file_path):
            async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
                await f.write(content)

        return content_hash

    async def _load_content(
        self, content_hash: str, content_type: str
    ) -> Optional[str]:
        """Load content from the filesystem by hash"""
        if not content_hash:
            return None

        file_path = os.path.join(self.content_paths[content_type], content_hash)
        try:
            async with aiofiles.open(file_path, "r", encoding="utf-8") as f:
                return await f.read()
        except Exception:  # avoid a bare except; any read error falls through to None
            self.logger.error(
                message="Failed to load content: {file_path}",
                tag="ERROR",
                force_verbose=True,
                params={"file_path": file_path},
            )
            return None

# Create a singleton instance
async_db_manager = AsyncDatabaseManager()
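
# --- Illustrative sketch (not part of the diff): page bodies are stored
# content-addressed on disk; the SQLite row keeps only the hash, so identical
# content is written once and can be looked up by hash on read.
#
#   h = await async_db_manager._store_content("<html>...</html>", "html")
#   html = await async_db_manager._load_content(h, "html")  # round-trips the content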

771 crawl4ai/async_dispatcher.py Normal file
@@ -0,0 +1,771 @@

from typing import Dict, Optional, List, Tuple, Union
from .async_configs import CrawlerRunConfig
from .models import (
    CrawlResult,
    CrawlerTaskResult,
    CrawlStatus,
    DomainState,
)

from .components.crawler_monitor import CrawlerMonitor

from .types import AsyncWebCrawler

from collections.abc import AsyncGenerator

import time
import psutil
import asyncio
import uuid

from urllib.parse import urlparse
import random
from abc import ABC, abstractmethod

from .utils import get_true_memory_usage_percent


class RateLimiter:
    def __init__(
        self,
        base_delay: Tuple[float, float] = (1.0, 3.0),
        max_delay: float = 60.0,
        max_retries: int = 3,
        rate_limit_codes: List[int] = None,
    ):
        self.base_delay = base_delay
        self.max_delay = max_delay
        self.max_retries = max_retries
        self.rate_limit_codes = rate_limit_codes or [429, 503]
        self.domains: Dict[str, DomainState] = {}

    def get_domain(self, url: str) -> str:
        return urlparse(url).netloc

    async def wait_if_needed(self, url: str) -> None:
        domain = self.get_domain(url)
        state = self.domains.get(domain)

        if not state:
            self.domains[domain] = DomainState()
            state = self.domains[domain]

        now = time.time()
        if state.last_request_time:
            wait_time = max(0, state.current_delay - (now - state.last_request_time))
            if wait_time > 0:
                await asyncio.sleep(wait_time)

        # Random delay within the base range if no current delay
        if state.current_delay == 0:
            state.current_delay = random.uniform(*self.base_delay)

        state.last_request_time = time.time()

    def update_delay(self, url: str, status_code: int) -> bool:
        domain = self.get_domain(url)
        state = self.domains[domain]

        if status_code in self.rate_limit_codes:
            state.fail_count += 1
            if state.fail_count > self.max_retries:
                return False

            # Exponential backoff with random jitter
            state.current_delay = min(
                state.current_delay * 2 * random.uniform(0.75, 1.25), self.max_delay
            )
        else:
            # Gradually reduce the delay on success
            state.current_delay = max(
                random.uniform(*self.base_delay), state.current_delay * 0.75
            )
            state.fail_count = 0

        return True
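
# --- Illustrative usage sketch (not part of the diff): RateLimiter backs off on
# 429/503 and decays the per-domain delay on success. Assumes an async context.
#
#   limiter = RateLimiter(base_delay=(1.0, 3.0), max_delay=60.0, max_retries=3)
#   await limiter.wait_if_needed("https://example.com/page")
#   keep_going = limiter.update_delay("https://example.com/page", 429)
#   # current_delay roughly doubles (with jitter); False once fail_count exceeds max_retries.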


class BaseDispatcher(ABC):
    def __init__(
        self,
        rate_limiter: Optional[RateLimiter] = None,
        monitor: Optional[CrawlerMonitor] = None,
    ):
        self.crawler = None
        self._domain_last_hit: Dict[str, float] = {}
        self.concurrent_sessions = 0
        self.rate_limiter = rate_limiter
        self.monitor = monitor

    def select_config(self, url: str, configs: Union[CrawlerRunConfig, List[CrawlerRunConfig]]) -> Optional[CrawlerRunConfig]:
        """Select the appropriate config for a given URL.

        Args:
            url: The URL to match against
            configs: Single config or list of configs to choose from

        Returns:
            The matching config, or None if no match is found
        """
        # Single config - return as is
        if isinstance(configs, CrawlerRunConfig):
            return configs

        # Empty list - return None
        if not configs:
            return None

        # Return the first matching config
        for config in configs:
            if config.is_match(url):
                return config

        # No match found - return None to indicate the URL should be skipped
        return None
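
    # --- Illustrative sketch (not part of the diff): first-match config selection,
    # assuming CrawlerRunConfig.url_matcher / is_match behave as documented above.
    #
    #   configs = [
    #       CrawlerRunConfig(url_matcher=["*docs*"]),  # matched first for docs URLs
    #       CrawlerRunConfig(),                        # catch-all fallback
    #   ]
    #   chosen = dispatcher.select_config("https://site.com/docs/intro", configs)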

    @abstractmethod
    async def crawl_url(
        self,
        url: str,
        config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
        task_id: str,
        monitor: Optional[CrawlerMonitor] = None,
    ) -> CrawlerTaskResult:
        pass

    @abstractmethod
    async def run_urls(
        self,
        urls: List[str],
        crawler: AsyncWebCrawler,  # noqa: F821
        config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
        monitor: Optional[CrawlerMonitor] = None,
    ) -> List[CrawlerTaskResult]:
        pass


class MemoryAdaptiveDispatcher(BaseDispatcher):
    def __init__(
        self,
        memory_threshold_percent: float = 90.0,
        critical_threshold_percent: float = 95.0,  # New critical threshold
        recovery_threshold_percent: float = 85.0,  # New recovery threshold
        check_interval: float = 1.0,
        max_session_permit: int = 20,
        fairness_timeout: float = 600.0,  # 10 minutes before prioritizing long-waiting URLs
        memory_wait_timeout: Optional[float] = 600.0,
        rate_limiter: Optional[RateLimiter] = None,
        monitor: Optional[CrawlerMonitor] = None,
    ):
        super().__init__(rate_limiter, monitor)
        self.memory_threshold_percent = memory_threshold_percent
        self.critical_threshold_percent = critical_threshold_percent
        self.recovery_threshold_percent = recovery_threshold_percent
        self.check_interval = check_interval
        self.max_session_permit = max_session_permit
        self.fairness_timeout = fairness_timeout
        self.memory_wait_timeout = memory_wait_timeout
        self.result_queue = asyncio.Queue()
        self.task_queue = asyncio.PriorityQueue()  # Priority queue for better management
        self.memory_pressure_mode = False  # Flag set when we are in memory pressure mode
        self.current_memory_percent = 0.0  # Track current memory usage
        self._high_memory_start_time: Optional[float] = None

async def _memory_monitor_task(self):
|
||||
"""Background task to continuously monitor memory usage and update state"""
|
||||
while True:
|
||||
self.current_memory_percent = get_true_memory_usage_percent()
|
||||
|
||||
# Enter memory pressure mode if we cross the threshold
|
||||
if self.current_memory_percent >= self.memory_threshold_percent:
|
||||
if not self.memory_pressure_mode:
|
||||
self.memory_pressure_mode = True
|
||||
self._high_memory_start_time = time.time()
|
||||
if self.monitor:
|
||||
self.monitor.update_memory_status("PRESSURE")
|
||||
else:
|
||||
if self._high_memory_start_time is None:
|
||||
self._high_memory_start_time = time.time()
|
||||
if (
|
||||
self.memory_wait_timeout is not None
|
||||
and self._high_memory_start_time is not None
|
||||
and time.time() - self._high_memory_start_time >= self.memory_wait_timeout
|
||||
):
|
||||
raise MemoryError(
|
||||
"Memory usage exceeded threshold for"
|
||||
f" {self.memory_wait_timeout} seconds"
|
||||
)
|
||||
|
||||
# Exit memory pressure mode if we go below recovery threshold
|
||||
elif self.memory_pressure_mode and self.current_memory_percent <= self.recovery_threshold_percent:
|
||||
self.memory_pressure_mode = False
|
||||
self._high_memory_start_time = None
|
||||
if self.monitor:
|
||||
self.monitor.update_memory_status("NORMAL")
|
||||
elif self.current_memory_percent < self.memory_threshold_percent:
|
||||
self._high_memory_start_time = None
|
||||
|
||||
# In critical mode, we might need to take more drastic action
|
||||
if self.current_memory_percent >= self.critical_threshold_percent:
|
||||
if self.monitor:
|
||||
self.monitor.update_memory_status("CRITICAL")
|
||||
# We could implement additional memory-saving measures here
|
||||
|
||||
await asyncio.sleep(self.check_interval)
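
The two thresholds form a hysteresis band: pressure mode is entered at memory_threshold_percent but only cleared once usage falls back to recovery_threshold_percent, which keeps the dispatcher from flapping around a single cutoff. A standalone sketch of that state machine (illustrative, not from the source):

    def next_pressure_state(in_pressure: bool, pct: float,
                            enter: float = 90.0, recover: float = 85.0) -> bool:
        if pct >= enter:
            return True            # at or above the threshold: pressure mode
        if in_pressure and pct > recover:
            return True            # inside the band: keep the previous state
        return False               # at or below recovery: back to normal

    assert next_pressure_state(False, 92.0)        # enters pressure mode
    assert next_pressure_state(True, 88.0)         # 85 < 88 < 90: stays on
    assert not next_pressure_state(True, 84.0)     # recovered
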

    def _get_priority_score(self, wait_time: float, retry_count: int) -> float:
        """Calculate priority score (lower is higher priority).

        - URLs waiting longer than fairness_timeout get higher priority
        - More retry attempts decrease priority
        """
        if wait_time > self.fairness_timeout:
            # High priority for long-waiting URLs
            return -wait_time
        # Standard priority based on retries
        return retry_count
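
Worked examples of the scoring (lower values sort first in the asyncio.PriorityQueue):

    d = MemoryAdaptiveDispatcher(fairness_timeout=600.0)
    assert d._get_priority_score(700.0, 2) == -700.0  # starving URL jumps the queue
    assert d._get_priority_score(30.0, 0) == 0        # fresh task
    assert d._get_priority_score(30.0, 3) == 3        # retried tasks sort later
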

    async def crawl_url(
        self,
        url: str,
        config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
        task_id: str,
        retry_count: int = 0,
    ) -> CrawlerTaskResult:
        start_time = time.time()
        error_message = ""
        memory_usage = peak_memory = 0.0

        # Select appropriate config for this URL
        selected_config = self.select_config(url, config)

        # If no config matches, return failed result
        if selected_config is None:
            error_message = f"No matching configuration found for URL: {url}"
            if self.monitor:
                self.monitor.update_task(
                    task_id,
                    status=CrawlStatus.FAILED,
                    error_message=error_message
                )

            return CrawlerTaskResult(
                task_id=task_id,
                url=url,
                result=CrawlResult(
                    url=url,
                    html="",
                    metadata={"status": "no_config_match"},
                    success=False,
                    error_message=error_message
                ),
                memory_usage=0,
                peak_memory=0,
                start_time=start_time,
                end_time=time.time(),
                error_message=error_message,
                retry_count=retry_count
            )

        # Get starting memory for accurate measurement
        process = psutil.Process()
        start_memory = process.memory_info().rss / (1024 * 1024)

        try:
            if self.monitor:
                self.monitor.update_task(
                    task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=start_time,
                    retry_count=retry_count
                )

            self.concurrent_sessions += 1

            if self.rate_limiter:
                await self.rate_limiter.wait_if_needed(url)

            # Check if we're in critical memory state
            if self.current_memory_percent >= self.critical_threshold_percent:
                # Requeue this task with increased priority and retry count
                enqueue_time = time.time()
                priority = self._get_priority_score(enqueue_time - start_time, retry_count + 1)
                await self.task_queue.put((priority, (url, task_id, retry_count + 1, enqueue_time)))

                # Update monitoring
                if self.monitor:
                    self.monitor.update_task(
                        task_id,
                        status=CrawlStatus.QUEUED,
                        error_message="Requeued due to critical memory pressure"
                    )

                # Return placeholder result with requeued status
                return CrawlerTaskResult(
                    task_id=task_id,
                    url=url,
                    result=CrawlResult(
                        url=url, html="", metadata={"status": "requeued"},
                        success=False, error_message="Requeued due to critical memory pressure"
                    ),
                    memory_usage=0,
                    peak_memory=0,
                    start_time=start_time,
                    end_time=time.time(),
                    error_message="Requeued due to critical memory pressure",
                    retry_count=retry_count + 1
                )

            # Execute the crawl with selected config
            result = await self.crawler.arun(url, config=selected_config, session_id=task_id)

            # Measure memory usage
            end_memory = process.memory_info().rss / (1024 * 1024)
            memory_usage = peak_memory = end_memory - start_memory

            # Handle rate limiting
            if self.rate_limiter and result.status_code:
                if not self.rate_limiter.update_delay(url, result.status_code):
                    error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
                    if self.monitor:
                        self.monitor.update_task(task_id, status=CrawlStatus.FAILED)

            # Update status based on result
            if not result.success:
                error_message = result.error_message
                if self.monitor:
                    self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
            elif self.monitor:
                self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)

        except Exception as e:
            error_message = str(e)
            if self.monitor:
                self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
            result = CrawlResult(
                url=url, html="", metadata={}, success=False, error_message=str(e)
            )

        finally:
            end_time = time.time()
            if self.monitor:
                self.monitor.update_task(
                    task_id,
                    end_time=end_time,
                    memory_usage=memory_usage,
                    peak_memory=peak_memory,
                    error_message=error_message,
                    retry_count=retry_count
                )
            self.concurrent_sessions -= 1

        return CrawlerTaskResult(
            task_id=task_id,
            url=url,
            result=result,
            memory_usage=memory_usage,
            peak_memory=peak_memory,
            start_time=start_time,
            end_time=end_time,
            error_message=error_message,
            retry_count=retry_count
        )

    async def run_urls(
        self,
        urls: List[str],
        crawler: AsyncWebCrawler,
        config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
    ) -> List[CrawlerTaskResult]:
        self.crawler = crawler

        # Start the memory monitor task
        memory_monitor = asyncio.create_task(self._memory_monitor_task())

        if self.monitor:
            self.monitor.start()

        results = []

        try:
            # Initialize task queue
            for url in urls:
                task_id = str(uuid.uuid4())
                if self.monitor:
                    self.monitor.add_task(task_id, url)
                # Add to queue with initial priority 0, retry count 0, and current time
                await self.task_queue.put((0, (url, task_id, 0, time.time())))

            active_tasks = []

            # Process until both queues are empty
            while not self.task_queue.empty() or active_tasks:
                if memory_monitor.done():
                    exc = memory_monitor.exception()
                    if exc:
                        for t in active_tasks:
                            t.cancel()
                        raise exc

                # If memory pressure is low, greedily fill all available slots
                if not self.memory_pressure_mode:
                    slots = self.max_session_permit - len(active_tasks)
                    while slots > 0:
                        try:
                            # Use get_nowait() to immediately get tasks without blocking
                            priority, (url, task_id, retry_count, enqueue_time) = self.task_queue.get_nowait()

                            # Create and start the task
                            task = asyncio.create_task(
                                self.crawl_url(url, config, task_id, retry_count)
                            )
                            active_tasks.append(task)

                            # Update waiting time in monitor
                            if self.monitor:
                                wait_time = time.time() - enqueue_time
                                self.monitor.update_task(
                                    task_id,
                                    wait_time=wait_time,
                                    status=CrawlStatus.IN_PROGRESS
                                )

                            slots -= 1

                        except asyncio.QueueEmpty:
                            # No more tasks in queue, exit the loop
                            break

                # Wait for completion even if queue is starved
                if active_tasks:
                    done, pending = await asyncio.wait(
                        active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
                    )

                    # Process completed tasks
                    for completed_task in done:
                        result = await completed_task
                        results.append(result)

                    # Update active tasks list
                    active_tasks = list(pending)
                else:
                    # If no active tasks but still waiting, sleep briefly
                    await asyncio.sleep(self.check_interval / 2)

                # Update priorities for waiting tasks if needed
                await self._update_queue_priorities()

        except Exception as e:
            if self.monitor:
                self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")

        finally:
            # Clean up
            memory_monitor.cancel()
            if self.monitor:
                self.monitor.stop()
        return results
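
A usage sketch for the batch path (assumes the usual crawl4ai entry points; the URLs and parameter values are illustrative):

    import asyncio
    from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

    async def demo_batch():
        async with AsyncWebCrawler() as crawler:
            dispatcher = MemoryAdaptiveDispatcher(max_session_permit=10)
            results = await dispatcher.run_urls(
                urls=["https://example.com", "https://example.org"],
                crawler=crawler,
                config=CrawlerRunConfig(),
            )
            for r in results:
                print(r.url, r.result.success, f"peak {r.peak_memory:.1f} MB")

    asyncio.run(demo_batch())
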

    async def _update_queue_priorities(self):
        """Periodically update priorities of items in the queue to prevent starvation"""
        # Skip if queue is empty
        if self.task_queue.empty():
            return

        # Use a drain-and-refill approach to update all priorities
        temp_items = []

        # Drain the queue (with a safety timeout to prevent blocking)
        try:
            drain_start = time.time()
            while not self.task_queue.empty() and time.time() - drain_start < 5.0:  # 5 second safety timeout
                try:
                    # Get item from queue with timeout
                    priority, (url, task_id, retry_count, enqueue_time) = await asyncio.wait_for(
                        self.task_queue.get(), timeout=0.1
                    )

                    # Calculate new priority based on current wait time
                    current_time = time.time()
                    wait_time = current_time - enqueue_time
                    new_priority = self._get_priority_score(wait_time, retry_count)

                    # Store with updated priority
                    temp_items.append((new_priority, (url, task_id, retry_count, enqueue_time)))

                    # Update monitoring stats for this task
                    if self.monitor and task_id in self.monitor.stats:
                        self.monitor.update_task(task_id, wait_time=wait_time)

                except asyncio.TimeoutError:
                    # Queue might be empty or very slow
                    break
        except Exception as e:
            # If anything goes wrong, make sure we refill the queue with what we've got
            if self.monitor:
                self.monitor.update_memory_status(f"QUEUE_ERROR: {str(e)}")

        # Calculate queue statistics
        if temp_items and self.monitor:
            total_queued = len(temp_items)
            wait_times = [item[1][3] for item in temp_items]
            highest_wait_time = time.time() - min(wait_times) if wait_times else 0
            avg_wait_time = sum(time.time() - t for t in wait_times) / len(wait_times) if wait_times else 0

            # Update queue statistics in monitor
            self.monitor.update_queue_statistics(
                total_queued=total_queued,
                highest_wait_time=highest_wait_time,
                avg_wait_time=avg_wait_time
            )

        # Sort by priority (lowest number = highest priority)
        temp_items.sort(key=lambda x: x[0])

        # Refill the queue with updated priorities
        for item in temp_items:
            await self.task_queue.put(item)
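
The drain-and-refill dance is needed because asyncio.PriorityQueue cannot re-key items in place; the same idea in isolation (illustrative sketch):

    async def rescore(queue: "asyncio.PriorityQueue", score) -> None:
        items = []
        while not queue.empty():
            _old_priority, payload = queue.get_nowait()
            items.append((score(payload), payload))    # recompute every key
        items.sort(key=lambda pair: pair[0])
        for item in items:
            await queue.put(item)
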

    async def run_urls_stream(
        self,
        urls: List[str],
        crawler: AsyncWebCrawler,
        config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
    ) -> AsyncGenerator[CrawlerTaskResult, None]:
        self.crawler = crawler

        # Start the memory monitor task
        memory_monitor = asyncio.create_task(self._memory_monitor_task())

        if self.monitor:
            self.monitor.start()

        try:
            # Initialize task queue
            for url in urls:
                task_id = str(uuid.uuid4())
                if self.monitor:
                    self.monitor.add_task(task_id, url)
                # Add to queue with initial priority 0, retry count 0, and current time
                await self.task_queue.put((0, (url, task_id, 0, time.time())))

            active_tasks = []
            completed_count = 0
            total_urls = len(urls)

            while completed_count < total_urls:
                if memory_monitor.done():
                    exc = memory_monitor.exception()
                    if exc:
                        for t in active_tasks:
                            t.cancel()
                        raise exc

                # If memory pressure is low, greedily fill all available slots
                if not self.memory_pressure_mode:
                    slots = self.max_session_permit - len(active_tasks)
                    while slots > 0:
                        try:
                            # Use get_nowait() to immediately get tasks without blocking
                            priority, (url, task_id, retry_count, enqueue_time) = self.task_queue.get_nowait()

                            # Create and start the task
                            task = asyncio.create_task(
                                self.crawl_url(url, config, task_id, retry_count)
                            )
                            active_tasks.append(task)

                            # Update waiting time in monitor
                            if self.monitor:
                                wait_time = time.time() - enqueue_time
                                self.monitor.update_task(
                                    task_id,
                                    wait_time=wait_time,
                                    status=CrawlStatus.IN_PROGRESS
                                )

                            slots -= 1

                        except asyncio.QueueEmpty:
                            # No more tasks in queue, exit the loop
                            break

                # Process completed tasks and yield results
                if active_tasks:
                    done, pending = await asyncio.wait(
                        active_tasks, timeout=0.1, return_when=asyncio.FIRST_COMPLETED
                    )

                    for completed_task in done:
                        result = await completed_task

                        # Only count as completed if it wasn't requeued
                        if "requeued" not in result.error_message:
                            completed_count += 1
                            yield result

                    # Update active tasks list
                    active_tasks = list(pending)
                else:
                    # If no active tasks but still waiting, sleep briefly
                    await asyncio.sleep(self.check_interval / 2)

                # Update priorities for waiting tasks if needed
                await self._update_queue_priorities()

        finally:
            # Clean up
            memory_monitor.cancel()
            if self.monitor:
                self.monitor.stop()
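
The streaming variant yields each CrawlerTaskResult as it finishes instead of collecting a list; a usage sketch (illustrative, same assumed entry points as above):

    async def demo_stream():
        async with AsyncWebCrawler() as crawler:
            dispatcher = MemoryAdaptiveDispatcher()
            async for task_result in dispatcher.run_urls_stream(
                urls=["https://example.com/a", "https://example.com/b"],
                crawler=crawler,
                config=CrawlerRunConfig(),
            ):
                print(task_result.url, task_result.result.success)
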


class SemaphoreDispatcher(BaseDispatcher):
    def __init__(
        self,
        semaphore_count: int = 5,
        max_session_permit: int = 20,
        rate_limiter: Optional[RateLimiter] = None,
        monitor: Optional[CrawlerMonitor] = None,
    ):
        super().__init__(rate_limiter, monitor)
        self.semaphore_count = semaphore_count
        self.max_session_permit = max_session_permit

    async def crawl_url(
        self,
        url: str,
        config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
        task_id: str,
        semaphore: asyncio.Semaphore = None,
    ) -> CrawlerTaskResult:
        start_time = time.time()
        error_message = ""
        memory_usage = peak_memory = 0.0

        # Select appropriate config for this URL
        selected_config = self.select_config(url, config)

        # If no config matches, return failed result
        if selected_config is None:
            error_message = f"No matching configuration found for URL: {url}"
            if self.monitor:
                self.monitor.update_task(
                    task_id,
                    status=CrawlStatus.FAILED,
                    error_message=error_message
                )

            return CrawlerTaskResult(
                task_id=task_id,
                url=url,
                result=CrawlResult(
                    url=url,
                    html="",
                    metadata={"status": "no_config_match"},
                    success=False,
                    error_message=error_message
                ),
                memory_usage=0,
                peak_memory=0,
                start_time=start_time,
                end_time=time.time(),
                error_message=error_message
            )

        try:
            if self.monitor:
                self.monitor.update_task(
                    task_id, status=CrawlStatus.IN_PROGRESS, start_time=start_time
                )

            if self.rate_limiter:
                await self.rate_limiter.wait_if_needed(url)

            async with semaphore:
                process = psutil.Process()
                start_memory = process.memory_info().rss / (1024 * 1024)
                result = await self.crawler.arun(url, config=selected_config, session_id=task_id)
                end_memory = process.memory_info().rss / (1024 * 1024)

                memory_usage = peak_memory = end_memory - start_memory

                if self.rate_limiter and result.status_code:
                    if not self.rate_limiter.update_delay(url, result.status_code):
                        error_message = f"Rate limit retry count exceeded for domain {urlparse(url).netloc}"
                        if self.monitor:
                            self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
                        return CrawlerTaskResult(
                            task_id=task_id,
                            url=url,
                            result=result,
                            memory_usage=memory_usage,
                            peak_memory=peak_memory,
                            start_time=start_time,
                            end_time=time.time(),
                            error_message=error_message,
                        )

                if not result.success:
                    error_message = result.error_message
                    if self.monitor:
                        self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
                elif self.monitor:
                    self.monitor.update_task(task_id, status=CrawlStatus.COMPLETED)

        except Exception as e:
            error_message = str(e)
            if self.monitor:
                self.monitor.update_task(task_id, status=CrawlStatus.FAILED)
            result = CrawlResult(
                url=url, html="", metadata={}, success=False, error_message=str(e)
            )

        finally:
            end_time = time.time()
            if self.monitor:
                self.monitor.update_task(
                    task_id,
                    end_time=end_time,
                    memory_usage=memory_usage,
                    peak_memory=peak_memory,
                    error_message=error_message,
                )

        return CrawlerTaskResult(
            task_id=task_id,
            url=url,
            result=result,
            memory_usage=memory_usage,
            peak_memory=peak_memory,
            start_time=start_time,
            end_time=end_time,
            error_message=error_message,
        )

    async def run_urls(
        self,
        crawler: AsyncWebCrawler,  # noqa: F821
        urls: List[str],
        config: Union[CrawlerRunConfig, List[CrawlerRunConfig]],
    ) -> List[CrawlerTaskResult]:
        self.crawler = crawler
        if self.monitor:
            self.monitor.start()

        try:
            semaphore = asyncio.Semaphore(self.semaphore_count)
            tasks = []

            for url in urls:
                task_id = str(uuid.uuid4())
                if self.monitor:
                    self.monitor.add_task(task_id, url)
                task = asyncio.create_task(
                    self.crawl_url(url, config, task_id, semaphore)
                )
                tasks.append(task)

            return await asyncio.gather(*tasks, return_exceptions=True)
        finally:
            if self.monitor:
                self.monitor.stop()
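
Note that gather(..., return_exceptions=True) means the returned list may mix CrawlerTaskResult objects with raised exceptions, so callers should check types. A usage sketch (illustrative):

    async def demo_semaphore():
        async with AsyncWebCrawler() as crawler:
            dispatcher = SemaphoreDispatcher(semaphore_count=3)
            results = await dispatcher.run_urls(
                crawler, ["https://example.com"], CrawlerRunConfig()
            )
            for r in results:
                if isinstance(r, Exception):
                    print("task raised:", r)
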

@@ -1,42 +1,113 @@
from abc import ABC, abstractmethod
from enum import Enum
-from typing import Optional, Dict, Any, Union
-from colorama import Fore, Back, Style, init
import time
+from typing import Optional, Dict, Any, List
import os
from datetime import datetime
from urllib.parse import unquote
+from rich.console import Console
+from rich.text import Text
+from .utils import create_box_message


class LogLevel(Enum):
+    DEFAULT = 0
    DEBUG = 1
    INFO = 2
    SUCCESS = 3
    WARNING = 4
    ERROR = 5
+    CRITICAL = 6
+    ALERT = 7
+    NOTICE = 8
+    EXCEPTION = 9
+    FATAL = 10

-class AsyncLogger:
+    def __str__(self):
+        return self.name.lower()


+class LogColor(str, Enum):
+    """Enum for log colors."""
+
+    DEBUG = "bright_black"
+    INFO = "cyan"
+    SUCCESS = "green"
+    WARNING = "yellow"
+    ERROR = "red"
+    CYAN = "cyan"
+    GREEN = "green"
+    YELLOW = "yellow"
+    MAGENTA = "magenta"
+    DIM_MAGENTA = "dim magenta"
+    RED = "red"
+
+    def __str__(self):
+        """Automatically convert rich color to string."""
+        return self.value
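
Since LogColor is a str-valued Enum whose __str__ returns the rich color name, values can be interpolated straight into rich markup:

    from rich.console import Console
    Console().print(f"[{LogColor.SUCCESS}]fetch ok[/{LogColor.SUCCESS}]")
    # str(LogColor.SUCCESS) == "green", so this renders "fetch ok" in green
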


+class AsyncLoggerBase(ABC):
+    @abstractmethod
+    def debug(self, message: str, tag: str = "DEBUG", **kwargs):
+        pass
+
+    @abstractmethod
+    def info(self, message: str, tag: str = "INFO", **kwargs):
+        pass
+
+    @abstractmethod
+    def success(self, message: str, tag: str = "SUCCESS", **kwargs):
+        pass
+
+    @abstractmethod
+    def warning(self, message: str, tag: str = "WARNING", **kwargs):
+        pass
+
+    @abstractmethod
+    def error(self, message: str, tag: str = "ERROR", **kwargs):
+        pass
+
+    @abstractmethod
+    def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
+        pass
+
+    @abstractmethod
+    def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
+        pass
+
+
+class AsyncLogger(AsyncLoggerBase):
    """
    Asynchronous logger with support for colored console output and file logging.
    Supports templated messages with colored components.
    """

    DEFAULT_ICONS = {
-        'INIT': '→',
-        'READY': '✓',
-        'FETCH': '↓',
-        'SCRAPE': '◆',
-        'EXTRACT': '■',
-        'COMPLETE': '●',
-        'ERROR': '×',
-        'DEBUG': '⋯',
-        'INFO': 'ℹ',
-        'WARNING': '⚠',
+        "INIT": "→",
+        "READY": "✓",
+        "FETCH": "↓",
+        "SCRAPE": "◆",
+        "EXTRACT": "■",
+        "COMPLETE": "●",
+        "ERROR": "×",
+        "DEBUG": "⋯",
+        "INFO": "ℹ",
+        "WARNING": "⚠",
+        "SUCCESS": "✔",
+        "CRITICAL": "‼",
+        "ALERT": "⚡",
+        "NOTICE": "ℹ",
+        "EXCEPTION": "❗",
+        "FATAL": "☠",
    }

    DEFAULT_COLORS = {
-        LogLevel.DEBUG: Fore.LIGHTBLACK_EX,
-        LogLevel.INFO: Fore.CYAN,
-        LogLevel.SUCCESS: Fore.GREEN,
-        LogLevel.WARNING: Fore.YELLOW,
-        LogLevel.ERROR: Fore.RED,
+        LogLevel.DEBUG: LogColor.DEBUG,
+        LogLevel.INFO: LogColor.INFO,
+        LogLevel.SUCCESS: LogColor.SUCCESS,
+        LogLevel.WARNING: LogColor.WARNING,
+        LogLevel.ERROR: LogColor.ERROR,
    }

    def __init__(
@@ -45,12 +116,12 @@ class AsyncLogger:
        log_level: LogLevel = LogLevel.DEBUG,
        tag_width: int = 10,
        icons: Optional[Dict[str, str]] = None,
-        colors: Optional[Dict[LogLevel, str]] = None,
-        verbose: bool = True
+        colors: Optional[Dict[LogLevel, LogColor]] = None,
+        verbose: bool = True,
    ):
        """
        Initialize the logger.

        Args:
            log_file: Optional file path for logging
            log_level: Minimum log level to display
@@ -59,14 +130,14 @@ class AsyncLogger:
            colors: Custom colors for different log levels
            verbose: Whether to output to console
        """
-        init()  # Initialize colorama
        self.log_file = log_file
        self.log_level = log_level
        self.tag_width = tag_width
        self.icons = icons or self.DEFAULT_ICONS
        self.colors = colors or self.DEFAULT_COLORS
        self.verbose = verbose

+        self.console = Console()
+
        # Create log file directory if needed
        if log_file:
            os.makedirs(os.path.dirname(os.path.abspath(log_file)), exist_ok=True)
@@ -77,19 +148,24 @@ class AsyncLogger:

    def _get_icon(self, tag: str) -> str:
        """Get the icon for a tag, defaulting to info icon if not found."""
-        return self.icons.get(tag, self.icons['INFO'])
+        return self.icons.get(tag, self.icons["INFO"])

    def _shorten(self, text, length, placeholder="..."):
        """Truncate text in the middle if longer than length, or pad if shorter."""
        if len(text) <= length:
            return text.ljust(length)  # Pad with spaces to reach desired length
        half = (length - len(placeholder)) // 2
        shortened = text[:half] + placeholder + text[-half:]
        return shortened.ljust(length)  # Also pad shortened text to consistent length
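
Middle truncation keeps both the start and tail of a long URL visible, while ljust keeps log columns aligned; for example:

    text = "https://example.com/very/long/path/to/page.html"
    half = (24 - len("...")) // 2                    # 10 chars from each end
    assert text[:half] + "..." + text[-half:] == "https://ex.../page.html"
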

    def _write_to_file(self, message: str):
        """Write a message to the log file if configured."""
        if self.log_file:
-            timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
-            with open(self.log_file, 'a', encoding='utf-8') as f:
-                # Strip ANSI color codes for file output
-                clean_message = message.replace(Fore.RESET, '').replace(Style.RESET_ALL, '')
-                for color in vars(Fore).values():
-                    if isinstance(color, str):
-                        clean_message = clean_message.replace(color, '')
-                f.write(f"[{timestamp}] {clean_message}\n")
+            text = Text.from_markup(message)
+            plain_text = text.plain
+            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
+            with open(self.log_file, "a", encoding="utf-8") as f:
+                f.write(f"[{timestamp}] {plain_text}\n")

    def _log(
        self,
@@ -97,54 +173,58 @@ class AsyncLogger:
        message: str,
        tag: str,
        params: Optional[Dict[str, Any]] = None,
-        colors: Optional[Dict[str, str]] = None,
-        base_color: Optional[str] = None,
-        **kwargs
+        colors: Optional[Dict[str, LogColor]] = None,
+        boxes: Optional[List[str]] = None,
+        base_color: Optional[LogColor] = None,
+        **kwargs,
    ):
        """
        Core logging method that handles message formatting and output.

        Args:
            level: Log level for this message
            message: Message template string
            tag: Tag for the message
            params: Parameters to format into the message
            colors: Color overrides for specific parameters
            boxes: Box overrides for specific parameters
            base_color: Base color for the entire message
        """
        if level.value < self.log_level.value:
            return

        # Format the message with parameters if provided
+        # avoid conflict with rich formatting
+        parsed_message = message.replace("[", "[[").replace("]", "]]")
        if params:
            try:
-                # First format the message with raw parameters
-                formatted_message = message.format(**params)
-
-                # Then apply colors if specified
-                if colors:
-                    for key, color in colors.items():
-                        # Find the formatted value in the message and wrap it with color
-                        if key in params:
-                            value_str = str(params[key])
-                            formatted_message = formatted_message.replace(
-                                value_str,
-                                f"{color}{value_str}{Style.RESET_ALL}"
-                            )
            except KeyError as e:
                formatted_message = f"LOGGING ERROR: Missing parameter {e} in message template"
                level = LogLevel.ERROR
+                # FIXME: If there are formatting strings in floating point format,
+                # this may result in colors and boxes not being applied properly.
+                # such as {value:.2f}, the value is 0.23333 format it to 0.23,
+                # but we replace("0.23333", "[color]0.23333[/color]")
+                formatted_message = parsed_message.format(**params)
+                for key, value in params.items():
+                    # value_str may discard `[` and `]`, so we need to replace it.
+                    value_str = str(value).replace("[", "[[").replace("]", "]]")
+                    # check if we need to apply color
+                    if colors and key in colors:
+                        color_str = f"[{colors[key]}]{value_str}[/{colors[key]}]"
+                        formatted_message = formatted_message.replace(value_str, color_str)
+                        value_str = color_str

+                    # check if we need to apply a box
+                    if boxes and key in boxes:
+                        formatted_message = formatted_message.replace(value_str,
+                                            create_box_message(value_str, type=str(level)))

        else:
-            formatted_message = message
+            formatted_message = parsed_message

        # Construct the full log line
-        color = base_color or self.colors[level]
-        log_line = f"{color}{self._format_tag(tag)} {self._get_icon(tag)} {formatted_message}{Style.RESET_ALL}"
+        color: LogColor = base_color or self.colors[level]
+        log_line = f"[{color}]{self._format_tag(tag)} {self._get_icon(tag)} {formatted_message} [/{color}]"

        # Output to console if verbose
        if self.verbose or kwargs.get("force_verbose", False):
-            print(log_line)
+            self.console.print(log_line)

        # Write to file if configured
        self._write_to_file(log_line)

@@ -164,6 +244,22 @@ class AsyncLogger:
    def warning(self, message: str, tag: str = "WARNING", **kwargs):
        """Log a warning message."""
        self._log(LogLevel.WARNING, message, tag, **kwargs)

+    def critical(self, message: str, tag: str = "CRITICAL", **kwargs):
+        """Log a critical message."""
+        self._log(LogLevel.ERROR, message, tag, **kwargs)
+
+    def exception(self, message: str, tag: str = "EXCEPTION", **kwargs):
+        """Log an exception message."""
+        self._log(LogLevel.ERROR, message, tag, **kwargs)
+
+    def fatal(self, message: str, tag: str = "FATAL", **kwargs):
+        """Log a fatal message."""
+        self._log(LogLevel.ERROR, message, tag, **kwargs)
+
+    def alert(self, message: str, tag: str = "ALERT", **kwargs):
+        """Log an alert message."""
+        self._log(LogLevel.ERROR, message, tag, **kwargs)
+
+    def notice(self, message: str, tag: str = "NOTICE", **kwargs):
+        """Log a notice message."""
+        self._log(LogLevel.INFO, message, tag, **kwargs)

    def error(self, message: str, tag: str = "ERROR", **kwargs):
        """Log an error message."""
@@ -175,11 +271,11 @@ class AsyncLogger:
        success: bool,
        timing: float,
        tag: str = "FETCH",
-        url_length: int = 50
+        url_length: int = 100,
    ):
        """
        Convenience method for logging URL fetch status.

        Args:
            url: The URL being processed
            success: Whether the operation was successful
@@ -187,45 +283,92 @@ class AsyncLogger:
            tag: Tag for the message
            url_length: Maximum length for URL in log
        """
+        decoded_url = unquote(url)
+        readable_url = self._shorten(decoded_url, url_length)
        self._log(
            level=LogLevel.SUCCESS if success else LogLevel.ERROR,
-            message="{url:.{url_length}}... | Status: {status} | Time: {timing:.2f}s",
+            message="{url} | {status} | ⏱: {timing:.2f}s",
            tag=tag,
            params={
-                "url": url,
-                "url_length": url_length,
-                "status": success,
-                "timing": timing
+                "url": readable_url,
+                "status": "✓" if success else "✗",
+                "timing": timing,
            },
            colors={
-                "status": Fore.GREEN if success else Fore.RED,
-                "timing": Fore.YELLOW
-            }
+                "status": LogColor.SUCCESS if success else LogColor.ERROR,
+                "timing": LogColor.WARNING,
+            },
        )

    def error_status(
-        self,
-        url: str,
-        error: str,
-        tag: str = "ERROR",
-        url_length: int = 50
+        self, url: str, error: str, tag: str = "ERROR", url_length: int = 50
    ):
        """
        Convenience method for logging error status.

        Args:
            url: The URL being processed
            error: Error message
            tag: Tag for the message
            url_length: Maximum length for URL in log
        """
+        decoded_url = unquote(url)
+        readable_url = self._shorten(decoded_url, url_length)
        self._log(
            level=LogLevel.ERROR,
-            message="{url:.{url_length}}... | Error: {error}",
+            message="{url} | Error: {error}",
            tag=tag,
-            params={
-                "url": url,
-                "url_length": url_length,
-                "error": error
-            }
+            params={"url": readable_url, "error": error},
        )
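
A sketch of the two convenience helpers in use (the file path and rendered output are illustrative):

    logger = AsyncLogger(log_file="logs/crawl.log")
    logger.url_status("https://example.com/page", success=True, timing=1.84)
    # roughly: [FETCH] ↓ https://example.com/page...  | ✓ | ⏱: 1.84s
    logger.error_status("https://example.com/bad", error="HTTP 503")
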


class AsyncFileLogger(AsyncLoggerBase):
    """
    File-only asynchronous logger that writes logs to a specified file.
    """

    def __init__(self, log_file: str):
        """
        Initialize the file logger.

        Args:
            log_file: File path for logging
        """
        self.log_file = log_file
        os.makedirs(os.path.dirname(os.path.abspath(log_file)), exist_ok=True)

    def _write_to_file(self, level: str, message: str, tag: str):
        """Write a message to the log file."""
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
        with open(self.log_file, "a", encoding="utf-8") as f:
            f.write(f"[{timestamp}] [{level}] [{tag}] {message}\n")

    def debug(self, message: str, tag: str = "DEBUG", **kwargs):
        """Log a debug message to file."""
        self._write_to_file("DEBUG", message, tag)

    def info(self, message: str, tag: str = "INFO", **kwargs):
        """Log an info message to file."""
        self._write_to_file("INFO", message, tag)

    def success(self, message: str, tag: str = "SUCCESS", **kwargs):
        """Log a success message to file."""
        self._write_to_file("SUCCESS", message, tag)

    def warning(self, message: str, tag: str = "WARNING", **kwargs):
        """Log a warning message to file."""
        self._write_to_file("WARNING", message, tag)

    def error(self, message: str, tag: str = "ERROR", **kwargs):
        """Log an error message to file."""
        self._write_to_file("ERROR", message, tag)

    def url_status(self, url: str, success: bool, timing: float, tag: str = "FETCH", url_length: int = 100):
        """Log URL fetch status to file."""
        status = "SUCCESS" if success else "FAILED"
        message = f"{url[:url_length]}... | Status: {status} | Time: {timing:.2f}s"
        self._write_to_file("URL_STATUS", message, tag)

    def error_status(self, url: str, error: str, tag: str = "ERROR", url_length: int = 100):
        """Log error status to file."""
        message = f"{url[:url_length]}... | Error: {error}"
        self._write_to_file("ERROR", message, tag)

crawl4ai/async_url_seeder.py (new file, 1471 lines)
File diff suppressed because it is too large

crawl4ai/browser_adapter.py (new file, 421 lines)
@@ -0,0 +1,421 @@
# browser_adapter.py
"""
Browser adapter for Crawl4AI to support both Playwright and undetected browsers
with minimal changes to existing codebase.
"""

from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional, Callable
import time
import json

# Import both, but use conditionally
try:
    from playwright.async_api import Page
except ImportError:
    Page = Any

try:
    from patchright.async_api import Page as UndetectedPage
except ImportError:
    UndetectedPage = Any


class BrowserAdapter(ABC):
    """Abstract adapter for browser-specific operations"""

    @abstractmethod
    async def evaluate(self, page: Page, expression: str, arg: Any = None) -> Any:
        """Execute JavaScript in the page"""
        pass

    @abstractmethod
    async def setup_console_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup console message capturing, returns handler function if needed"""
        pass

    @abstractmethod
    async def setup_error_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup error capturing, returns handler function if needed"""
        pass

    @abstractmethod
    async def retrieve_console_messages(self, page: Page) -> List[Dict]:
        """Retrieve captured console messages (for undetected browsers)"""
        pass

    @abstractmethod
    async def cleanup_console_capture(self, page: Page, handle_console: Optional[Callable], handle_error: Optional[Callable]):
        """Clean up console event listeners"""
        pass

    @abstractmethod
    def get_imports(self) -> tuple:
        """Get the appropriate imports for this adapter"""
        pass
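
The payoff of the ABC is that crawler code can stay adapter-agnostic; a hypothetical driver (not from this file) might look like:

    async def count_links(page, adapter: BrowserAdapter) -> int:
        captured: List[Dict] = []
        handler = await adapter.setup_console_capture(page, captured)
        try:
            # Works identically for Playwright, stealth, and undetected pages
            return await adapter.evaluate(page, "document.querySelectorAll('a').length")
        finally:
            await adapter.cleanup_console_capture(page, handler, None)
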


class PlaywrightAdapter(BrowserAdapter):
    """Adapter for standard Playwright"""

    async def evaluate(self, page: Page, expression: str, arg: Any = None) -> Any:
        """Standard Playwright evaluate"""
        if arg is not None:
            return await page.evaluate(expression, arg)
        return await page.evaluate(expression)

    async def setup_console_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup console capture using Playwright's event system"""
        def handle_console_capture(msg):
            try:
                message_type = "unknown"
                try:
                    message_type = msg.type
                except:
                    pass

                message_text = "unknown"
                try:
                    message_text = msg.text
                except:
                    pass

                entry = {
                    "type": message_type,
                    "text": message_text,
                    "timestamp": time.time()
                }

                captured_console.append(entry)

            except Exception as e:
                captured_console.append({
                    "type": "console_capture_error",
                    "error": str(e),
                    "timestamp": time.time()
                })

        page.on("console", handle_console_capture)
        return handle_console_capture

    async def setup_error_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup error capture using Playwright's event system"""
        def handle_pageerror_capture(err):
            try:
                error_message = "Unknown error"
                try:
                    error_message = err.message
                except:
                    pass

                error_stack = ""
                try:
                    error_stack = err.stack
                except:
                    pass

                captured_console.append({
                    "type": "error",
                    "text": error_message,
                    "stack": error_stack,
                    "timestamp": time.time()
                })
            except Exception as e:
                captured_console.append({
                    "type": "pageerror_capture_error",
                    "error": str(e),
                    "timestamp": time.time()
                })

        page.on("pageerror", handle_pageerror_capture)
        return handle_pageerror_capture

    async def retrieve_console_messages(self, page: Page) -> List[Dict]:
        """Not needed for Playwright - messages are captured via events"""
        return []

    async def cleanup_console_capture(self, page: Page, handle_console: Optional[Callable], handle_error: Optional[Callable]):
        """Remove event listeners"""
        if handle_console:
            page.remove_listener("console", handle_console)
        if handle_error:
            page.remove_listener("pageerror", handle_error)

    def get_imports(self) -> tuple:
        """Return Playwright imports"""
        from playwright.async_api import Page, Error
        from playwright.async_api import TimeoutError as PlaywrightTimeoutError
        return Page, Error, PlaywrightTimeoutError
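
An end-to-end sketch of event-driven capture with this adapter (assumes Playwright is installed; the short wait gives the console event a tick to arrive):

    import asyncio
    from playwright.async_api import async_playwright

    async def demo_playwright_capture():
        adapter = PlaywrightAdapter()
        captured: List[Dict] = []
        async with async_playwright() as p:
            browser = await p.chromium.launch()
            page = await browser.new_page()
            on_console = await adapter.setup_console_capture(page, captured)
            on_error = await adapter.setup_error_capture(page, captured)
            await adapter.evaluate(page, "console.log('hello from the page')")
            await page.wait_for_timeout(100)
            await adapter.cleanup_console_capture(page, on_console, on_error)
            await browser.close()
        print(captured)  # [{'type': 'log', 'text': 'hello from the page', ...}]
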


class StealthAdapter(BrowserAdapter):
    """Adapter for Playwright with stealth features using playwright_stealth"""

    def __init__(self):
        self._console_script_injected = {}
        self._stealth_available = self._check_stealth_availability()

    def _check_stealth_availability(self) -> bool:
        """Check if playwright_stealth is available and get the correct function"""
        try:
            from playwright_stealth import stealth_async
            self._stealth_function = stealth_async
            return True
        except ImportError:
            try:
                from playwright_stealth import stealth_sync
                self._stealth_function = stealth_sync
                return True
            except ImportError:
                self._stealth_function = None
                return False

    async def apply_stealth(self, page: Page):
        """Apply stealth to a page if available"""
        if self._stealth_available and self._stealth_function:
            try:
                if hasattr(self._stealth_function, '__call__'):
                    if 'async' in getattr(self._stealth_function, '__name__', ''):
                        await self._stealth_function(page)
                    else:
                        self._stealth_function(page)
            except Exception as e:
                # Fail silently or log error depending on requirements
                pass

    async def evaluate(self, page: Page, expression: str, arg: Any = None) -> Any:
        """Standard Playwright evaluate with stealth applied"""
        if arg is not None:
            return await page.evaluate(expression, arg)
        return await page.evaluate(expression)

    async def setup_console_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup console capture using Playwright's event system with stealth"""
        # Apply stealth to the page first
        await self.apply_stealth(page)

        def handle_console_capture(msg):
            try:
                message_type = "unknown"
                try:
                    message_type = msg.type
                except:
                    pass

                message_text = "unknown"
                try:
                    message_text = msg.text
                except:
                    pass

                entry = {
                    "type": message_type,
                    "text": message_text,
                    "timestamp": time.time()
                }

                captured_console.append(entry)

            except Exception as e:
                captured_console.append({
                    "type": "console_capture_error",
                    "error": str(e),
                    "timestamp": time.time()
                })

        page.on("console", handle_console_capture)
        return handle_console_capture

    async def setup_error_capture(self, page: Page, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup error capture using Playwright's event system"""
        def handle_pageerror_capture(err):
            try:
                error_message = "Unknown error"
                try:
                    error_message = err.message
                except:
                    pass

                error_stack = ""
                try:
                    error_stack = err.stack
                except:
                    pass

                captured_console.append({
                    "type": "error",
                    "text": error_message,
                    "stack": error_stack,
                    "timestamp": time.time()
                })
            except Exception as e:
                captured_console.append({
                    "type": "pageerror_capture_error",
                    "error": str(e),
                    "timestamp": time.time()
                })

        page.on("pageerror", handle_pageerror_capture)
        return handle_pageerror_capture

    async def retrieve_console_messages(self, page: Page) -> List[Dict]:
        """Not needed for Playwright - messages are captured via events"""
        return []

    async def cleanup_console_capture(self, page: Page, handle_console: Optional[Callable], handle_error: Optional[Callable]):
        """Remove event listeners"""
        if handle_console:
            page.remove_listener("console", handle_console)
        if handle_error:
            page.remove_listener("pageerror", handle_error)

    def get_imports(self) -> tuple:
        """Return Playwright imports"""
        from playwright.async_api import Page, Error
        from playwright.async_api import TimeoutError as PlaywrightTimeoutError
        return Page, Error, PlaywrightTimeoutError


class UndetectedAdapter(BrowserAdapter):
    """Adapter for undetected browser automation with stealth features"""

    def __init__(self):
        self._console_script_injected = {}

    async def evaluate(self, page: UndetectedPage, expression: str, arg: Any = None) -> Any:
        """Undetected browser evaluate with isolated context"""
        # For most evaluations, use isolated context for stealth
        # Only use non-isolated when we need to access our injected console capture
        isolated = not (
            "__console" in expression or
            "__captured" in expression or
            "__error" in expression or
            "window.__" in expression
        )

        if arg is not None:
            return await page.evaluate(expression, arg, isolated_context=isolated)
        return await page.evaluate(expression, isolated_context=isolated)

    async def setup_console_capture(self, page: UndetectedPage, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup console capture using JavaScript injection for undetected browsers"""
        if not self._console_script_injected.get(page, False):
            await page.add_init_script("""
                // Initialize console capture
                window.__capturedConsole = [];
                window.__capturedErrors = [];

                // Store original console methods
                const originalConsole = {};
                ['log', 'info', 'warn', 'error', 'debug'].forEach(method => {
                    originalConsole[method] = console[method];
                    console[method] = function(...args) {
                        try {
                            window.__capturedConsole.push({
                                type: method,
                                text: args.map(arg => {
                                    try {
                                        if (typeof arg === 'object') {
                                            return JSON.stringify(arg);
                                        }
                                        return String(arg);
                                    } catch (e) {
                                        return '[Object]';
                                    }
                                }).join(' '),
                                timestamp: Date.now()
                            });
                        } catch (e) {
                            // Fail silently to avoid detection
                        }

                        // Call original method
                        originalConsole[method].apply(console, args);
                    };
                });
            """)
            self._console_script_injected[page] = True

        return None  # No handler function needed for undetected browser

    async def setup_error_capture(self, page: UndetectedPage, captured_console: List[Dict]) -> Optional[Callable]:
        """Setup error capture using JavaScript injection for undetected browsers"""
        if not self._console_script_injected.get(page, False):
            await page.add_init_script("""
                // Capture errors
                window.addEventListener('error', (event) => {
                    try {
                        window.__capturedErrors.push({
                            type: 'error',
                            text: event.message,
                            stack: event.error ? event.error.stack : '',
                            filename: event.filename,
                            lineno: event.lineno,
                            colno: event.colno,
                            timestamp: Date.now()
                        });
                    } catch (e) {
                        // Fail silently
                    }
                });

                // Capture unhandled promise rejections
                window.addEventListener('unhandledrejection', (event) => {
                    try {
                        window.__capturedErrors.push({
                            type: 'unhandledrejection',
                            text: event.reason ? String(event.reason) : 'Unhandled Promise Rejection',
                            stack: event.reason && event.reason.stack ? event.reason.stack : '',
                            timestamp: Date.now()
                        });
                    } catch (e) {
                        // Fail silently
                    }
                });
            """)
            self._console_script_injected[page] = True

        return None  # No handler function needed for undetected browser

    async def retrieve_console_messages(self, page: UndetectedPage) -> List[Dict]:
        """Retrieve captured console messages and errors from the page"""
        messages = []

        try:
            # Get console messages
            console_messages = await page.evaluate(
                "() => { const msgs = window.__capturedConsole || []; window.__capturedConsole = []; return msgs; }",
                isolated_context=False
            )
            messages.extend(console_messages)

            # Get errors
            errors = await page.evaluate(
                "() => { const errs = window.__capturedErrors || []; window.__capturedErrors = []; return errs; }",
                isolated_context=False
            )
            messages.extend(errors)

            # Convert timestamps from JS to Python format
            for msg in messages:
                if 'timestamp' in msg and isinstance(msg['timestamp'], (int, float)):
                    msg['timestamp'] = msg['timestamp'] / 1000.0  # Convert from ms to seconds

        except Exception:
            # If retrieval fails, return empty list
            pass

        return messages

    async def cleanup_console_capture(self, page: UndetectedPage, handle_console: Optional[Callable], handle_error: Optional[Callable]):
        """Clean up for undetected browser - retrieve final messages"""
        # For undetected browser, we don't have event listeners to remove
        # but we should retrieve any final messages
        final_messages = await self.retrieve_console_messages(page)
        return final_messages

    def get_imports(self) -> tuple:
        """Return undetected browser imports"""
        from patchright.async_api import Page, Error
        from patchright.async_api import TimeoutError as PlaywrightTimeoutError
        return Page, Error, PlaywrightTimeoutError
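
Because the undetected path injects collectors rather than listening to events, the init script must be registered before navigation, and messages are pulled back explicitly; a sketch (assumes an already-constructed patchright Page):

    async def demo_undetected_capture(page):
        adapter = UndetectedAdapter()
        captured: List[Dict] = []
        await adapter.setup_console_capture(page, captured)
        await adapter.setup_error_capture(page, captured)
        await page.goto("https://example.com")  # init script runs on navigation
        # Drain whatever the page logged so far into our list
        captured.extend(await adapter.retrieve_console_messages(page))
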

crawl4ai/browser_manager.py (new file, 1149 lines)
File diff suppressed because it is too large

crawl4ai/browser_profiler.py (new file, 1236 lines)
File diff suppressed because it is too large
@@ -4,7 +4,7 @@ from enum import Enum
class CacheMode(Enum):
    """
    Defines the caching behavior for web crawling operations.

    Modes:
    - ENABLED: Normal caching behavior (read and write)
    - DISABLED: No caching at all
@@ -12,6 +12,7 @@ class CacheMode(Enum):
    - WRITE_ONLY: Only write to cache, don't read
    - BYPASS: Bypass cache for this operation
    """

    ENABLED = "enabled"
    DISABLED = "disabled"
    READ_ONLY = "read_only"
@@ -22,10 +23,10 @@ class CacheMode(Enum):
class CacheContext:
    """
    Encapsulates cache-related decisions and URL handling.

    This class centralizes all cache-related logic and URL type checking,
    making the caching behavior more predictable and maintainable.

    Attributes:
        url (str): The URL being processed.
        cache_mode (CacheMode): The cache mode for the current operation.
@@ -36,10 +37,11 @@ class CacheContext:
        is_raw_html (bool): True if the URL is raw HTML, False otherwise.
        _url_display (str): The display name for the URL (web, local file, or raw HTML).
    """

    def __init__(self, url: str, cache_mode: CacheMode, always_bypass: bool = False):
        """
        Initializes the CacheContext with the provided URL and cache mode.

        Args:
            url (str): The URL being processed.
            cache_mode (CacheMode): The cache mode for the current operation.
@@ -48,42 +50,42 @@ class CacheContext:
        self.url = url
        self.cache_mode = cache_mode
        self.always_bypass = always_bypass
-        self.is_cacheable = url.startswith(('http://', 'https://', 'file://'))
-        self.is_web_url = url.startswith(('http://', 'https://'))
+        self.is_cacheable = url.startswith(("http://", "https://", "file://"))
+        self.is_web_url = url.startswith(("http://", "https://"))
        self.is_local_file = url.startswith("file://")
        self.is_raw_html = url.startswith("raw:")
        self._url_display = url if not self.is_raw_html else "Raw HTML"

    def should_read(self) -> bool:
        """
        Determines if cache should be read based on context.

        How it works:
        1. If always_bypass is True or is_cacheable is False, return False.
        2. If cache_mode is ENABLED or READ_ONLY, return True.

        Returns:
            bool: True if cache should be read, False otherwise.
        """
        if self.always_bypass or not self.is_cacheable:
            return False
        return self.cache_mode in [CacheMode.ENABLED, CacheMode.READ_ONLY]

    def should_write(self) -> bool:
        """
        Determines if cache should be written based on context.

        How it works:
        1. If always_bypass is True or is_cacheable is False, return False.
        2. If cache_mode is ENABLED or WRITE_ONLY, return True.

        Returns:
            bool: True if cache should be written, False otherwise.
        """
        if self.always_bypass or not self.is_cacheable:
            return False
        return self.cache_mode in [CacheMode.ENABLED, CacheMode.WRITE_ONLY]
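
Read and write decisions are independent, which is exactly what makes READ_ONLY and WRITE_ONLY meaningful; for example:

    ctx = CacheContext("https://example.com", CacheMode.WRITE_ONLY)
    assert ctx.should_read() is False and ctx.should_write() is True

    raw = CacheContext("raw:<html></html>", CacheMode.ENABLED)
    assert not raw.is_cacheable and not raw.should_write()  # raw HTML never cached
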

    @property
    def display_url(self) -> str:
        """Returns the URL in display format."""
@@ -94,11 +96,11 @@ def _legacy_to_cache_mode(
    disable_cache: bool = False,
    bypass_cache: bool = False,
    no_cache_read: bool = False,
-    no_cache_write: bool = False
+    no_cache_write: bool = False,
) -> CacheMode:
    """
    Converts legacy cache parameters to the new CacheMode enum.

    This is an internal function to help transition from the old boolean flags
    to the new CacheMode system.
    """
@@ -3,49 +3,52 @@ import re
from collections import Counter
import string
from .model_loader import load_nltk_punkt
from .utils import *

# Define the abstract base class for chunking strategies
class ChunkingStrategy(ABC):
    """
    Abstract base class for chunking strategies.
    """

    @abstractmethod
    def chunk(self, text: str) -> list:
        """
        Abstract method to chunk the given text.

        Args:
            text (str): The text to chunk.

        Returns:
            list: A list of chunks.
        """
        pass


# Create an identity chunking strategy f(x) = [x]
class IdentityChunking(ChunkingStrategy):
    """
    Chunking strategy that returns the input text as a single chunk.
    """

    def chunk(self, text: str) -> list:
        return [text]


# Regex-based chunking
class RegexChunking(ChunkingStrategy):
    """
    Chunking strategy that splits text based on regular expression patterns.
    """

    def __init__(self, patterns=None, **kwargs):
        """
        Initialize the RegexChunking object.

        Args:
            patterns (list): A list of regular expression patterns to split text.
        """
        if patterns is None:
-            patterns = [r'\n\n']  # Default split pattern
+            patterns = [r"\n\n"]  # Default split pattern
        self.patterns = patterns

    def chunk(self, text: str) -> list:
@@ -56,18 +59,20 @@ class RegexChunking(ChunkingStrategy):
                new_paragraphs.extend(re.split(pattern, paragraph))
            paragraphs = new_paragraphs
        return paragraphs
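
Every pattern is applied to each piece produced by the previous one, so patterns compose; for example:

    chunker = RegexChunking(patterns=[r"\n\n", r"---"])
    print(chunker.chunk("intro\n\npart one---part two"))
    # -> ['intro', 'part one', 'part two']
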


# NLP-based sentence chunking
class NlpSentenceChunking(ChunkingStrategy):
    """
    Chunking strategy that splits text into sentences using NLTK's sentence tokenizer.
    """

    def __init__(self, **kwargs):
        """
        Initialize the NlpSentenceChunking object.
        """
        from crawl4ai.legacy.model_loader import load_nltk_punkt

        load_nltk_punkt()

    def chunk(self, text: str) -> list:
        # Improved regex for sentence splitting
@@ -75,31 +80,34 @@ class NlpSentenceChunking(ChunkingStrategy):
        #     r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<![A-Z][A-Z]\.)(?<![A-Za-z]\.)(?<=\.|\?|\!|\n)\s'
        # )
        # sentences = sentence_endings.split(text)
        # sens = [sent.strip() for sent in sentences if sent]
        from nltk.tokenize import sent_tokenize

        sentences = sent_tokenize(text)
        sens = [sent.strip() for sent in sentences]

        return list(set(sens))


# Topic-based segmentation using TextTiling
class TopicSegmentationChunking(ChunkingStrategy):
    """
    Chunking strategy that segments text into topics using NLTK's TextTilingTokenizer.

    How it works:
    1. Segment the text into topics using TextTilingTokenizer
    2. Extract keywords for each topic segment
    """

    def __init__(self, num_keywords=3, **kwargs):
        """
        Initialize the TopicSegmentationChunking object.

        Args:
            num_keywords (int): The number of keywords to extract for each topic segment.
        """
        import nltk as nl

        self.tokenizer = nl.tokenize.TextTilingTokenizer()
        self.num_keywords = num_keywords
@@ -111,8 +119,14 @@ class TopicSegmentationChunking(ChunkingStrategy):
    def extract_keywords(self, text: str) -> list:
        # Tokenize and remove stopwords and punctuation
        import nltk as nl

        tokens = nl.tokenize.word_tokenize(text)
-        tokens = [token.lower() for token in tokens if token not in nl.corpus.stopwords.words('english') and token not in string.punctuation]
+        tokens = [
+            token.lower()
+            for token in tokens
+            if token not in nl.corpus.stopwords.words("english")
+            and token not in string.punctuation
+        ]

        # Calculate frequency distribution
        freq_dist = Counter(tokens)
@@ -123,23 +137,27 @@ class TopicSegmentationChunking(ChunkingStrategy):
        # Segment the text into topics
        segments = self.chunk(text)
        # Extract keywords for each topic segment
-        segments_with_topics = [(segment, self.extract_keywords(segment)) for segment in segments]
+        segments_with_topics = [
+            (segment, self.extract_keywords(segment)) for segment in segments
+        ]
        return segments_with_topics
|
||||
|
||||
|
||||
|
||||
# Fixed-length word chunks
|
||||
class FixedLengthWordChunking(ChunkingStrategy):
|
||||
"""
|
||||
Chunking strategy that splits text into fixed-length word chunks.
|
||||
|
||||
|
||||
How it works:
|
||||
1. Split the text into words
|
||||
2. Create chunks of fixed length
|
||||
3. Return the list of chunks
|
||||
"""
|
||||
|
||||
def __init__(self, chunk_size=100, **kwargs):
|
||||
"""
|
||||
Initialize the fixed-length word chunking strategy with the given chunk size.
|
||||
|
||||
|
||||
Args:
|
||||
chunk_size (int): The size of each chunk in words.
|
||||
"""
|
||||
@@ -147,23 +165,28 @@ class FixedLengthWordChunking(ChunkingStrategy):
|
||||
|
||||
def chunk(self, text: str) -> list:
|
||||
words = text.split()
|
||||
return [' '.join(words[i:i + self.chunk_size]) for i in range(0, len(words), self.chunk_size)]
|
||||
|
||||
return [
|
||||
" ".join(words[i : i + self.chunk_size])
|
||||
for i in range(0, len(words), self.chunk_size)
|
||||
]
|
||||
|
||||
|
||||
# Sliding window chunking
|
||||
class SlidingWindowChunking(ChunkingStrategy):
|
||||
"""
|
||||
Chunking strategy that splits text into overlapping word chunks.
|
||||
|
||||
|
||||
How it works:
|
||||
1. Split the text into words
|
||||
2. Create chunks of fixed length
|
||||
3. Return the list of chunks
|
||||
"""
|
||||
|
||||
def __init__(self, window_size=100, step=50, **kwargs):
|
||||
"""
|
||||
Initialize the sliding window chunking strategy with the given window size and
|
||||
step size.
|
||||
|
||||
|
||||
Args:
|
||||
window_size (int): The size of the sliding window in words.
|
||||
step (int): The step size for sliding the window in words.
|
||||
@@ -174,35 +197,37 @@ class SlidingWindowChunking(ChunkingStrategy):
|
||||
def chunk(self, text: str) -> list:
|
||||
words = text.split()
|
||||
chunks = []
|
||||
|
||||
|
||||
if len(words) <= self.window_size:
|
||||
return [text]
|
||||
|
||||
|
||||
for i in range(0, len(words) - self.window_size + 1, self.step):
|
||||
chunk = ' '.join(words[i:i + self.window_size])
|
||||
chunk = " ".join(words[i : i + self.window_size])
|
||||
chunks.append(chunk)
|
||||
|
||||
|
||||
# Handle the last chunk if it doesn't align perfectly
|
||||
if i + self.window_size < len(words):
|
||||
chunks.append(' '.join(words[-self.window_size:]))
|
||||
|
||||
chunks.append(" ".join(words[-self.window_size :]))
|
||||
|
||||
return chunks
|
||||
|
||||
|
||||
|
||||
class OverlappingWindowChunking(ChunkingStrategy):
|
||||
"""
|
||||
Chunking strategy that splits text into overlapping word chunks.
|
||||
|
||||
|
||||
How it works:
|
||||
1. Split the text into words using whitespace
|
||||
2. Create chunks of fixed length equal to the window size
|
||||
3. Slide the window by the overlap size
|
||||
4. Return the list of chunks
|
||||
"""
|
||||
|
||||
def __init__(self, window_size=1000, overlap=100, **kwargs):
|
||||
"""
|
||||
Initialize the overlapping window chunking strategy with the given window size and
|
||||
overlap size.
|
||||
|
||||
|
||||
Args:
|
||||
window_size (int): The size of the window in words.
|
||||
overlap (int): The size of the overlap between consecutive chunks in words.
|
||||
@@ -213,19 +238,19 @@ class OverlappingWindowChunking(ChunkingStrategy):
|
||||
def chunk(self, text: str) -> list:
|
||||
words = text.split()
|
||||
chunks = []
|
||||
|
||||
|
||||
if len(words) <= self.window_size:
|
||||
return [text]
|
||||
|
||||
|
||||
start = 0
|
||||
while start < len(words):
|
||||
end = start + self.window_size
|
||||
chunk = ' '.join(words[start:end])
|
||||
chunk = " ".join(words[start:end])
|
||||
chunks.append(chunk)
|
||||
|
||||
|
||||
if end >= len(words):
|
||||
break
|
||||
|
||||
|
||||
start = end - self.overlap
|
||||
|
||||
return chunks
|
||||
|
||||
return chunks
|
||||
|
||||
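For orientation, here is a small usage sketch of the chunking strategies above; the import path (crawl4ai.chunking_strategy) is an assumption inferred from this diff, and the sample text is made up:

    # Hypothetical usage of the chunking strategies shown in this diff.
    from crawl4ai.chunking_strategy import (
        RegexChunking,
        FixedLengthWordChunking,
        OverlappingWindowChunking,
    )

    text = "First paragraph.\n\nSecond paragraph with more words in it."

    # Split on blank lines (the default pattern).
    print(RegexChunking().chunk(text))
    # -> ['First paragraph.', 'Second paragraph with more words in it.']

    # Non-overlapping 5-word chunks.
    print(FixedLengthWordChunking(chunk_size=5).chunk(text))

    # 6-word windows that overlap by 2 words; inputs shorter than the
    # window come back as a single chunk.
    print(OverlappingWindowChunking(window_size=6, overlap=2).chunk(text))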
1557  crawl4ai/cli.py  (file diff suppressed because it is too large)
837  crawl4ai/components/crawler_monitor.py  (normal file)
@@ -0,0 +1,837 @@
import time
import uuid
import threading
import psutil
from datetime import datetime, timedelta
from typing import Dict, Optional, List

from rich.console import Console
from rich.layout import Layout
from rich.panel import Panel
from rich.table import Table
from rich.text import Text
from rich.live import Live
from rich import box
from ..models import CrawlStatus


class TerminalUI:
    """Terminal user interface for CrawlerMonitor using rich library."""

    def __init__(self, refresh_rate: float = 1.0, max_width: int = 120):
        """
        Initialize the terminal UI.

        Args:
            refresh_rate: How often to refresh the UI (in seconds)
            max_width: Maximum width of the UI in characters
        """
        self.console = Console(width=max_width)
        self.layout = Layout()
        self.refresh_rate = refresh_rate
        self.stop_event = threading.Event()
        self.ui_thread = None
        self.monitor = None  # Will be set by CrawlerMonitor
        self.max_width = max_width

        # Setup layout - vertical layout (top to bottom)
        self.layout.split(
            Layout(name="header", size=3),
            Layout(name="pipeline_status", size=10),
            Layout(name="task_details", ratio=1),
            Layout(name="footer", size=3)  # Increased footer size to fit all content
        )

    def start(self, monitor):
        """Start the UI thread."""
        self.monitor = monitor
        self.stop_event.clear()
        self.ui_thread = threading.Thread(target=self._ui_loop)
        self.ui_thread.daemon = True
        self.ui_thread.start()

    def stop(self):
        """Stop the UI thread."""
        if self.ui_thread and self.ui_thread.is_alive():
            self.stop_event.set()
            # Only try to join if we're not in the UI thread
            # This prevents "cannot join current thread" errors
            if threading.current_thread() != self.ui_thread:
                self.ui_thread.join(timeout=5.0)

    def _ui_loop(self):
        """Main UI rendering loop."""
        import sys
        import select
        import termios
        import tty

        # Setup terminal for non-blocking input
        old_settings = termios.tcgetattr(sys.stdin)
        try:
            tty.setcbreak(sys.stdin.fileno())

            # Use Live display to render the UI
            with Live(self.layout, refresh_per_second=1/self.refresh_rate, screen=True) as live:
                self.live = live  # Store the live display for updates

                # Main UI loop
                while not self.stop_event.is_set():
                    self._update_display()

                    # Check for key press (non-blocking)
                    if select.select([sys.stdin], [], [], 0)[0]:
                        key = sys.stdin.read(1)
                        # Check for 'q' to quit
                        if key == 'q':
                            # Signal stop but don't call monitor.stop() from UI thread
                            # as it would cause the thread to try to join itself
                            self.stop_event.set()
                            self.monitor.is_running = False
                            break

                    time.sleep(self.refresh_rate)

                    # Just check if the monitor was stopped
                    if not self.monitor.is_running:
                        break
        finally:
            # Restore terminal settings
            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)

    def _update_display(self):
        """Update the terminal display with current statistics."""
        if not self.monitor:
            return

        # Update crawler status panel
        self.layout["header"].update(self._create_status_panel())

        # Update pipeline status panel and task details panel
        self.layout["pipeline_status"].update(self._create_pipeline_panel())
        self.layout["task_details"].update(self._create_task_details_panel())

        # Update footer
        self.layout["footer"].update(self._create_footer())

    def _create_status_panel(self) -> Panel:
        """Create the crawler status panel."""
        summary = self.monitor.get_summary()

        # Format memory status with icon
        memory_status = self.monitor.get_memory_status()
        memory_icon = "🟢"  # Default NORMAL
        if memory_status == "PRESSURE":
            memory_icon = "🟠"
        elif memory_status == "CRITICAL":
            memory_icon = "🔴"

        # Get current memory usage
        current_memory = psutil.Process().memory_info().rss / (1024 * 1024)  # MB
        memory_percent = (current_memory / psutil.virtual_memory().total) * 100

        # Format runtime
        runtime = self.monitor._format_time(time.time() - self.monitor.start_time if self.monitor.start_time else 0)

        # Create the status text
        status_text = Text()
        status_text.append(f"Web Crawler Dashboard | Runtime: {runtime} | Memory: {memory_percent:.1f}% {memory_icon}\n")
        status_text.append(f"Status: {memory_status} | URLs: {summary['urls_completed']}/{summary['urls_total']} | ")
        status_text.append(f"Peak Mem: {summary['peak_memory_percent']:.1f}% at {self.monitor._format_time(summary['peak_memory_time'])}")

        return Panel(status_text, title="Crawler Status", border_style="blue")

    def _create_pipeline_panel(self) -> Panel:
        """Create the pipeline status panel."""
        summary = self.monitor.get_summary()
        queue_stats = self.monitor.get_queue_stats()

        # Create a table for status counts
        table = Table(show_header=True, box=None)
        table.add_column("Status", style="cyan")
        table.add_column("Count", justify="right")
        table.add_column("Percentage", justify="right")
        table.add_column("Stat", style="cyan")
        table.add_column("Value", justify="right")

        # Calculate overall progress
        progress = f"{summary['urls_completed']}/{summary['urls_total']}"
        progress_percent = f"{summary['completion_percentage']:.1f}%"

        # Add the overall progress row
        table.add_row(
            "Overall Progress",
            progress,
            progress_percent,
            "Est. Completion",
            summary.get('estimated_completion_time', "N/A")
        )

        # Add rows for each status
        status_counts = summary['status_counts']
        total = summary['urls_total'] or 1  # Avoid division by zero

        # Status rows
        table.add_row(
            "Completed",
            str(status_counts.get(CrawlStatus.COMPLETED.name, 0)),
            f"{status_counts.get(CrawlStatus.COMPLETED.name, 0) / total * 100:.1f}%",
            "Avg. Time/URL",
            f"{summary.get('avg_task_duration', 0):.2f}s"
        )

        table.add_row(
            "Failed",
            str(status_counts.get(CrawlStatus.FAILED.name, 0)),
            f"{status_counts.get(CrawlStatus.FAILED.name, 0) / total * 100:.1f}%",
            "Concurrent Tasks",
            str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0))
        )

        table.add_row(
            "In Progress",
            str(status_counts.get(CrawlStatus.IN_PROGRESS.name, 0)),
            f"{status_counts.get(CrawlStatus.IN_PROGRESS.name, 0) / total * 100:.1f}%",
            "Queue Size",
            str(queue_stats['total_queued'])
        )

        table.add_row(
            "Queued",
            str(status_counts.get(CrawlStatus.QUEUED.name, 0)),
            f"{status_counts.get(CrawlStatus.QUEUED.name, 0) / total * 100:.1f}%",
            "Max Wait Time",
            f"{queue_stats['highest_wait_time']:.1f}s"
        )

        # Requeued is a special case as it's not a status
        requeued_count = summary.get('requeued_count', 0)
        table.add_row(
            "Requeued",
            str(requeued_count),
            f"{summary.get('requeue_rate', 0):.1f}%",
            "Avg Wait Time",
            f"{queue_stats['avg_wait_time']:.1f}s"
        )

        # Add empty row for spacing
        table.add_row(
            "",
            "",
            "",
            "Requeue Rate",
            f"{summary.get('requeue_rate', 0):.1f}%"
        )

        return Panel(table, title="Pipeline Status", border_style="green")

    def _create_task_details_panel(self) -> Panel:
        """Create the task details panel."""
        # Create a table for task details
        table = Table(show_header=True, expand=True)
        table.add_column("Task ID", style="cyan", no_wrap=True, width=10)
        table.add_column("URL", style="blue", ratio=3)
        table.add_column("Status", style="green", width=15)
        table.add_column("Memory", justify="right", width=8)
        table.add_column("Peak", justify="right", width=8)
        table.add_column("Duration", justify="right", width=10)

        # Get all task stats
        task_stats = self.monitor.get_all_task_stats()

        # Add summary row
        active_tasks = sum(1 for stats in task_stats.values()
                           if stats['status'] == CrawlStatus.IN_PROGRESS.name)

        total_memory = sum(stats['memory_usage'] for stats in task_stats.values())
        total_peak = sum(stats['peak_memory'] for stats in task_stats.values())

        # Summary row with separators
        table.add_row(
            "SUMMARY",
            f"Total: {len(task_stats)}",
            f"Active: {active_tasks}",
            f"{total_memory:.1f}",
            f"{total_peak:.1f}",
            "N/A"
        )

        # Add a separator
        table.add_row("—" * 10, "—" * 20, "—" * 10, "—" * 8, "—" * 8, "—" * 10)

        # Status icons
        status_icons = {
            CrawlStatus.QUEUED.name: "⏳",
            CrawlStatus.IN_PROGRESS.name: "🔄",
            CrawlStatus.COMPLETED.name: "✅",
            CrawlStatus.FAILED.name: "❌"
        }

        # Calculate how many rows we can display based on available space
        # We can display more rows now that we have a dedicated panel
        display_count = min(len(task_stats), 20)  # Display up to 20 tasks

        # Add rows for each task
        for task_id, stats in sorted(
            list(task_stats.items())[:display_count],
            # Sort: 1. IN_PROGRESS first, 2. QUEUED, 3. COMPLETED/FAILED by recency
            key=lambda x: (
                0 if x[1]['status'] == CrawlStatus.IN_PROGRESS.name else
                1 if x[1]['status'] == CrawlStatus.QUEUED.name else
                2,
                -1 * (x[1].get('end_time', 0) or 0)  # Most recent first
            )
        ):
            # Truncate task_id and URL for display
            short_id = task_id[:8]
            url = stats['url']
            if len(url) > 50:  # Allow longer URLs in the dedicated panel
                url = url[:47] + "..."

            # Format status with icon
            status = f"{status_icons.get(stats['status'], '?')} {stats['status']}"

            # Add row
            table.add_row(
                short_id,
                url,
                status,
                f"{stats['memory_usage']:.1f}",
                f"{stats['peak_memory']:.1f}",
                stats['duration'] if 'duration' in stats else "0:00"
            )

        return Panel(table, title="Task Details", border_style="yellow")

    def _create_footer(self) -> Panel:
        """Create the footer panel."""
        from rich.columns import Columns
        from rich.align import Align

        memory_status = self.monitor.get_memory_status()
        memory_icon = "🟢"  # Default NORMAL
        if memory_status == "PRESSURE":
            memory_icon = "🟠"
        elif memory_status == "CRITICAL":
            memory_icon = "🔴"

        # Left section - memory status
        left_text = Text()
        left_text.append("Memory Status: ", style="bold")
        status_style = "green" if memory_status == "NORMAL" else "yellow" if memory_status == "PRESSURE" else "red bold"
        left_text.append(f"{memory_icon} {memory_status}", style=status_style)

        # Center section - copyright
        center_text = Text("© Crawl4AI 2025 | Made by UncleCode", style="cyan italic")

        # Right section - quit instruction
        right_text = Text()
        right_text.append("Press ", style="bold")
        right_text.append("q", style="white on blue")
        right_text.append(" to quit", style="bold")

        # Create columns with the three sections
        footer_content = Columns(
            [
                Align.left(left_text),
                Align.center(center_text),
                Align.right(right_text)
            ],
            expand=True
        )

        # Create a more visible footer panel
        return Panel(
            footer_content,
            border_style="white",
            padding=(0, 1)  # Add padding for better visibility
        )


class CrawlerMonitor:
    """
    Comprehensive monitoring and visualization system for tracking web crawler operations in real-time.
    Provides a terminal-based dashboard that displays task statuses, memory usage, queue statistics,
    and performance metrics.
    """

    def __init__(
        self,
        urls_total: int = 0,
        refresh_rate: float = 1.0,
        enable_ui: bool = True,
        max_width: int = 120
    ):
        """
        Initialize the CrawlerMonitor.

        Args:
            urls_total: Total number of URLs to be crawled
            refresh_rate: How often to refresh the UI (in seconds)
            enable_ui: Whether to display the terminal UI
            max_width: Maximum width of the UI in characters
        """
        # Core monitoring attributes
        self.stats = {}  # Task ID -> stats dict
        self.memory_status = "NORMAL"
        self.start_time = None
        self.end_time = None
        self.is_running = False
        self.queue_stats = {
            "total_queued": 0,
            "highest_wait_time": 0.0,
            "avg_wait_time": 0.0
        }
        self.urls_total = urls_total
        self.urls_completed = 0
        self.peak_memory_percent = 0.0
        self.peak_memory_time = 0.0

        # Status counts
        self.status_counts = {
            CrawlStatus.QUEUED.name: 0,
            CrawlStatus.IN_PROGRESS.name: 0,
            CrawlStatus.COMPLETED.name: 0,
            CrawlStatus.FAILED.name: 0
        }

        # Requeue tracking
        self.requeued_count = 0

        # Thread-safety
        self._lock = threading.RLock()

        # Terminal UI
        self.enable_ui = enable_ui
        self.terminal_ui = TerminalUI(
            refresh_rate=refresh_rate,
            max_width=max_width
        ) if enable_ui else None

    def start(self):
        """
        Start the monitoring session.

        - Initializes the start_time
        - Sets is_running to True
        - Starts the terminal UI if enabled
        """
        with self._lock:
            self.start_time = time.time()
            self.is_running = True

            # Start the terminal UI
            if self.enable_ui and self.terminal_ui:
                self.terminal_ui.start(self)

    def stop(self):
        """
        Stop the monitoring session.

        - Records end_time
        - Sets is_running to False
        - Stops the terminal UI
        - Generates final summary statistics
        """
        with self._lock:
            self.end_time = time.time()
            self.is_running = False

            # Stop the terminal UI
            if self.enable_ui and self.terminal_ui:
                self.terminal_ui.stop()

    def add_task(self, task_id: str, url: str):
        """
        Register a new task with the monitor.

        Args:
            task_id: Unique identifier for the task
            url: URL being crawled

        The task is initialized with:
        - status: QUEUED
        - url: The URL to crawl
        - enqueue_time: Current time
        - memory_usage: 0
        - peak_memory: 0
        - wait_time: 0
        - retry_count: 0
        """
        with self._lock:
            self.stats[task_id] = {
                "task_id": task_id,
                "url": url,
                "status": CrawlStatus.QUEUED.name,
                "enqueue_time": time.time(),
                "start_time": None,
                "end_time": None,
                "memory_usage": 0.0,
                "peak_memory": 0.0,
                "error_message": "",
                "wait_time": 0.0,
                "retry_count": 0,
                "duration": "0:00",
                "counted_requeue": False
            }

            # Update status counts
            self.status_counts[CrawlStatus.QUEUED.name] += 1

    def update_task(
        self,
        task_id: str,
        status: Optional[CrawlStatus] = None,
        start_time: Optional[float] = None,
        end_time: Optional[float] = None,
        memory_usage: Optional[float] = None,
        peak_memory: Optional[float] = None,
        error_message: Optional[str] = None,
        retry_count: Optional[int] = None,
        wait_time: Optional[float] = None
    ):
        """
        Update statistics for a specific task.

        Args:
            task_id: Unique identifier for the task
            status: New status (QUEUED, IN_PROGRESS, COMPLETED, FAILED)
            start_time: When task execution started
            end_time: When task execution ended
            memory_usage: Current memory usage in MB
            peak_memory: Maximum memory usage in MB
            error_message: Error description if failed
            retry_count: Number of retry attempts
            wait_time: Time spent in queue

        Updates task statistics and updates status counts.
        If status changes, decrements old status count and
        increments new status count.
        """
        with self._lock:
            # Check if task exists
            if task_id not in self.stats:
                return

            task_stats = self.stats[task_id]

            # Update status counts if status is changing
            old_status = task_stats["status"]
            if status and status.name != old_status:
                self.status_counts[old_status] -= 1
                self.status_counts[status.name] += 1

                # Track completion
                if status == CrawlStatus.COMPLETED:
                    self.urls_completed += 1

                # Track requeues
                if old_status in [CrawlStatus.COMPLETED.name, CrawlStatus.FAILED.name] and not task_stats.get("counted_requeue", False):
                    self.requeued_count += 1
                    task_stats["counted_requeue"] = True

            # Update task statistics
            if status:
                task_stats["status"] = status.name
            if start_time is not None:
                task_stats["start_time"] = start_time
            if end_time is not None:
                task_stats["end_time"] = end_time
            if memory_usage is not None:
                task_stats["memory_usage"] = memory_usage

                # Update peak memory if necessary
                current_percent = (memory_usage / psutil.virtual_memory().total) * 100
                if current_percent > self.peak_memory_percent:
                    self.peak_memory_percent = current_percent
                    self.peak_memory_time = time.time()

            if peak_memory is not None:
                task_stats["peak_memory"] = peak_memory
            if error_message is not None:
                task_stats["error_message"] = error_message
            if retry_count is not None:
                task_stats["retry_count"] = retry_count
            if wait_time is not None:
                task_stats["wait_time"] = wait_time

            # Calculate duration
            if task_stats["start_time"]:
                end = task_stats["end_time"] or time.time()
                duration = end - task_stats["start_time"]
                task_stats["duration"] = self._format_time(duration)

    def update_memory_status(self, status: str):
        """
        Update the current memory status.

        Args:
            status: Memory status (NORMAL, PRESSURE, CRITICAL, or custom)

        Also updates the UI to reflect the new status.
        """
        with self._lock:
            self.memory_status = status

    def update_queue_statistics(
        self,
        total_queued: int,
        highest_wait_time: float,
        avg_wait_time: float
    ):
        """
        Update statistics related to the task queue.

        Args:
            total_queued: Number of tasks currently in queue
            highest_wait_time: Longest wait time of any queued task
            avg_wait_time: Average wait time across all queued tasks
        """
        with self._lock:
            self.queue_stats = {
                "total_queued": total_queued,
                "highest_wait_time": highest_wait_time,
                "avg_wait_time": avg_wait_time
            }

    def get_task_stats(self, task_id: str) -> Dict:
        """
        Get statistics for a specific task.

        Args:
            task_id: Unique identifier for the task

        Returns:
            Dictionary containing all task statistics
        """
        with self._lock:
            return self.stats.get(task_id, {}).copy()

    def get_all_task_stats(self) -> Dict[str, Dict]:
        """
        Get statistics for all tasks.

        Returns:
            Dictionary mapping task_ids to their statistics
        """
        with self._lock:
            return self.stats.copy()

    def get_memory_status(self) -> str:
        """
        Get the current memory status.

        Returns:
            Current memory status string
        """
        with self._lock:
            return self.memory_status

    def get_queue_stats(self) -> Dict:
        """
        Get current queue statistics.

        Returns:
            Dictionary with queue statistics including:
            - total_queued: Number of tasks in queue
            - highest_wait_time: Longest wait time
            - avg_wait_time: Average wait time
        """
        with self._lock:
            return self.queue_stats.copy()

    def get_summary(self) -> Dict:
        """
        Get a summary of all crawler statistics.

        Returns:
            Dictionary containing:
            - runtime: Total runtime in seconds
            - urls_total: Total URLs to process
            - urls_completed: Number of completed URLs
            - completion_percentage: Percentage complete
            - status_counts: Count of tasks in each status
            - memory_status: Current memory status
            - peak_memory_percent: Highest memory usage
            - peak_memory_time: When peak memory occurred
            - avg_task_duration: Average task processing time
            - estimated_completion_time: Projected finish time
            - requeue_rate: Percentage of tasks requeued
        """
        with self._lock:
            # Calculate runtime
            current_time = time.time()
            runtime = current_time - (self.start_time or current_time)

            # Calculate completion percentage
            completion_percentage = 0
            if self.urls_total > 0:
                completion_percentage = (self.urls_completed / self.urls_total) * 100

            # Calculate average task duration for completed tasks
            completed_tasks = [
                task for task in self.stats.values()
                if task["status"] == CrawlStatus.COMPLETED.name and task.get("start_time") and task.get("end_time")
            ]

            avg_task_duration = 0
            if completed_tasks:
                total_duration = sum(task["end_time"] - task["start_time"] for task in completed_tasks)
                avg_task_duration = total_duration / len(completed_tasks)

            # Calculate requeue rate
            requeue_rate = 0
            if len(self.stats) > 0:
                requeue_rate = (self.requeued_count / len(self.stats)) * 100

            # Calculate estimated completion time
            estimated_completion_time = "N/A"
            if avg_task_duration > 0 and self.urls_total > 0 and self.urls_completed > 0:
                remaining_tasks = self.urls_total - self.urls_completed
                estimated_seconds = remaining_tasks * avg_task_duration
                estimated_completion_time = self._format_time(estimated_seconds)

            return {
                "runtime": runtime,
                "urls_total": self.urls_total,
                "urls_completed": self.urls_completed,
                "completion_percentage": completion_percentage,
                "status_counts": self.status_counts.copy(),
                "memory_status": self.memory_status,
                "peak_memory_percent": self.peak_memory_percent,
                "peak_memory_time": self.peak_memory_time,
                "avg_task_duration": avg_task_duration,
                "estimated_completion_time": estimated_completion_time,
                "requeue_rate": requeue_rate,
                "requeued_count": self.requeued_count
            }

    def render(self):
        """
        Render the terminal UI.

        This is the main UI rendering loop that:
        1. Updates all statistics
        2. Formats the display
        3. Renders the ASCII interface
        4. Handles keyboard input

        Note: The actual rendering is handled by the TerminalUI class
        which uses the rich library's Live display.
        """
        if self.enable_ui and self.terminal_ui:
            # Force an update of the UI
            if hasattr(self.terminal_ui, '_update_display'):
                self.terminal_ui._update_display()

    def _format_time(self, seconds: float) -> str:
        """
        Format time in hours:minutes:seconds.

        Args:
            seconds: Time in seconds

        Returns:
            Formatted time string (e.g., "1:23:45")
        """
        delta = timedelta(seconds=int(seconds))
        hours, remainder = divmod(delta.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)

        if hours > 0:
            return f"{hours}:{minutes:02}:{seconds:02}"
        else:
            return f"{minutes}:{seconds:02}"

    def _calculate_estimated_completion(self) -> str:
        """
        Calculate estimated completion time based on current progress.

        Returns:
            Formatted time string
        """
        summary = self.get_summary()
        return summary.get("estimated_completion_time", "N/A")


# Example code for testing
if __name__ == "__main__":
    # Initialize the monitor
    monitor = CrawlerMonitor(urls_total=100)

    # Start monitoring
    monitor.start()

    try:
        # Simulate some tasks
        for i in range(20):
            task_id = str(uuid.uuid4())
            url = f"https://example.com/page{i}"
            monitor.add_task(task_id, url)

            # Simulate 20% of tasks are already running
            if i < 4:
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=time.time() - 30,  # Started 30 seconds ago
                    memory_usage=10.5
                )

            # Simulate 10% of tasks are completed
            if i >= 4 and i < 6:
                start_time = time.time() - 60
                end_time = time.time() - 15
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=start_time,
                    memory_usage=8.2
                )
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.COMPLETED,
                    end_time=end_time,
                    memory_usage=0,
                    peak_memory=15.7
                )

            # Simulate 5% of tasks fail
            if i >= 6 and i < 7:
                start_time = time.time() - 45
                end_time = time.time() - 20
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.IN_PROGRESS,
                    start_time=start_time,
                    memory_usage=12.3
                )
                monitor.update_task(
                    task_id=task_id,
                    status=CrawlStatus.FAILED,
                    end_time=end_time,
                    memory_usage=0,
                    peak_memory=18.2,
                    error_message="Connection timeout"
                )

        # Simulate memory pressure
        monitor.update_memory_status("PRESSURE")

        # Simulate queue statistics
        monitor.update_queue_statistics(
            total_queued=16,  # 20 - 4 (in progress)
            highest_wait_time=120.5,
            avg_wait_time=60.2
        )

        # Keep the monitor running for a demonstration
        print("Crawler Monitor is running. Press 'q' to exit.")
        while monitor.is_running:
            time.sleep(0.1)

    except KeyboardInterrupt:
        print("\nExiting crawler monitor...")
    finally:
        # Stop the monitor
        monitor.stop()
        print("Crawler monitor exited successfully.")
@@ -4,45 +4,84 @@ from dotenv import load_dotenv
load_dotenv()  # Load environment variables from .env file

# Default provider, ONLY used when the extraction strategy is LLMExtractionStrategy
-DEFAULT_PROVIDER = "openai/gpt-4o-mini"
+DEFAULT_PROVIDER = "openai/gpt-4o"
DEFAULT_PROVIDER_API_KEY = "OPENAI_API_KEY"
MODEL_REPO_BRANCH = "new-release-0.0.2"
# Provider-model dictionary, ONLY used when the extraction strategy is LLMExtractionStrategy
PROVIDER_MODELS = {
-    "ollama/llama3": "no-token-needed", # Any model from Ollama no need for API token
+    "ollama/llama3": "no-token-needed",  # Any model from Ollama no need for API token
    "groq/llama3-70b-8192": os.getenv("GROQ_API_KEY"),
    "groq/llama3-8b-8192": os.getenv("GROQ_API_KEY"),
    "openai/gpt-4o-mini": os.getenv("OPENAI_API_KEY"),
    "openai/gpt-4o": os.getenv("OPENAI_API_KEY"),
    "openai/o1-mini": os.getenv("OPENAI_API_KEY"),
    "openai/o1-preview": os.getenv("OPENAI_API_KEY"),
    "openai/o3-mini": os.getenv("OPENAI_API_KEY"),
    "openai/o3-mini-high": os.getenv("OPENAI_API_KEY"),
    "anthropic/claude-3-haiku-20240307": os.getenv("ANTHROPIC_API_KEY"),
    "anthropic/claude-3-opus-20240229": os.getenv("ANTHROPIC_API_KEY"),
    "anthropic/claude-3-sonnet-20240229": os.getenv("ANTHROPIC_API_KEY"),
    "anthropic/claude-3-5-sonnet-20240620": os.getenv("ANTHROPIC_API_KEY"),
    "gemini/gemini-pro": os.getenv("GEMINI_API_KEY"),
    'gemini/gemini-1.5-pro': os.getenv("GEMINI_API_KEY"),
    'gemini/gemini-2.0-flash': os.getenv("GEMINI_API_KEY"),
    'gemini/gemini-2.0-flash-exp': os.getenv("GEMINI_API_KEY"),
    'gemini/gemini-2.0-flash-lite-preview-02-05': os.getenv("GEMINI_API_KEY"),
    "deepseek/deepseek-chat": os.getenv("DEEPSEEK_API_KEY"),
}
PROVIDER_MODELS_PREFIXES = {
    "ollama": "no-token-needed",  # Any model from Ollama no need for API token
    "groq": os.getenv("GROQ_API_KEY"),
    "openai": os.getenv("OPENAI_API_KEY"),
    "anthropic": os.getenv("ANTHROPIC_API_KEY"),
    "gemini": os.getenv("GEMINI_API_KEY"),
    "deepseek": os.getenv("DEEPSEEK_API_KEY"),
}

# Chunk token threshold
-CHUNK_TOKEN_THRESHOLD = 2 ** 11  # 2048 tokens
+CHUNK_TOKEN_THRESHOLD = 2**11  # 2048 tokens
OVERLAP_RATE = 0.1
WORD_TOKEN_RATE = 1.3

# Threshold for the minimum number of words in a HTML tag to be considered
MIN_WORD_THRESHOLD = 1
IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD = 1

-IMPORTANT_ATTRS = ['src', 'href', 'alt', 'title', 'width', 'height']
-ONLY_TEXT_ELIGIBLE_TAGS = ['b', 'i', 'u', 'span', 'del', 'ins', 'sub', 'sup', 'strong', 'em', 'code', 'kbd', 'var', 's', 'q', 'abbr', 'cite', 'dfn', 'time', 'small', 'mark']
+IMPORTANT_ATTRS = ["src", "href", "alt", "title", "width", "height"]
+ONLY_TEXT_ELIGIBLE_TAGS = [
+    "b",
+    "i",
+    "u",
+    "span",
+    "del",
+    "ins",
+    "sub",
+    "sup",
+    "strong",
+    "em",
+    "code",
+    "kbd",
+    "var",
+    "s",
+    "q",
+    "abbr",
+    "cite",
+    "dfn",
+    "time",
+    "small",
+    "mark",
+]
SOCIAL_MEDIA_DOMAINS = [
-    'facebook.com',
-    'twitter.com',
-    'x.com',
-    'linkedin.com',
-    'instagram.com',
-    'pinterest.com',
-    'tiktok.com',
-    'snapchat.com',
-    'reddit.com',
-]
+    "facebook.com",
+    "twitter.com",
+    "x.com",
+    "linkedin.com",
+    "instagram.com",
+    "pinterest.com",
+    "tiktok.com",
+    "snapchat.com",
+    "reddit.com",
+]

# Threshold for the Image extraction - Range is 1 to 6
# Images are scored based on point based system, to filter based on usefulness. Points are assigned
@@ -60,5 +99,48 @@ NEED_MIGRATION = True
URL_LOG_SHORTEN_LENGTH = 30
SHOW_DEPRECATION_WARNINGS = True
SCREENSHOT_HEIGHT_TRESHOLD = 10000
-PAGE_TIMEOUT=60000
-DOWNLOAD_PAGE_TIMEOUT=60000
+PAGE_TIMEOUT = 60000
+DOWNLOAD_PAGE_TIMEOUT = 60000

# Global user settings with descriptions and default values
USER_SETTINGS = {
    "DEFAULT_LLM_PROVIDER": {
        "default": "openai/gpt-4o",
        "description": "Default LLM provider in 'company/model' format (e.g., 'openai/gpt-4o', 'anthropic/claude-3-sonnet')",
        "type": "string"
    },
    "DEFAULT_LLM_PROVIDER_TOKEN": {
        "default": "",
        "description": "API token for the default LLM provider",
        "type": "string",
        "secret": True
    },
    "VERBOSE": {
        "default": False,
        "description": "Enable verbose output for all commands",
        "type": "boolean"
    },
    "BROWSER_HEADLESS": {
        "default": True,
        "description": "Run browser in headless mode by default",
        "type": "boolean"
    },
    "BROWSER_TYPE": {
        "default": "chromium",
        "description": "Default browser type (chromium or firefox)",
        "type": "string",
        "options": ["chromium", "firefox"]
    },
    "CACHE_MODE": {
        "default": "bypass",
        "description": "Default cache mode (bypass, use, or refresh)",
        "type": "string",
        "options": ["bypass", "use", "refresh"]
    },
    "USER_AGENT_MODE": {
        "default": "default",
        "description": "Default user agent mode (default, random, or mobile)",
        "type": "string",
        "options": ["default", "random", "mobile"]
    }
}
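Two details in this config are worth illustrating: per-model tokens are looked up exactly in PROVIDER_MODELS, while PROVIDER_MODELS_PREFIXES lets any "provider/model" string fall back to a token by its company prefix. A hedged sketch of that resolution order follows; resolve_api_token is a hypothetical helper, not part of this diff:

    # Hypothetical helper showing the intended lookup order.
    def resolve_api_token(provider: str):
        # Exact match first, e.g. "openai/gpt-4o".
        if provider in PROVIDER_MODELS:
            return PROVIDER_MODELS[provider]
        # Fall back to the company prefix, e.g. "openai/<any-model>".
        prefix = provider.split("/", 1)[0]
        return PROVIDER_MODELS_PREFIXES.get(prefix)

    assert resolve_api_token("ollama/llama3") == "no-token-needed"
    # A model missing from PROVIDER_MODELS still resolves via its prefix.
    token = resolve_api_token("openai/some-newer-model")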
File diff suppressed because it is too large
File diff suppressed because it is too large
0  crawl4ai/crawlers/amazon_product/__init__.py  (normal file)
20  crawl4ai/crawlers/amazon_product/crawler.py  (normal file)
@@ -0,0 +1,20 @@
from crawl4ai.hub import BaseCrawler
import json  # needed for json.dumps in the error path below

__meta__ = {
    "version": "1.2.0",
    "tested_on": ["amazon.com"],
    "rate_limit": "50 RPM",
    "schema": {"product": ["name", "price"]}
}

class AmazonProductCrawler(BaseCrawler):
    async def run(self, url: str, **kwargs) -> str:
        try:
            self.logger.info(f"Crawling {url}")
            return '{"product": {"name": "Test Amazon Product"}}'
        except Exception as e:
            self.logger.error(f"Crawl failed: {str(e)}")
            return json.dumps({
                "error": str(e),
                "metadata": self.meta  # Include meta in error response
            })
0  crawl4ai/crawlers/google_search/__init__.py  (normal file)
131  crawl4ai/crawlers/google_search/crawler.py  (normal file)
@@ -0,0 +1,131 @@
from crawl4ai import BrowserConfig, AsyncWebCrawler, CrawlerRunConfig, CacheMode
from crawl4ai.hub import BaseCrawler
from crawl4ai.utils import optimize_html, get_home_folder, preprocess_html_for_schema
from crawl4ai import JsonCssExtractionStrategy
from pathlib import Path
import json
import os
from typing import Dict


class GoogleSearchCrawler(BaseCrawler):
    __meta__ = {
        "version": "1.0.0",
        "tested_on": ["google.com/search*"],
        "rate_limit": "10 RPM",
        "description": "Crawls Google Search results (text + images)",
    }

    def __init__(self):
        super().__init__()
        self.js_script = (Path(__file__).parent / "script.js").read_text()

    async def run(self, url="", query: str = "", search_type: str = "text", schema_cache_path=None, **kwargs) -> str:
        """Crawl Google Search results for a query"""
        url = f"https://www.google.com/search?q={query}&gl=sg&hl=en" if search_type == "text" else f"https://www.google.com/search?q={query}&gl=sg&hl=en&tbs=qdr:d&udm=2"
        if kwargs.get("page_start", 1) > 1:
            url = f"{url}&start={kwargs['page_start'] * 10}"
        if kwargs.get("page_length", 1) > 1:
            url = f"{url}&num={kwargs['page_length']}"

        browser_config = BrowserConfig(headless=True, verbose=True)
        async with AsyncWebCrawler(config=browser_config) as crawler:
            config = CrawlerRunConfig(
                cache_mode=kwargs.get("cache_mode", CacheMode.BYPASS),
                keep_attrs=["id", "class"],
                keep_data_attributes=True,
                delay_before_return_html=kwargs.get(
                    "delay", 2 if search_type == "image" else 1),
                js_code=self.js_script if search_type == "image" else None,
            )

            result = await crawler.arun(url=url, config=config)
            if not result.success:
                return json.dumps({"error": result.error})

            if search_type == "image":
                if result.js_execution_result.get("success", False) is False:
                    return json.dumps({"error": result.js_execution_result.get("error", "Unknown error")})
                if "results" in result.js_execution_result:
                    image_result = result.js_execution_result['results'][0]
                    if image_result.get("success", False) is False:
                        return json.dumps({"error": image_result.get("error", "Unknown error")})
                    return json.dumps(image_result["result"], indent=4)

            # For text search, extract structured data
            schemas = await self._build_schemas(result.cleaned_html, schema_cache_path)
            extracted = {
                key: JsonCssExtractionStrategy(schema=schemas[key]).run(
                    url=url, sections=[result.html]
                )
                for key in schemas
            }
            return json.dumps(extracted, indent=4)

    async def _build_schemas(self, html: str, schema_cache_path: str = None) -> Dict[str, Dict]:
        """Build extraction schemas (organic, top stories, etc.)"""
        home_dir = get_home_folder() if not schema_cache_path else schema_cache_path
        os.makedirs(f"{home_dir}/schema", exist_ok=True)

        # cleaned_html = optimize_html(html, threshold=100)
        cleaned_html = preprocess_html_for_schema(html)

        organic_schema = None
        if os.path.exists(f"{home_dir}/schema/organic_schema.json"):
            with open(f"{home_dir}/schema/organic_schema.json", "r") as f:
                organic_schema = json.load(f)
        else:
            organic_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
                    "title": "...",
                    "link": "...",
                    "snippet": "...",
                    "date": "1 hour ago",
                }""",
                query="""The given html is the crawled html from Google search result. Please find the schema for organic search item in the given html; I am interested in title, link, snippet text, and date."""
            )

            with open(f"{home_dir}/schema/organic_schema.json", "w") as f:
                f.write(json.dumps(organic_schema))

        top_stories_schema = None
        if os.path.exists(f"{home_dir}/schema/top_stories_schema.json"):
            with open(f"{home_dir}/schema/top_stories_schema.json", "r") as f:
                top_stories_schema = json.load(f)
        else:
            top_stories_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
                    "title": "...",
                    "link": "...",
                    "source": "Insider Monkey",
                    "date": "1 hour ago",
                }""",
                query="""The given html is the crawled html from Google search result. Please find the schema for Top Story item in the given html; I am interested in title, link, source, date and imageUrl."""
            )

            with open(f"{home_dir}/schema/top_stories_schema.json", "w") as f:
                f.write(json.dumps(top_stories_schema))

        suggested_query_schema = None
        if os.path.exists(f"{home_dir}/schema/suggested_query_schema.json"):
            with open(f"{home_dir}/schema/suggested_query_schema.json", "r") as f:
                suggested_query_schema = json.load(f)
        else:
            suggested_query_schema = JsonCssExtractionStrategy.generate_schema(
                html=cleaned_html,
                target_json_example="""{
                    "query": "A for Apple",
                }""",
                query="""The given HTML contains the crawled HTML from Google search results. Please find the schema for each suggested query in the section "People also search for" within the given HTML. I am interested in the queries only."""
            )
            with open(f"{home_dir}/schema/suggested_query_schema.json", "w") as f:
                f.write(json.dumps(suggested_query_schema))

        return {
            "organic_schema": organic_schema,
            "top_stories_schema": top_stories_schema,
            "suggested_query_schema": suggested_query_schema,
        }
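A quick usage sketch for the new GoogleSearchCrawler; this assumes it can be instantiated directly rather than through the hub registry, and the keyword arguments are read off the run() signature above:

    # Hypothetical invocation of GoogleSearchCrawler.run().
    import asyncio
    from crawl4ai.crawlers.google_search.crawler import GoogleSearchCrawler

    async def main():
        crawler = GoogleSearchCrawler()

        # Text search: returns a JSON string with organic results,
        # top stories, and suggested queries.
        raw = await crawler.run(query="web crawling frameworks", search_type="text")
        print(raw[:500])

        # Image search: runs script.js in the page and returns image metadata.
        raw_images = await crawler.run(query="web crawling", search_type="image")
        print(raw_images[:500])

    asyncio.run(main())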
115  crawl4ai/crawlers/google_search/script.js  (normal file)
@@ -0,0 +1,115 @@
(() => {
    // Function to extract image data from Google Images page
    function extractImageData() {
        const keys = Object.keys(window.W_jd);
        let allImageData = [];
        let currentPosition = 0;

        // Get the symbol we'll use (from first valid entry)
        let targetSymbol;
        for (let key of keys) {
            try {
                const symbols = Object.getOwnPropertySymbols(window.W_jd[key]);
                if (symbols.length > 0) {
                    targetSymbol = symbols[0];
                    break;
                }
            } catch (e) {
                continue;
            }
        }

        if (!targetSymbol) return [];

        // Iterate through ALL keys
        for (let key of keys) {
            try {
                const o1 = window.W_jd[key][targetSymbol];
                if (!o1) continue;
                const data = Object.values(o1)[0];
                // const data = window.W_jd[key][targetSymbol]?.Ws;
                // Check if this is a valid image data entry
                if (data && Array.isArray(data[1])) {
                    const processedData = processImageEntry(data, currentPosition);
                    if (processedData) {
                        allImageData.push(processedData);
                        currentPosition++;
                    }
                }
            } catch (e) {
                continue;
            }
        }

        return allImageData;
    }

    function processImageEntry(entry, position) {
        const imageData = entry[1];
        if (!Array.isArray(imageData)) return null;

        // Extract the image ID
        const imageId = imageData[1];
        if (!imageId) return null;

        // Find the corresponding DOM element
        const domElement = document.querySelector(`[data-docid="${imageId}"]`);
        if (!domElement) return null;

        // Extract data from the array structure
        const [
            _,
            id,
            thumbnailInfo,
            imageInfo,
            __,
            ___,
            rgb,
            ____,
            _____,
            metadata
        ] = imageData;

        // Ensure we have the required data
        if (!thumbnailInfo || !imageInfo) return null;

        // Extract metadata from DOM
        const title = domElement?.querySelector('.toI8Rb')?.textContent?.trim();
        const source = domElement?.querySelector('.guK3rf')?.textContent?.trim();
        const link = domElement?.querySelector('a.EZAeBe')?.href;

        if (!link) return null;

        // Build Google Image URL
        const googleUrl = buildGoogleImageUrl(imageInfo[0], link, imageId, imageInfo[1], imageInfo[2]);

        return {
            title,
            imageUrl: imageInfo[0],
            imageWidth: imageInfo[2],
            imageHeight: imageInfo[1],
            thumbnailUrl: thumbnailInfo[0],
            thumbnailWidth: thumbnailInfo[2],
            thumbnailHeight: thumbnailInfo[1],
            source,
            domain: metadata['2000']?.[1] || new URL(link).hostname,
            link,
            googleUrl,
            position: position + 1
        };
    }

    function buildGoogleImageUrl(imgUrl, refUrl, tbnid, height, width) {
        const params = new URLSearchParams({
            imgurl: imgUrl,
            tbnid: tbnid,
            imgrefurl: refUrl,
            docid: tbnid,
            w: width.toString(),
            h: height.toString(),
        });

        return `https://www.google.com/imgres?${params.toString()}`;
    }

    return extractImageData();
})();
47  crawl4ai/deep_crawling/__init__.py  (normal file)
@@ -0,0 +1,47 @@
# deep_crawling/__init__.py
from .base_strategy import DeepCrawlDecorator, DeepCrawlStrategy
from .bfs_strategy import BFSDeepCrawlStrategy
from .bff_strategy import BestFirstCrawlingStrategy
from .dfs_strategy import DFSDeepCrawlStrategy
from .filters import (
    FilterChain,
    ContentTypeFilter,
    DomainFilter,
    URLFilter,
    URLPatternFilter,
    FilterStats,
    ContentRelevanceFilter,
    SEOFilter
)
from .scorers import (
    KeywordRelevanceScorer,
    URLScorer,
    CompositeScorer,
    DomainAuthorityScorer,
    FreshnessScorer,
    PathDepthScorer,
    ContentTypeScorer
)

__all__ = [
    "DeepCrawlDecorator",
    "DeepCrawlStrategy",
    "BFSDeepCrawlStrategy",
    "BestFirstCrawlingStrategy",
    "DFSDeepCrawlStrategy",
    "FilterChain",
    "ContentTypeFilter",
    "DomainFilter",
    "URLFilter",
    "URLPatternFilter",
    "FilterStats",
    "ContentRelevanceFilter",
    "SEOFilter",
    "KeywordRelevanceScorer",
    "URLScorer",
    "CompositeScorer",
    "DomainAuthorityScorer",
    "FreshnessScorer",
    "PathDepthScorer",
    "ContentTypeScorer",
]
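To show how these exports are meant to compose, here is a hedged sketch; the constructor arguments for DomainFilter, URLPatternFilter, and KeywordRelevanceScorer are assumptions, since only BestFirstCrawlingStrategy's signature is visible in this diff:

    # Hypothetical composition of filters and a scorer for a deep crawl.
    from crawl4ai.deep_crawling import (
        BestFirstCrawlingStrategy,
        FilterChain,
        DomainFilter,
        URLPatternFilter,
        KeywordRelevanceScorer,
    )

    strategy = BestFirstCrawlingStrategy(
        max_depth=2,
        filter_chain=FilterChain([
            DomainFilter(allowed_domains=["example.com"]),  # assumed kwarg
            URLPatternFilter(patterns=["*/docs/*"]),        # assumed kwarg
        ]),
        url_scorer=KeywordRelevanceScorer(keywords=["crawler", "async"]),  # assumed kwarg
        max_pages=50,
    )
    # The strategy is then passed via CrawlerRunConfig(deep_crawl_strategy=strategy),
    # which the DeepCrawlDecorator in the next file picks up on crawler.arun(...).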
159  crawl4ai/deep_crawling/base_strategy.py  (normal file)
@@ -0,0 +1,159 @@
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import AsyncGenerator, Optional, Set, List, Dict
from functools import wraps
from contextvars import ContextVar
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn


class DeepCrawlDecorator:
    """Decorator that adds deep crawling capability to the arun method."""
    deep_crawl_active = ContextVar("deep_crawl_active", default=False)

    def __init__(self, crawler: AsyncWebCrawler):
        self.crawler = crawler

    def __call__(self, original_arun):
        @wraps(original_arun)
        async def wrapped_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
            # If deep crawling is already active, call the original method to avoid recursion.
            if config and config.deep_crawl_strategy and not self.deep_crawl_active.get():
                token = self.deep_crawl_active.set(True)
                # Await the arun call to get the actual result object.
                result_obj = await config.deep_crawl_strategy.arun(
                    crawler=self.crawler,
                    start_url=url,
                    config=config
                )
                if config.stream:
                    async def result_wrapper():
                        try:
                            async for result in result_obj:
                                yield result
                        finally:
                            self.deep_crawl_active.reset(token)
                    return result_wrapper()
                else:
                    try:
                        return result_obj
                    finally:
                        self.deep_crawl_active.reset(token)
            return await original_arun(url, config=config, **kwargs)
        return wrapped_arun


class DeepCrawlStrategy(ABC):
    """
    Abstract base class for deep crawling strategies.

    Core functions:
    - arun: Main entry point that returns an async generator of CrawlResults.
    - shutdown: Clean up resources.
    - can_process_url: Validate a URL and decide whether to process it.
    - _process_links: Extract and process links from a CrawlResult.
    """

    @abstractmethod
    async def _arun_batch(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> List[CrawlResult]:
        """
        Batch (non-streaming) mode:
        Processes one BFS level at a time, then returns all the results.
        """
        pass

    @abstractmethod
    async def _arun_stream(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: CrawlerRunConfig,
    ) -> AsyncGenerator[CrawlResult, None]:
        """
        Streaming mode:
        Processes one BFS level at a time and yields results immediately as they arrive.
        """
        pass

    async def arun(
        self,
        start_url: str,
        crawler: AsyncWebCrawler,
        config: Optional[CrawlerRunConfig] = None,
    ) -> RunManyReturn:
        """
        Traverse the given URL using the specified crawler.

        Args:
            start_url (str): The URL from which to start crawling.
            crawler (AsyncWebCrawler): The crawler instance to use.
            config (Optional[CrawlerRunConfig]): Crawler configuration.

        Returns:
            Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
        """
        if config is None:
            raise ValueError("CrawlerRunConfig must be provided")

        if config.stream:
            return self._arun_stream(start_url, crawler, config)
        else:
            return await self._arun_batch(start_url, crawler, config)

    def __call__(self, start_url: str, crawler: AsyncWebCrawler, config: CrawlerRunConfig):
        return self.arun(start_url, crawler, config)

    @abstractmethod
    async def shutdown(self) -> None:
        """
        Clean up resources used by the deep crawl strategy.
        """
        pass

    @abstractmethod
    async def can_process_url(self, url: str, depth: int) -> bool:
        """
        Validate the URL format and apply custom filtering logic.

        Args:
            url (str): The URL to validate.
            depth (int): The current depth in the crawl.

        Returns:
            bool: True if the URL should be processed, False otherwise.
        """
        pass

    @abstractmethod
    async def link_discovery(
        self,
        result: CrawlResult,
        source_url: str,
        current_depth: int,
        visited: Set[str],
        next_level: List[tuple],
        depths: Dict[str, int],
    ) -> None:
        """
        Extract and process links from the given crawl result.

        This method should:
        - Validate each extracted URL using can_process_url.
        - Optionally score URLs.
        - Append valid URLs (and their parent references) to the next_level list.
        - Update the depths dictionary with the new depth for each URL.

        Args:
            result (CrawlResult): The result from a crawl operation.
            source_url (str): The URL from which this result was obtained.
            current_depth (int): The depth at which the source URL was processed.
            visited (Set[str]): Set of already visited URLs.
            next_level (List[tuple]): List of tuples (url, parent_url) for the next BFS level.
            depths (Dict[str, int]): Mapping of URLs to their current depth.
        """
        pass
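The stream/batch split above means callers get either an awaited list or an async generator from the same entry point. A hedged usage sketch follows; the deep_crawl_strategy wiring is taken from the decorator above, while the BFSDeepCrawlStrategy constructor and result fields are assumptions:

    # Hypothetical: consuming a deep crawl in both modes via crawler.arun().
    import asyncio
    from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
    from crawl4ai.deep_crawling import BFSDeepCrawlStrategy

    async def main():
        async with AsyncWebCrawler() as crawler:
            # Batch mode: arun returns a list once the whole traversal finishes.
            batch_cfg = CrawlerRunConfig(
                deep_crawl_strategy=BFSDeepCrawlStrategy(max_depth=1),
                stream=False,
            )
            results = await crawler.arun("https://example.com", config=batch_cfg)
            print(f"crawled {len(results)} pages")

            # Stream mode: arun returns an async generator; results arrive per level.
            stream_cfg = CrawlerRunConfig(
                deep_crawl_strategy=BFSDeepCrawlStrategy(max_depth=1),
                stream=True,
            )
            async for result in await crawler.arun("https://example.com", config=stream_cfg):
                print(result.url)

    asyncio.run(main())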
271  crawl4ai/deep_crawling/bff_strategy.py  (normal file)
@@ -0,0 +1,271 @@
|
||||
# best_first_crawling_strategy.py
|
||||
import asyncio
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from ..models import TraversalStats
|
||||
from .filters import FilterChain
|
||||
from .scorers import URLScorer
|
||||
from . import DeepCrawlStrategy
|
||||
|
||||
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult, RunManyReturn
|
||||
from ..utils import normalize_url_for_deep_crawl
|
||||
|
||||
from math import inf as infinity
|
||||
|
||||
# Configurable batch size for processing items from the priority queue
|
||||
BATCH_SIZE = 10
|
||||
|
||||
|
||||
class BestFirstCrawlingStrategy(DeepCrawlStrategy):
|
||||
"""
|
||||
Best-First Crawling Strategy using a priority queue.
|
||||
|
||||
This strategy prioritizes URLs based on their score, ensuring that higher-value
|
||||
pages are crawled first. It reimplements the core traversal loop to use a priority
|
||||
queue while keeping URL validation and link discovery consistent with our design.
|
||||
|
||||
Core methods:
|
||||
- arun: Returns either a list (batch mode) or an async generator (stream mode).
|
||||
- _arun_best_first: Core generator that uses a priority queue to yield CrawlResults.
|
||||
- can_process_url: Validates URLs and applies filtering (inherited behavior).
|
||||
- link_discovery: Extracts and validates links from a CrawlResult.
|
||||
"""
|
||||
def __init__(
|
||||
self,
|
||||
max_depth: int,
|
||||
filter_chain: FilterChain = FilterChain(),
|
||||
url_scorer: Optional[URLScorer] = None,
|
||||
include_external: bool = False,
|
||||
max_pages: int = infinity,
|
||||
logger: Optional[logging.Logger] = None,
|
||||
):
|
||||
self.max_depth = max_depth
|
||||
self.filter_chain = filter_chain
|
||||
self.url_scorer = url_scorer
|
||||
self.include_external = include_external
|
||||
self.max_pages = max_pages
|
||||
# Ensure logger is always a Logger instance, not a dict from serialization
|
||||
if isinstance(logger, logging.Logger):
|
||||
self.logger = logger
|
||||
else:
|
||||
# Create a new logger if logger is None, dict, or any other non-Logger type
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.stats = TraversalStats(start_time=datetime.now())
|
||||
self._cancel_event = asyncio.Event()
|
||||
self._pages_crawled = 0
|
||||
|
||||
async def can_process_url(self, url: str, depth: int) -> bool:
|
||||
"""
|
||||
Validate the URL format and apply filtering.
|
||||
For the starting URL (depth 0), filtering is bypassed.
|
||||
"""
|
||||
try:
|
||||
parsed = urlparse(url)
|
||||
if not parsed.scheme or not parsed.netloc:
|
||||
raise ValueError("Missing scheme or netloc")
|
||||
if parsed.scheme not in ("http", "https"):
|
||||
raise ValueError("Invalid scheme")
|
||||
if "." not in parsed.netloc:
|
||||
raise ValueError("Invalid domain")
|
||||
except Exception as e:
|
||||
self.logger.warning(f"Invalid URL: {url}, error: {e}")
|
||||
return False
|
||||
|
||||
if depth != 0 and not await self.filter_chain.apply(url):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
async def link_discovery(
|
||||
self,
|
||||
result: CrawlResult,
|
||||
source_url: str,
|
||||
current_depth: int,
|
||||
visited: Set[str],
|
||||
next_links: List[Tuple[str, Optional[str]]],
|
||||
depths: Dict[str, int],
|
||||
) -> None:
|
||||
"""
|
||||
Extract links from the crawl result, validate them, and append new URLs
|
||||
(with their parent references) to next_links.
|
||||
Also updates the depths dictionary.
|
||||
"""
|
||||
new_depth = current_depth + 1
|
||||
if new_depth > self.max_depth:
|
||||
return
|
||||
|
||||
# If we've reached the max pages limit, don't discover new links
|
||||
remaining_capacity = self.max_pages - self._pages_crawled
|
||||
if remaining_capacity <= 0:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
|
||||
return
|
||||
|
||||
# Retrieve internal links; include external links if enabled.
|
||||
links = result.links.get("internal", [])
|
||||
if self.include_external:
|
||||
links += result.links.get("external", [])
|
||||
|
||||
# Collect the valid, not-yet-visited links
|
||||
valid_links = []
|
||||
for link in links:
|
||||
url = link.get("href")
|
||||
base_url = normalize_url_for_deep_crawl(url, source_url)
|
||||
if base_url in visited:
|
||||
continue
|
||||
if not await self.can_process_url(url, new_depth):
|
||||
self.stats.urls_skipped += 1
|
||||
continue
|
||||
|
||||
valid_links.append(base_url)
|
||||
|
||||
# Record the new depths and add to next_links
|
||||
for url in valid_links:
|
||||
depths[url] = new_depth
|
||||
next_links.append((url, source_url))
|
||||
|
||||
async def _arun_best_first(
|
||||
self,
|
||||
start_url: str,
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig,
|
||||
) -> AsyncGenerator[CrawlResult, None]:
|
||||
"""
|
||||
Core best-first crawl method using a priority queue.
|
||||
|
||||
The queue items are tuples of (score, depth, url, parent_url). Lower scores
|
||||
are treated as higher priority. URLs are processed in batches for efficiency.
|
||||
"""
|
||||
queue: asyncio.PriorityQueue = asyncio.PriorityQueue()
|
||||
# Push the initial URL with score 0 and depth 0.
|
||||
initial_score = self.url_scorer.score(start_url) if self.url_scorer else 0
|
||||
await queue.put((-initial_score, 0, start_url, None))
|
||||
visited: Set[str] = set()
|
||||
depths: Dict[str, int] = {start_url: 0}
|
||||
|
||||
while not queue.empty() and not self._cancel_event.is_set():
|
||||
# Stop if we've reached the max pages limit
|
||||
if self._pages_crawled >= self.max_pages:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
|
||||
break
|
||||
|
||||
# Calculate how many more URLs we can process in this batch
|
||||
remaining = self.max_pages - self._pages_crawled
|
||||
batch_size = min(BATCH_SIZE, remaining)
|
||||
if batch_size <= 0:
|
||||
# No more pages to crawl
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
|
||||
break
|
||||
|
||||
batch: List[Tuple[float, int, str, Optional[str]]] = []
|
||||
# Retrieve up to batch_size items (capacity-limited) from the priority queue.
for _ in range(batch_size):
|
||||
if queue.empty():
|
||||
break
|
||||
item = await queue.get()
|
||||
score, depth, url, parent_url = item
|
||||
if url in visited:
|
||||
continue
|
||||
visited.add(url)
|
||||
batch.append(item)
|
||||
|
||||
if not batch:
|
||||
continue
|
||||
|
||||
# Process the current batch of URLs.
|
||||
urls = [item[2] for item in batch]
|
||||
batch_config = config.clone(deep_crawl_strategy=None, stream=True)
|
||||
stream_gen = await crawler.arun_many(urls=urls, config=batch_config)
|
||||
async for result in stream_gen:
|
||||
result_url = result.url
|
||||
# Find the corresponding tuple from the batch.
|
||||
corresponding = next((item for item in batch if item[2] == result_url), None)
|
||||
if not corresponding:
|
||||
continue
|
||||
score, depth, url, parent_url = corresponding
|
||||
result.metadata = result.metadata or {}
|
||||
result.metadata["depth"] = depth
|
||||
result.metadata["parent_url"] = parent_url
|
||||
result.metadata["score"] = -score
|
||||
|
||||
# Count only successful crawls toward max_pages limit
|
||||
if result.success:
|
||||
self._pages_crawled += 1
|
||||
# Check if we've reached the limit during batch processing
|
||||
if self._pages_crawled >= self.max_pages:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
|
||||
break # Exit the generator
|
||||
|
||||
yield result
|
||||
|
||||
# Only discover links from successful crawls
|
||||
if result.success:
|
||||
# Discover new links from this result
|
||||
new_links: List[Tuple[str, Optional[str]]] = []
|
||||
await self.link_discovery(result, result_url, depth, visited, new_links, depths)
|
||||
|
||||
for new_url, new_parent in new_links:
|
||||
new_depth = depths.get(new_url, depth + 1)
|
||||
new_score = self.url_scorer.score(new_url) if self.url_scorer else 0
|
||||
await queue.put((-new_score, new_depth, new_url, new_parent))
|
||||
|
||||
# End of crawl.
|
||||
|
||||
async def _arun_batch(
|
||||
self,
|
||||
start_url: str,
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig,
|
||||
) -> List[CrawlResult]:
|
||||
"""
|
||||
Best-first crawl in batch mode.
|
||||
|
||||
Aggregates all CrawlResults into a list.
|
||||
"""
|
||||
results: List[CrawlResult] = []
|
||||
async for result in self._arun_best_first(start_url, crawler, config):
|
||||
results.append(result)
|
||||
return results
|
||||
|
||||
async def _arun_stream(
|
||||
self,
|
||||
start_url: str,
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig,
|
||||
) -> AsyncGenerator[CrawlResult, None]:
|
||||
"""
|
||||
Best-first crawl in streaming mode.
|
||||
|
||||
Yields CrawlResults as they become available.
|
||||
"""
|
||||
async for result in self._arun_best_first(start_url, crawler, config):
|
||||
yield result
|
||||
|
||||
async def arun(
|
||||
self,
|
||||
start_url: str,
|
||||
crawler: AsyncWebCrawler,
|
||||
config: Optional[CrawlerRunConfig] = None,
|
||||
) -> "RunManyReturn":
|
||||
"""
|
||||
Main entry point for best-first crawling.
|
||||
|
||||
Returns either a list (batch mode) or an async generator (stream mode)
|
||||
of CrawlResults.
|
||||
"""
|
||||
if config is None:
|
||||
raise ValueError("CrawlerRunConfig must be provided")
|
||||
if config.stream:
|
||||
return self._arun_stream(start_url, crawler, config)
|
||||
else:
|
||||
return await self._arun_batch(start_url, crawler, config)
|
||||
|
||||
async def shutdown(self) -> None:
|
||||
"""
|
||||
Signal cancellation and clean up resources.
|
||||
"""
|
||||
self._cancel_event.set()
|
||||
self.stats.end_time = datetime.now()
|
||||
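The -initial_score / -new_score negation above is what turns asyncio.PriorityQueue's min-heap ordering into best-first behavior: higher scorer outputs become smaller tuple keys and pop first. A standalone sketch of that ordering (scores and URLs are illustrative):

import asyncio

async def demo():
    q: asyncio.PriorityQueue = asyncio.PriorityQueue()
    for score, url in [(0.2, "https://a.example"), (0.9, "https://b.example")]:
        await q.put((-score, url))  # negate so the highest score pops first
    while not q.empty():
        neg_score, url = await q.get()
        print(url, -neg_score)  # b.example (0.9) pops before a.example (0.2)

asyncio.run(demo())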
260 crawl4ai/deep_crawling/bfs_strategy.py (new file)
@@ -0,0 +1,260 @@
|
||||
# bfs_deep_crawl_strategy.py
|
||||
import asyncio
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from ..models import TraversalStats
|
||||
from .filters import FilterChain
|
||||
from .scorers import URLScorer
|
||||
from . import DeepCrawlStrategy
|
||||
from ..types import AsyncWebCrawler, CrawlerRunConfig, CrawlResult
|
||||
from ..utils import normalize_url_for_deep_crawl
|
||||
from math import inf as infinity
|
||||
|
||||
class BFSDeepCrawlStrategy(DeepCrawlStrategy):
|
||||
"""
|
||||
Breadth-First Search deep crawling strategy.
|
||||
|
||||
Core functions:
|
||||
- arun: Main entry point; splits execution into batch or stream modes.
|
||||
- link_discovery: Extracts, filters, and (if needed) scores the outgoing URLs.
|
||||
- can_process_url: Validates URL format and applies the filter chain.
|
||||
"""
|
||||
def __init__(
|
||||
self,
|
||||
max_depth: int,
|
||||
filter_chain: FilterChain = FilterChain(),
|
||||
url_scorer: Optional[URLScorer] = None,
|
||||
include_external: bool = False,
|
||||
score_threshold: float = -infinity,
|
||||
max_pages: int = infinity,
|
||||
logger: Optional[logging.Logger] = None,
|
||||
):
|
||||
self.max_depth = max_depth
|
||||
self.filter_chain = filter_chain
|
||||
self.url_scorer = url_scorer
|
||||
self.include_external = include_external
|
||||
self.score_threshold = score_threshold
|
||||
self.max_pages = max_pages
|
||||
# Ensure logger is always a Logger instance, not a dict from serialization
|
||||
if isinstance(logger, logging.Logger):
|
||||
self.logger = logger
|
||||
else:
|
||||
# Create a new logger if logger is None, dict, or any other non-Logger type
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.stats = TraversalStats(start_time=datetime.now())
|
||||
self._cancel_event = asyncio.Event()
|
||||
self._pages_crawled = 0
|
||||
|
||||
async def can_process_url(self, url: str, depth: int) -> bool:
|
||||
"""
|
||||
Validates the URL and applies the filter chain.
|
||||
For the start URL (depth 0) filtering is bypassed.
|
||||
"""
|
||||
try:
|
||||
parsed = urlparse(url)
|
||||
if not parsed.scheme or not parsed.netloc:
|
||||
raise ValueError("Missing scheme or netloc")
|
||||
if parsed.scheme not in ("http", "https"):
|
||||
raise ValueError("Invalid scheme")
|
||||
if "." not in parsed.netloc:
|
||||
raise ValueError("Invalid domain")
|
||||
except Exception as e:
|
||||
self.logger.warning(f"Invalid URL: {url}, error: {e}")
|
||||
return False
|
||||
|
||||
if depth != 0 and not await self.filter_chain.apply(url):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
async def link_discovery(
|
||||
self,
|
||||
result: CrawlResult,
|
||||
source_url: str,
|
||||
current_depth: int,
|
||||
visited: Set[str],
|
||||
next_level: List[Tuple[str, Optional[str]]],
|
||||
depths: Dict[str, int],
|
||||
) -> None:
|
||||
"""
|
||||
Extracts links from the crawl result, validates and scores them, and
|
||||
prepares the next level of URLs.
|
||||
Each valid URL is appended to next_level as a tuple (url, parent_url)
|
||||
and its depth is tracked.
|
||||
"""
|
||||
next_depth = current_depth + 1
|
||||
if next_depth > self.max_depth:
|
||||
return
|
||||
|
||||
# If we've reached the max pages limit, don't discover new links
|
||||
remaining_capacity = self.max_pages - self._pages_crawled
|
||||
if remaining_capacity <= 0:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping link discovery")
|
||||
return
|
||||
|
||||
# Get internal links and, if enabled, external links.
|
||||
links = result.links.get("internal", [])
|
||||
if self.include_external:
|
||||
links += result.links.get("external", [])
|
||||
|
||||
valid_links = []
|
||||
|
||||
# First collect all valid links
|
||||
for link in links:
|
||||
url = link.get("href")
|
||||
# Normalize the URL (strips fragments) to avoid duplicate crawling
|
||||
base_url = normalize_url_for_deep_crawl(url, source_url)
|
||||
if base_url in visited:
|
||||
continue
|
||||
if not await self.can_process_url(url, next_depth):
|
||||
self.stats.urls_skipped += 1
|
||||
continue
|
||||
|
||||
# Score the URL if a scorer is provided
|
||||
score = self.url_scorer.score(base_url) if self.url_scorer else 0
|
||||
|
||||
# Skip URLs with scores below the threshold
|
||||
if score < self.score_threshold:
|
||||
self.logger.debug(f"URL {url} skipped: score {score} below threshold {self.score_threshold}")
|
||||
self.stats.urls_skipped += 1
|
||||
continue
|
||||
|
||||
visited.add(base_url)
|
||||
valid_links.append((base_url, score))
|
||||
|
||||
# If we have more valid links than capacity, sort by score and take the top ones
|
||||
if len(valid_links) > remaining_capacity:
|
||||
if self.url_scorer:
|
||||
# Sort by score in descending order
|
||||
valid_links.sort(key=lambda x: x[1], reverse=True)
|
||||
# Take only as many as we have capacity for
|
||||
valid_links = valid_links[:remaining_capacity]
|
||||
self.logger.info(f"Limiting to {remaining_capacity} URLs due to max_pages limit")
|
||||
|
||||
# Process the final selected links
|
||||
for url, score in valid_links:
|
||||
# attach the score to metadata if needed
|
||||
if score:
|
||||
result.metadata = result.metadata or {}
|
||||
result.metadata["score"] = score
|
||||
next_level.append((url, source_url))
|
||||
depths[url] = next_depth
|
||||
|
||||
async def _arun_batch(
|
||||
self,
|
||||
start_url: str,
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig,
|
||||
) -> List[CrawlResult]:
|
||||
"""
|
||||
Batch (non-streaming) mode:
|
||||
Processes one BFS level at a time, then yields all the results.
|
||||
"""
|
||||
visited: Set[str] = set()
|
||||
# current_level holds tuples: (url, parent_url)
|
||||
current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
|
||||
depths: Dict[str, int] = {start_url: 0}
|
||||
|
||||
results: List[CrawlResult] = []
|
||||
|
||||
while current_level and not self._cancel_event.is_set():
|
||||
# Check if we've already reached max_pages before starting a new level
|
||||
if self._pages_crawled >= self.max_pages:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached, stopping crawl")
|
||||
break
|
||||
|
||||
next_level: List[Tuple[str, Optional[str]]] = []
|
||||
urls = [url for url, _ in current_level]
|
||||
|
||||
# Clone the config to disable deep crawling recursion and enforce batch mode.
|
||||
batch_config = config.clone(deep_crawl_strategy=None, stream=False)
|
||||
batch_results = await crawler.arun_many(urls=urls, config=batch_config)
|
||||
|
||||
# Update pages crawled counter - count only successful crawls
|
||||
successful_results = [r for r in batch_results if r.success]
|
||||
self._pages_crawled += len(successful_results)
|
||||
|
||||
for result in batch_results:
|
||||
url = result.url
|
||||
depth = depths.get(url, 0)
|
||||
result.metadata = result.metadata or {}
|
||||
result.metadata["depth"] = depth
|
||||
parent_url = next((parent for (u, parent) in current_level if u == url), None)
|
||||
result.metadata["parent_url"] = parent_url
|
||||
results.append(result)
|
||||
|
||||
# Only discover links from successful crawls
|
||||
if result.success:
|
||||
# Link discovery will handle the max pages limit internally
|
||||
await self.link_discovery(result, url, depth, visited, next_level, depths)
|
||||
|
||||
current_level = next_level
|
||||
|
||||
return results
|
||||
|
||||
async def _arun_stream(
|
||||
self,
|
||||
start_url: str,
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig,
|
||||
) -> AsyncGenerator[CrawlResult, None]:
|
||||
"""
|
||||
Streaming mode:
|
||||
Processes one BFS level at a time and yields results immediately as they arrive.
|
||||
"""
|
||||
visited: Set[str] = set()
|
||||
current_level: List[Tuple[str, Optional[str]]] = [(start_url, None)]
|
||||
depths: Dict[str, int] = {start_url: 0}
|
||||
|
||||
while current_level and not self._cancel_event.is_set():
|
||||
next_level: List[Tuple[str, Optional[str]]] = []
|
||||
urls = [url for url, _ in current_level]
|
||||
visited.update(urls)
|
||||
|
||||
stream_config = config.clone(deep_crawl_strategy=None, stream=True)
|
||||
stream_gen = await crawler.arun_many(urls=urls, config=stream_config)
|
||||
|
||||
# Keep track of processed results for this batch
|
||||
results_count = 0
|
||||
async for result in stream_gen:
|
||||
url = result.url
|
||||
depth = depths.get(url, 0)
|
||||
result.metadata = result.metadata or {}
|
||||
result.metadata["depth"] = depth
|
||||
parent_url = next((parent for (u, parent) in current_level if u == url), None)
|
||||
result.metadata["parent_url"] = parent_url
|
||||
|
||||
# Count only successful crawls
|
||||
if result.success:
|
||||
self._pages_crawled += 1
|
||||
# Check if we've reached the limit during batch processing
|
||||
if self._pages_crawled >= self.max_pages:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
|
||||
break # Exit the generator
|
||||
|
||||
results_count += 1
|
||||
yield result
|
||||
|
||||
# Only discover links from successful crawls
|
||||
if result.success:
|
||||
# Link discovery will handle the max pages limit internally
|
||||
await self.link_discovery(result, url, depth, visited, next_level, depths)
|
||||
|
||||
# If we didn't get results back (e.g. due to errors), avoid getting stuck in an infinite loop
|
||||
# by considering these URLs as visited but not counting them toward the max_pages limit
|
||||
if results_count == 0 and urls:
|
||||
self.logger.warning(f"No results returned for {len(urls)} URLs, marking as visited")
|
||||
|
||||
current_level = next_level
|
||||
|
||||
async def shutdown(self) -> None:
|
||||
"""
|
||||
Clean up resources and signal cancellation of the crawl.
|
||||
"""
|
||||
self._cancel_event.set()
|
||||
self.stats.end_time = datetime.now()
|
||||
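A minimal construction sketch for the BFS strategy (domain, limits, and threshold are illustrative; FilterChain and DomainFilter come from the filters.py module added below):

import asyncio
from crawl4ai.async_webcrawler import AsyncWebCrawler
from crawl4ai.async_configs import CrawlerRunConfig
from crawl4ai.deep_crawling.bfs_strategy import BFSDeepCrawlStrategy
from crawl4ai.deep_crawling.filters import FilterChain, DomainFilter

async def main():
    strategy = BFSDeepCrawlStrategy(
        max_depth=2,
        filter_chain=FilterChain([DomainFilter(allowed_domains=["example.com"])]),
        max_pages=50,         # stop after 50 successful crawls
        score_threshold=0.0,  # with a scorer set, skip links scoring below this
    )
    async with AsyncWebCrawler() as crawler:
        async for result in await strategy.arun(
                "https://example.com", crawler, CrawlerRunConfig(stream=True)):
            print(result.metadata["depth"], result.url)

asyncio.run(main())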
432 crawl4ai/deep_crawling/crazy.py (new file)
@@ -0,0 +1,432 @@
|
||||
from __future__ import annotations
|
||||
# I just got crazy, trying to write K&R C but in Python. Right now I feel like I'm in a quantum state.
|
||||
# I probably won't use this; I just want to leave it here. A century later, the future human race will be like, "WTF?"
|
||||
|
||||
# ------ Imports That Will Make You Question Reality ------ #
|
||||
from functools import wraps
|
||||
from contextvars import ContextVar
|
||||
import inspect
|
||||
|
||||
from crawl4ai import CacheMode
|
||||
from crawl4ai.async_configs import CrawlerRunConfig
|
||||
from crawl4ai.models import CrawlResult, TraversalStats
|
||||
from crawl4ai.deep_crawling.filters import FilterChain
|
||||
from crawl4ai.async_webcrawler import AsyncWebCrawler
|
||||
import time
|
||||
import logging
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from collections import deque
|
||||
import asyncio
|
||||
from typing import (
|
||||
AsyncGenerator,
|
||||
Dict,
|
||||
List,
|
||||
TypeVar,
|
||||
Generic,
|
||||
Tuple,
|
||||
Callable,
|
||||
Awaitable,
|
||||
Union,
|
||||
)
|
||||
from functools import lru_cache
|
||||
import mmh3
|
||||
from bitarray import bitarray
|
||||
import numpy as np
|
||||
from heapq import heappush, heappop
|
||||
|
||||
# ------ Type Algebra Mastery ------ #
|
||||
CrawlResultT = TypeVar("CrawlResultT", bound="CrawlResult")
|
||||
PriorityT = TypeVar("PriorityT")
|
||||
P = TypeVar("P")
|
||||
|
||||
# ------ Hyperscalar Context Management ------ #
|
||||
deep_crawl_ctx = ContextVar("deep_crawl_stack", default=deque())
|
||||
|
||||
# ------ Algebraic Crawler Monoid ------ #
|
||||
class TraversalContext:
|
||||
__slots__ = ('visited', 'frontier', 'depths', 'priority_fn', 'current_depth')
|
||||
|
||||
def __init__(self,
|
||||
priority_fn: Callable[[str], Awaitable[float]] = lambda _: 1.0):
|
||||
self.visited: BloomFilter = BloomFilter(10**6, 0.01) # 1M items, 1% FP
|
||||
self.frontier: PriorityQueue = PriorityQueue()
|
||||
self.depths: Dict[str, int] = {}
|
||||
self.priority_fn = priority_fn
|
||||
self.current_depth = 0
|
||||
|
||||
def clone_for_level(self) -> TraversalContext:
|
||||
"""Monadic context propagation"""
|
||||
new_ctx = TraversalContext(self.priority_fn)
|
||||
new_ctx.visited = self.visited.copy()
|
||||
new_ctx.depths = self.depths.copy()
|
||||
new_ctx.current_depth = self.current_depth
|
||||
return new_ctx
|
||||
|
||||
class PriorityQueue(Generic[PriorityT]):
|
||||
"""Fibonacci heap-inspired priority queue with O(1) amortized operations"""
|
||||
__slots__ = ('_heap', '_index')
|
||||
|
||||
def __init__(self):
|
||||
self._heap: List[Tuple[PriorityT, float, P]] = []
|
||||
self._index: Dict[P, int] = {}
|
||||
|
||||
def insert(self, priority: PriorityT, item: P) -> None:
|
||||
tiebreaker = time.time() # Ensure FIFO for equal priorities
|
||||
heappush(self._heap, (priority, tiebreaker, item))
|
||||
self._index[item] = len(self._heap) - 1
|
||||
|
||||
def extract(self, top_n: int = 1) -> List[P]:
    items: List[P] = []
    for _ in range(top_n):
        if not self._heap:
            break
        priority, _, item = heappop(self._heap)
        del self._index[item]
        items.append(item)
    if not items:
        raise IndexError("Priority queue empty")
    return items
|
||||
|
||||
|
||||
def is_empty(self) -> bool:
|
||||
return not bool(self._heap)
|
||||
|
||||
class BloomFilter:
|
||||
"""Optimal Bloom filter using murmur3 hash avalanche"""
|
||||
__slots__ = ('size', 'hashes', 'bits')
|
||||
|
||||
def __init__(self, capacity: int, error_rate: float):
|
||||
self.size = self._optimal_size(capacity, error_rate)
|
||||
self.hashes = self._optimal_hashes(capacity, self.size)
|
||||
self.bits = bitarray(self.size)
|
||||
self.bits.setall(False)
|
||||
|
||||
@staticmethod
|
||||
def _optimal_size(n: int, p: float) -> int:
|
||||
m = - (n * np.log(p)) / (np.log(2) ** 2)
|
||||
return int(np.ceil(m))
|
||||
|
||||
@staticmethod
|
||||
def _optimal_hashes(n: int, m: int) -> int:
|
||||
k = (m / n) * np.log(2)
|
||||
return int(np.ceil(k))
|
||||
|
||||
def add(self, item: str) -> None:
|
||||
for seed in range(self.hashes):
|
||||
digest = mmh3.hash(item, seed) % self.size
|
||||
self.bits[digest] = True
|
||||
|
||||
def __contains__(self, item: str) -> bool:
|
||||
return all(
|
||||
self.bits[mmh3.hash(item, seed) % self.size]
|
||||
for seed in range(self.hashes)
|
||||
)
|
||||
|
||||
def copy(self) -> BloomFilter:
|
||||
new = object.__new__(BloomFilter)
|
||||
new.size = self.size
|
||||
new.hashes = self.hashes
|
||||
new.bits = self.bits.copy()
|
||||
return new
|
||||
|
||||
def __len__(self) -> int:
|
||||
"""
|
||||
Estimates the number of items in the filter using the
|
||||
count of set bits and the formula:
|
||||
n = -m/k * ln(1 - X/m)
|
||||
where:
|
||||
m = size of bit array
|
||||
k = number of hash functions
|
||||
X = count of set bits
|
||||
"""
|
||||
set_bits = self.bits.count(True)
|
||||
if set_bits == 0:
|
||||
return 0
|
||||
|
||||
# Use the inverse bloom filter formula to estimate cardinality
|
||||
return int(
|
||||
-(self.size / self.hashes) *
|
||||
np.log(1 - set_bits / self.size)
|
||||
)
|
||||
|
||||
def bit_count(self) -> int:
|
||||
"""Returns the raw count of set bits in the filter"""
|
||||
return self.bits.count(True)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"BloomFilter(est_items={len(self)}, bits={self.bit_count()}/{self.size})"
|
||||
|
||||
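As a sanity check on the cardinality estimate in __len__: with m = 1000 bits, k = 3 hashes, and X = 500 set bits, n = -(m/k)·ln(1 - X/m) ≈ 231. A quick sketch (numbers illustrative):

import math

m, k, X = 1000, 3, 500             # bit-array size, hash count, set bits
n = -(m / k) * math.log(1 - X / m)
print(round(n))                    # 231 estimated items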
# ------ Hyper-Optimal Deep Crawl Core ------ #
|
||||
class DeepCrawlDecorator:
|
||||
"""Metaprogramming marvel: Zero-cost deep crawl abstraction"""
|
||||
def __init__(self, crawler: AsyncWebCrawler):
|
||||
self.crawler = crawler
|
||||
|
||||
def __call__(self, original_arun: Callable) -> Callable:
|
||||
@wraps(original_arun)
|
||||
async def quantum_arun(url: str, config: CrawlerRunConfig = None, **kwargs):
|
||||
stack = deep_crawl_ctx.get()
|
||||
if config and config.deep_crawl_strategy and not stack:
|
||||
stack.append(self.crawler)
|
||||
try:
|
||||
deep_crawl_ctx.set(stack)
|
||||
async for result in config.deep_crawl_strategy.traverse(
|
||||
start_url=url,
|
||||
crawler=self.crawler,
|
||||
config=config
|
||||
):
|
||||
yield result
|
||||
finally:
|
||||
stack.pop()
|
||||
deep_crawl_ctx.set(stack)
|
||||
else:
|
||||
result = await original_arun(url, config=config, **kwargs)
|
||||
yield result
|
||||
return quantum_arun
|
||||
|
||||
|
||||
async def collect_results(url, crawler, config):
|
||||
if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")):
|
||||
setattr(crawler, "arun", getattr(crawler, "original_arun"))
|
||||
|
||||
ret = crawler.arun(url, config=config)
|
||||
# If arun is an async generator, iterate over it
|
||||
if inspect.isasyncgen(ret):
|
||||
return [r async for r in ret]
|
||||
# Otherwise, await the coroutine and normalize to a list
|
||||
result = await ret
|
||||
return result if isinstance(result, list) else [result]
|
||||
|
||||
async def collect_many_results(url, crawler, config):
|
||||
# Replace back arun to its original implementation
|
||||
if id(getattr(crawler, "arun")) != id(getattr(crawler, "original_arun")):
|
||||
setattr(crawler, "arun", getattr(crawler, "original_arun"))
|
||||
ret = crawler.arun_many(url, config=config)
|
||||
# If arun is an async generator, iterate over it
|
||||
if inspect.isasyncgen(ret):
|
||||
return [r async for r in ret]
|
||||
# Otherwise, await the coroutine and normalize to a list
|
||||
result = await ret
|
||||
return result if isinstance(result, list) else [result]
|
||||
|
||||
|
||||
# ------ Deep Crawl Strategy Interface ------ #
|
||||
CrawlResultT = TypeVar("CrawlResultT", bound=CrawlResult)
|
||||
# In batch mode we return List[CrawlResult] and in stream mode an AsyncGenerator.
|
||||
RunManyReturn = Union[CrawlResultT, List[CrawlResultT], AsyncGenerator[CrawlResultT, None]]
|
||||
|
||||
|
||||
class DeepCrawlStrategy(ABC):
|
||||
"""Abstract base class that will make Dijkstra smile"""
|
||||
@abstractmethod
|
||||
async def traverse(self,
|
||||
start_url: str,
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig) -> RunManyReturn:
|
||||
"""Traverse with O(1) memory complexity via generator fusion"""
|
||||
...
|
||||
|
||||
@abstractmethod
|
||||
def precompute_priority(self, url: str) -> Awaitable[float]:
|
||||
"""Quantum-inspired priority precomputation"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
|
||||
"""Hilbert-curve optimized link generation"""
|
||||
pass
|
||||
|
||||
# ------ BFS That Would Make Knuth Proud ------ #
|
||||
|
||||
def calculate_quantum_batch_size(
|
||||
depth: int,
|
||||
max_depth: int,
|
||||
frontier_size: int,
|
||||
visited_size: int
|
||||
) -> int:
|
||||
"""
|
||||
Calculates optimal batch size for URL processing using quantum-inspired mathematical principles.
|
||||
|
||||
This function implements a sophisticated batch size calculation using:
|
||||
1. Golden Ratio (φ) based scaling for optimal irrationality
|
||||
2. Depth-aware amplitude modulation
|
||||
3. Harmonic series dampening
|
||||
4. Logarithmic growth control
|
||||
5. Dynamic frontier adaptation
|
||||
|
||||
The formula follows the quantum harmonic oscillator principle:
|
||||
N = ⌈φ^(2d) * log₂(|V|) * H(d)⁻¹ * min(20, |F|/10)⌉
|
||||
where:
|
||||
φ = Golden Ratio ((1 + √5) / 2)
|
||||
d = depth factor (normalized remaining depth)
|
||||
|V| = size of visited set
|
||||
H(d) = d-th harmonic number
|
||||
|F| = frontier size
|
||||
|
||||
Args:
|
||||
depth (int): Current traversal depth
|
||||
max_depth (int): Maximum allowed depth
|
||||
frontier_size (int): Current size of frontier queue
|
||||
visited_size (int): Number of URLs visited so far
|
||||
|
||||
Returns:
|
||||
int: Optimal batch size bounded between 1 and 100
|
||||
|
||||
Mathematical Properties:
|
||||
- Maintains O(log n) growth with respect to visited size
|
||||
- Provides φ-optimal distribution of resources
|
||||
- Ensures quantum-like state transitions between depths
|
||||
- Harmonically dampened to prevent exponential explosion
|
||||
"""
|
||||
# Golden ratio φ = (1 + √5) / 2
|
||||
φ = (1 + 5 ** 0.5) / 2
|
||||
|
||||
# Calculate normalized depth factor [0, 1]
|
||||
depth_factor = (max_depth - depth) / max_depth if depth < max_depth else 0
|
||||
|
||||
# Compute harmonic number for current depth
|
||||
harmonic = sum(1/k for k in range(1, depth + 2))
|
||||
|
||||
# Calculate quantum batch size
|
||||
batch_size = int(np.ceil(
|
||||
(φ ** (depth_factor * 2)) * # Golden ratio scaling
|
||||
np.log2(visited_size + 2) * # Logarithmic growth factor
|
||||
(1 / harmonic) * # Harmonic dampening
|
||||
max(1, min(20, frontier_size / 10)) # Frontier-aware scaling
|
||||
))
|
||||
|
||||
# Enforce practical bounds
|
||||
return max(1, min(100, batch_size))
|
||||
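For a feel for the heuristic's output, one evaluation of the function above (inputs illustrative): at depth 0 of max_depth 2, depth_factor = 1, so the golden-ratio term is φ² ≈ 2.618; with 100 visited, log₂(102) ≈ 6.67; the harmonic term is 1/1.5; a frontier of 50 gives min(20, 5) = 5:

# depth 0 of 2, frontier of 50 queued URLs, 100 URLs already visited
print(calculate_quantum_batch_size(depth=0, max_depth=2,
                                   frontier_size=50, visited_size=100))
# ceil(2.618 * 6.67 * (1/1.5) * 5) -> 59 (within the [1, 100] bounds)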
|
||||
|
||||
class BFSDeepCrawlStrategy(DeepCrawlStrategy):
|
||||
"""Breadth-First Search with Einstein-Rosen bridge optimization"""
|
||||
__slots__ = ('max_depth', 'filter_chain', 'priority_fn', 'stats', '_cancel', 'semaphore')  # 'semaphore' included: it is assigned in __init__
|
||||
|
||||
def __init__(self,
|
||||
max_depth: int,
|
||||
filter_chain: FilterChain = FilterChain(),
|
||||
priority_fn: Callable[[str], Awaitable[float]] = lambda url: 1.0,
|
||||
logger: logging.Logger = None):
|
||||
self.max_depth = max_depth
|
||||
self.filter_chain = filter_chain
|
||||
self.priority_fn = priority_fn
|
||||
self.stats = TraversalStats()
|
||||
self._cancel = asyncio.Event()
|
||||
self.semaphore = asyncio.Semaphore(1000)
|
||||
|
||||
async def traverse(self,
|
||||
start_url: str,
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig) -> RunManyReturn:
|
||||
"""Non-blocking BFS with O(b^d) time complexity awareness"""
|
||||
ctx = TraversalContext(self.priority_fn)
|
||||
ctx.frontier.insert(self.priority_fn(start_url), (start_url, None, 0))
|
||||
ctx.visited.add(start_url)
|
||||
ctx.depths[start_url] = 0
|
||||
|
||||
while not ctx.frontier.is_empty() and not self._cancel.is_set():
|
||||
# Use the batch-size heuristic to find the top_n value
|
||||
top_n = calculate_quantum_batch_size(
|
||||
depth=ctx.current_depth,
|
||||
max_depth=self.max_depth,
|
||||
frontier_size=len(ctx.frontier._heap),
|
||||
visited_size=len(ctx.visited)
|
||||
)
|
||||
|
||||
urls = ctx.frontier.extract(top_n=top_n)
|
||||
# url, parent, depth = ctx.frontier.extract(top_n=top_n)
|
||||
if urls:
|
||||
ctx.current_depth = urls[0][2]
|
||||
|
||||
async with self.semaphore:
|
||||
results = await collect_many_results([url for (url, parent, depth) in urls], crawler, config)
|
||||
# results = await asyncio.gather(*[
|
||||
# collect_results(url, crawler, config) for (url, parent, depth) in urls
|
||||
# ])
|
||||
# result = _result[0]
|
||||
for ix, result in enumerate(results):
|
||||
url, parent, depth = result.url, urls[ix][1], urls[ix][2]
|
||||
result.metadata['depth'] = depth
|
||||
result.metadata['parent'] = parent
|
||||
yield result
|
||||
|
||||
if depth < self.max_depth:
|
||||
async for link in self.link_hypercube(result):
|
||||
if link not in ctx.visited:
|
||||
priority = self.priority_fn(link)
|
||||
ctx.frontier.insert(priority, (link, url, depth + 1))
|
||||
ctx.visited.add(link)
|
||||
ctx.depths[link] = depth + 1
|
||||
|
||||
@lru_cache(maxsize=65536)
|
||||
async def validate_url(self, url: str) -> bool:
|
||||
"""Memoized URL validation with λ-calculus purity"""
|
||||
try:
|
||||
parsed = urlparse(url)
|
||||
return (parsed.scheme in {'http', 'https'}
|
||||
and '.' in parsed.netloc
|
||||
and await self.filter_chain.apply(url))
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
async def link_hypercube(self, result: CrawlResult) -> AsyncGenerator[str, None]:
|
||||
"""Hilbert-ordered link generation with O(1) yield latency"""
|
||||
links = (link['href'] for link in result.links.get('internal', []))
|
||||
validated = filter(self.validate_url, links)
|
||||
for link in sorted(validated, key=lambda x: -self.priority_fn(x)):
|
||||
yield link
|
||||
|
||||
def __aiter__(self) -> AsyncGenerator[CrawlResult, None]:
|
||||
"""Native async iterator interface"""
|
||||
return self.traverse()
|
||||
|
||||
async def __anext__(self) -> CrawlResult:
|
||||
"""True async iterator protocol implementation"""
|
||||
result = await self.traverse().__anext__()
|
||||
if result:
|
||||
return result
|
||||
raise StopAsyncIteration
|
||||
|
||||
async def precompute_priority(self, url):
|
||||
return super().precompute_priority(url)
|
||||
|
||||
async def shutdown(self):
|
||||
self._cancel.set()
|
||||
|
||||
# ------ Usage That Will Drop Jaws ------ #
|
||||
async def main():
|
||||
"""Quantum crawl example"""
|
||||
strategy = BFSDeepCrawlStrategy(
|
||||
max_depth=2,
|
||||
priority_fn=lambda url: 1.0 / (len(url) + 1e-9), # Inverse length priority
|
||||
# filter_chain=FilterChain(...)
|
||||
)
|
||||
|
||||
config: CrawlerRunConfig = CrawlerRunConfig(
|
||||
deep_crawl_strategy=strategy,
|
||||
stream=False,
|
||||
verbose=True,
|
||||
cache_mode=CacheMode.BYPASS
|
||||
)
|
||||
|
||||
async with AsyncWebCrawler() as crawler:
|
||||
run_decorator = DeepCrawlDecorator(crawler)
|
||||
setattr(crawler, "original_arun", crawler.arun)
|
||||
crawler.arun = run_decorator(crawler.arun)
|
||||
start_time = time.perf_counter()
|
||||
async for result in crawler.arun("https://docs.crawl4ai.com", config=config):
|
||||
print(f"🌀 {result.url} (Depth: {result.metadata['depth']})")
|
||||
print(f"Deep crawl completed in {time.perf_counter() - start_time:.2f}s")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
110 crawl4ai/deep_crawling/dfs_strategy.py (new file)
@@ -0,0 +1,110 @@
|
||||
# dfs_deep_crawl_strategy.py
|
||||
from typing import AsyncGenerator, Optional, Set, Dict, List, Tuple
|
||||
|
||||
from ..models import CrawlResult
|
||||
from .bfs_strategy import BFSDeepCrawlStrategy # noqa
|
||||
from ..types import AsyncWebCrawler, CrawlerRunConfig
|
||||
|
||||
class DFSDeepCrawlStrategy(BFSDeepCrawlStrategy):
|
||||
"""
|
||||
Depth-First Search (DFS) deep crawling strategy.
|
||||
|
||||
Inherits URL validation and link discovery from BFSDeepCrawlStrategy.
|
||||
Overrides _arun_batch and _arun_stream to use a stack (LIFO) for DFS traversal.
|
||||
"""
|
||||
async def _arun_batch(
|
||||
self,
|
||||
start_url: str,
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig,
|
||||
) -> List[CrawlResult]:
|
||||
"""
|
||||
Batch (non-streaming) DFS mode.
|
||||
Uses a stack to traverse URLs in DFS order, aggregating CrawlResults into a list.
|
||||
"""
|
||||
visited: Set[str] = set()
|
||||
# Stack items: (url, parent_url, depth)
|
||||
stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
|
||||
depths: Dict[str, int] = {start_url: 0}
|
||||
results: List[CrawlResult] = []
|
||||
|
||||
while stack and not self._cancel_event.is_set():
|
||||
url, parent, depth = stack.pop()
|
||||
if url in visited or depth > self.max_depth:
|
||||
continue
|
||||
visited.add(url)
|
||||
|
||||
# Clone config to disable recursive deep crawling.
|
||||
batch_config = config.clone(deep_crawl_strategy=None, stream=False)
|
||||
url_results = await crawler.arun_many(urls=[url], config=batch_config)
|
||||
|
||||
for result in url_results:
|
||||
result.metadata = result.metadata or {}
|
||||
result.metadata["depth"] = depth
|
||||
result.metadata["parent_url"] = parent
|
||||
if self.url_scorer:
|
||||
result.metadata["score"] = self.url_scorer.score(url)
|
||||
results.append(result)
|
||||
|
||||
# Count only successful crawls toward max_pages limit
|
||||
if result.success:
|
||||
self._pages_crawled += 1
|
||||
# Check if we've reached the limit during batch processing
|
||||
if self._pages_crawled >= self.max_pages:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
|
||||
break # Exit the generator
|
||||
|
||||
# Only discover links from successful crawls
if result.success:
    new_links: List[Tuple[str, Optional[str]]] = []
    await self.link_discovery(result, url, depth, visited, new_links, depths)

    # Push new links in reverse order so the first discovered is processed next.
    for new_url, new_parent in reversed(new_links):
        new_depth = depths.get(new_url, depth + 1)
        stack.append((new_url, new_parent, new_depth))
|
||||
return results
|
||||
|
||||
async def _arun_stream(
|
||||
self,
|
||||
start_url: str,
|
||||
crawler: AsyncWebCrawler,
|
||||
config: CrawlerRunConfig,
|
||||
) -> AsyncGenerator[CrawlResult, None]:
|
||||
"""
|
||||
Streaming DFS mode.
|
||||
Uses a stack to traverse URLs in DFS order and yields CrawlResults as they become available.
|
||||
"""
|
||||
visited: Set[str] = set()
|
||||
stack: List[Tuple[str, Optional[str], int]] = [(start_url, None, 0)]
|
||||
depths: Dict[str, int] = {start_url: 0}
|
||||
|
||||
while stack and not self._cancel_event.is_set():
|
||||
url, parent, depth = stack.pop()
|
||||
if url in visited or depth > self.max_depth:
|
||||
continue
|
||||
visited.add(url)
|
||||
|
||||
stream_config = config.clone(deep_crawl_strategy=None, stream=True)
|
||||
stream_gen = await crawler.arun_many(urls=[url], config=stream_config)
|
||||
async for result in stream_gen:
|
||||
result.metadata = result.metadata or {}
|
||||
result.metadata["depth"] = depth
|
||||
result.metadata["parent_url"] = parent
|
||||
if self.url_scorer:
|
||||
result.metadata["score"] = self.url_scorer.score(url)
|
||||
yield result
|
||||
|
||||
# Only count successful crawls toward max_pages limit
|
||||
# and only discover links from successful crawls
|
||||
if result.success:
|
||||
self._pages_crawled += 1
|
||||
# Check if we've reached the limit during batch processing
|
||||
if self._pages_crawled >= self.max_pages:
|
||||
self.logger.info(f"Max pages limit ({self.max_pages}) reached during batch, stopping crawl")
|
||||
break # Exit the generator
|
||||
|
||||
new_links: List[Tuple[str, Optional[str]]] = []
|
||||
await self.link_discovery(result, url, depth, visited, new_links, depths)
|
||||
for new_url, new_parent in reversed(new_links):
|
||||
new_depth = depths.get(new_url, depth + 1)
|
||||
stack.append((new_url, new_parent, new_depth))
|
||||
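The only structural change from the BFS parent is the frontier: a LIFO stack, so the most recently discovered link is expanded before its siblings. A toy sketch of the resulting order (the graph is illustrative):

graph = {"A": ["B", "C"], "B": ["D"], "C": [], "D": []}
stack, seen, order = [("A", 0)], {"A"}, []
while stack:
    url, depth = stack.pop()               # LIFO: newest entry first
    order.append(url)
    for child in reversed(graph[url]):     # reversed, so "B" ends up on top
        if child not in seen:
            seen.add(child)
            stack.append((child, depth + 1))
print(order)  # ['A', 'B', 'D', 'C']: B's subtree is exhausted before C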
694 crawl4ai/deep_crawling/filters.py (new file)
@@ -0,0 +1,694 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import List, Pattern, Set, Union
|
||||
from urllib.parse import urlparse
|
||||
from array import array
|
||||
import re
|
||||
import logging
|
||||
from functools import lru_cache
|
||||
import fnmatch
|
||||
from dataclasses import dataclass
|
||||
import weakref
|
||||
import math
|
||||
from collections import defaultdict
|
||||
from typing import Dict
|
||||
from ..utils import HeadPeekr
|
||||
import asyncio
|
||||
import inspect
|
||||
|
||||
|
||||
@dataclass
|
||||
class FilterStats:
|
||||
__slots__ = ("_counters",)
|
||||
|
||||
def __init__(self):
|
||||
# Use array of unsigned ints for atomic operations
|
||||
self._counters = array("I", [0, 0, 0]) # total, passed, rejected
|
||||
|
||||
@property
|
||||
def total_urls(self):
|
||||
return self._counters[0]
|
||||
|
||||
@property
|
||||
def passed_urls(self):
|
||||
return self._counters[1]
|
||||
|
||||
@property
|
||||
def rejected_urls(self):
|
||||
return self._counters[2]
|
||||
|
||||
|
||||
class URLFilter(ABC):
|
||||
"""Optimized base filter class"""
|
||||
|
||||
__slots__ = ("name", "stats", "_logger_ref")
|
||||
|
||||
def __init__(self, name: str = None):
|
||||
self.name = name or self.__class__.__name__
|
||||
self.stats = FilterStats()
|
||||
# Lazy logger initialization using weakref
|
||||
self._logger_ref = None
|
||||
|
||||
@property
|
||||
def logger(self):
|
||||
if self._logger_ref is None or self._logger_ref() is None:
|
||||
logger = logging.getLogger(f"urlfilter.{self.name}")
|
||||
self._logger_ref = weakref.ref(logger)
|
||||
return self._logger_ref()
|
||||
|
||||
@abstractmethod
|
||||
def apply(self, url: str) -> bool:
|
||||
pass
|
||||
|
||||
def _update_stats(self, passed: bool):
|
||||
# Use direct array index for speed
|
||||
self.stats._counters[0] += 1 # total
|
||||
self.stats._counters[1] += passed # passed
|
||||
self.stats._counters[2] += not passed # rejected
|
||||
|
||||
|
||||
class FilterChain:
|
||||
"""Optimized filter chain"""
|
||||
|
||||
__slots__ = ("filters", "stats", "_logger_ref")
|
||||
|
||||
def __init__(self, filters: List[URLFilter] = None):
|
||||
self.filters = tuple(filters or []) # Immutable tuple for speed
|
||||
self.stats = FilterStats()
|
||||
self._logger_ref = None
|
||||
|
||||
@property
|
||||
def logger(self):
|
||||
if self._logger_ref is None or self._logger_ref() is None:
|
||||
logger = logging.getLogger("urlfilter.chain")
|
||||
self._logger_ref = weakref.ref(logger)
|
||||
return self._logger_ref()
|
||||
|
||||
def add_filter(self, filter_: URLFilter) -> "FilterChain":
|
||||
"""Add a filter to the chain"""
|
||||
self.filters = self.filters + (filter_,)  # filters is an immutable tuple, so rebuild it
|
||||
return self # Enable method chaining
|
||||
|
||||
async def apply(self, url: str) -> bool:
|
||||
"""Apply all filters concurrently when possible"""
|
||||
self.stats._counters[0] += 1 # Total processed URLs
|
||||
|
||||
tasks = []
|
||||
for f in self.filters:
|
||||
result = f.apply(url)
|
||||
|
||||
if inspect.isawaitable(result):
|
||||
tasks.append(result) # Collect async tasks
|
||||
elif not result: # Sync rejection
|
||||
self.stats._counters[2] += 1 # Sync rejected
|
||||
return False
|
||||
|
||||
if tasks:
|
||||
results = await asyncio.gather(*tasks)
|
||||
|
||||
# Count how many filters rejected
|
||||
rejections = results.count(False)
|
||||
self.stats._counters[2] += rejections
|
||||
|
||||
if not all(results):
|
||||
return False # Stop early if any filter rejected
|
||||
|
||||
self.stats._counters[1] += 1 # Passed
|
||||
return True
|
||||
|
||||
|
||||
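Because FilterChain.apply awaits only the filters whose apply returned an awaitable, synchronous and asynchronous filters mix freely in one chain, with sync rejections short-circuiting before any network work. A small sketch (filters and URL are illustrative; the classes are defined later in this file):

import asyncio
from crawl4ai.deep_crawling.filters import (
    FilterChain, DomainFilter, ContentTypeFilter, ContentRelevanceFilter)

async def demo():
    chain = FilterChain([
        DomainFilter(allowed_domains=["example.com"]),   # sync, cheap
        ContentTypeFilter(allowed_types=["text/html"]),  # sync, cheap
        ContentRelevanceFilter(query="web crawling", threshold=0.5),  # async, fetches <head>
    ])
    print(await chain.apply("https://example.com/docs/intro.html"))

asyncio.run(demo())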
class URLPatternFilter(URLFilter):
|
||||
"""Pattern filter balancing speed and completeness"""
|
||||
|
||||
__slots__ = (
|
||||
"patterns", # Store original patterns for serialization
|
||||
"use_glob", # Store original use_glob for serialization
|
||||
"reverse", # Store original reverse for serialization
|
||||
"_simple_suffixes",
|
||||
"_simple_prefixes",
|
||||
"_domain_patterns",
|
||||
"_path_patterns",
|
||||
"_reverse",
|
||||
)
|
||||
|
||||
PATTERN_TYPES = {
|
||||
"SUFFIX": 1, # *.html
|
||||
"PREFIX": 2, # /foo/*
|
||||
"DOMAIN": 3, # *.example.com
|
||||
"PATH": 4, # Everything else
|
||||
"REGEX": 5,
|
||||
}
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
patterns: Union[str, Pattern, List[Union[str, Pattern]]],
|
||||
use_glob: bool = True,
|
||||
reverse: bool = False,
|
||||
):
|
||||
super().__init__()
|
||||
# Store original constructor params for serialization
|
||||
self.patterns = patterns
|
||||
self.use_glob = use_glob
|
||||
self.reverse = reverse
|
||||
|
||||
self._reverse = reverse
|
||||
patterns = [patterns] if isinstance(patterns, (str, Pattern)) else patterns
|
||||
|
||||
self._simple_suffixes = set()
|
||||
self._simple_prefixes = set()
|
||||
self._domain_patterns = []
|
||||
self._path_patterns = []
|
||||
|
||||
for pattern in patterns:
|
||||
pattern_type = self._categorize_pattern(pattern)
|
||||
self._add_pattern(pattern, pattern_type)
|
||||
|
||||
def _categorize_pattern(self, pattern: str) -> int:
|
||||
"""Categorize pattern for specialized handling"""
|
||||
if not isinstance(pattern, str):
|
||||
return self.PATTERN_TYPES["PATH"]
|
||||
|
||||
# Check if it's a regex pattern
|
||||
if pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern:
|
||||
return self.PATTERN_TYPES["REGEX"]
|
||||
|
||||
if pattern.count("*") == 1:
|
||||
if pattern.startswith("*."):
|
||||
return self.PATTERN_TYPES["SUFFIX"]
|
||||
if pattern.endswith("/*"):
|
||||
return self.PATTERN_TYPES["PREFIX"]
|
||||
|
||||
if "://" in pattern and pattern.startswith("*."):
|
||||
return self.PATTERN_TYPES["DOMAIN"]
|
||||
|
||||
return self.PATTERN_TYPES["PATH"]
|
||||
|
||||
def _add_pattern(self, pattern: str, pattern_type: int):
|
||||
"""Add pattern to appropriate matcher"""
|
||||
if pattern_type == self.PATTERN_TYPES["REGEX"]:
|
||||
# For regex patterns, compile directly without glob translation
|
||||
if isinstance(pattern, str) and (
|
||||
pattern.startswith("^") or pattern.endswith("$") or "\\d" in pattern
|
||||
):
|
||||
self._path_patterns.append(re.compile(pattern))
|
||||
return
|
||||
elif pattern_type == self.PATTERN_TYPES["SUFFIX"]:
|
||||
self._simple_suffixes.add(pattern[2:])
|
||||
elif pattern_type == self.PATTERN_TYPES["PREFIX"]:
|
||||
self._simple_prefixes.add(pattern[:-2])
|
||||
elif pattern_type == self.PATTERN_TYPES["DOMAIN"]:
|
||||
self._domain_patterns.append(re.compile(pattern.replace("*.", r"[^/]+\.")))
|
||||
else:
|
||||
if isinstance(pattern, str):
|
||||
# Handle complex glob patterns
|
||||
if "**" in pattern:
|
||||
pattern = pattern.replace("**", ".*")
|
||||
if "{" in pattern:
|
||||
# Convert {a,b} to (a|b)
|
||||
pattern = re.sub(
|
||||
r"\{([^}]+)\}",
|
||||
lambda m: f'({"|".join(m.group(1).split(","))})',
|
||||
pattern,
|
||||
)
|
||||
pattern = fnmatch.translate(pattern)
|
||||
self._path_patterns.append(
|
||||
pattern if isinstance(pattern, Pattern) else re.compile(pattern)
|
||||
)
|
||||
|
||||
@lru_cache(maxsize=10000)
|
||||
def apply(self, url: str) -> bool:
|
||||
# Quick suffix check (*.html)
|
||||
if self._simple_suffixes:
|
||||
path = url.split("?")[0]
|
||||
if path.split("/")[-1].split(".")[-1] in self._simple_suffixes:
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
# Domain check
|
||||
if self._domain_patterns:
|
||||
for pattern in self._domain_patterns:
|
||||
if pattern.match(url):
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
# Prefix check (/foo/*)
|
||||
if self._simple_prefixes:
|
||||
path = url.split("?")[0]
|
||||
# Prefix matching with path-boundary checking: a matched prefix must be
# followed by '/', '?', or '#', or end the path, so '/api' matches complete
# path segments rather than substrings like '/apiv2/'.
|
||||
for prefix in self._simple_prefixes:
|
||||
if path.startswith(prefix):
|
||||
if len(path) == len(prefix) or path[len(prefix)] in ['/', '?', '#']:
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
# Complex patterns
|
||||
if self._path_patterns:
|
||||
if any(p.search(url) for p in self._path_patterns):
|
||||
result = True
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
result = False
|
||||
self._update_stats(result)
|
||||
return not result if self._reverse else result
|
||||
|
||||
|
||||
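The categories map to concrete patterns roughly as follows; note that PREFIX patterns are matched against the full URL minus the query string, so they should include the scheme and host. A sketch (patterns and URLs illustrative):

from crawl4ai.deep_crawling.filters import URLPatternFilter

f = URLPatternFilter([
    "*.html",                      # SUFFIX: fast extension-set lookup
    "https://example.com/blog/*",  # PREFIX: boundary-checked startswith
    r"^https://.*/post-\d+$",      # REGEX: compiled directly
])
print(f.apply("https://example.com/blog/intro"))      # True  (PREFIX match)
print(f.apply("https://example.com/blogroll/intro"))  # False (boundary check rejects)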
class ContentTypeFilter(URLFilter):
|
||||
"""Optimized content type filter using fast lookups"""
|
||||
|
||||
__slots__ = ("allowed_types", "_ext_map", "_check_extension")
|
||||
|
||||
# Fast extension to mime type mapping
|
||||
_MIME_MAP = {
|
||||
# Text Formats
|
||||
"txt": "text/plain",
|
||||
"html": "text/html",
|
||||
"htm": "text/html",
|
||||
"xhtml": "application/xhtml+xml",
|
||||
"css": "text/css",
|
||||
"csv": "text/csv",
|
||||
"ics": "text/calendar",
|
||||
"js": "application/javascript",
|
||||
# Images
|
||||
"bmp": "image/bmp",
|
||||
"gif": "image/gif",
|
||||
"jpeg": "image/jpeg",
|
||||
"jpg": "image/jpeg",
|
||||
"png": "image/png",
|
||||
"svg": "image/svg+xml",
|
||||
"tiff": "image/tiff",
|
||||
"ico": "image/x-icon",
|
||||
"webp": "image/webp",
|
||||
# Audio
|
||||
"mp3": "audio/mpeg",
|
||||
"wav": "audio/wav",
|
||||
"ogg": "audio/ogg",
|
||||
"m4a": "audio/mp4",
|
||||
"aac": "audio/aac",
|
||||
# Video
|
||||
"mp4": "video/mp4",
|
||||
"mpeg": "video/mpeg",
|
||||
"webm": "video/webm",
|
||||
"avi": "video/x-msvideo",
|
||||
"mov": "video/quicktime",
|
||||
"flv": "video/x-flv",
|
||||
"wmv": "video/x-ms-wmv",
|
||||
"mkv": "video/x-matroska",
|
||||
# Applications
|
||||
"json": "application/json",
|
||||
"xml": "application/xml",
|
||||
"pdf": "application/pdf",
|
||||
"zip": "application/zip",
|
||||
"gz": "application/gzip",
|
||||
"tar": "application/x-tar",
|
||||
"rar": "application/vnd.rar",
|
||||
"7z": "application/x-7z-compressed",
|
||||
"exe": "application/vnd.microsoft.portable-executable",
|
||||
"msi": "application/x-msdownload",
|
||||
# Fonts
|
||||
"woff": "font/woff",
|
||||
"woff2": "font/woff2",
|
||||
"ttf": "font/ttf",
|
||||
"otf": "font/otf",
|
||||
# Microsoft Office
|
||||
"doc": "application/msword",
|
||||
"dot": "application/msword",
|
||||
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||||
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
|
||||
"xls": "application/vnd.ms-excel",
|
||||
"ppt": "application/vnd.ms-powerpoint",
|
||||
"pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
|
||||
# OpenDocument Formats
|
||||
"odt": "application/vnd.oasis.opendocument.text",
|
||||
"ods": "application/vnd.oasis.opendocument.spreadsheet",
|
||||
"odp": "application/vnd.oasis.opendocument.presentation",
|
||||
# Archives
|
||||
"tar.gz": "application/gzip",
|
||||
"tgz": "application/gzip",
|
||||
"bz2": "application/x-bzip2",
|
||||
# Others
|
||||
"rtf": "application/rtf",
|
||||
"apk": "application/vnd.android.package-archive",
|
||||
"epub": "application/epub+zip",
|
||||
"jar": "application/java-archive",
|
||||
"swf": "application/x-shockwave-flash",
|
||||
"midi": "audio/midi",
|
||||
"mid": "audio/midi",
|
||||
"ps": "application/postscript",
|
||||
"ai": "application/postscript",
|
||||
"eps": "application/postscript",
|
||||
# Custom or less common
|
||||
"bin": "application/octet-stream",
|
||||
"dmg": "application/x-apple-diskimage",
|
||||
"iso": "application/x-iso9660-image",
|
||||
"deb": "application/x-debian-package",
|
||||
"rpm": "application/x-rpm",
|
||||
"sqlite": "application/vnd.sqlite3",
|
||||
# Placeholder
|
||||
"unknown": "application/octet-stream", # Fallback for unknown file types
|
||||
# php
|
||||
"php": "application/x-httpd-php",
|
||||
"php3": "application/x-httpd-php",
|
||||
"php4": "application/x-httpd-php",
|
||||
"php5": "application/x-httpd-php",
|
||||
"php7": "application/x-httpd-php",
|
||||
"phtml": "application/x-httpd-php",
|
||||
"phps": "application/x-httpd-php-source",
|
||||
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
@lru_cache(maxsize=1000)
|
||||
def _extract_extension(url: str) -> str:
|
||||
"""Extracts file extension from a URL."""
|
||||
# Remove scheme (http://, https://) if present
|
||||
if "://" in url:
|
||||
url = url.split("://", 1)[-1] # Get everything after '://'
|
||||
|
||||
# Remove domain (everything up to the first '/')
|
||||
path_start = url.find("/")
|
||||
path = url[path_start:] if path_start != -1 else ""
|
||||
|
||||
# Extract last filename in path
|
||||
filename = path.rsplit("/", 1)[-1] if "/" in path else ""
|
||||
|
||||
# Extract and validate extension
|
||||
if "." not in filename:
|
||||
return ""
|
||||
|
||||
return filename.rpartition(".")[-1].lower()
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
allowed_types: Union[str, List[str]],
|
||||
check_extension: bool = True,
|
||||
ext_map: Dict[str, str] = _MIME_MAP,
|
||||
):
|
||||
super().__init__()
|
||||
# Normalize and store as frozenset for fast lookup
|
||||
self.allowed_types = frozenset(
|
||||
t.lower()
|
||||
for t in (
|
||||
allowed_types if isinstance(allowed_types, list) else [allowed_types]
|
||||
)
|
||||
)
|
||||
self._check_extension = check_extension
|
||||
|
||||
# Pre-compute extension map for allowed types
|
||||
self._ext_map = frozenset(
|
||||
ext
|
||||
for ext, mime in self._MIME_MAP.items()
|
||||
if any(allowed in mime for allowed in self.allowed_types)
|
||||
)
|
||||
|
||||
@lru_cache(maxsize=1000)
|
||||
def _check_url_cached(self, url: str) -> bool:
|
||||
"""Cached URL checking"""
|
||||
if not self._check_extension:
|
||||
return True
|
||||
ext = self._extract_extension(url)
|
||||
if not ext:
|
||||
return True
|
||||
|
||||
return ext in self._ext_map
|
||||
|
||||
def apply(self, url: str) -> bool:
|
||||
"""Fast extension check with caching"""
|
||||
result = self._check_url_cached(url)
|
||||
self._update_stats(result)
|
||||
return result
|
||||
|
||||
|
||||
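The filter never fetches anything: it maps the URL's file extension to a MIME type and checks membership, and extensionless URLs pass through. A sketch (URLs illustrative):

from crawl4ai.deep_crawling.filters import ContentTypeFilter

f = ContentTypeFilter(allowed_types=["text/html"])
print(f.apply("https://example.com/page.html"))  # True  ("html" -> text/html)
print(f.apply("https://example.com/logo.png"))   # False ("png" -> image/png)
print(f.apply("https://example.com/about"))      # True  (no extension: pass through)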
class DomainFilter(URLFilter):
|
||||
"""Optimized domain filter with fast lookups and caching"""
|
||||
|
||||
__slots__ = ("_allowed_domains", "_blocked_domains", "_domain_cache")
|
||||
|
||||
# Regex for fast domain extraction
|
||||
_DOMAIN_REGEX = re.compile(r"://([^/]+)")
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
allowed_domains: Union[str, List[str]] = None,
|
||||
blocked_domains: Union[str, List[str]] = None,
|
||||
):
|
||||
super().__init__()
|
||||
|
||||
# Convert inputs to frozensets for immutable, fast lookups
|
||||
self._allowed_domains = (
|
||||
frozenset(self._normalize_domains(allowed_domains))
|
||||
if allowed_domains
|
||||
else None
|
||||
)
|
||||
self._blocked_domains = (
|
||||
frozenset(self._normalize_domains(blocked_domains))
|
||||
if blocked_domains
|
||||
else frozenset()
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _normalize_domains(domains: Union[str, List[str]]) -> Set[str]:
|
||||
"""Fast domain normalization"""
|
||||
if isinstance(domains, str):
|
||||
return {domains.lower()}
|
||||
return {d.lower() for d in domains}
|
||||
|
||||
@staticmethod
|
||||
def _is_subdomain(domain: str, parent_domain: str) -> bool:
|
||||
"""Check if domain is a subdomain of parent_domain"""
|
||||
return domain == parent_domain or domain.endswith(f".{parent_domain}")
|
||||
|
||||
@staticmethod
|
||||
@lru_cache(maxsize=10000)
|
||||
def _extract_domain(url: str) -> str:
|
||||
"""Ultra-fast domain extraction with regex and caching"""
|
||||
match = DomainFilter._DOMAIN_REGEX.search(url)
|
||||
return match.group(1).lower() if match else ""
|
||||
|
||||
def apply(self, url: str) -> bool:
|
||||
"""Optimized domain checking with early returns"""
|
||||
# Skip processing if no filters
|
||||
if not self._blocked_domains and self._allowed_domains is None:
|
||||
self._update_stats(True)
|
||||
return True
|
||||
|
||||
domain = self._extract_domain(url)
|
||||
|
||||
# Check for blocked domains, including subdomains
|
||||
for blocked in self._blocked_domains:
|
||||
if self._is_subdomain(domain, blocked):
|
||||
self._update_stats(False)
|
||||
return False
|
||||
|
||||
# If no allowed domains specified, accept all non-blocked
|
||||
if self._allowed_domains is None:
|
||||
self._update_stats(True)
|
||||
return True
|
||||
|
||||
# Check if domain matches any allowed domain (including subdomains)
|
||||
for allowed in self._allowed_domains:
|
||||
if self._is_subdomain(domain, allowed):
|
||||
self._update_stats(True)
|
||||
return True
|
||||
|
||||
# No matches found
|
||||
self._update_stats(False)
|
||||
return False
|
||||
|
||||
|
||||
class ContentRelevanceFilter(URLFilter):
|
||||
"""BM25-based relevance filter using head section content"""
|
||||
|
||||
__slots__ = ("query_terms", "threshold", "k1", "b", "avgdl")
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
query: str,
|
||||
threshold: float,
|
||||
k1: float = 1.2,
|
||||
b: float = 0.75,
|
||||
avgdl: int = 1000,
|
||||
):
|
||||
super().__init__(name="BM25RelevanceFilter")
|
||||
self.query_terms = self._tokenize(query)
|
||||
self.threshold = threshold
|
||||
self.k1 = k1 # TF saturation parameter
|
||||
self.b = b # Length normalization parameter
|
||||
self.avgdl = avgdl # Average document length (empirical value)
|
||||
|
||||
async def apply(self, url: str) -> bool:
|
||||
head_content = await HeadPeekr.peek_html(url)
|
||||
if not head_content:
|
||||
self._update_stats(False)
|
||||
return False
|
||||
|
||||
# Field extraction with weighting
|
||||
fields = {
|
||||
"title": HeadPeekr.get_title(head_content) or "",
|
||||
"meta": HeadPeekr.extract_meta_tags(head_content),
|
||||
}
|
||||
doc_text = self._build_document(fields)
|
||||
|
||||
score = self._bm25(doc_text)
|
||||
decision = score >= self.threshold
|
||||
self._update_stats(decision)
|
||||
return decision
|
||||
|
||||
def _build_document(self, fields: Dict) -> str:
|
||||
"""Weighted document construction"""
|
||||
return " ".join(
|
||||
[
|
||||
fields["title"] * 3, # Title weight
|
||||
fields["meta"].get("description", "") * 2,
|
||||
fields["meta"].get("keywords", ""),
|
||||
" ".join(fields["meta"].values()),
|
||||
]
|
||||
)
|
||||
|
||||
def _tokenize(self, text: str) -> List[str]:
|
||||
"""Fast case-insensitive tokenization"""
|
||||
return text.lower().split()
|
||||
|
||||
def _bm25(self, document: str) -> float:
|
||||
"""Optimized BM25 implementation for head sections"""
|
||||
doc_terms = self._tokenize(document)
|
||||
doc_len = len(doc_terms)
|
||||
tf = defaultdict(int)
|
||||
|
||||
for term in doc_terms:
|
||||
tf[term] += 1
|
||||
|
||||
score = 0.0
|
||||
for term in set(self.query_terms):
|
||||
term_freq = tf[term]
|
||||
idf = math.log((1 + 1) / (term_freq + 0.5) + 1) # Simplified IDF
|
||||
numerator = term_freq * (self.k1 + 1)
|
||||
denominator = term_freq + self.k1 * (
|
||||
1 - self.b + self.b * (doc_len / self.avgdl)
|
||||
)
|
||||
score += idf * (numerator / denominator)
|
||||
|
||||
return score
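
For intuition, here is a small self-contained sketch (the numbers are made up, not from this commit) of how the simplified BM25 above behaves; note that it reuses the in-document term frequency inside the IDF term, so repeated query terms saturate quickly:

import math

# Toy numbers: a 10-term head section containing one query term twice,
# with the defaults k1=1.2, b=0.75, avgdl=1000 used above.
k1, b, avgdl = 1.2, 0.75, 1000
term_freq, doc_len = 2, 10

idf = math.log((1 + 1) / (term_freq + 0.5) + 1)  # log(1.8) ~= 0.588
numerator = term_freq * (k1 + 1)                 # 4.4
denominator = term_freq + k1 * (1 - b + b * (doc_len / avgdl))  # ~2.309
print(idf * (numerator / denominator))           # ~1.12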


class SEOFilter(URLFilter):
    """Quantitative SEO quality assessment filter using head section analysis"""

    __slots__ = ("threshold", "_weights", "_kw_patterns")

    # Based on SEMrush/Google ranking factors research
    DEFAULT_WEIGHTS = {
        "title_length": 0.15,
        "title_kw": 0.18,
        "meta_description": 0.12,
        "canonical": 0.10,
        "robot_ok": 0.20,  # Most critical factor
        "schema_org": 0.10,
        "url_quality": 0.15,
    }

    def __init__(
        self,
        threshold: float = 0.65,
        keywords: List[str] = None,
        weights: Dict[str, float] = None,
    ):
        super().__init__(name="SEOFilter")
        self.threshold = threshold
        self._weights = weights or self.DEFAULT_WEIGHTS
        self._kw_patterns = (
            re.compile(
                r"\b({})\b".format("|".join(map(re.escape, keywords or []))), re.I
            )
            if keywords
            else None
        )

    async def apply(self, url: str) -> bool:
        head_content = await HeadPeekr.peek_html(url)
        if not head_content:
            self._update_stats(False)
            return False

        meta = HeadPeekr.extract_meta_tags(head_content)
        title = HeadPeekr.get_title(head_content) or ""
        parsed_url = urlparse(url)

        scores = {
            "title_length": self._score_title_length(title),
            "title_kw": self._score_keyword_presence(title),
            "meta_description": self._score_meta_description(
                meta.get("description", "")
            ),
            "canonical": self._score_canonical(meta.get("canonical"), url),
            "robot_ok": 1.0 if "noindex" not in meta.get("robots", "") else 0.0,
            "schema_org": self._score_schema_org(head_content),
            "url_quality": self._score_url_quality(parsed_url),
        }

        total_score = sum(
            weight * scores[factor] for factor, weight in self._weights.items()
        )

        decision = total_score >= self.threshold
        self._update_stats(decision)
        return decision

    def _score_title_length(self, title: str) -> float:
        length = len(title)
        if 50 <= length <= 60:
            return 1.0
        if 40 <= length < 50 or 60 < length <= 70:
            return 0.7
        return 0.3  # Poor length

    def _score_keyword_presence(self, text: str) -> float:
        if not self._kw_patterns:
            return 0.0
        matches = len(self._kw_patterns.findall(text))
        return min(matches * 0.3, 1.0)  # Saturates at 1.0 (four or more matches)

    def _score_meta_description(self, desc: str) -> float:
        length = len(desc)
        if 140 <= length <= 160:
            return 1.0
        return 0.5 if 120 <= length <= 200 else 0.2

    def _score_canonical(self, canonical: str, original: str) -> float:
        if not canonical:
            return 0.5  # Neutral score
        return 1.0 if canonical == original else 0.2

    def _score_schema_org(self, html: str) -> float:
        # Detect any schema.org markup in head
        return (
            1.0
            if re.search(r'<script[^>]+type=["\']application/ld\+json', html)
            else 0.0
        )

    def _score_url_quality(self, parsed_url) -> float:
        score = 1.0
        path = parsed_url.path.lower()

        # Penalty factors
        if len(path) > 80:
            score *= 0.7
        if re.search(r"\d{4}", path):
            score *= 0.8  # Numbers in path
        if parsed_url.query:
            score *= 0.6  # URL parameters
        if "_" in path:
            score *= 0.9  # Underscores vs hyphens

        return score
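
A brief usage sketch for the filters above (the domains and keywords are placeholders, not from this commit):

import asyncio

domain_filter = DomainFilter(
    allowed_domains="example.com",
    blocked_domains=["ads.example.com"],
)
print(domain_filter.apply("https://docs.example.com/guide"))  # True: subdomain of an allowed domain
print(domain_filter.apply("https://ads.example.com/banner"))  # False: blocked subdomain

# SEOFilter.apply is async because it first fetches the page head.
seo_filter = SEOFilter(threshold=0.5, keywords=["python", "tutorial"])
# asyncio.run(seo_filter.apply("https://docs.example.com/guide"))  # requires network access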
519 crawl4ai/deep_crawling/scorers.py Normal file
@@ -0,0 +1,519 @@
from abc import ABC, abstractmethod
from typing import List, Dict, Optional
from dataclasses import dataclass
from urllib.parse import urlparse, unquote
import re
import logging
from functools import lru_cache
from array import array
import ctypes
import platform

PLATFORM = platform.system()

# Pre-computed scores for small distances from the optimal path depth
_SCORE_LOOKUP = [1.0, 0.5, 0.3333333333333333, 0.25]

# Pre-computed scores for common year differences
_FRESHNESS_SCORES = [
    1.0,  # Current year
    0.9,  # Last year
    0.8,  # 2 years ago
    0.7,  # 3 years ago
    0.6,  # 4 years ago
    0.5,  # 5 years ago
]


class ScoringStats:
    __slots__ = ('_urls_scored', '_total_score', '_min_score', '_max_score')

    def __init__(self):
        self._urls_scored = 0
        self._total_score = 0.0
        self._min_score = None  # Lazy initialization
        self._max_score = None

    def update(self, score: float) -> None:
        """Optimized update with minimal operations"""
        self._urls_scored += 1
        self._total_score += score

        # Lazy min/max tracking - only if actually accessed
        if self._min_score is not None:
            if score < self._min_score:
                self._min_score = score
        if self._max_score is not None:
            if score > self._max_score:
                self._max_score = score

    def get_average(self) -> float:
        """Direct calculation instead of property"""
        return self._total_score / self._urls_scored if self._urls_scored else 0.0

    def get_min(self) -> float:
        """Lazy min calculation"""
        if self._min_score is None:
            self._min_score = self._total_score / self._urls_scored if self._urls_scored else 0.0
        return self._min_score

    def get_max(self) -> float:
        """Lazy max calculation"""
        if self._max_score is None:
            self._max_score = self._total_score / self._urls_scored if self._urls_scored else 0.0
        return self._max_score


class URLScorer(ABC):
    __slots__ = ('_weight', '_stats')

    def __init__(self, weight: float = 1.0):
        # Store weight directly as float32 for memory efficiency
        self._weight = ctypes.c_float(weight).value
        self._stats = ScoringStats()

    @abstractmethod
    def _calculate_score(self, url: str) -> float:
        """Calculate raw score for URL."""
        pass

    def score(self, url: str) -> float:
        """Calculate weighted score with minimal overhead."""
        score = self._calculate_score(url) * self._weight
        self._stats.update(score)
        return score

    @property
    def stats(self):
        """Access to scoring statistics."""
        return self._stats

    @property
    def weight(self):
        return self._weight


class CompositeScorer(URLScorer):
    __slots__ = ('_scorers', '_normalize', '_weights_array', '_score_array')

    def __init__(self, scorers: List[URLScorer], normalize: bool = True):
        """Initialize composite scorer combining multiple scoring strategies.

        Optimized for:
        - Fast parallel scoring
        - Memory efficient score aggregation
        - Quick short-circuit conditions
        - Pre-allocated arrays

        Args:
            scorers: List of scoring strategies to combine
            normalize: Whether to normalize final score by scorer count
        """
        super().__init__(weight=1.0)
        self._scorers = scorers
        self._normalize = normalize

        # Pre-allocate arrays for scores and weights
        self._weights_array = array('f', [s.weight for s in scorers])
        self._score_array = array('f', [0.0] * len(scorers))

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate combined score from all scoring strategies.

        Uses:
        1. Pre-allocated arrays for scores
        2. Short-circuit on zero scores
        3. Optimized normalization
        4. Vectorized operations where possible

        Args:
            url: URL to score

        Returns:
            Combined and optionally normalized score
        """
        total_score = 0.0
        scores = self._score_array

        # Get scores from all scorers
        for i, scorer in enumerate(self._scorers):
            # Use public score() method which applies weight
            scores[i] = scorer.score(url)
            total_score += scores[i]

        # Normalize if requested
        if self._normalize and self._scorers:
            count = len(self._scorers)
            return total_score / count

        return total_score

    def score(self, url: str) -> float:
        """Public scoring interface with stats tracking.

        Args:
            url: URL to score

        Returns:
            Final combined score
        """
        score = self._calculate_score(url)
        self.stats.update(score)
        return score
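
A minimal composition sketch (the keywords and URL are illustrative) using scorers defined later in this file:

scorer = CompositeScorer(
    [
        KeywordRelevanceScorer(["crawl", "docs"], weight=1.0),
        PathDepthScorer(optimal_depth=2, weight=0.5),
    ],
    normalize=True,
)
# Both keywords match (1.0) and the path depth equals the optimum (0.5 after weighting),
# so the normalized result is (1.0 + 0.5) / 2 = 0.75.
print(scorer.score("https://example.com/docs/crawl"))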


class KeywordRelevanceScorer(URLScorer):
    __slots__ = ('_keywords', '_case_sensitive')

    def __init__(self, keywords: List[str], weight: float = 1.0, case_sensitive: bool = False):
        super().__init__(weight=weight)
        self._case_sensitive = case_sensitive
        # Pre-process keywords once
        self._keywords = [k if case_sensitive else k.lower() for k in keywords]

    @lru_cache(maxsize=10000)
    def _url_bytes(self, url: str) -> bytes:
        """Cache decoded URL bytes"""
        return url.encode('utf-8') if self._case_sensitive else url.lower().encode('utf-8')

    def _calculate_score(self, url: str) -> float:
        """Fast string matching without regex or byte conversion"""
        if not self._case_sensitive:
            url = url.lower()

        matches = sum(1 for k in self._keywords if k in url)

        # Fast return paths
        if not matches:
            return 0.0
        if matches == len(self._keywords):
            return 1.0

        return matches / len(self._keywords)


class PathDepthScorer(URLScorer):
    __slots__ = ('_optimal_depth',)

    def __init__(self, optimal_depth: int = 3, weight: float = 1.0):
        super().__init__(weight=weight)
        self._optimal_depth = optimal_depth

    @staticmethod
    @lru_cache(maxsize=10000)
    def _quick_depth(path: str) -> int:
        """Ultra fast path depth calculation.

        Examples:
            - "http://example.com" -> 0   # No path segments
            - "http://example.com/" -> 0  # Empty path
            - "http://example.com/a" -> 1
            - "http://example.com/a/b" -> 2
        """
        if not path or path == '/':
            return 0

        if '/' not in path:
            return 0

        depth = 0
        last_was_slash = True

        for c in path:
            if c == '/':
                if not last_was_slash:
                    depth += 1
                last_was_slash = True
            else:
                last_was_slash = False

        if not last_was_slash:
            depth += 1

        return depth

    @lru_cache(maxsize=10000)  # Cache the whole calculation
    def _calculate_score(self, url: str) -> float:
        pos = url.find('/', url.find('://') + 3)
        if pos == -1:
            depth = 0
        else:
            depth = self._quick_depth(url[pos:])

        # Use lookup table for common distances
        distance = depth - self._optimal_depth
        distance = distance if distance >= 0 else -distance  # Faster than abs()

        if distance < 4:
            return _SCORE_LOOKUP[distance]

        return 1.0 / (1.0 + distance)


class ContentTypeScorer(URLScorer):
    __slots__ = ('_exact_types', '_regex_types')

    def __init__(self, type_weights: Dict[str, float], weight: float = 1.0):
        """Initialize scorer with type weights map.

        Args:
            type_weights: Dict mapping file extensions/patterns to scores (e.g. {'.html$': 1.0})
            weight: Overall weight multiplier for this scorer
        """
        super().__init__(weight=weight)
        self._exact_types = {}  # Fast lookup for simple extensions
        self._regex_types = []  # Fallback for complex patterns

        # Split into exact vs regex matchers for performance
        for pattern, score in type_weights.items():
            if pattern.startswith('.') and pattern.endswith('$'):
                ext = pattern[1:-1]
                self._exact_types[ext] = score
            else:
                self._regex_types.append((re.compile(pattern), score))

        # Sort complex patterns by score for early exit
        self._regex_types.sort(key=lambda x: -x[1])

    @staticmethod
    @lru_cache(maxsize=10000)
    def _quick_extension(url: str) -> str:
        """Extract file extension ultra-fast without regex/splits.

        Handles:
        - Basic extensions: "example.html" -> "html"
        - Query strings: "page.php?id=1" -> "php"
        - Fragments: "doc.pdf#page=1" -> "pdf"
        - Path params: "file.jpg;width=100" -> "jpg"

        Args:
            url: URL to extract extension from

        Returns:
            Extension without dot, or empty string if none found
        """
        pos = url.rfind('.')
        if pos == -1:
            return ''

        # Find first non-alphanumeric char after extension
        end = len(url)
        for i in range(pos + 1, len(url)):
            c = url[i]
            # Stop at query string, fragment, path param or any non-alphanumeric
            if c in '?#;' or not c.isalnum():
                end = i
                break

        return url[pos + 1:end].lower()

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate content type score for URL.

        Uses staged approach:
        1. Try exact extension match (fast path)
        2. Fall back to regex patterns if needed

        Args:
            url: URL to score

        Returns:
            Score between 0.0 and 1.0 * weight
        """
        # Fast path: direct extension lookup
        ext = self._quick_extension(url)
        if ext:
            score = self._exact_types.get(ext, None)
            if score is not None:
                return score

        # Slow path: regex patterns
        for pattern, score in self._regex_types:
            if pattern.search(url):
                return score

        return 0.0


class FreshnessScorer(URLScorer):
    __slots__ = ('_date_pattern', '_current_year')

    def __init__(self, weight: float = 1.0, current_year: int = 2024):
        """Initialize freshness scorer.

        Extracts and scores dates from URLs using formats:
        - YYYY/MM/DD
        - YYYY-MM-DD
        - YYYY_MM_DD
        - YYYY (year only)

        Args:
            weight: Score multiplier
            current_year: Year to calculate freshness against (default 2024)
        """
        super().__init__(weight=weight)
        self._current_year = current_year

        # Combined pattern for all date formats
        # Uses non-capturing groups (?:) and alternation
        self._date_pattern = re.compile(
            r'(?:/'              # Path separator
            r'|[-_])'            # or date separators
            r'((?:19|20)\d{2})'  # Year group (1900-2099)
            r'(?:'               # Optional month/day group
            r'(?:/|[-_])'        # Date separator
            r'(?:\d{2})'         # Month
            r'(?:'               # Optional day
            r'(?:/|[-_])'        # Date separator
            r'(?:\d{2})'         # Day
            r')?'                # Day is optional
            r')?'                # Month/day group is optional
        )

    @lru_cache(maxsize=10000)
    def _extract_year(self, url: str) -> Optional[int]:
        """Extract the most recent year from URL.

        Args:
            url: URL to extract year from

        Returns:
            Year as int or None if no valid year found
        """
        matches = self._date_pattern.finditer(url)
        latest_year = None

        # Find most recent year
        for match in matches:
            year = int(match.group(1))
            if (year <= self._current_year and  # Sanity check
                    (latest_year is None or year > latest_year)):
                latest_year = year

        return latest_year

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate freshness score based on URL date.

        More recent years score higher. Uses a pre-computed scoring
        table for common year differences.

        Args:
            url: URL to score

        Returns:
            Score between 0.0 and 1.0 * weight
        """
        year = self._extract_year(url)
        if year is None:
            return 0.5  # Default score

        # Use lookup table for common year differences
        year_diff = self._current_year - year
        if year_diff < len(_FRESHNESS_SCORES):
            return _FRESHNESS_SCORES[year_diff]

        # Fallback calculation for older content
        return max(0.1, 1.0 - year_diff * 0.1)
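
Two toy calls (URLs are illustrative) showing the lookup-table path and the no-date default:

fresh = FreshnessScorer(current_year=2024)
print(fresh.score("https://example.com/blog/2023-06-01/post"))  # 0.9: one-year-old date in the URL
print(fresh.score("https://example.com/about"))                 # 0.5: no date found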


class DomainAuthorityScorer(URLScorer):
    __slots__ = ('_domain_weights', '_default_weight', '_top_domains')

    def __init__(
        self,
        domain_weights: Dict[str, float],
        default_weight: float = 0.5,
        weight: float = 1.0,
    ):
        """Initialize domain authority scorer.

        Args:
            domain_weights: Dict mapping domains to authority scores
            default_weight: Score for unknown domains
            weight: Overall scorer weight multiplier

        Example:
            {
                'python.org': 1.0,
                'github.com': 0.9,
                'medium.com': 0.7
            }
        """
        super().__init__(weight=weight)

        # Pre-process domains for faster lookup
        self._domain_weights = {
            domain.lower(): score
            for domain, score in domain_weights.items()
        }
        self._default_weight = default_weight

        # Cache top domains for fast path (lowercased keys to match extraction)
        self._top_domains = {
            domain: score
            for domain, score in sorted(
                self._domain_weights.items(),
                key=lambda x: -x[1]
            )[:5]  # Keep top 5 highest scoring domains
        }

    @staticmethod
    @lru_cache(maxsize=10000)
    def _extract_domain(url: str) -> str:
        """Extract domain from URL ultra-fast.

        Handles:
        - Basic domains: "example.com"
        - Subdomains: "sub.example.com"
        - Ports: "example.com:8080"
        - IPv4: "192.168.1.1"

        Args:
            url: Full URL to extract domain from

        Returns:
            Lowercase domain without port
        """
        # Find domain start
        start = url.find('://')
        if start == -1:
            start = 0
        else:
            start += 3

        # Find domain end
        end = url.find('/', start)
        if end == -1:
            end = url.find('?', start)
            if end == -1:
                end = url.find('#', start)
                if end == -1:
                    end = len(url)

        # Extract domain and remove port
        domain = url[start:end]
        port_idx = domain.rfind(':')
        if port_idx != -1:
            domain = domain[:port_idx]

        return domain.lower()

    @lru_cache(maxsize=10000)
    def _calculate_score(self, url: str) -> float:
        """Calculate domain authority score.

        Uses staged approach:
        1. Check top domains (fastest)
        2. Check full domain weights
        3. Return default weight

        Args:
            url: URL to score

        Returns:
            Authority score between 0.0 and 1.0 * weight
        """
        domain = self._extract_domain(url)

        # Fast path: check top domains first
        score = self._top_domains.get(domain)
        if score is not None:
            return score

        # Regular path: check all domains
        return self._domain_weights.get(domain, self._default_weight)
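
A short sketch of the staged lookup (the domains and scores are examples):

authority = DomainAuthorityScorer(
    {"python.org": 1.0, "github.com": 0.9},
    default_weight=0.4,
)
print(authority.score("https://python.org/about"))    # 1.0: hits the top-domain fast path
print(authority.score("https://docs.python.org/3/"))  # 0.4: lookup is exact, subdomains get the default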
219 crawl4ai/docker_client.py Normal file
@@ -0,0 +1,219 @@
from typing import List, Optional, Union, AsyncGenerator, Dict, Any, Callable
import httpx
import json
from urllib.parse import urljoin
import asyncio

from .async_configs import BrowserConfig, CrawlerRunConfig
from .models import CrawlResult
from .async_logger import AsyncLogger, LogLevel
from .utils import hooks_to_string


class Crawl4aiClientError(Exception):
    """Base exception for Crawl4ai Docker client errors."""
    pass


class ConnectionError(Crawl4aiClientError):
    """Raised when connection to the Docker server fails."""
    pass


class RequestError(Crawl4aiClientError):
    """Raised when the server returns an error response."""
    pass


class Crawl4aiDockerClient:
    """Client for interacting with Crawl4AI Docker server with token authentication."""

    def __init__(
        self,
        base_url: str = "http://localhost:8000",
        timeout: float = 30.0,
        verify_ssl: bool = True,
        verbose: bool = True,
        log_file: Optional[str] = None
    ):
        self.base_url = base_url.rstrip('/')
        self.timeout = timeout
        self.logger = AsyncLogger(log_file=log_file, log_level=LogLevel.DEBUG, verbose=verbose)
        self._http_client = httpx.AsyncClient(
            timeout=timeout,
            verify=verify_ssl,
            headers={"Content-Type": "application/json"}
        )
        self._token: Optional[str] = None

    async def authenticate(self, email: str) -> None:
        """Authenticate with the server and store the token."""
        url = urljoin(self.base_url, "/token")
        try:
            self.logger.info(f"Authenticating with email: {email}", tag="AUTH")
            response = await self._http_client.post(url, json={"email": email})
            response.raise_for_status()
            data = response.json()
            self._token = data["access_token"]
            self._http_client.headers["Authorization"] = f"Bearer {self._token}"
            self.logger.success("Authentication successful", tag="AUTH")
        except (httpx.RequestError, httpx.HTTPStatusError) as e:
            error_msg = f"Authentication failed: {str(e)}"
            self.logger.error(error_msg, tag="ERROR")
            raise ConnectionError(error_msg)

    async def _check_server(self) -> None:
        """Check if server is reachable, raising an error if not."""
        try:
            await self._http_client.get(urljoin(self.base_url, "/health"))
            self.logger.success(f"Connected to {self.base_url}", tag="READY")
        except httpx.RequestError as e:
            self.logger.error(f"Server unreachable: {str(e)}", tag="ERROR")
            raise ConnectionError(f"Cannot connect to server: {str(e)}")

    def _prepare_request(
        self,
        urls: List[str],
        browser_config: Optional[BrowserConfig] = None,
        crawler_config: Optional[CrawlerRunConfig] = None,
        hooks: Optional[Union[Dict[str, Callable], Dict[str, str]]] = None,
        hooks_timeout: int = 30
    ) -> Dict[str, Any]:
        """Prepare request data from configs."""
        if self._token:
            self._http_client.headers["Authorization"] = f"Bearer {self._token}"

        request_data = {
            "urls": urls,
            "browser_config": browser_config.dump() if browser_config else {},
            "crawler_config": crawler_config.dump() if crawler_config else {}
        }

        # Handle hooks if provided
        if hooks:
            # Check if hooks are already strings or need conversion
            if any(callable(v) for v in hooks.values()):
                # Convert function objects to strings
                hooks_code = hooks_to_string(hooks)
            else:
                # Already in string format
                hooks_code = hooks

            request_data["hooks"] = {
                "code": hooks_code,
                "timeout": hooks_timeout
            }

        return request_data

    async def _request(self, method: str, endpoint: str, **kwargs) -> httpx.Response:
        """Make an HTTP request with error handling."""
        url = urljoin(self.base_url, endpoint)
        try:
            response = await self._http_client.request(method, url, **kwargs)
            response.raise_for_status()
            return response
        except httpx.TimeoutException as e:
            raise ConnectionError(f"Request timed out: {str(e)}")
        except httpx.RequestError as e:
            raise ConnectionError(f"Failed to connect: {str(e)}")
        except httpx.HTTPStatusError as e:
            error_msg = (e.response.json().get("detail", str(e))
                         if "application/json" in e.response.headers.get("content-type", "")
                         else str(e))
            raise RequestError(f"Server error {e.response.status_code}: {error_msg}")

    async def crawl(
        self,
        urls: List[str],
        browser_config: Optional[BrowserConfig] = None,
        crawler_config: Optional[CrawlerRunConfig] = None,
        hooks: Optional[Union[Dict[str, Callable], Dict[str, str]]] = None,
        hooks_timeout: int = 30
    ) -> Union[CrawlResult, List[CrawlResult], AsyncGenerator[CrawlResult, None]]:
        """
        Execute a crawl operation.

        Args:
            urls: List of URLs to crawl
            browser_config: Browser configuration
            crawler_config: Crawler configuration
            hooks: Optional hooks - can be either:
                - Dict[str, Callable]: Function objects that will be converted to strings
                - Dict[str, str]: Already stringified hook code
            hooks_timeout: Timeout in seconds for each hook execution (1-120)

        Returns:
            Single CrawlResult, list of results, or async generator for streaming

        Example with function hooks:
            >>> async def my_hook(page, context, **kwargs):
            ...     await page.set_viewport_size({"width": 1920, "height": 1080})
            ...     return page
            >>>
            >>> result = await client.crawl(
            ...     ["https://example.com"],
            ...     hooks={"on_page_context_created": my_hook}
            ... )
        """
        await self._check_server()

        data = self._prepare_request(urls, browser_config, crawler_config, hooks, hooks_timeout)
        is_streaming = crawler_config and crawler_config.stream

        self.logger.info(f"Crawling {len(urls)} URLs {'(streaming)' if is_streaming else ''}", tag="CRAWL")

        if is_streaming:
            async def stream_results() -> AsyncGenerator[CrawlResult, None]:
                async with self._http_client.stream("POST", f"{self.base_url}/crawl/stream", json=data) as response:
                    response.raise_for_status()
                    async for line in response.aiter_lines():
                        if line.strip():
                            result = json.loads(line)
                            if "error" in result:
                                self.logger.error_status(url=result.get("url", "unknown"), error=result["error"])
                                continue
                            self.logger.url_status(url=result.get("url", "unknown"), success=True, timing=result.get("timing", 0.0))
                            if result.get("status") == "completed":
                                continue
                            else:
                                yield CrawlResult(**result)
            return stream_results()

        response = await self._request("POST", "/crawl", json=data)
        result_data = response.json()
        if not result_data.get("success", False):
            raise RequestError(f"Crawl failed: {result_data.get('msg', 'Unknown error')}")

        results = [CrawlResult(**r) for r in result_data.get("results", [])]
        self.logger.success(f"Crawl completed with {len(results)} results", tag="CRAWL")
        return results[0] if len(results) == 1 else results

    async def get_schema(self) -> Dict[str, Any]:
        """Retrieve configuration schemas."""
        response = await self._request("GET", "/schema")
        return response.json()

    async def close(self) -> None:
        """Close the HTTP client session."""
        self.logger.info("Closing client", tag="CLOSE")
        await self._http_client.aclose()

    async def __aenter__(self) -> "Crawl4aiDockerClient":
        return self

    async def __aexit__(self, exc_type: Optional[type], exc_val: Optional[Exception], exc_tb: Optional[Any]) -> None:
        await self.close()


# Example usage
async def main():
    async with Crawl4aiDockerClient(verbose=True) as client:
        await client.authenticate("user@example.com")
        result = await client.crawl(["https://example.com"])
        print(result)
        schema = await client.get_schema()
        print(schema)


if __name__ == "__main__":
    asyncio.run(main())
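
A streaming variant of the example above (a sketch: it relies on the stream flag that crawl() checks, and on the /crawl/stream endpoint the client code already targets):

async def stream_main():
    async with Crawl4aiDockerClient(verbose=True) as client:
        await client.authenticate("user@example.com")
        results = await client.crawl(
            ["https://example.com"],
            crawler_config=CrawlerRunConfig(stream=True),
        )
        async for item in results:  # an async generator in streaming mode
            print(item)

# asyncio.run(stream_main())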
File diff suppressed because it is too large
@@ -54,13 +54,13 @@ class HTML2Text(html.parser.HTMLParser):
        self.td_count = 0
        self.table_start = False
        self.unicode_snob = config.UNICODE_SNOB  # covered in cli


        self.escape_snob = config.ESCAPE_SNOB  # covered in cli
        self.escape_backslash = config.ESCAPE_BACKSLASH  # covered in cli
        self.escape_dot = config.ESCAPE_DOT  # covered in cli
        self.escape_plus = config.ESCAPE_PLUS  # covered in cli
        self.escape_dash = config.ESCAPE_DASH  # covered in cli


        self.links_each_paragraph = config.LINKS_EACH_PARAGRAPH
        self.body_width = bodywidth  # covered in cli
        self.skip_internal_links = config.SKIP_INTERNAL_LINKS  # covered in cli
@@ -144,8 +144,8 @@ class HTML2Text(html.parser.HTMLParser):

    def update_params(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

            setattr(self, key, value)

    def feed(self, data: str) -> None:
        data = data.replace("</' + 'script>", "</ignore>")
        super().feed(data)
@@ -510,6 +510,7 @@ class HTML2Text(html.parser.HTMLParser):

        if tag == "a" and not self.ignore_links:
            if start:
                self.inside_link = True
                if (
                    "href" in attrs
                    and attrs["href"] is not None
@@ -526,6 +527,7 @@ class HTML2Text(html.parser.HTMLParser):
                else:
                    self.astack.append(None)
            else:
                self.inside_link = False
                if self.astack:
                    a = self.astack.pop()
                    if self.maybe_automatic_link and not self.empty_link:
@@ -610,13 +612,22 @@ class HTML2Text(html.parser.HTMLParser):
                    self.o("[" + str(a_props.count) + "]")

        if tag == "dl" and start:
            self.p()
        if tag == "dt" and not start:
            self.pbr()
        if tag == "dd" and start:
            self.o("    ")
        if tag == "dd" and not start:
            self.pbr()
            self.p()  # Add paragraph break before list starts
            self.p_p = 0  # Reset paragraph state

        elif tag == "dt" and start:
            if self.p_p == 0:  # If not first term
                self.o("\n\n")  # Add spacing before new term-definition pair
            self.p_p = 0  # Reset paragraph state

        elif tag == "dt" and not start:
            self.o("\n")  # Single newline between term and definition

        elif tag == "dd" and start:
            self.o("    ")  # Indent definition

        elif tag == "dd" and not start:
            self.p_p = 0

        if tag in ["ol", "ul"]:
            # Google Docs create sub lists as top level lists
@@ -903,7 +914,13 @@ class HTML2Text(html.parser.HTMLParser):
            self.empty_link = False

        if not self.code and not self.pre and not entity_char:
            data = escape_md_section(data, snob=self.escape_snob, escape_dot=self.escape_dot, escape_plus=self.escape_plus, escape_dash=self.escape_dash)
            data = escape_md_section(
                data,
                snob=self.escape_snob,
                escape_dot=self.escape_dot,
                escape_plus=self.escape_plus,
                escape_dash=self.escape_dash,
            )
        self.preceding_data = data
        self.o(data, puredata=True)

@@ -1006,6 +1023,7 @@ class HTML2Text(html.parser.HTMLParser):
            newlines += 1
        return result


def html2text(html: str, baseurl: str = "", bodywidth: Optional[int] = None) -> str:
    if bodywidth is None:
        bodywidth = config.BODY_WIDTH
@@ -1013,17 +1031,19 @@ def html2text(html: str, baseurl: str = "", bodywidth: Optional[int] = None) ->

    return h.handle(html)


class CustomHTML2Text(HTML2Text):
    def __init__(self, *args, handle_code_in_pre=False, **kwargs):
        super().__init__(*args, **kwargs)
        self.inside_pre = False
        self.inside_code = False
        self.inside_link = False
        self.preserve_tags = set()  # Set of tags to preserve
        self.current_preserved_tag = None
        self.preserved_content = []
        self.preserve_depth = 0
        self.handle_code_in_pre = handle_code_in_pre

        self.handle_code_in_pre = handle_code_in_pre

        # Configuration options
        self.skip_internal_links = False
        self.single_line_break = False
@@ -1041,9 +1061,9 @@ class CustomHTML2Text(HTML2Text):
    def update_params(self, **kwargs):
        """Update parameters and set preserved tags."""
        for key, value in kwargs.items():
            if key == 'preserve_tags':
            if key == "preserve_tags":
                self.preserve_tags = set(value)
            elif key == 'handle_code_in_pre':
            elif key == "handle_code_in_pre":
                self.handle_code_in_pre = value
            else:
                setattr(self, key, value)
@@ -1056,17 +1076,19 @@ class CustomHTML2Text(HTML2Text):
                self.current_preserved_tag = tag
                self.preserved_content = []
                # Format opening tag with attributes
                attr_str = ''.join(f' {k}="{v}"' for k, v in attrs.items() if v is not None)
                self.preserved_content.append(f'<{tag}{attr_str}>')
                attr_str = "".join(
                    f' {k}="{v}"' for k, v in attrs.items() if v is not None
                )
                self.preserved_content.append(f"<{tag}{attr_str}>")
                self.preserve_depth += 1
                return
            else:
                self.preserve_depth -= 1
                if self.preserve_depth == 0:
                    self.preserved_content.append(f'</{tag}>')
                    self.preserved_content.append(f"</{tag}>")
                    # Output the preserved HTML block with proper spacing
                    preserved_html = ''.join(self.preserved_content)
                    self.o('\n' + preserved_html + '\n')
                    preserved_html = "".join(self.preserved_content)
                    self.o("\n" + preserved_html + "\n")
                    self.current_preserved_tag = None
                return

@@ -1074,30 +1096,38 @@ class CustomHTML2Text(HTML2Text):
        if self.preserve_depth > 0:
            if start:
                # Format nested tags with attributes
                attr_str = ''.join(f' {k}="{v}"' for k, v in attrs.items() if v is not None)
                self.preserved_content.append(f'<{tag}{attr_str}>')
                attr_str = "".join(
                    f' {k}="{v}"' for k, v in attrs.items() if v is not None
                )
                self.preserved_content.append(f"<{tag}{attr_str}>")
            else:
                self.preserved_content.append(f'</{tag}>')
                self.preserved_content.append(f"</{tag}>")
            return

        # Handle pre tags
        if tag == 'pre':
        if tag == "pre":
            if start:
                self.o('```\n')  # Markdown code block start
                self.o("```\n")  # Markdown code block start
                self.inside_pre = True
            else:
                self.o('\n```\n')  # Markdown code block end
                self.o("\n```\n")  # Markdown code block end
                self.inside_pre = False
        elif tag == 'code':
        elif tag == "code":
            if self.inside_pre and not self.handle_code_in_pre:
                # Ignore code tags inside pre blocks if handle_code_in_pre is False
                return
            if start:
                self.o('`')  # Markdown inline code start
                if not self.inside_link:
                    self.o("`")  # Only output backtick if not inside a link
                self.inside_code = True
            else:
                self.o('`')  # Markdown inline code end
                if not self.inside_link:
                    self.o("`")  # Only output backtick if not inside a link
                self.inside_code = False

        # If inside a link, let the parent class handle the content
        if self.inside_link:
            super().handle_tag(tag, attrs, start)
        else:
            super().handle_tag(tag, attrs, start)

@@ -1113,13 +1143,12 @@ class CustomHTML2Text(HTML2Text):
            return
        if self.inside_code:
            # Inline code: no newlines allowed
            self.o(data.replace('\n', ' '))
            self.o(data.replace("\n", " "))
            return

        # Default behavior for other tags
        super().handle_data(data, entity_char)


# # Handle pre tags
# if tag == 'pre':
# if start:
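
A usage sketch for the options this diff touches (the HTML snippet is illustrative):

h = CustomHTML2Text(handle_code_in_pre=True)
h.update_params(preserve_tags={"table"})
print(h.handle("<p>Intro</p><table><tr><td>kept verbatim</td></tr></table>"))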

@@ -1,2 +1,3 @@
class OutCallback:
    def __call__(self, s: str) -> None: ...
    def __call__(self, s: str) -> None:
        ...

@@ -210,7 +210,7 @@ def escape_md_section(
    snob: bool = False,
    escape_dot: bool = True,
    escape_plus: bool = True,
    escape_dash: bool = True
    escape_dash: bool = True,
) -> str:
    """
    Escapes markdown-sensitive characters across whole document sections.
@@ -233,6 +233,7 @@ def escape_md_section(

    return text


def reformat_table(lines: List[str], right_margin: int) -> List[str]:
    """
    Given the lines of a table
69 crawl4ai/hub.py Normal file
@@ -0,0 +1,69 @@
# crawl4ai/hub.py
from abc import ABC, abstractmethod
from typing import Dict, Type, Union
import logging
import importlib
from pathlib import Path
import inspect

logger = logging.getLogger(__name__)


class BaseCrawler(ABC):
    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)

    @abstractmethod
    async def run(self, url: str = "", **kwargs) -> str:
        """
        Implement this method to return a JSON string.
        Must accept a URL plus arbitrary kwargs for flexibility.
        """
        pass

    def __init_subclass__(cls, **kwargs):
        """Enforce interface validation on subclassing"""
        super().__init_subclass__(**kwargs)

        # Verify run method signature
        run_method = cls.run
        if not run_method.__code__.co_argcount >= 2:  # self + url
            raise TypeError(f"{cls.__name__} must implement 'run(self, url: str, **kwargs)'")

        # Verify async nature
        if not inspect.iscoroutinefunction(run_method):
            raise TypeError(f"{cls.__name__}.run must be async")


class CrawlerHub:
    _crawlers: Dict[str, Type[BaseCrawler]] = {}

    @classmethod
    def _discover_crawlers(cls):
        """Dynamically load crawler classes from the crawlers/ package"""
        base_path = Path(__file__).parent / "crawlers"
        for crawler_dir in base_path.iterdir():
            if crawler_dir.is_dir():
                try:
                    module = importlib.import_module(
                        f"crawl4ai.crawlers.{crawler_dir.name}.crawler"
                    )
                    for attr in dir(module):
                        cls._maybe_register_crawler(
                            getattr(module, attr), crawler_dir.name
                        )
                except Exception as e:
                    logger.warning(f"Failed {crawler_dir.name}: {str(e)}")

    @classmethod
    def _maybe_register_crawler(cls, obj, name: str):
        """Register any BaseCrawler subclass, attaching module-level __meta__"""
        if isinstance(obj, type) and issubclass(obj, BaseCrawler) and obj != BaseCrawler:
            module = importlib.import_module(obj.__module__)
            obj.meta = getattr(module, "__meta__", {})
            cls._crawlers[name] = obj

    @classmethod
    def get(cls, name: str) -> Union[Type[BaseCrawler], None]:
        if not cls._crawlers:
            cls._discover_crawlers()
        return cls._crawlers.get(name)
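
A usage sketch (the crawler name is hypothetical; it must match a package under crawl4ai/crawlers/):

import asyncio

crawler_cls = CrawlerHub.get("example_crawler")  # hypothetical package name
if crawler_cls:
    print(crawler_cls.meta)  # __meta__ dict attached during registration
    print(asyncio.run(crawler_cls().run(url="https://example.com")))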

@@ -2,29 +2,150 @@ import subprocess
import sys
import asyncio
from .async_logger import AsyncLogger, LogLevel
from pathlib import Path
import os
import shutil

# Initialize logger
logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)


def setup_home_directory():
    """Set up the .crawl4ai folder structure in the user's home directory."""
    base_dir = os.getenv("CRAWL4_AI_BASE_DIRECTORY")
    crawl4ai_folder = Path(base_dir) if base_dir else Path.home()
    crawl4ai_config = crawl4ai_folder / "global.yml"
    crawl4ai_folder = crawl4ai_folder / ".crawl4ai"
    cache_folder = crawl4ai_folder / "cache"
    content_folders = [
        "html_content",
        "cleaned_html",
        "markdown_content",
        "extracted_content",
        "screenshots",
    ]

    # Clean up old cache if exists
    if cache_folder.exists():
        shutil.rmtree(cache_folder)

    # Create new folder structure
    crawl4ai_folder.mkdir(exist_ok=True)
    cache_folder.mkdir(exist_ok=True)
    for folder in content_folders:
        (crawl4ai_folder / folder).mkdir(exist_ok=True)

    # If config file does not exist, create it
    if not crawl4ai_config.exists():
        with open(crawl4ai_config, "w") as f:
            f.write("")


def post_install():
    """Run all post-installation tasks"""
    """
    Run all post-installation tasks.
    Checks CRAWL4AI_MODE environment variable. If set to 'api',
    skips Playwright browser installation.
    """
    logger.info("Running post-installation setup...", tag="INIT")
    install_playwright()
    setup_home_directory()

    # Check environment variable to conditionally skip Playwright install
    run_mode = os.getenv('CRAWL4AI_MODE')
    if run_mode == 'api':
        logger.warning(
            "CRAWL4AI_MODE=api detected. Skipping Playwright browser installation.",
            tag="SETUP"
        )
    else:
        # Proceed with installation only if mode is not 'api'
        install_playwright()

    run_migration()
    # TODO: Will be added in the future
    # setup_builtin_browser()
    logger.success("Post-installation setup completed!", tag="COMPLETE")


def setup_builtin_browser():
    """Set up a builtin browser for use with Crawl4AI"""
    try:
        logger.info("Setting up builtin browser...", tag="INIT")
        asyncio.run(_setup_builtin_browser())
        logger.success("Builtin browser setup completed!", tag="COMPLETE")
    except Exception as e:
        logger.warning(f"Failed to set up builtin browser: {e}")
        logger.warning("You can manually set up a builtin browser using 'crawl4ai-doctor builtin-browser-start'")


async def _setup_builtin_browser():
    try:
        # Import BrowserProfiler here to avoid circular imports
        from .browser_profiler import BrowserProfiler
        profiler = BrowserProfiler(logger=logger)

        # Launch the builtin browser
        cdp_url = await profiler.launch_builtin_browser(headless=True)
        if cdp_url:
            logger.success(f"Builtin browser launched at {cdp_url}", tag="BROWSER")
        else:
            logger.warning("Failed to launch builtin browser", tag="BROWSER")
    except Exception as e:
        logger.warning(f"Error setting up builtin browser: {e}", tag="BROWSER")
        raise


def install_playwright():
    logger.info("Installing Playwright browsers...", tag="INIT")
    try:
        # subprocess.check_call([sys.executable, "-m", "playwright", "install", "--with-deps", "--force", "chrome"])
        subprocess.check_call([sys.executable, "-m", "playwright", "install", "--with-deps", "--force", "chromium"])
        logger.success("Playwright installation completed successfully.", tag="COMPLETE")
    except subprocess.CalledProcessError as e:
        subprocess.check_call(
            [
                sys.executable,
                "-m",
                "playwright",
                "install",
                "--with-deps",
                "--force",
                "chromium",
            ]
        )
        logger.success(
            "Playwright installation completed successfully.", tag="COMPLETE"
        )
    except subprocess.CalledProcessError:
        # logger.error(f"Error during Playwright installation: {e}", tag="ERROR")
        logger.warning(f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation.")
    except Exception as e:
        logger.warning(
            f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
        )
    except Exception:
        # logger.error(f"Unexpected error during Playwright installation: {e}", tag="ERROR")
        logger.warning(f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation.")
        logger.warning(
            f"Please run '{sys.executable} -m playwright install --with-deps' manually after the installation."
        )

    # Install Patchright browsers for undetected browser support
    logger.info("Installing Patchright browsers for undetected mode...", tag="INIT")
    try:
        subprocess.check_call(
            [
                sys.executable,
                "-m",
                "patchright",
                "install",
                "--with-deps",
                "--force",
                "chromium",
            ]
        )
        logger.success(
            "Patchright installation completed successfully.", tag="COMPLETE"
        )
    except subprocess.CalledProcessError:
        logger.warning(
            f"Please run '{sys.executable} -m patchright install --with-deps' manually after the installation."
        )
    except Exception:
        logger.warning(
            f"Please run '{sys.executable} -m patchright install --with-deps' manually after the installation."
        )


def run_migration():
    """Initialize database during installation"""
@@ -33,18 +154,26 @@ def run_migration():
        from crawl4ai.async_database import async_db_manager

        asyncio.run(async_db_manager.initialize())
        logger.success("Database initialization completed successfully.", tag="COMPLETE")
        logger.success(
            "Database initialization completed successfully.", tag="COMPLETE"
        )
    except ImportError:
        logger.warning("Database module not found. Will initialize on first use.")
    except Exception as e:
        logger.warning(f"Database initialization failed: {e}")
        logger.warning("Database will be initialized on first use")


async def run_doctor():
    """Test if Crawl4AI is working properly"""
    logger.info("Running Crawl4AI health check...", tag="INIT")
    try:
        from .async_webcrawler import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig, CacheMode
        from .async_webcrawler import (
            AsyncWebCrawler,
            BrowserConfig,
            CrawlerRunConfig,
            CacheMode,
        )

        browser_config = BrowserConfig(
            headless=True,
@@ -52,7 +181,7 @@ async def run_doctor():
            ignore_https_errors=True,
            light_mode=True,
            viewport_width=1280,
            viewport_height=720
            viewport_height=720,
        )

        run_config = CrawlerRunConfig(
@@ -62,10 +191,7 @@ async def run_doctor():

        async with AsyncWebCrawler(config=browser_config) as crawler:
            logger.info("Testing crawling capabilities...", tag="TEST")
            result = await crawler.arun(
                url="https://crawl4ai.com",
                config=run_config
            )
            result = await crawler.arun(url="https://crawl4ai.com", config=run_config)

            if result and result.markdown:
                logger.success("✅ Crawling test passed!", tag="COMPLETE")
@@ -77,7 +203,10 @@ async def run_doctor():
        logger.error(f"❌ Test failed: {e}", tag="ERROR")
        return False


def doctor():
    """Entry point for the doctor command"""
    import asyncio
    return asyncio.run(run_doctor())

    asyncio.run(run_doctor())
    sys.exit(0)
@@ -1,15 +1,18 @@
import os, sys
import os


# Return the content of a JS script, loaded by name from the folder containing this script
def load_js_script(script_name):
    # Get the path of the current script
    current_script_path = os.path.dirname(os.path.realpath(__file__))
    # Get the path of the script to load
    script_path = os.path.join(current_script_path, script_name + '.js')
    script_path = os.path.join(current_script_path, script_name + ".js")
    # Check if the script exists
    if not os.path.exists(script_path):
        raise ValueError(f"Script {script_name} not found in the folder {current_script_path}")
        raise ValueError(
            f"Script {script_name} not found in the folder {current_script_path}"
        )
    # Load the content of the script
    with open(script_path, 'r') as f:
    with open(script_path, "r") as f:
        script_content = f.read()
    return script_content

@@ -115,5 +115,6 @@ async () => {
    document.body.style.overflow = "auto";

    // Wait a bit for any animations to complete
    await new Promise((resolve) => setTimeout(resolve, 100));
    document.body.scrollIntoView(false);
    await new Promise((resolve) => setTimeout(resolve, 50));
};

0 crawl4ai/legacy/__init__.py Normal file
123 crawl4ai/legacy/cli.py Normal file
@@ -0,0 +1,123 @@
import click
import sys
import asyncio
from typing import List
from .docs_manager import DocsManager
from .async_logger import AsyncLogger

logger = AsyncLogger(verbose=True)
docs_manager = DocsManager(logger)


def print_table(headers: List[str], rows: List[List[str]], padding: int = 2):
    """Print formatted table with headers and rows"""
    widths = [max(len(str(cell)) for cell in col) for col in zip(headers, *rows)]
    border = "+" + "+".join("-" * (w + 2 * padding) for w in widths) + "+"

    def format_row(row):
        return (
            "|"
            + "|".join(
                f"{' ' * padding}{str(cell):<{w}}{' ' * padding}"
                for cell, w in zip(row, widths)
            )
            + "|"
        )

    click.echo(border)
    click.echo(format_row(headers))
    click.echo(border)
    for row in rows:
        click.echo(format_row(row))
    click.echo(border)


@click.group()
def cli():
    """Crawl4AI Command Line Interface"""
    pass


@cli.group()
def docs():
    """Documentation operations"""
    pass


@docs.command()
@click.argument("sections", nargs=-1)
@click.option(
    "--mode", type=click.Choice(["extended", "condensed"]), default="extended"
)
def combine(sections: tuple, mode: str):
    """Combine documentation sections"""
    try:
        asyncio.run(docs_manager.ensure_docs_exist())
        click.echo(docs_manager.generate(sections, mode))
    except Exception as e:
        logger.error(str(e), tag="ERROR")
        sys.exit(1)


@docs.command()
@click.argument("query")
@click.option("--top-k", "-k", default=5)
@click.option("--build-index", is_flag=True, help="Build index if missing")
def search(query: str, top_k: int, build_index: bool):
    """Search documentation"""
    try:
        result = docs_manager.search(query, top_k)
        if result == "No search index available. Call build_search_index() first.":
            if build_index or click.confirm("No search index found. Build it now?"):
                asyncio.run(docs_manager.llm_text.generate_index_files())
                result = docs_manager.search(query, top_k)
        click.echo(result)
    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


@docs.command()
def update():
    """Update docs from GitHub"""
    try:
        asyncio.run(docs_manager.fetch_docs())
        click.echo("Documentation updated successfully")
    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


@docs.command()
@click.option("--force-facts", is_flag=True, help="Force regenerate fact files")
@click.option("--clear-cache", is_flag=True, help="Clear BM25 cache")
def index(force_facts: bool, clear_cache: bool):
    """Build or rebuild search indexes"""
    try:
        asyncio.run(docs_manager.ensure_docs_exist())
        asyncio.run(
            docs_manager.llm_text.generate_index_files(
                force_generate_facts=force_facts, clear_bm25_cache=clear_cache
            )
        )
        click.echo("Search indexes built successfully")
    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


# Add docs list command
@docs.command()
def list():
    """List available documentation sections"""
    try:
        sections = docs_manager.list()
        print_table(["Sections"], [[section] for section in sections])

    except Exception as e:
        click.echo(f"Error: {str(e)}", err=True)
        sys.exit(1)


if __name__ == "__main__":
    cli()
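
The group can also be exercised without a shell via click's test runner (a sketch; output depends on the local docs setup):

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cli, ["docs", "list"])
print(result.output)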
|
||||
@@ -15,54 +15,53 @@ import logging, time
import base64
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
from typing import List, Callable
from typing import Callable
import requests
import os
from pathlib import Path
from .utils import *

logger = logging.getLogger('selenium.webdriver.remote.remote_connection')
logger = logging.getLogger("selenium.webdriver.remote.remote_connection")
logger.setLevel(logging.WARNING)

logger_driver = logging.getLogger('selenium.webdriver.common.service')
logger_driver = logging.getLogger("selenium.webdriver.common.service")
logger_driver.setLevel(logging.WARNING)

urllib3_logger = logging.getLogger('urllib3.connectionpool')
urllib3_logger = logging.getLogger("urllib3.connectionpool")
urllib3_logger.setLevel(logging.WARNING)

# Disable http.client logging
http_client_logger = logging.getLogger('http.client')
http_client_logger = logging.getLogger("http.client")
http_client_logger.setLevel(logging.WARNING)

# Disable driver_finder and service logging
driver_finder_logger = logging.getLogger('selenium.webdriver.common.driver_finder')
driver_finder_logger = logging.getLogger("selenium.webdriver.common.driver_finder")
driver_finder_logger.setLevel(logging.WARNING)


class CrawlerStrategy(ABC):
    @abstractmethod
    def crawl(self, url: str, **kwargs) -> str:
        pass

    @abstractmethod
    def take_screenshot(self, save_path: str):
        pass

    @abstractmethod
    def update_user_agent(self, user_agent: str):
        pass

    @abstractmethod
    def set_hook(self, hook_type: str, hook: Callable):
        pass


class CloudCrawlerStrategy(CrawlerStrategy):
    def __init__(self, use_cached_html = False):
    def __init__(self, use_cached_html=False):
        super().__init__()
        self.use_cached_html = use_cached_html

    def crawl(self, url: str) -> str:
        data = {
            "urls": [url],
@@ -76,6 +75,7 @@ class CloudCrawlerStrategy(CrawlerStrategy):
        html = response["results"][0]["html"]
        return sanitize_input_encode(html)


class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
    def __init__(self, use_cached_html=False, js_code=None, **kwargs):
        super().__init__()
@@ -87,20 +87,25 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
        if kwargs.get("user_agent"):
            self.options.add_argument("--user-agent=" + kwargs.get("user_agent"))
        else:
            user_agent = kwargs.get("user_agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")
            user_agent = kwargs.get(
                "user_agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
            )
            self.options.add_argument(f"--user-agent={user_agent}")
            self.options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36")

            self.options.add_argument(
                "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
            )

        self.options.headless = kwargs.get("headless", True)
        if self.options.headless:
            self.options.add_argument("--headless")

        self.options.add_argument("--disable-gpu")

        self.options.add_argument("--disable-gpu")
        self.options.add_argument("--window-size=1920,1080")
        self.options.add_argument("--no-sandbox")
        self.options.add_argument("--disable-dev-shm-usage")
        self.options.add_argument("--disable-blink-features=AutomationControlled")

        self.options.add_argument("--disable-blink-features=AutomationControlled")

        # self.options.add_argument("--disable-dev-shm-usage")
        self.options.add_argument("--disable-gpu")
        # self.options.add_argument("--disable-extensions")
@@ -120,14 +125,14 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
        self.use_cached_html = use_cached_html
        self.js_code = js_code
        self.verbose = kwargs.get("verbose", False)

        # Hooks
        self.hooks = {
            'on_driver_created': None,
            'on_user_agent_updated': None,
            'before_get_url': None,
            'after_get_url': None,
            'before_return_html': None
            "on_driver_created": None,
            "on_user_agent_updated": None,
            "before_get_url": None,
            "after_get_url": None,
            "before_return_html": None,
        }

        # chromedriver_autoinstaller.install()
@@ -137,31 +142,28 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
        # chromedriver_path = chromedriver_autoinstaller.install()
        # chromedriver_path = chromedriver_autoinstaller.utils.download_chromedriver()
        # self.service = Service(chromedriver_autoinstaller.install())

        # chromedriver_path = ChromeDriverManager().install()
        # self.service = Service(chromedriver_path)
        # self.service.log_path = "NUL"
        # self.driver = webdriver.Chrome(service=self.service, options=self.options)

        # Use selenium-manager (built into Selenium 4.10.0+)
        self.service = Service()
        self.driver = webdriver.Chrome(options=self.options)

        self.driver = self.execute_hook('on_driver_created', self.driver)

        self.driver = self.execute_hook("on_driver_created", self.driver)

        if kwargs.get("cookies"):
            for cookie in kwargs.get("cookies"):
                self.driver.add_cookie(cookie)

    def set_hook(self, hook_type: str, hook: Callable):
        if hook_type in self.hooks:
            self.hooks[hook_type] = hook
        else:
            raise ValueError(f"Invalid hook type: {hook_type}")

    def execute_hook(self, hook_type: str, *args):
        hook = self.hooks.get(hook_type)
        if hook:
@@ -170,7 +172,9 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
            if isinstance(result, webdriver.Chrome):
                return result
            else:
                raise TypeError(f"Hook {hook_type} must return an instance of webdriver.Chrome or None.")
                raise TypeError(
                    f"Hook {hook_type} must return an instance of webdriver.Chrome or None."
                )
        # If the hook returns None or there is no hook, return self.driver
        return self.driver
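A hedged illustration (not part of the diff): wiring a hook through the set_hook/execute_hook contract defined above. The hook body is a made-up example; hooks must return a webdriver.Chrome instance (or None).

# Illustrative sketch only.
def before_get_url_hook(driver):
    # Runs just before navigation; must hand the driver back.
    driver.execute_cdp_cmd("Network.enable", {})
    return driver

strategy = LocalSeleniumCrawlerStrategy(verbose=True)
strategy.set_hook("before_get_url", before_get_url_hook)
html = strategy.crawl("https://example.com")
strategy.quit()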
@@ -178,60 +182,77 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
        self.options.add_argument(f"user-agent={user_agent}")
        self.driver.quit()
        self.driver = webdriver.Chrome(service=self.service, options=self.options)
        self.driver = self.execute_hook('on_user_agent_updated', self.driver)
        self.driver = self.execute_hook("on_user_agent_updated", self.driver)

    def set_custom_headers(self, headers: dict):
        # Enable Network domain for sending headers
        self.driver.execute_cdp_cmd('Network.enable', {})
        self.driver.execute_cdp_cmd("Network.enable", {})
        # Set extra HTTP headers
        self.driver.execute_cdp_cmd('Network.setExtraHTTPHeaders', {'headers': headers})
        self.driver.execute_cdp_cmd("Network.setExtraHTTPHeaders", {"headers": headers})

    def _ensure_page_load(self, max_checks=6, check_interval=0.01):
        initial_length = len(self.driver.page_source)

        for ix in range(max_checks):
            # print(f"Checking page load: {ix}")
            time.sleep(check_interval)
            current_length = len(self.driver.page_source)

            if current_length != initial_length:
                break

        return self.driver.page_source

    def crawl(self, url: str, **kwargs) -> str:
        # Create md5 hash of the URL
        import hashlib

        url_hash = hashlib.md5(url.encode()).hexdigest()

        if self.use_cached_html:
            cache_file_path = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai", "cache", url_hash)
            cache_file_path = os.path.join(
                os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()),
                ".crawl4ai",
                "cache",
                url_hash,
            )
            if os.path.exists(cache_file_path):
                with open(cache_file_path, "r") as f:
                    return sanitize_input_encode(f.read())

        try:
            self.driver = self.execute_hook('before_get_url', self.driver)
            self.driver = self.execute_hook("before_get_url", self.driver)
            if self.verbose:
                print(f"[LOG] 🕸️ Crawling {url} using LocalSeleniumCrawlerStrategy...")
            self.driver.get(url) #<html><head></head><body></body></html>

            self.driver.get(url)  # <html><head></head><body></body></html>

            WebDriverWait(self.driver, 20).until(
                lambda d: d.execute_script('return document.readyState') == 'complete'
                lambda d: d.execute_script("return document.readyState") == "complete"
            )
            WebDriverWait(self.driver, 10).until(
                EC.presence_of_all_elements_located((By.TAG_NAME, "body"))
            )

            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

            self.driver = self.execute_hook('after_get_url', self.driver)
            html = sanitize_input_encode(self._ensure_page_load())  # self.driver.page_source
            can_not_be_done_headless = False  # Look at my creativity for naming variables

            self.driver.execute_script(
                "window.scrollTo(0, document.body.scrollHeight);"
            )

            self.driver = self.execute_hook("after_get_url", self.driver)
            html = sanitize_input_encode(
                self._ensure_page_load()
            )  # self.driver.page_source
            can_not_be_done_headless = (
                False  # Look at my creativity for naming variables
            )

            # TODO: Very ugly approach, but promise to change it!
            if kwargs.get('bypass_headless', False) or html == "<html><head></head><body></body></html>":
                print("[LOG] 🙌 Page could not be loaded in headless mode. Trying non-headless mode...")
            if (
                kwargs.get("bypass_headless", False)
                or html == "<html><head></head><body></body></html>"
            ):
                print(
                    "[LOG] 🙌 Page could not be loaded in headless mode. Trying non-headless mode..."
                )
                can_not_be_done_headless = True
                options = Options()
                options.headless = False
@@ -239,27 +260,31 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
                options.add_argument("--window-size=5,5")
                driver = webdriver.Chrome(service=self.service, options=options)
                driver.get(url)
                self.driver = self.execute_hook('after_get_url', driver)
                self.driver = self.execute_hook("after_get_url", driver)
                html = sanitize_input_encode(driver.page_source)
                driver.quit()

            # Execute JS code if provided
            self.js_code = kwargs.get("js_code", self.js_code)
            if self.js_code and type(self.js_code) == str:
                self.driver.execute_script(self.js_code)
                # Optionally, wait for some condition after executing the JS code
                WebDriverWait(self.driver, 10).until(
                    lambda driver: driver.execute_script("return document.readyState") == "complete"
                    lambda driver: driver.execute_script("return document.readyState")
                    == "complete"
                )
            elif self.js_code and type(self.js_code) == list:
                for js in self.js_code:
                    self.driver.execute_script(js)
                    WebDriverWait(self.driver, 10).until(
                        lambda driver: driver.execute_script("return document.readyState") == "complete"
                        lambda driver: driver.execute_script(
                            "return document.readyState"
                        )
                        == "complete"
                    )

            # Optionally, wait for some condition after executing the JS code : Contributed by (https://github.com/jonymusky)
            wait_for = kwargs.get('wait_for', False)
            wait_for = kwargs.get("wait_for", False)
            if wait_for:
                if callable(wait_for):
                    print("[LOG] 🔄 Waiting for condition...")
@@ -268,32 +293,37 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
                    print("[LOG] 🔄 Waiting for condition...")
                    WebDriverWait(self.driver, 20).until(
                        EC.presence_of_element_located((By.CSS_SELECTOR, wait_for))
                    )

                    )

            if not can_not_be_done_headless:
                html = sanitize_input_encode(self.driver.page_source)
            self.driver = self.execute_hook('before_return_html', self.driver, html)

            self.driver = self.execute_hook("before_return_html", self.driver, html)

            # Store in cache
            cache_file_path = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai", "cache", url_hash)
            cache_file_path = os.path.join(
                os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()),
                ".crawl4ai",
                "cache",
                url_hash,
            )
            with open(cache_file_path, "w", encoding="utf-8") as f:
                f.write(html)

            if self.verbose:
                print(f"[LOG] ✅ Crawled {url} successfully!")

            return html
        except InvalidArgumentException as e:
            if not hasattr(e, 'msg'):
            if not hasattr(e, "msg"):
                e.msg = sanitize_input_encode(str(e))
            raise InvalidArgumentException(f"Failed to crawl {url}: {e.msg}")
        except WebDriverException as e:
            # If e does not have a msg attribute, create it and set it to str(e)
            if not hasattr(e, 'msg'):
            if not hasattr(e, "msg"):
                e.msg = sanitize_input_encode(str(e))
            raise WebDriverException(f"Failed to crawl {url}: {e.msg}")
        except Exception as e:
            if not hasattr(e, 'msg'):
            if not hasattr(e, "msg"):
                e.msg = sanitize_input_encode(str(e))
            raise Exception(f"Failed to crawl {url}: {e.msg}")

@@ -301,7 +331,9 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
        try:
            # Get the dimensions of the page
            total_width = self.driver.execute_script("return document.body.scrollWidth")
            total_height = self.driver.execute_script("return document.body.scrollHeight")
            total_height = self.driver.execute_script(
                "return document.body.scrollHeight"
            )

            # Set the window size to the dimensions of the page
            self.driver.set_window_size(total_width, total_height)
@@ -313,25 +345,27 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):
            image = Image.open(BytesIO(screenshot))

            # Convert image to RGB mode (this will handle both RGB and RGBA images)
            rgb_image = image.convert('RGB')
            rgb_image = image.convert("RGB")

            # Convert to JPEG and compress
            buffered = BytesIO()
            rgb_image.save(buffered, format="JPEG", quality=85)
            img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
            img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")

            if self.verbose:
                print(f"[LOG] 📸 Screenshot taken and converted to base64")
                print("[LOG] 📸 Screenshot taken and converted to base64")

            return img_base64
        except Exception as e:
            error_message = sanitize_input_encode(f"Failed to take screenshot: {str(e)}")
            error_message = sanitize_input_encode(
                f"Failed to take screenshot: {str(e)}"
            )
            print(error_message)

            # Generate an image with black background
            img = Image.new('RGB', (800, 600), color='black')
            img = Image.new("RGB", (800, 600), color="black")
            draw = ImageDraw.Draw(img)

            # Load a font
            try:
                font = ImageFont.truetype("arial.ttf", 40)
@@ -345,16 +379,16 @@ class LocalSeleniumCrawlerStrategy(CrawlerStrategy):

            # Calculate text position
            text_position = (10, 10)

            # Draw the text on the image
            draw.text(text_position, wrapped_text, fill=text_color, font=font)

            # Convert to base64
            buffered = BytesIO()
            img.save(buffered, format="JPEG")
            img_base64 = base64.b64encode(buffered.getvalue()).decode('utf-8')
            img_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")

            return img_base64

    def quit(self):
        self.driver.quit()
@@ -7,11 +7,13 @@ DB_PATH = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".cra
os.makedirs(DB_PATH, exist_ok=True)
DB_PATH = os.path.join(DB_PATH, "crawl4ai.db")


def init_db():
    global DB_PATH
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()
    cursor.execute('''
    cursor.execute(
        """
        CREATE TABLE IF NOT EXISTS crawled_data (
            url TEXT PRIMARY KEY,
            html TEXT,
@@ -24,31 +26,42 @@ def init_db():
            metadata TEXT DEFAULT "{}",
            screenshot TEXT DEFAULT ""
        )
    ''')
        """
    )
    conn.commit()
    conn.close()


def alter_db_add_screenshot(new_column: str = "media"):
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute(f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""')
        cursor.execute(
            f'ALTER TABLE crawled_data ADD COLUMN {new_column} TEXT DEFAULT ""'
        )
        conn.commit()
        conn.close()
    except Exception as e:
        print(f"Error altering database to add screenshot column: {e}")


def check_db_path():
    if not DB_PATH:
        raise ValueError("Database path is not set or is empty.")


def get_cached_url(url: str) -> Optional[Tuple[str, str, str, str, str, str, str, bool, str]]:
def get_cached_url(
    url: str,
) -> Optional[Tuple[str, str, str, str, str, str, str, bool, str]]:
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute('SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot FROM crawled_data WHERE url = ?', (url,))
        cursor.execute(
            "SELECT url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot FROM crawled_data WHERE url = ?",
            (url,),
        )
        result = cursor.fetchone()
        conn.close()
        return result
@@ -56,12 +69,25 @@ def get_cached_url(url: str) -> Optional[Tuple[str, str, str, str, str, str, str
        print(f"Error retrieving cached URL: {e}")
        return None


def cache_url(url: str, html: str, cleaned_html: str, markdown: str, extracted_content: str, success: bool, media : str = "{}", links : str = "{}", metadata : str = "{}", screenshot: str = ""):
def cache_url(
    url: str,
    html: str,
    cleaned_html: str,
    markdown: str,
    extracted_content: str,
    success: bool,
    media: str = "{}",
    links: str = "{}",
    metadata: str = "{}",
    screenshot: str = "",
):
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute('''
        cursor.execute(
            """
            INSERT INTO crawled_data (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ON CONFLICT(url) DO UPDATE SET
@@ -74,18 +100,32 @@ def cache_url(url: str, html: str, cleaned_html: str, markdown: str, extracted_c
                links = excluded.links,
                metadata = excluded.metadata,
                screenshot = excluded.screenshot
        ''', (url, html, cleaned_html, markdown, extracted_content, success, media, links, metadata, screenshot))
            """,
            (
                url,
                html,
                cleaned_html,
                markdown,
                extracted_content,
                success,
                media,
                links,
                metadata,
                screenshot,
            ),
        )
        conn.commit()
        conn.close()
    except Exception as e:
        print(f"Error caching URL: {e}")


def get_total_count() -> int:
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute('SELECT COUNT(*) FROM crawled_data')
        cursor.execute("SELECT COUNT(*) FROM crawled_data")
        result = cursor.fetchone()
        conn.close()
        return result[0]
@@ -93,43 +133,48 @@ def get_total_count() -> int:
        print(f"Error getting total count: {e}")
        return 0


def clear_db():
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute('DELETE FROM crawled_data')
        cursor.execute("DELETE FROM crawled_data")
        conn.commit()
        conn.close()
    except Exception as e:
        print(f"Error clearing database: {e}")


def flush_db():
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute('DROP TABLE crawled_data')
        cursor.execute("DROP TABLE crawled_data")
        conn.commit()
        conn.close()
    except Exception as e:
        print(f"Error flushing database: {e}")


def update_existing_records(new_column: str = "media", default_value: str = "{}"):
    check_db_path()
    try:
        conn = sqlite3.connect(DB_PATH)
        cursor = conn.cursor()
        cursor.execute(f'UPDATE crawled_data SET {new_column} = "{default_value}" WHERE screenshot IS NULL')
        cursor.execute(
            f'UPDATE crawled_data SET {new_column} = "{default_value}" WHERE screenshot IS NULL'
        )
        conn.commit()
        conn.close()
    except Exception as e:
        print(f"Error updating existing records: {e}")


if __name__ == "__main__":
    # Delete the existing database file
    if os.path.exists(DB_PATH):
        os.remove(DB_PATH)
        init_db()
    init_db()
    # alter_db_add_screenshot("COL_NAME")
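A hedged usage sketch (not part of the changeset): round-tripping one record through the cache helpers above. The URL and field values are placeholders.

# Illustrative sketch only.
init_db()
cache_url(
    "https://example.com",
    "<html>...</html>",
    "<p>cleaned</p>",
    "# markdown",
    "{}",
    True,
)
row = get_cached_url("https://example.com")
print(row is not None, get_total_count())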
@@ -4,6 +4,7 @@ from pathlib import Path
from crawl4ai.async_logger import AsyncLogger
from crawl4ai.llmtxt import AsyncLLMTextManager


class DocsManager:
    def __init__(self, logger=None):
        self.docs_dir = Path.home() / ".crawl4ai" / "docs"
@@ -21,11 +22,14 @@ class DocsManager:
        """Copy from local docs or download from GitHub"""
        try:
            # Try local first
            if self.local_docs.exists() and (any(self.local_docs.glob("*.md")) or any(self.local_docs.glob("*.tokens"))):
            if self.local_docs.exists() and (
                any(self.local_docs.glob("*.md"))
                or any(self.local_docs.glob("*.tokens"))
            ):
                # Empty the local docs directory
                for file_path in self.docs_dir.glob("*.md"):
                    file_path.unlink()
                # for file_path in self.docs_dir.glob("*.tokens"):
                #     file_path.unlink()
                for file_path in self.local_docs.glob("*.md"):
                    shutil.copy2(file_path, self.docs_dir / file_path.name)
@@ -36,14 +40,14 @@ class DocsManager:
            # Fallback to GitHub
            response = requests.get(
                "https://api.github.com/repos/unclecode/crawl4ai/contents/docs/llm.txt",
                headers={'Accept': 'application/vnd.github.v3+json'}
                headers={"Accept": "application/vnd.github.v3+json"},
            )
            response.raise_for_status()

            for item in response.json():
                if item['type'] == 'file' and item['name'].endswith('.md'):
                    content = requests.get(item['download_url']).text
                    with open(self.docs_dir / item['name'], 'w', encoding='utf-8') as f:
                if item["type"] == "file" and item["name"].endswith(".md"):
                    content = requests.get(item["download_url"]).text
                    with open(self.docs_dir / item["name"], "w", encoding="utf-8") as f:
                        f.write(content)
            return True

@@ -57,11 +61,15 @@ class DocsManager:
        # Remove [0-9]+_ prefix
        names = [name.split("_", 1)[1] if name[0].isdigit() else name for name in names]
        # Exclude names ending with .xs.md and .q.md
        names = [name for name in names if not name.endswith(".xs") and not name.endswith(".q")]
        names = [
            name
            for name in names
            if not name.endswith(".xs") and not name.endswith(".q")
        ]
        return names

    def generate(self, sections, mode="extended"):
        return self.llm_text.generate(sections, mode)

    def search(self, query: str, top_k: int = 5):
        return self.llm_text.search(query, top_k)
@@ -11,16 +11,16 @@ from rank_bm25 import BM25Okapi
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from litellm import completion, batch_completion
from litellm import batch_completion
from .async_logger import AsyncLogger
import litellm
import pickle
import hashlib  # <--- ADDED for file-hash
from fnmatch import fnmatch
import glob

litellm.set_verbose = False


def _compute_file_hash(file_path: Path) -> str:
    """Compute MD5 hash for the file's entire content."""
    hash_md5 = hashlib.md5()
@@ -29,13 +29,14 @@ def _compute_file_hash(file_path: Path) -> str:
            hash_md5.update(chunk)
    return hash_md5.hexdigest()


class AsyncLLMTextManager:
    def __init__(
        self,
        docs_dir: Path,
        logger: Optional[AsyncLogger] = None,
        max_concurrent_calls: int = 5,
        batch_size: int = 3
        batch_size: int = 3,
    ) -> None:
        self.docs_dir = docs_dir
        self.logger = logger
@@ -51,7 +52,7 @@ class AsyncLLMTextManager:
        contents = []
        for file_path in doc_batch:
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                with open(file_path, "r", encoding="utf-8") as f:
                    contents.append(f.read())
            except Exception as e:
                self.logger.error(f"Error reading {file_path}: {str(e)}")
@@ -77,43 +78,53 @@ Wrap your response in <index>...</index> tags.
        # Prepare messages for batch processing
        messages_list = [
            [
                {"role": "user", "content": f"{prompt}\n\nGenerate index for this documentation:\n\n{content}"}
                {
                    "role": "user",
                    "content": f"{prompt}\n\nGenerate index for this documentation:\n\n{content}",
                }
            ]
            for content in contents if content
            for content in contents
            if content
        ]

        try:
            responses = batch_completion(
                model="anthropic/claude-3-5-sonnet-latest",
                messages=messages_list,
                logger_fn=None
                logger_fn=None,
            )

            # Process responses and save index files
            for response, file_path in zip(responses, doc_batch):
                try:
                    index_content_match = re.search(
                        r'<index>(.*?)</index>',
                        r"<index>(.*?)</index>",
                        response.choices[0].message.content,
                        re.DOTALL
                        re.DOTALL,
                    )
                    if not index_content_match:
                        self.logger.warning(f"No <index>...</index> content found for {file_path}")
                        self.logger.warning(
                            f"No <index>...</index> content found for {file_path}"
                        )
                        continue

                    index_content = re.sub(
                        r"\n\s*\n", "\n", index_content_match.group(1)
                    ).strip()
                    if index_content:
                        index_file = file_path.with_suffix('.q.md')
                        with open(index_file, 'w', encoding='utf-8') as f:
                        index_file = file_path.with_suffix(".q.md")
                        with open(index_file, "w", encoding="utf-8") as f:
                            f.write(index_content)
                        self.logger.info(f"Created index file: {index_file}")
                    else:
                        self.logger.warning(f"No index content found in response for {file_path}")
                        self.logger.warning(
                            f"No index content found in response for {file_path}"
                        )

                except Exception as e:
                    self.logger.error(f"Error processing response for {file_path}: {str(e)}")
                    self.logger.error(
                        f"Error processing response for {file_path}: {str(e)}"
                    )

        except Exception as e:
            self.logger.error(f"Error in batch completion: {str(e)}")
@@ -171,7 +182,12 @@ Wrap your response in <index>...</index> tags.

        lemmatizer = WordNetLemmatizer()
        stop_words = set(stopwords.words("english")) - {
            "how", "what", "when", "where", "why", "which",
            "how",
            "what",
            "when",
            "where",
            "why",
            "which",
        }

        tokens = []
@@ -222,7 +238,9 @@ Wrap your response in <index>...</index> tags.
        self.logger.info("Checking which .q.md files need (re)indexing...")

        # Gather all .q.md files
        q_files = [self.docs_dir / f for f in os.listdir(self.docs_dir) if f.endswith(".q.md")]
        q_files = [
            self.docs_dir / f for f in os.listdir(self.docs_dir) if f.endswith(".q.md")
        ]

        # We'll store known (unchanged) facts in these lists
        existing_facts: List[str] = []
@@ -243,7 +261,9 @@ Wrap your response in <index>...</index> tags.
            # Otherwise, load the existing cache and compare hash
            cache = self._load_or_create_token_cache(qf)
            # If the .q.tokens was out of date (i.e. changed hash), we reindex
            if len(cache["facts"]) == 0 or cache.get("content_hash") != _compute_file_hash(qf):
            if len(cache["facts"]) == 0 or cache.get(
                "content_hash"
            ) != _compute_file_hash(qf):
                needSet.append(qf)
            else:
                # File is unchanged → retrieve cached token data
@@ -255,20 +275,29 @@ Wrap your response in <index>...</index> tags.
        if not needSet and not clear_cache:
            # If no file needs reindexing, try loading existing index
            if self.maybe_load_bm25_index(clear_cache=False):
                self.logger.info("No new/changed .q.md files found. Using existing BM25 index.")
                self.logger.info(
                    "No new/changed .q.md files found. Using existing BM25 index."
                )
                return
            else:
                # If there's no existing index, we must build a fresh index from the old caches
                self.logger.info("No existing BM25 index found. Building from cached facts.")
                self.logger.info(
                    "No existing BM25 index found. Building from cached facts."
                )
                if existing_facts:
                    self.logger.info(f"Building BM25 index with {len(existing_facts)} cached facts.")
                    self.logger.info(
                        f"Building BM25 index with {len(existing_facts)} cached facts."
                    )
                    self.bm25_index = BM25Okapi(existing_tokens)
                    self.tokenized_facts = existing_facts
                    with open(self.bm25_index_file, "wb") as f:
                        pickle.dump({
                            "bm25_index": self.bm25_index,
                            "tokenized_facts": self.tokenized_facts
                        }, f)
                        pickle.dump(
                            {
                                "bm25_index": self.bm25_index,
                                "tokenized_facts": self.tokenized_facts,
                            },
                            f,
                        )
                else:
                    self.logger.warning("No facts found at all. Index remains empty.")
                return
@@ -311,7 +340,9 @@ Wrap your response in <index>...</index> tags.
                self._save_token_cache(file, fresh_cache)

                mem_usage = process.memory_info().rss / 1024 / 1024
                self.logger.debug(f"Memory usage after {file.name}: {mem_usage:.2f}MB")
                self.logger.debug(
                    f"Memory usage after {file.name}: {mem_usage:.2f}MB"
                )

            except Exception as e:
                self.logger.error(f"Error processing {file}: {str(e)}")
@@ -328,40 +359,49 @@ Wrap your response in <index>...</index> tags.
        all_tokens = existing_tokens + new_tokens

        # 3) Build BM25 index from combined facts
        self.logger.info(f"Building BM25 index with {len(all_facts)} total facts (old + new).")
        self.logger.info(
            f"Building BM25 index with {len(all_facts)} total facts (old + new)."
        )
        self.bm25_index = BM25Okapi(all_tokens)
        self.tokenized_facts = all_facts

        # 4) Save the updated BM25 index to disk
        with open(self.bm25_index_file, "wb") as f:
            pickle.dump({
                "bm25_index": self.bm25_index,
                "tokenized_facts": self.tokenized_facts
            }, f)
            pickle.dump(
                {
                    "bm25_index": self.bm25_index,
                    "tokenized_facts": self.tokenized_facts,
                },
                f,
            )

        final_mem = process.memory_info().rss / 1024 / 1024
        self.logger.info(f"Search index updated. Final memory usage: {final_mem:.2f}MB")

    async def generate_index_files(self, force_generate_facts: bool = False, clear_bm25_cache: bool = False) -> None:
    async def generate_index_files(
        self, force_generate_facts: bool = False, clear_bm25_cache: bool = False
    ) -> None:
        """
        Generate index files for all documents in parallel batches

        Args:
            force_generate_facts (bool): If True, regenerate indexes even if they exist
            clear_bm25_cache (bool): If True, clear existing BM25 index cache
        """
        self.logger.info("Starting index generation for documentation files.")

        md_files = [
            self.docs_dir / f for f in os.listdir(self.docs_dir)
            if f.endswith('.md') and not any(f.endswith(x) for x in ['.q.md', '.xs.md'])
            self.docs_dir / f
            for f in os.listdir(self.docs_dir)
            if f.endswith(".md") and not any(f.endswith(x) for x in [".q.md", ".xs.md"])
        ]

        # Filter out files that already have .q files unless force=True
        if not force_generate_facts:
            md_files = [
                f for f in md_files
                if not (self.docs_dir / f.name.replace('.md', '.q.md')).exists()
                f
                for f in md_files
                if not (self.docs_dir / f.name.replace(".md", ".q.md")).exists()
            ]

        if not md_files:
@@ -369,8 +409,10 @@ Wrap your response in <index>...</index> tags.
        else:
            # Process documents in batches
            for i in range(0, len(md_files), self.batch_size):
                batch = md_files[i:i + self.batch_size]
                self.logger.info(f"Processing batch {i//self.batch_size + 1}/{(len(md_files)//self.batch_size) + 1}")
                batch = md_files[i : i + self.batch_size]
                self.logger.info(
                    f"Processing batch {i//self.batch_size + 1}/{(len(md_files)//self.batch_size) + 1}"
                )
                await self._process_document_batch(batch)

        self.logger.info("Index generation complete, building/updating search index.")
@@ -378,21 +420,31 @@ Wrap your response in <index>...</index> tags.

    def generate(self, sections: List[str], mode: str = "extended") -> str:
        # Get all markdown files
        all_files = glob.glob(str(self.docs_dir / "[0-9]*.md")) + \
            glob.glob(str(self.docs_dir / "[0-9]*.xs.md"))

        all_files = glob.glob(str(self.docs_dir / "[0-9]*.md")) + glob.glob(
            str(self.docs_dir / "[0-9]*.xs.md")
        )

        # Extract base names without extensions
        base_docs = {Path(f).name.split('.')[0] for f in all_files
                     if not Path(f).name.endswith('.q.md')}

        base_docs = {
            Path(f).name.split(".")[0]
            for f in all_files
            if not Path(f).name.endswith(".q.md")
        }

        # Filter by sections if provided
        if sections:
            base_docs = {doc for doc in base_docs
                         if any(section.lower() in doc.lower() for section in sections)}

            base_docs = {
                doc
                for doc in base_docs
                if any(section.lower() in doc.lower() for section in sections)
            }

        # Get file paths based on mode
        files = []
        for doc in sorted(base_docs, key=lambda x: int(x.split('_')[0]) if x.split('_')[0].isdigit() else 999999):
        for doc in sorted(
            base_docs,
            key=lambda x: int(x.split("_")[0]) if x.split("_")[0].isdigit() else 999999,
        ):
            if mode == "condensed":
                xs_file = self.docs_dir / f"{doc}.xs.md"
                regular_file = self.docs_dir / f"{doc}.md"
@@ -404,7 +456,7 @@ Wrap your response in <index>...</index> tags.
        content = []
        for file in files:
            try:
                with open(file, 'r', encoding='utf-8') as f:
                with open(file, "r", encoding="utf-8") as f:
                    fname = Path(file).name
                    content.append(f"{'#'*20}\n# {fname}\n{'#'*20}\n\n{f.read()}")
            except Exception as e:
@@ -443,15 +495,9 @@ Wrap your response in <index>...</index> tags.
        for file, _ in ranked_files:
            main_doc = str(file).replace(".q.md", ".md")
            if os.path.exists(self.docs_dir / main_doc):
                with open(self.docs_dir / main_doc, "r", encoding='utf-8') as f:
                with open(self.docs_dir / main_doc, "r", encoding="utf-8") as f:
                    only_file_name = main_doc.split("/")[-1]
                    content = [
                        "#" * 20,
                        f"# {only_file_name}",
                        "#" * 20,
                        "",
                        f.read()
                    ]
                    content = ["#" * 20, f"# {only_file_name}", "#" * 20, "", f.read()]
                    results.append("\n".join(content))

        return "\n\n---\n\n".join(results)
@@ -482,7 +528,9 @@ Wrap your response in <index>...</index> tags.
                    if len(components) == 3:
                        code_ref = components[2].strip()
                        code_tokens = self.preprocess_text(code_ref)
                        code_match_score = len(set(query_tokens) & set(code_tokens)) / len(query_tokens)
                        code_match_score = len(set(query_tokens) & set(code_tokens)) / len(
                            query_tokens
                        )

                    file_data[file_path]["total_score"] += score
                    file_data[file_path]["match_count"] += 1
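A hedged sketch (not part of the diff) of consuming the pickled BM25 index written above; the file name is a placeholder standing in for self.bm25_index_file, and the dict layout mirrors the pickle.dump calls in this file.

# Illustrative sketch only.
import pickle

with open("bm25_index.pkl", "rb") as f:  # placeholder path
    data = pickle.load(f)

bm25 = data["bm25_index"]  # a rank_bm25.BM25Okapi instance
facts = data["tokenized_facts"]
scores = bm25.get_scores("how to build the search index".split())
best = max(range(len(scores)), key=scores.__getitem__)
print(facts[best])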
@@ -1,14 +1,14 @@
# version_manager.py
import os
from pathlib import Path
from packaging import version
from . import __version__


class VersionManager:
    def __init__(self):
        self.home_dir = Path.home() / ".crawl4ai"
        self.version_file = self.home_dir / "version.txt"

    def get_installed_version(self):
        """Get the version recorded in home directory"""
        if not self.version_file.exists():
@@ -17,14 +17,13 @@ class VersionManager:
            return version.parse(self.version_file.read_text().strip())
        except:
            return None

    def update_version(self):
        """Update the version file to current library version"""
        self.version_file.write_text(__version__.__version__)

    def needs_update(self):
        """Check if database needs update based on version"""
        installed = self.get_installed_version()
        current = version.parse(__version__.__version__)
        return installed is None or installed < current
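A hedged usage sketch (not part of the changeset): how VersionManager might gate a one-time migration; the migration hook is hypothetical.

# Illustrative sketch only.
vm = VersionManager()
if vm.needs_update():
    # run_migrations()  # hypothetical migration step
    vm.update_version()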
crawl4ai/legacy/web_crawler.py (294 lines, new file)
@@ -0,0 +1,294 @@
import os, time

os.environ["TOKENIZERS_PARALLELISM"] = "false"
from pathlib import Path

from .models import UrlModel, CrawlResult
from .database import init_db, get_cached_url, cache_url
from .utils import *
from .chunking_strategy import *
from .extraction_strategy import *
from .crawler_strategy import *
from typing import List
from concurrent.futures import ThreadPoolExecutor
from ..content_scraping_strategy import LXMLWebScrapingStrategy as WebScrapingStrategy
from .config import *
import warnings
import json

warnings.filterwarnings(
    "ignore",
    message='Field "model_name" has conflict with protected namespace "model_".',
)


class WebCrawler:
    def __init__(
        self,
        crawler_strategy: CrawlerStrategy = None,
        always_by_pass_cache: bool = False,
        verbose: bool = False,
    ):
        self.crawler_strategy = crawler_strategy or LocalSeleniumCrawlerStrategy(
            verbose=verbose
        )
        self.always_by_pass_cache = always_by_pass_cache
        self.crawl4ai_folder = os.path.join(
            os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
        )
        os.makedirs(self.crawl4ai_folder, exist_ok=True)
        os.makedirs(f"{self.crawl4ai_folder}/cache", exist_ok=True)
        init_db()
        self.ready = False

    def warmup(self):
        print("[LOG] 🌤️ Warming up the WebCrawler")
        self.run(
            url="https://google.com/",
            word_count_threshold=5,
            extraction_strategy=NoExtractionStrategy(),
            bypass_cache=False,
            verbose=False,
        )
        self.ready = True
        print("[LOG] 🌞 WebCrawler is ready to crawl")

    def fetch_page(
        self,
        url_model: UrlModel,
        provider: str = DEFAULT_PROVIDER,
        api_token: str = None,
        extract_blocks_flag: bool = True,
        word_count_threshold=MIN_WORD_THRESHOLD,
        css_selector: str = None,
        screenshot: bool = False,
        use_cached_html: bool = False,
        extraction_strategy: ExtractionStrategy = None,
        chunking_strategy: ChunkingStrategy = RegexChunking(),
        **kwargs,
    ) -> CrawlResult:
        return self.run(
            url_model.url,
            word_count_threshold,
            extraction_strategy or NoExtractionStrategy(),
            chunking_strategy,
            bypass_cache=url_model.forced,
            css_selector=css_selector,
            screenshot=screenshot,
            **kwargs,
        )
        pass

    def fetch_pages(
        self,
        url_models: List[UrlModel],
        provider: str = DEFAULT_PROVIDER,
        api_token: str = None,
        extract_blocks_flag: bool = True,
        word_count_threshold=MIN_WORD_THRESHOLD,
        use_cached_html: bool = False,
        css_selector: str = None,
        screenshot: bool = False,
        extraction_strategy: ExtractionStrategy = None,
        chunking_strategy: ChunkingStrategy = RegexChunking(),
        **kwargs,
    ) -> List[CrawlResult]:
        extraction_strategy = extraction_strategy or NoExtractionStrategy()

        def fetch_page_wrapper(url_model, *args, **kwargs):
            return self.fetch_page(url_model, *args, **kwargs)

        with ThreadPoolExecutor() as executor:
            results = list(
                executor.map(
                    fetch_page_wrapper,
                    url_models,
                    [provider] * len(url_models),
                    [api_token] * len(url_models),
                    [extract_blocks_flag] * len(url_models),
                    [word_count_threshold] * len(url_models),
                    [css_selector] * len(url_models),
                    [screenshot] * len(url_models),
                    [use_cached_html] * len(url_models),
                    [extraction_strategy] * len(url_models),
                    [chunking_strategy] * len(url_models),
                    *[kwargs] * len(url_models),
                )
            )

        return results

    def run(
        self,
        url: str,
        word_count_threshold=MIN_WORD_THRESHOLD,
        extraction_strategy: ExtractionStrategy = None,
        chunking_strategy: ChunkingStrategy = RegexChunking(),
        bypass_cache: bool = False,
        css_selector: str = None,
        screenshot: bool = False,
        user_agent: str = None,
        verbose=True,
        **kwargs,
    ) -> CrawlResult:
        try:
            extraction_strategy = extraction_strategy or NoExtractionStrategy()
            extraction_strategy.verbose = verbose
            if not isinstance(extraction_strategy, ExtractionStrategy):
                raise ValueError("Unsupported extraction strategy")
            if not isinstance(chunking_strategy, ChunkingStrategy):
                raise ValueError("Unsupported chunking strategy")

            word_count_threshold = max(word_count_threshold, MIN_WORD_THRESHOLD)

            cached = None
            screenshot_data = None
            extracted_content = None
            if not bypass_cache and not self.always_by_pass_cache:
                cached = get_cached_url(url)

            if kwargs.get("warmup", True) and not self.ready:
                return None

            if cached:
                html = sanitize_input_encode(cached[1])
                extracted_content = sanitize_input_encode(cached[4])
                if screenshot:
                    screenshot_data = cached[9]
                    if not screenshot_data:
                        cached = None

            if not cached or not html:
                if user_agent:
                    self.crawler_strategy.update_user_agent(user_agent)
                t1 = time.time()
                html = sanitize_input_encode(self.crawler_strategy.crawl(url, **kwargs))
                t2 = time.time()
                if verbose:
                    print(
                        f"[LOG] 🚀 Crawling done for {url}, success: {bool(html)}, time taken: {t2 - t1:.2f} seconds"
                    )
                if screenshot:
                    screenshot_data = self.crawler_strategy.take_screenshot()

            crawl_result = self.process_html(
                url,
                html,
                extracted_content,
                word_count_threshold,
                extraction_strategy,
                chunking_strategy,
                css_selector,
                screenshot_data,
                verbose,
                bool(cached),
                **kwargs,
            )
            crawl_result.success = bool(html)
            return crawl_result
        except Exception as e:
            if not hasattr(e, "msg"):
                e.msg = str(e)
            print(f"[ERROR] 🚫 Failed to crawl {url}, error: {e.msg}")
            return CrawlResult(url=url, html="", success=False, error_message=e.msg)

    def process_html(
        self,
        url: str,
        html: str,
        extracted_content: str,
        word_count_threshold: int,
        extraction_strategy: ExtractionStrategy,
        chunking_strategy: ChunkingStrategy,
        css_selector: str,
        screenshot: bool,
        verbose: bool,
        is_cached: bool,
        **kwargs,
    ) -> CrawlResult:
        t = time.time()
        # Extract content from HTML
        try:
            t1 = time.time()
            scrapping_strategy = WebScrapingStrategy()
            extra_params = {
                k: v
                for k, v in kwargs.items()
                if k not in ["only_text", "image_description_min_word_threshold"]
            }
            result = scrapping_strategy.scrap(
                url,
                html,
                word_count_threshold=word_count_threshold,
                css_selector=css_selector,
                only_text=kwargs.get("only_text", False),
                image_description_min_word_threshold=kwargs.get(
                    "image_description_min_word_threshold",
                    IMAGE_DESCRIPTION_MIN_WORD_THRESHOLD,
                ),
                **extra_params,
            )

            # result = get_content_of_website_optimized(url, html, word_count_threshold, css_selector=css_selector, only_text=kwargs.get("only_text", False))
            if verbose:
                print(
                    f"[LOG] 🚀 Content extracted for {url}, success: True, time taken: {time.time() - t1:.2f} seconds"
                )

            if result is None:
                raise ValueError(f"Failed to extract content from the website: {url}")
        except InvalidCSSSelectorError as e:
            raise ValueError(str(e))

        cleaned_html = sanitize_input_encode(result.get("cleaned_html", ""))
        markdown = sanitize_input_encode(result.get("markdown", ""))
        media = result.get("media", [])
        links = result.get("links", [])
        metadata = result.get("metadata", {})

        if extracted_content is None:
            if verbose:
                print(
                    f"[LOG] 🔥 Extracting semantic blocks for {url}, Strategy: {extraction_strategy.name}"
                )

            sections = chunking_strategy.chunk(markdown)
            extracted_content = extraction_strategy.run(url, sections)
            extracted_content = json.dumps(
                extracted_content, indent=4, default=str, ensure_ascii=False
            )

        if verbose:
            print(
                f"[LOG] 🚀 Extraction done for {url}, time taken: {time.time() - t:.2f} seconds."
            )

        screenshot = None if not screenshot else screenshot

        if not is_cached:
            cache_url(
                url,
                html,
                cleaned_html,
                markdown,
                extracted_content,
                True,
                json.dumps(media),
                json.dumps(links),
                json.dumps(metadata),
                screenshot=screenshot,
            )

        return CrawlResult(
            url=url,
            html=html,
            cleaned_html=format_html(cleaned_html),
            markdown=markdown,
            media=media,
            links=links,
            metadata=metadata,
            screenshot=screenshot,
            extracted_content=extracted_content,
            success=True,
            error_message="",
        )
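A hedged usage sketch (not part of the new file): driving the legacy WebCrawler above end to end. Note that run() returns None until warmup() has set self.ready (unless warmup=False is passed); the target URL is a placeholder.

# Illustrative sketch only.
crawler = WebCrawler(verbose=True)
crawler.warmup()
result = crawler.run("https://example.com", screenshot=False)
if result and result.success:
    print(result.markdown[:200])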
crawl4ai/link_preview.py (395 lines, new file)
@@ -0,0 +1,395 @@
"""
Link Extractor for Crawl4AI

Extracts head content from links discovered during crawling using URLSeeder's
efficient parallel processing and caching infrastructure.
"""

import asyncio
import fnmatch
from typing import Dict, List, Optional, Any
from .async_logger import AsyncLogger
from .async_url_seeder import AsyncUrlSeeder
from .async_configs import SeedingConfig, CrawlerRunConfig
from .models import Links, Link
from .utils import calculate_total_score


class LinkPreview:
    """
    Extracts head content from links using URLSeeder's parallel processing infrastructure.

    This class provides intelligent link filtering and head content extraction with:
    - Pattern-based inclusion/exclusion filtering
    - Parallel processing with configurable concurrency
    - Caching for performance
    - BM25 relevance scoring
    - Memory-safe processing for large link sets
    """

    def __init__(self, logger: Optional[AsyncLogger] = None):
        """
        Initialize the LinkPreview.

        Args:
            logger: Optional logger instance for recording events
        """
        self.logger = logger
        self.seeder: Optional[AsyncUrlSeeder] = None
        self._owns_seeder = False

    async def __aenter__(self):
        """Async context manager entry."""
        await self.start()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context manager exit."""
        await self.close()

    async def start(self):
        """Initialize the URLSeeder instance."""
        if not self.seeder:
            self.seeder = AsyncUrlSeeder(logger=self.logger)
            await self.seeder.__aenter__()
            self._owns_seeder = True

    async def close(self):
        """Clean up resources."""
        if self.seeder and self._owns_seeder:
            await self.seeder.__aexit__(None, None, None)
            self.seeder = None
            self._owns_seeder = False

    def _log(self, level: str, message: str, tag: str = "LINK_EXTRACT", **kwargs):
        """Helper method to safely log messages."""
        if self.logger:
            log_method = getattr(self.logger, level, None)
            if log_method:
                log_method(message=message, tag=tag, params=kwargs.get('params', {}))

    async def extract_link_heads(
        self,
        links: Links,
        config: CrawlerRunConfig
    ) -> Links:
        """
        Extract head content for filtered links and attach to Link objects.

        Args:
            links: Links object containing internal and external links
            config: CrawlerRunConfig with link_preview_config settings

        Returns:
            Links object with head_data attached to filtered Link objects
        """
        link_config = config.link_preview_config

        # Ensure seeder is initialized
        await self.start()

        # Filter links based on configuration
        filtered_urls = self._filter_links(links, link_config)

        if not filtered_urls:
            self._log("info", "No links matched filtering criteria")
            return links

        self._log("info", "Extracting head content for {count} filtered links",
                  params={"count": len(filtered_urls)})

        # Extract head content using URLSeeder
        head_results = await self._extract_heads_parallel(filtered_urls, link_config)

        # Merge results back into Link objects
        updated_links = self._merge_head_data(links, head_results, config)

        self._log("info", "Completed head extraction for links, {success} successful",
                  params={"success": len([r for r in head_results if r.get("status") == "valid"])})

        return updated_links

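A hedged usage sketch (not part of the new file): calling the public extract_link_heads API from async code. The links object would come from a prior crawl result, and the CrawlerRunConfig is assumed to carry a populated link_preview_config as the method expects.

# Illustrative sketch only.
import asyncio

async def preview(links, run_config):
    async with LinkPreview() as lp:
        return await lp.extract_link_heads(links, run_config)

# e.g. enriched = asyncio.run(preview(result.links, run_config))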
    def _filter_links(self, links: Links, link_config: Dict[str, Any]) -> List[str]:
        """
        Filter links based on configuration parameters.

        Args:
            links: Links object containing internal and external links
            link_config: Configuration dictionary for link extraction

        Returns:
            List of filtered URL strings
        """
        filtered_urls = []

        # Include internal links if configured
        if link_config.include_internal:
            filtered_urls.extend([link.href for link in links.internal if link.href])
            self._log("debug", "Added {count} internal links",
                      params={"count": len(links.internal)})

        # Include external links if configured
        if link_config.include_external:
            filtered_urls.extend([link.href for link in links.external if link.href])
            self._log("debug", "Added {count} external links",
                      params={"count": len(links.external)})

        # Apply include patterns
        include_patterns = link_config.include_patterns
        if include_patterns:
            filtered_urls = [
                url for url in filtered_urls
                if any(fnmatch.fnmatch(url, pattern) for pattern in include_patterns)
            ]
            self._log("debug", "After include patterns: {count} links remain",
                      params={"count": len(filtered_urls)})

        # Apply exclude patterns
        exclude_patterns = link_config.exclude_patterns
        if exclude_patterns:
            filtered_urls = [
                url for url in filtered_urls
                if not any(fnmatch.fnmatch(url, pattern) for pattern in exclude_patterns)
            ]
            self._log("debug", "After exclude patterns: {count} links remain",
                      params={"count": len(filtered_urls)})

        # Limit number of links
        max_links = link_config.max_links
        if max_links > 0 and len(filtered_urls) > max_links:
            filtered_urls = filtered_urls[:max_links]
            self._log("debug", "Limited to {max_links} links",
                      params={"max_links": max_links})

        # Remove duplicates while preserving order
        seen = set()
        unique_urls = []
        for url in filtered_urls:
            if url not in seen:
                seen.add(url)
                unique_urls.append(url)

        self._log("debug", "Final filtered URLs: {count} unique links",
                  params={"count": len(unique_urls)})

        return unique_urls

async def _extract_heads_parallel(
|
||||
self,
|
||||
urls: List[str],
|
||||
link_config: Dict[str, Any]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Extract head content for URLs using URLSeeder's parallel processing.
|
||||
|
||||
Args:
|
||||
urls: List of URLs to process
|
||||
link_config: Configuration dictionary for link extraction
|
||||
|
||||
Returns:
|
||||
List of dictionaries with url, status, head_data, and optional relevance_score
|
||||
"""
|
||||
verbose = link_config.verbose
|
||||
concurrency = link_config.concurrency
|
||||
|
||||
if verbose:
|
||||
self._log("info", "Starting batch processing: {total} links with {concurrency} concurrent workers",
|
||||
params={"total": len(urls), "concurrency": concurrency})
|
||||
|
||||
# Create SeedingConfig for URLSeeder
|
||||
seeding_config = SeedingConfig(
|
||||
extract_head=True,
|
||||
concurrency=concurrency,
|
||||
hits_per_sec=getattr(link_config, 'hits_per_sec', None),
|
||||
query=link_config.query,
|
||||
score_threshold=link_config.score_threshold,
|
||||
scoring_method="bm25" if link_config.query else None,
|
||||
verbose=verbose
|
||||
)
|
||||
|
||||
# Use URLSeeder's extract_head_for_urls method with progress tracking
|
||||
if verbose:
|
||||
# Create a wrapper to track progress
|
||||
results = await self._extract_with_progress(urls, seeding_config, link_config)
|
||||
else:
|
||||
results = await self.seeder.extract_head_for_urls(
|
||||
urls=urls,
|
||||
config=seeding_config,
|
||||
concurrency=concurrency,
|
||||
timeout=link_config.timeout
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
    async def _extract_with_progress(
        self,
        urls: List[str],
        seeding_config: SeedingConfig,
        link_config: Any
    ) -> List[Dict[str, Any]]:
        """Extract head content with progress reporting."""

        total_urls = len(urls)
        concurrency = link_config.concurrency
        batch_size = max(1, total_urls // 10)  # Reserved for 10%-interval reporting (currently unused)

        # Process URLs and track progress
        completed = 0
        successful = 0
        failed = 0

        # True incremental progress would require URLSeeder to accept progress
        # callbacks; for now we use the existing method and report at the end.

        self._log("info", "Processing links in batches...")

        # Use existing method
        results = await self.seeder.extract_head_for_urls(
            urls=urls,
            config=seeding_config,
            concurrency=concurrency,
            timeout=link_config.timeout
        )

        # Count results
        for result in results:
            completed += 1
            if result.get("status") == "valid":
                successful += 1
            else:
                failed += 1

        # Final progress report
        self._log("info", "Batch processing completed: {completed}/{total} processed, {successful} successful, {failed} failed",
                  params={
                      "completed": completed,
                      "total": total_urls,
                      "successful": successful,
                      "failed": failed
                  })

        return results
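Until URLSeeder grows a progress callback, interim reporting could be approximated by slicing the URL list, as sketched below. The batch size and print-based reporting are illustrative, not the shipped behavior:

```python
async def extract_in_batches(seeder, urls, config, batch_size=50):
    # Process URLs in slices so progress can be reported after each slice.
    results = []
    for i in range(0, len(urls), batch_size):
        batch = urls[i : i + batch_size]
        results.extend(
            await seeder.extract_head_for_urls(
                urls=batch, config=config, concurrency=10, timeout=30
            )
        )
        print(f"processed {min(i + batch_size, len(urls))}/{len(urls)}")
    return results
```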
    def _merge_head_data(
        self,
        original_links: Links,
        head_results: List[Dict[str, Any]],
        config: CrawlerRunConfig
    ) -> Links:
        """
        Merge head extraction results back into Link objects.

        Args:
            original_links: Original Links object
            head_results: Results from head extraction
            config: Crawler run configuration (supplies scoring flags and the query)

        Returns:
            Links object with head_data attached to matching links
        """
        # Create URL to head_data mapping
        url_to_head_data = {}
        for result in head_results:
            url = result.get("url")
            if url:
                url_to_head_data[url] = {
                    "head_data": result.get("head_data", {}),
                    "status": result.get("status", "unknown"),
                    "error": result.get("error"),
                    "relevance_score": result.get("relevance_score")
                }

        # Update internal links
        updated_internal = []
        for link in original_links.internal:
            if link.href in url_to_head_data:
                head_info = url_to_head_data[link.href]
                # Create new Link object with head data and scoring
                contextual_score = head_info.get("relevance_score")

                updated_link = Link(
                    href=link.href,
                    text=link.text,
                    title=link.title,
                    base_domain=link.base_domain,
                    head_data=head_info["head_data"],
                    head_extraction_status=head_info["status"],
                    head_extraction_error=head_info.get("error"),
                    intrinsic_score=getattr(link, 'intrinsic_score', None),
                    contextual_score=contextual_score
                )

                # Add relevance score to head_data for backward compatibility
                if contextual_score is not None:
                    updated_link.head_data = updated_link.head_data or {}
                    updated_link.head_data["relevance_score"] = contextual_score

                # Calculate total score combining intrinsic and contextual scores
                updated_link.total_score = calculate_total_score(
                    intrinsic_score=updated_link.intrinsic_score,
                    contextual_score=updated_link.contextual_score,
                    score_links_enabled=getattr(config, 'score_links', False),
                    query_provided=bool(config.link_preview_config.query)
                )

                updated_internal.append(updated_link)
            else:
                # Keep original link unchanged
                updated_internal.append(link)

        # Update external links (same logic as the internal loop above)
        updated_external = []
        for link in original_links.external:
            if link.href in url_to_head_data:
                head_info = url_to_head_data[link.href]
                # Create new Link object with head data and scoring
                contextual_score = head_info.get("relevance_score")

                updated_link = Link(
                    href=link.href,
                    text=link.text,
                    title=link.title,
                    base_domain=link.base_domain,
                    head_data=head_info["head_data"],
                    head_extraction_status=head_info["status"],
                    head_extraction_error=head_info.get("error"),
                    intrinsic_score=getattr(link, 'intrinsic_score', None),
                    contextual_score=contextual_score
                )

                # Add relevance score to head_data for backward compatibility
                if contextual_score is not None:
                    updated_link.head_data = updated_link.head_data or {}
                    updated_link.head_data["relevance_score"] = contextual_score

                # Calculate total score combining intrinsic and contextual scores
                updated_link.total_score = calculate_total_score(
                    intrinsic_score=updated_link.intrinsic_score,
                    contextual_score=updated_link.contextual_score,
                    score_links_enabled=getattr(config, 'score_links', False),
                    query_provided=bool(config.link_preview_config.query)
                )

                updated_external.append(updated_link)
            else:
                # Keep original link unchanged
                updated_external.append(link)

        # Sort links by relevance score if available
        if any(hasattr(link, 'head_data') and link.head_data and 'relevance_score' in link.head_data
               for link in updated_internal + updated_external):

            def get_relevance_score(link):
                if hasattr(link, 'head_data') and link.head_data and 'relevance_score' in link.head_data:
                    return link.head_data['relevance_score']
                return 0.0

            updated_internal.sort(key=get_relevance_score, reverse=True)
            updated_external.sort(key=get_relevance_score, reverse=True)

        return Links(
            internal=updated_internal,
            external=updated_external
        )
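calculate_total_score is imported from elsewhere in this changeset and is not shown here. The following is only a rough sketch of the kind of combination such a helper could perform; the equal-weight averaging is our assumption, not the project's:

```python
def combine_scores(intrinsic, contextual, score_links_enabled, query_provided):
    # Sketch only: average whichever components are active and available.
    parts = []
    if score_links_enabled and intrinsic is not None:
        parts.append(intrinsic)
    if query_provided and contextual is not None:
        parts.append(contextual)
    return sum(parts) / len(parts) if parts else None
```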
@@ -2,77 +2,101 @@ from abc import ABC, abstractmethod
from typing import Optional, Dict, Any, Tuple
from .models import MarkdownGenerationResult
from .html2text import CustomHTML2Text
-from .content_filter_strategy import RelevantContentFilter, BM25ContentFilter
+# from .types import RelevantContentFilter
+from .content_filter_strategy import RelevantContentFilter
import re
from urllib.parse import urljoin

# Pre-compile the regex pattern
LINK_PATTERN = re.compile(r'!?\[([^\]]+)\]\(([^)]+?)(?:\s+"([^"]*)")?\)')


def fast_urljoin(base: str, url: str) -> str:
    """Fast URL joining for common cases."""
-    if url.startswith(('http://', 'https://', 'mailto:', '//')):
+    if url.startswith(("http://", "https://", "mailto:", "//")):
        return url
-    if url.startswith('/'):
+    if url.startswith("/"):
        # Handle absolute paths
-        if base.endswith('/'):
+        if base.endswith("/"):
            return base[:-1] + url
        return base + url
    return urljoin(base, url)


class MarkdownGenerationStrategy(ABC):
    """Abstract base class for markdown generation strategies."""
-    def __init__(self, content_filter: Optional[RelevantContentFilter] = None, options: Optional[Dict[str, Any]] = None):
+
+    def __init__(
+        self,
+        content_filter: Optional[RelevantContentFilter] = None,
+        options: Optional[Dict[str, Any]] = None,
+        verbose: bool = False,
+        content_source: str = "cleaned_html",
+    ):
        self.content_filter = content_filter
        self.options = options or {}
+        self.verbose = verbose
+        self.content_source = content_source

    @abstractmethod
-    def generate_markdown(self,
-                          cleaned_html: str,
-                          base_url: str = "",
-                          html2text_options: Optional[Dict[str, Any]] = None,
-                          content_filter: Optional[RelevantContentFilter] = None,
-                          citations: bool = True,
-                          **kwargs) -> MarkdownGenerationResult:
-        """Generate markdown from cleaned HTML."""
+    def generate_markdown(
+        self,
+        input_html: str,
+        base_url: str = "",
+        html2text_options: Optional[Dict[str, Any]] = None,
+        content_filter: Optional[RelevantContentFilter] = None,
+        citations: bool = True,
+        **kwargs,
+    ) -> MarkdownGenerationResult:
+        """Generate markdown from the selected input HTML."""
        pass


class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
    """
    Default implementation of markdown generation strategy.

    How it works:
    1. Generate raw markdown from cleaned HTML.
    2. Convert links to citations.
    3. Generate fit markdown if content filter is provided.
    4. Return MarkdownGenerationResult.

    Args:
        content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
        options (Optional[Dict[str, Any]]): Additional options for markdown generation. Defaults to None.
+        content_source (str): Source of content to generate markdown from. Options: "cleaned_html", "raw_html", "fit_html". Defaults to "cleaned_html".

    Returns:
        MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
    """
-    def __init__(self, content_filter: Optional[RelevantContentFilter] = None, options: Optional[Dict[str, Any]] = None):
-        super().__init__(content_filter, options)
-
-    def convert_links_to_citations(self, markdown: str, base_url: str = "") -> Tuple[str, str]:
+
+    def __init__(
+        self,
+        content_filter: Optional[RelevantContentFilter] = None,
+        options: Optional[Dict[str, Any]] = None,
+        content_source: str = "cleaned_html",
+    ):
+        super().__init__(content_filter, options, verbose=False, content_source=content_source)
+
+    def convert_links_to_citations(
+        self, markdown: str, base_url: str = ""
+    ) -> Tuple[str, str]:
        """
        Convert links in markdown to citations.

        How it works:
        1. Find all links in the markdown.
        2. Convert links to citations.
        3. Return converted markdown and references markdown.

        Note:
        This function uses a regex pattern to find links in markdown.

        Args:
            markdown (str): Markdown text.
            base_url (str): Base URL for URL joins.

        Returns:
            Tuple[str, str]: Converted markdown and references markdown.
        """
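A quick, standalone demonstration of what LINK_PATTERN captures (text, URL, optional title) on a made-up markdown snippet:

```python
import re

LINK_PATTERN = re.compile(r'!?\[([^\]]+)\]\(([^)]+?)(?:\s+"([^"]*)")?\)')

sample = 'See [the docs](/guide "Guide") and ![logo](https://example.com/logo.png)'
for m in LINK_PATTERN.finditer(sample):
    text, url, title = m.groups()
    print(text, url, title)
# the docs /guide Guide
# logo https://example.com/logo.png None
```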
@@ -81,103 +105,156 @@ class DefaultMarkdownGenerator(MarkdownGenerationStrategy):
        parts = []
        last_end = 0
        counter = 1

        for match in LINK_PATTERN.finditer(markdown):
-            parts.append(markdown[last_end:match.start()])
+            parts.append(markdown[last_end : match.start()])
            text, url, title = match.groups()

            # Use cached URL if available, otherwise compute and cache
-            if base_url and not url.startswith(('http://', 'https://', 'mailto:')):
+            if base_url and not url.startswith(("http://", "https://", "mailto:")):
                if url not in url_cache:
                    url_cache[url] = fast_urljoin(base_url, url)
                url = url_cache[url]

            if url not in link_map:
                desc = []
-                if title: desc.append(title)
-                if text and text != title: desc.append(text)
+                if title:
+                    desc.append(title)
+                if text and text != title:
+                    desc.append(text)
                link_map[url] = (counter, ": " + " - ".join(desc) if desc else "")
                counter += 1

            num = link_map[url][0]
-            parts.append(f"{text}⟨{num}⟩" if not match.group(0).startswith('!') else f"![{text}⟨{num}⟩]")
+            parts.append(
+                f"{text}⟨{num}⟩"
+                if not match.group(0).startswith("!")
+                else f"![{text}⟨{num}⟩]"
+            )
            last_end = match.end()

        parts.append(markdown[last_end:])
-        converted_text = ''.join(parts)
+        converted_text = "".join(parts)

        # Pre-build reference strings
        references = ["\n\n## References\n\n"]
        references.extend(
            f"⟨{num}⟩ {url}{desc}\n"
            for url, (num, desc) in sorted(link_map.items(), key=lambda x: x[1][0])
        )

-        return converted_text, ''.join(references)
-
-    def generate_markdown(self,
-                          cleaned_html: str,
-                          base_url: str = "",
-                          html2text_options: Optional[Dict[str, Any]] = None,
-                          options: Optional[Dict[str, Any]] = None,
-                          content_filter: Optional[RelevantContentFilter] = None,
-                          citations: bool = True,
-                          **kwargs) -> MarkdownGenerationResult:
+        return converted_text, "".join(references)
+
+    def generate_markdown(
+        self,
+        input_html: str,
+        base_url: str = "",
+        html2text_options: Optional[Dict[str, Any]] = None,
+        options: Optional[Dict[str, Any]] = None,
+        content_filter: Optional[RelevantContentFilter] = None,
+        citations: bool = True,
+        **kwargs,
+    ) -> MarkdownGenerationResult:
        """
-        Generate markdown with citations from cleaned HTML.
+        Generate markdown with citations from the provided input HTML.

        How it works:
-        1. Generate raw markdown from cleaned HTML.
+        1. Generate raw markdown from the input HTML.
        2. Convert links to citations.
        3. Generate fit markdown if content filter is provided.
        4. Return MarkdownGenerationResult.

        Args:
-            cleaned_html (str): Cleaned HTML content.
+            input_html (str): The HTML content to process (selected based on content_source).
            base_url (str): Base URL for URL joins.
            html2text_options (Optional[Dict[str, Any]]): HTML2Text options.
            options (Optional[Dict[str, Any]]): Additional options for markdown generation.
            content_filter (Optional[RelevantContentFilter]): Content filter for generating fit markdown.
            citations (bool): Whether to generate citations.

        Returns:
            MarkdownGenerationResult: Result containing raw markdown, fit markdown, fit HTML, and references markdown.
        """
-        # Initialize HTML2Text with options
-        h = CustomHTML2Text()
-        if html2text_options:
-            h.update_params(**html2text_options)
-        elif options:
-            h.update_params(**options)
-        elif self.options:
-            h.update_params(**self.options)
+        try:
+            # Initialize HTML2Text with default options for better conversion
+            h = CustomHTML2Text(baseurl=base_url)
+            default_options = {
+                "body_width": 0,  # Disable text wrapping
+                "ignore_emphasis": False,
+                "ignore_links": False,
+                "ignore_images": False,
+                "protect_links": False,
+                "single_line_break": True,
+                "mark_code": True,
+                "escape_snob": False,
+            }

-        # Generate raw markdown
-        raw_markdown = h.handle(cleaned_html)
-        raw_markdown = raw_markdown.replace(' ```', '```')
+            # Update with custom options if provided
+            if html2text_options:
+                default_options.update(html2text_options)
+            elif options:
+                default_options.update(options)
+            elif self.options:
+                default_options.update(self.options)

-        # Convert links to citations
-        markdown_with_citations: str = ""
-        references_markdown: str = ""
-        if citations:
-            markdown_with_citations, references_markdown = self.convert_links_to_citations(
-                raw_markdown, base_url
-            )
+            h.update_params(**default_options)

+            # Ensure we have valid input
+            if not input_html:
+                input_html = ""
+            elif not isinstance(input_html, str):
+                input_html = str(input_html)

+            # Generate raw markdown
+            try:
+                raw_markdown = h.handle(input_html)
+            except Exception as e:
+                raw_markdown = f"Error converting HTML to markdown: {str(e)}"

+            raw_markdown = raw_markdown.replace(" ```", "```")

+            # Convert links to citations
+            markdown_with_citations: str = raw_markdown
+            references_markdown: str = ""
+            if citations:
+                try:
+                    (
+                        markdown_with_citations,
+                        references_markdown,
+                    ) = self.convert_links_to_citations(raw_markdown, base_url)
+                except Exception as e:
+                    markdown_with_citations = raw_markdown
+                    references_markdown = f"Error generating citations: {str(e)}"

+            # Generate fit markdown if content filter is provided
+            fit_markdown: Optional[str] = ""
+            filtered_html: Optional[str] = ""
+            if content_filter or self.content_filter:
+                try:
+                    content_filter = content_filter or self.content_filter
+                    filtered_html = content_filter.filter_content(input_html)
+                    filtered_html = "\n".join(
+                        "<div>{}</div>".format(s) for s in filtered_html
+                    )
+                    fit_markdown = h.handle(filtered_html)
+                except Exception as e:
+                    fit_markdown = f"Error generating fit markdown: {str(e)}"
+                    filtered_html = ""

+            return MarkdownGenerationResult(
+                raw_markdown=raw_markdown or "",
+                markdown_with_citations=markdown_with_citations or "",
+                references_markdown=references_markdown or "",
+                fit_markdown=fit_markdown or "",
+                fit_html=filtered_html or "",
+            )
+        except Exception as e:
+            # If anything fails, return empty strings with error message
+            error_msg = f"Error in markdown generation: {str(e)}"
+            return MarkdownGenerationResult(
+                raw_markdown=error_msg,
+                markdown_with_citations=error_msg,
+                references_markdown="",
+                fit_markdown="",
+                fit_html="",
+            )

-        # Generate fit markdown if content filter is provided
-        fit_markdown: Optional[str] = ""
-        filtered_html: Optional[str] = ""
-        if content_filter or self.content_filter:
-            content_filter = content_filter or self.content_filter
-            filtered_html = content_filter.filter_content(cleaned_html)
-            filtered_html = '\n'.join('<div>{}</div>'.format(s) for s in filtered_html)
-            fit_markdown = h.handle(filtered_html)

-        return MarkdownGenerationResult(
-            raw_markdown=raw_markdown,
-            markdown_with_citations=markdown_with_citations,
-            references_markdown=references_markdown,
-            fit_markdown=fit_markdown,
-            fit_html=filtered_html,
-        )
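Putting the new signature to work; a minimal usage sketch based only on the signatures above (the HTML snippet is illustrative):

```python
generator = DefaultMarkdownGenerator(content_source="cleaned_html")
result = generator.generate_markdown(
    input_html='<p>Read the <a href="/guide">guide</a>.</p>',
    base_url="https://example.com",
    citations=True,
)
print(result.raw_markdown)
print(result.markdown_with_citations)  # link rewritten as a ⟨n⟩ citation
print(result.references_markdown)      # "## References" section with the URL
```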
@@ -1,13 +1,11 @@
import os
import asyncio
import logging
from pathlib import Path
import aiosqlite
from typing import Optional
import xxhash
import aiofiles
import shutil
import time
from datetime import datetime
from .async_logger import AsyncLogger, LogLevel
@@ -17,18 +15,19 @@ logger = AsyncLogger(log_level=LogLevel.DEBUG, verbose=True)
# logging.basicConfig(level=logging.INFO)
# logger = logging.getLogger(__name__)


class DatabaseMigration:
    def __init__(self, db_path: str):
        self.db_path = db_path
        self.content_paths = self._ensure_content_dirs(os.path.dirname(db_path))

    def _ensure_content_dirs(self, base_path: str) -> dict:
        dirs = {
-            'html': 'html_content',
-            'cleaned': 'cleaned_html',
-            'markdown': 'markdown_content',
-            'extracted': 'extracted_content',
-            'screenshots': 'screenshots'
+            "html": "html_content",
+            "cleaned": "cleaned_html",
+            "markdown": "markdown_content",
+            "extracted": "extracted_content",
+            "screenshots": "screenshots",
        }
        content_paths = {}
        for key, dirname in dirs.items():
@@ -47,43 +46,55 @@ class DatabaseMigration:
    async def _store_content(self, content: str, content_type: str) -> str:
        if not content:
            return ""

        content_hash = self._generate_content_hash(content)
        file_path = os.path.join(self.content_paths[content_type], content_hash)

        if not os.path.exists(file_path):
-            async with aiofiles.open(file_path, 'w', encoding='utf-8') as f:
+            async with aiofiles.open(file_path, "w", encoding="utf-8") as f:
                await f.write(content)

        return content_hash

    async def migrate_database(self):
        """Migrate existing database to file-based storage"""
        # logger.info("Starting database migration...")
        logger.info("Starting database migration...", tag="INIT")

        try:
            async with aiosqlite.connect(self.db_path) as db:
                # Get all rows
                async with db.execute(
-                    '''SELECT url, html, cleaned_html, markdown,
-                    extracted_content, screenshot FROM crawled_data'''
+                    """SELECT url, html, cleaned_html, markdown,
+                    extracted_content, screenshot FROM crawled_data"""
                ) as cursor:
                    rows = await cursor.fetchall()

                migrated_count = 0
                for row in rows:
-                    url, html, cleaned_html, markdown, extracted_content, screenshot = row
+                    (
+                        url,
+                        html,
+                        cleaned_html,
+                        markdown,
+                        extracted_content,
+                        screenshot,
+                    ) = row

                    # Store content in files and get hashes
-                    html_hash = await self._store_content(html, 'html')
-                    cleaned_hash = await self._store_content(cleaned_html, 'cleaned')
-                    markdown_hash = await self._store_content(markdown, 'markdown')
-                    extracted_hash = await self._store_content(extracted_content, 'extracted')
-                    screenshot_hash = await self._store_content(screenshot, 'screenshots')
+                    html_hash = await self._store_content(html, "html")
+                    cleaned_hash = await self._store_content(cleaned_html, "cleaned")
+                    markdown_hash = await self._store_content(markdown, "markdown")
+                    extracted_hash = await self._store_content(
+                        extracted_content, "extracted"
+                    )
+                    screenshot_hash = await self._store_content(
+                        screenshot, "screenshots"
+                    )

                    # Update database with hashes
-                    await db.execute('''
+                    await db.execute(
+                        """
                        UPDATE crawled_data
                        SET html = ?,
                            cleaned_html = ?,
@@ -91,40 +102,51 @@ class DatabaseMigration:
                            extracted_content = ?,
                            screenshot = ?
                        WHERE url = ?
-                    ''', (html_hash, cleaned_hash, markdown_hash,
-                         extracted_hash, screenshot_hash, url))
+                        """,
+                        (
+                            html_hash,
+                            cleaned_hash,
+                            markdown_hash,
+                            extracted_hash,
+                            screenshot_hash,
+                            url,
+                        ),
+                    )

                    migrated_count += 1
                    if migrated_count % 100 == 0:
                        logger.info(f"Migrated {migrated_count} records...", tag="INIT")

                await db.commit()
-                logger.success(f"Migration completed. {migrated_count} records processed.", tag="COMPLETE")
+                logger.success(
+                    f"Migration completed. {migrated_count} records processed.",
+                    tag="COMPLETE",
+                )

        except Exception as e:
            # logger.error(f"Migration failed: {e}")
            logger.error(
                message="Migration failed: {error}",
                tag="ERROR",
-                params={"error": str(e)}
+                params={"error": str(e)},
            )
            raise e


async def backup_database(db_path: str) -> str:
    """Create backup of existing database"""
    if not os.path.exists(db_path):
        logger.info("No existing database found. Skipping backup.", tag="INIT")
        return None

    # Create backup with timestamp
-    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    backup_path = f"{db_path}.backup_{timestamp}"

    try:
        # Wait for any potential write operations to finish
        await asyncio.sleep(1)

        # Create backup
        shutil.copy2(db_path, backup_path)
        logger.info(f"Database backup created at: {backup_path}", tag="COMPLETE")
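_generate_content_hash is not shown in these hunks. Given the xxhash import at the top of the file, a plausible minimal sketch of the content-addressed naming used by _store_content (an assumption, not the verified implementation):

```python
import xxhash

def generate_content_hash(content: str) -> str:
    # Hash the UTF-8 bytes so identical content always maps to one file name.
    return xxhash.xxh64(content.encode("utf-8")).hexdigest()
```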
@@ -132,37 +154,41 @@ async def backup_database(db_path: str) -> str:
    except Exception as e:
        # logger.error(f"Backup failed: {e}")
-        logger.error(
-            message="Migration failed: {error}",
-            tag="ERROR",
-            params={"error": str(e)}
-        )
+        logger.error(
+            message="Backup failed: {error}", tag="ERROR", params={"error": str(e)}
+        )
        raise e


async def run_migration(db_path: Optional[str] = None):
    """Run database migration"""
    if db_path is None:
        db_path = os.path.join(Path.home(), ".crawl4ai", "crawl4ai.db")

    if not os.path.exists(db_path):
        logger.info("No existing database found. Skipping migration.", tag="INIT")
        return

    # Create backup first
    backup_path = await backup_database(db_path)
    if not backup_path:
        return

    migration = DatabaseMigration(db_path)
    await migration.migrate_database()


def main():
    """CLI entry point for migration"""
    import argparse

-    parser = argparse.ArgumentParser(description='Migrate Crawl4AI database to file-based storage')
-    parser.add_argument('--db-path', help='Custom database path')
+    parser = argparse.ArgumentParser(
+        description="Migrate Crawl4AI database to file-based storage"
+    )
+    parser.add_argument("--db-path", help="Custom database path")
    args = parser.parse_args()

    asyncio.run(run_migration(args.db_path))


if __name__ == "__main__":
    main()
@@ -2,109 +2,125 @@ from functools import lru_cache
from pathlib import Path
import subprocess, os
import shutil
import tarfile
from .model_loader import *
import argparse
import urllib.request
from crawl4ai.config import MODEL_REPO_BRANCH

__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))


@lru_cache()
def get_available_memory(device):
    import torch
-    if device.type == 'cuda':
+
+    if device.type == "cuda":
        return torch.cuda.get_device_properties(device).total_memory
-    elif device.type == 'mps':
-        return 48 * 1024 ** 3  # Assume up to 48GB of unified memory for MPS
+    elif device.type == "mps":
+        return 48 * 1024**3  # Assume up to 48GB of unified memory for MPS
    else:
        return 0


@lru_cache()
def calculate_batch_size(device):
    available_memory = get_available_memory(device)

-    if device.type == 'cpu':
+    if device.type == "cpu":
        return 16
-    elif device.type in ['cuda', 'mps']:
+    elif device.type in ["cuda", "mps"]:
        # Adjust these thresholds based on your model size and available memory
-        if available_memory >= 31 * 1024 ** 3:  # > 32GB
+        if available_memory >= 31 * 1024**3:  # > 32GB
            return 256
-        elif available_memory >= 15 * 1024 ** 3:  # > 16GB to 32GB
+        elif available_memory >= 15 * 1024**3:  # > 16GB to 32GB
            return 128
-        elif available_memory >= 8 * 1024 ** 3:  # 8GB to 16GB
+        elif available_memory >= 8 * 1024**3:  # 8GB to 16GB
            return 64
        else:
            return 32
    else:
-        return 16  # Default batch size
+        return 16  # Default batch size
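A quick way to see what the heuristics above pick on the current machine (the printed values are examples, not guarantees):

```python
device = get_device()
print(device.type, calculate_batch_size(device))
# e.g. "cuda 128" on a 24GB card, or "cpu 16" without an accelerator
```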
@lru_cache()
def get_device():
    import torch
+
    if torch.cuda.is_available():
-        device = torch.device('cuda')
+        device = torch.device("cuda")
    elif torch.backends.mps.is_available():
-        device = torch.device('mps')
+        device = torch.device("mps")
    else:
-        device = torch.device('cpu')
-    return device
+        device = torch.device("cpu")
+    return device


def set_model_device(model):
    device = get_device()
-    model.to(device)
+    model.to(device)
    return model, device


@lru_cache()
def get_home_folder():
-    home_folder = os.path.join(os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai")
+    home_folder = os.path.join(
+        os.getenv("CRAWL4_AI_BASE_DIRECTORY", Path.home()), ".crawl4ai"
+    )
    os.makedirs(home_folder, exist_ok=True)
    os.makedirs(f"{home_folder}/cache", exist_ok=True)
    os.makedirs(f"{home_folder}/models", exist_ok=True)
-    return home_folder
+    return home_folder


@lru_cache()
def load_bert_base_uncased():
-    from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel
-    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', resume_download=None)
-    model = BertModel.from_pretrained('bert-base-uncased', resume_download=None)
+    from transformers import BertTokenizer, BertModel
+
+    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", resume_download=None)
+    model = BertModel.from_pretrained("bert-base-uncased", resume_download=None)
    model.eval()
    model, device = set_model_device(model)
    return tokenizer, model


@lru_cache()
def load_HF_embedding_model(model_name="BAAI/bge-small-en-v1.5") -> tuple:
    """Load the Hugging Face model for embedding.

    Args:
        model_name (str, optional): The model name to load. Defaults to "BAAI/bge-small-en-v1.5".

    Returns:
        tuple: The tokenizer and model.
    """
-    from transformers import BertTokenizer, BertModel, AutoTokenizer, AutoModel
+    from transformers import AutoTokenizer, AutoModel
+
    tokenizer = AutoTokenizer.from_pretrained(model_name, resume_download=None)
    model = AutoModel.from_pretrained(model_name, resume_download=None)
    model.eval()
    model, device = set_model_device(model)
    return tokenizer, model
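The loader returns a bare tokenizer/model pair; one common way to turn that into sentence embeddings is mean pooling over the last hidden state. The pooling choice below is ours, not something the loader prescribes:

```python
import torch

tokenizer, model = load_HF_embedding_model("BAAI/bge-small-en-v1.5")
inputs = tokenizer(["crawl the web asynchronously"], return_tensors="pt",
                   padding=True, truncation=True)
inputs = {k: v.to(model.device) for k, v in inputs.items()}
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # (1, seq_len, dim)
embedding = hidden.mean(dim=1).squeeze(0)       # mean-pool over tokens
print(embedding.shape)  # torch.Size([384]) for bge-small
```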
@lru_cache()
def load_text_classifier():
    from transformers import AutoTokenizer, AutoModelForSequenceClassification
    from transformers import pipeline
    import torch

-    tokenizer = AutoTokenizer.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news")
-    model = AutoModelForSequenceClassification.from_pretrained("dstefa/roberta-base_topic_classification_nyt_news")
+    tokenizer = AutoTokenizer.from_pretrained(
+        "dstefa/roberta-base_topic_classification_nyt_news"
+    )
+    model = AutoModelForSequenceClassification.from_pretrained(
+        "dstefa/roberta-base_topic_classification_nyt_news"
+    )
    model.eval()
    model, device = set_model_device(model)
    pipe = pipeline("text-classification", model=model, tokenizer=tokenizer)
    return pipe


@lru_cache()
def load_text_multilabel_classifier():
    from transformers import AutoModelForSequenceClassification, AutoTokenizer
    import numpy as np
    from scipy.special import expit
    import torch
@@ -116,18 +132,27 @@ def load_text_multilabel_classifier():
    # else:
    #     device = torch.device("cpu")
    #     # return load_spacy_model(), torch.device("cpu")

    MODEL = "cardiffnlp/tweet-topic-21-multi"
    tokenizer = AutoTokenizer.from_pretrained(MODEL, resume_download=None)
-    model = AutoModelForSequenceClassification.from_pretrained(MODEL, resume_download=None)
+    model = AutoModelForSequenceClassification.from_pretrained(
+        MODEL, resume_download=None
+    )
    model.eval()
    model, device = set_model_device(model)
    class_mapping = model.config.id2label

    def _classifier(texts, threshold=0.5, max_length=64):
-        tokens = tokenizer(texts, return_tensors='pt', padding=True, truncation=True, max_length=max_length)
-        tokens = {key: val.to(device) for key, val in tokens.items()}  # Move tokens to the selected device
+        tokens = tokenizer(
+            texts,
+            return_tensors="pt",
+            padding=True,
+            truncation=True,
+            max_length=max_length,
+        )
+        tokens = {
+            key: val.to(device) for key, val in tokens.items()
+        }  # Move tokens to the selected device

        with torch.no_grad():
            output = model(**tokens)
@@ -138,35 +163,41 @@ def load_text_multilabel_classifier():

        batch_labels = []
        for prediction in predictions:
-            labels = [class_mapping[i] for i, value in enumerate(prediction) if value == 1]
+            labels = [
+                class_mapping[i] for i, value in enumerate(prediction) if value == 1
+            ]
            batch_labels.append(labels)

        return batch_labels

    return _classifier, device
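An illustrative call of the returned classifier; the printed labels depend on the tweet-topic-21-multi label set and are examples only:

```python
classify, device = load_text_multilabel_classifier()
labels = classify(["New GPU breaks benchmark records", "Election results are in"])
print(labels)  # e.g. [['science_&_technology'], ['news_&_social_concern']]
```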
@lru_cache()
def load_nltk_punkt():
    import nltk

    try:
-        nltk.data.find('tokenizers/punkt')
+        nltk.data.find("tokenizers/punkt")
    except LookupError:
-        nltk.download('punkt')
-    return nltk.data.find('tokenizers/punkt')
+        nltk.download("punkt")
+    return nltk.data.find("tokenizers/punkt")


@lru_cache()
def load_spacy_model():
    import spacy

    name = "models/reuters"
    home_folder = get_home_folder()
    model_folder = Path(home_folder) / name

    # Check if the model directory already exists
    if not (model_folder.exists() and any(model_folder.iterdir())):
        repo_url = "https://github.com/unclecode/crawl4ai.git"
-        branch = MODEL_REPO_BRANCH
+        branch = MODEL_REPO_BRANCH
        repo_folder = Path(home_folder) / "crawl4ai"

        print("[LOG] ⏬ Downloading Spacy model for the first time...")

        # Remove existing repo folder if it exists
@@ -176,7 +207,9 @@ def load_spacy_model():
            if model_folder.exists():
                shutil.rmtree(model_folder)
        except PermissionError:
-            print("[WARNING] Unable to remove existing folders. Please manually delete the following folders and try again:")
+            print(
+                "[WARNING] Unable to remove existing folders. Please manually delete the following folders and try again:"
+            )
            print(f"- {repo_folder}")
            print(f"- {model_folder}")
            return None
@@ -187,7 +220,7 @@ def load_spacy_model():
            ["git", "clone", "-b", branch, repo_url, str(repo_folder)],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
-            check=True
+            check=True,
        )

        # Create the models directory if it doesn't exist
@@ -215,6 +248,7 @@ def load_spacy_model():
        print(f"Error loading spacy model: {e}")
        return None


def download_all_models(remove_existing=False):
    """Download all models required for Crawl4AI."""
    if remove_existing:
@@ -243,14 +277,20 @@ def download_all_models(remove_existing=False):
    load_nltk_punkt()
    print("[LOG] ✅ All models downloaded successfully.")


def main():
    print("[LOG] Welcome to the Crawl4AI Model Downloader!")
    print("[LOG] This script will download all the models required for Crawl4AI.")
    parser = argparse.ArgumentParser(description="Crawl4AI Model Downloader")
-    parser.add_argument('--remove-existing', action='store_true', help="Remove existing models before downloading")
+    parser.add_argument(
+        "--remove-existing",
+        action="store_true",
+        help="Remove existing models before downloading",
+    )
    args = parser.parse_args()

    download_all_models(remove_existing=args.remove_existing)


if __name__ == "__main__":
    main()
@@ -1,21 +1,121 @@
-from pydantic import BaseModel, HttpUrl
+from pydantic import BaseModel, HttpUrl, PrivateAttr, Field
from typing import List, Dict, Optional, Callable, Awaitable, Union, Any
from typing import AsyncGenerator
from typing import Generic, TypeVar
from enum import Enum
from dataclasses import dataclass, field
from .ssl_certificate import SSLCertificate
from datetime import datetime
from datetime import timedelta


###############################
# Dispatcher Models
###############################
@dataclass
class DomainState:
    last_request_time: float = 0
    current_delay: float = 0
    fail_count: int = 0


@dataclass
class CrawlerTaskResult:
    task_id: str
    url: str
    result: "CrawlResult"
    memory_usage: float
    peak_memory: float
    start_time: Union[datetime, float]
    end_time: Union[datetime, float]
    error_message: str = ""
    retry_count: int = 0
    wait_time: float = 0.0

    @property
    def success(self) -> bool:
        return self.result.success


class CrawlStatus(Enum):
    QUEUED = "QUEUED"
    IN_PROGRESS = "IN_PROGRESS"
    COMPLETED = "COMPLETED"
    FAILED = "FAILED"


@dataclass
class CrawlStats:
    task_id: str
    url: str
    status: CrawlStatus
    start_time: Optional[Union[datetime, float]] = None
    end_time: Optional[Union[datetime, float]] = None
    memory_usage: float = 0.0
    peak_memory: float = 0.0
    error_message: str = ""
    wait_time: float = 0.0
    retry_count: int = 0
    counted_requeue: bool = False

    @property
    def duration(self) -> str:
        if not self.start_time:
            return "0:00"

        # Convert start_time to datetime if it's a float
        start = self.start_time
        if isinstance(start, float):
            start = datetime.fromtimestamp(start)

        # Get end time or use current time
        end = self.end_time or datetime.now()
        # Convert end_time to datetime if it's a float
        if isinstance(end, float):
            end = datetime.fromtimestamp(end)

        duration = end - start
        return str(timedelta(seconds=int(duration.total_seconds())))
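The duration property accepts either datetime or epoch-float timestamps; a quick check:

```python
import time

stats = CrawlStats(
    task_id="t1",
    url="https://example.com",
    status=CrawlStatus.QUEUED,
    start_time=time.time() - 75,  # float epoch timestamp, 75s ago
)
print(stats.duration)  # "0:01:15"
```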
class DisplayMode(Enum):
    DETAILED = "DETAILED"
    AGGREGATED = "AGGREGATED"


###############################
# Crawler Models
###############################
@dataclass
class TokenUsage:
    completion_tokens: int = 0
    prompt_tokens: int = 0
    total_tokens: int = 0
    completion_tokens_details: Optional[dict] = None
    prompt_tokens_details: Optional[dict] = None


class UrlModel(BaseModel):
    url: HttpUrl
    forced: bool = False


@dataclass
class TraversalStats:
    """Statistics for the traversal process"""

    start_time: datetime = field(default_factory=datetime.now)
    urls_processed: int = 0
    urls_failed: int = 0
    urls_skipped: int = 0
    total_depth_reached: int = 0
    current_depth: int = 0


class DispatchResult(BaseModel):
    task_id: str
    memory_usage: float
    peak_memory: float
    start_time: Union[datetime, float]
    end_time: Union[datetime, float]
    error_message: str = ""


class MarkdownGenerationResult(BaseModel):
    raw_markdown: str
    markdown_with_citations: str
@@ -23,20 +123,23 @@ class MarkdownGenerationResult(BaseModel):
    fit_markdown: Optional[str] = None
    fit_html: Optional[str] = None

    def __str__(self):
        return self.raw_markdown


class CrawlResult(BaseModel):
    url: str
    html: str
    fit_html: Optional[str] = None
    success: bool
    cleaned_html: Optional[str] = None
    media: Dict[str, List[Dict]] = {}
    links: Dict[str, List[Dict]] = {}
    downloaded_files: Optional[List[str]] = None
    js_execution_result: Optional[Dict[str, Any]] = None
    screenshot: Optional[str] = None
-    pdf : Optional[bytes] = None
-    markdown: Optional[Union[str, MarkdownGenerationResult]] = None
-    markdown_v2: Optional[MarkdownGenerationResult] = None
-    fit_markdown: Optional[str] = None
-    fit_html: Optional[str] = None
+    pdf: Optional[bytes] = None
+    mhtml: Optional[str] = None
+    _markdown: Optional[MarkdownGenerationResult] = PrivateAttr(default=None)
    extracted_content: Optional[str] = None
    metadata: Optional[dict] = None
    error_message: Optional[str] = None
@@ -44,18 +147,241 @@ class CrawlResult(BaseModel):
    response_headers: Optional[dict] = None
    status_code: Optional[int] = None
    ssl_certificate: Optional[SSLCertificate] = None
    dispatch_result: Optional[DispatchResult] = None
    redirected_url: Optional[str] = None
    network_requests: Optional[List[Dict[str, Any]]] = None
    console_messages: Optional[List[Dict[str, Any]]] = None
    tables: List[Dict] = Field(default_factory=list)  # NEW – [{headers, rows, caption, summary}]

    class Config:
        arbitrary_types_allowed = True

    # NOTE: The StringCompatibleMarkdown class, custom __init__ method, property getters/setters,
    # and model_dump override all exist to support a smooth transition from markdown as a string
    # to markdown as a MarkdownGenerationResult object, while maintaining backward compatibility.
    #
    # This allows code that expects markdown to be a string to continue working, while also
    # providing access to the full MarkdownGenerationResult object's properties.
    #
    # The markdown_v2 property is deprecated and raises an error directing users to use markdown.
    #
    # When backward compatibility is no longer needed in future versions, this entire mechanism
    # can be simplified to a standard field with no custom accessors or serialization logic.

    def __init__(self, **data):
        markdown_result = data.pop('markdown', None)
        super().__init__(**data)
        if markdown_result is not None:
            self._markdown = (
                MarkdownGenerationResult(**markdown_result)
                if isinstance(markdown_result, dict)
                else markdown_result
            )

    @property
    def markdown(self):
        """
        Property that returns a StringCompatibleMarkdown object that behaves like
        a string but also provides access to MarkdownGenerationResult attributes.

        This approach allows backward compatibility with code that expects 'markdown'
        to be a string, while providing access to the full MarkdownGenerationResult.
        """
        if self._markdown is None:
            return None
        return StringCompatibleMarkdown(self._markdown)

    @markdown.setter
    def markdown(self, value):
        """
        Setter for the markdown property.
        """
        self._markdown = value

    @property
    def markdown_v2(self):
        """
        Deprecated property that raises an AttributeError when accessed.

        This property exists to inform users that 'markdown_v2' has been
        deprecated and they should use 'markdown' instead.
        """
        raise AttributeError(
            "The 'markdown_v2' attribute is deprecated and has been removed. "
            """Please use 'markdown' instead, which now returns a MarkdownGenerationResult with
            the following properties:
            - raw_markdown: The raw markdown string
            - markdown_with_citations: The markdown string with citations
            - references_markdown: The markdown string with references
            - fit_markdown: The markdown string with fit text
            """
        )

    @property
    def fit_markdown(self):
        """
        Deprecated property that raises an AttributeError when accessed.
        """
        raise AttributeError(
            "The 'fit_markdown' attribute is deprecated and has been removed. "
            "Please use 'markdown.fit_markdown' instead."
        )

    @property
    def fit_html(self):
        """
        Deprecated property that raises an AttributeError when accessed.
        """
        raise AttributeError(
            "The 'fit_html' attribute is deprecated and has been removed. "
            "Please use 'markdown.fit_html' instead."
        )

    def model_dump(self, *args, **kwargs):
        """
        Override model_dump to include the _markdown private attribute in serialization.

        This override is necessary because:
        1. PrivateAttr fields are excluded from serialization by default
        2. We need to maintain backward compatibility by including the 'markdown' field
           in the serialized output
        3. We're transitioning from 'markdown_v2' to enhancing 'markdown' to hold
           the same type of data

        Future developers: This method ensures that the markdown content is properly
        serialized despite being stored in a private attribute. If the serialization
        requirements change, this is where you would update the logic.
        """
        result = super().model_dump(*args, **kwargs)

        # Remove any property descriptors that might have been included
        # These deprecated properties should not be in the serialized output
        for key in ['fit_html', 'fit_markdown', 'markdown_v2']:
            if key in result and isinstance(result[key], property):
                # del result[key]
                # Nasrin: I decided to convert it to string instead of removing it.
                result[key] = str(result[key])

        # Add the markdown field properly
        if self._markdown is not None:
            result["markdown"] = self._markdown.model_dump()
        return result


class StringCompatibleMarkdown(str):
    """A string subclass that also provides access to MarkdownGenerationResult attributes"""

    def __new__(cls, markdown_result):
        return super().__new__(cls, markdown_result.raw_markdown)

    def __init__(self, markdown_result):
        self._markdown_result = markdown_result

    def __getattr__(self, name):
        return getattr(self._markdown_result, name)
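What the compatibility shim buys in practice; a small sketch (field values are illustrative):

```python
md = MarkdownGenerationResult(
    raw_markdown="# Title",
    markdown_with_citations="# Title",
    references_markdown="",
)
compat = StringCompatibleMarkdown(md)
print(compat.upper())                  # "# TITLE" - plain str behavior
print(compat.markdown_with_citations)  # delegated to the wrapped result
```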
CrawlResultT = TypeVar('CrawlResultT', bound=CrawlResult)


class CrawlResultContainer(Generic[CrawlResultT]):
    def __init__(self, results: Union[CrawlResultT, List[CrawlResultT]]):
        # Normalize to a list
        if isinstance(results, list):
            self._results = results
        else:
            self._results = [results]

    def __iter__(self):
        return iter(self._results)

    def __getitem__(self, index):
        return self._results[index]

    def __len__(self):
        return len(self._results)

    def __getattr__(self, attr):
        # Delegate attribute access to the first element.
        if self._results:
            return getattr(self._results[0], attr)
        raise AttributeError(f"{self.__class__.__name__} object has no attribute '{attr}'")

    def __repr__(self):
        return f"{self.__class__.__name__}({self._results!r})"
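A sketch of how the container smooths over single- versus multi-result calls; the results variable is assumed to come from a crawl:

```python
container = CrawlResultContainer(results)  # results: one CrawlResult or a list
for r in container:                        # iterate for the run-many case
    print(r.url, r.success)
print(container.url)  # single-result style: delegates to the first element
```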
RunManyReturn = Union[
    CrawlResultContainer[CrawlResultT],
    AsyncGenerator[CrawlResultT, None]
]


# END of backward compatibility code for markdown/markdown_v2.
# When removing this code in the future, make sure to:
# 1. Replace the private attribute and property with a standard field
# 2. Update any serialization logic that might depend on the current behavior


class AsyncCrawlResponse(BaseModel):
    html: str
    response_headers: Dict[str, str]
    js_execution_result: Optional[Dict[str, Any]] = None
    status_code: int
    screenshot: Optional[str] = None
    pdf_data: Optional[bytes] = None
    mhtml_data: Optional[str] = None
    get_delayed_content: Optional[Callable[[Optional[float]], Awaitable[str]]] = None
    downloaded_files: Optional[List[str]] = None
    ssl_certificate: Optional[SSLCertificate] = None
    redirected_url: Optional[str] = None
    network_requests: Optional[List[Dict[str, Any]]] = None
    console_messages: Optional[List[Dict[str, Any]]] = None

    class Config:
        arbitrary_types_allowed = True


###############################
# Scraping Models
###############################
class MediaItem(BaseModel):
    src: Optional[str] = ""
    data: Optional[str] = ""
    alt: Optional[str] = ""
    desc: Optional[str] = ""
    score: Optional[int] = 0
    type: str = "image"
    group_id: Optional[int] = 0
    format: Optional[str] = None
    width: Optional[int] = None


class Link(BaseModel):
    href: Optional[str] = ""
    text: Optional[str] = ""
    title: Optional[str] = ""
    base_domain: Optional[str] = ""
    head_data: Optional[Dict[str, Any]] = None  # Head metadata extracted from link target
    head_extraction_status: Optional[str] = None  # "success", "failed", "skipped"
    head_extraction_error: Optional[str] = None  # Error message if extraction failed
    intrinsic_score: Optional[float] = None  # Quality score based on URL structure, text, and context
    contextual_score: Optional[float] = None  # BM25 relevance score based on query and head content
    total_score: Optional[float] = None  # Combined score from intrinsic and contextual scores


class Media(BaseModel):
    images: List[MediaItem] = []
    videos: List[MediaItem] = []  # Using MediaItem model for now, can be extended with Video model if needed
    audios: List[MediaItem] = []  # Using MediaItem model for now, can be extended with Audio model if needed
    tables: List[Dict] = []  # Table data extracted from HTML tables


class Links(BaseModel):
    internal: List[Link] = []
    external: List[Link] = []


class ScrapingResult(BaseModel):
    cleaned_html: str
    success: bool
    media: Media = Media()
    links: Links = Links()
    metadata: Dict[str, Any] = {}
crawl4ai/processors/pdf/__init__.py (new file, 195 lines)
@@ -0,0 +1,195 @@
from pathlib import Path
import asyncio
from dataclasses import asdict
from crawl4ai.async_logger import AsyncLogger
from crawl4ai.async_crawler_strategy import AsyncCrawlerStrategy
from crawl4ai.models import AsyncCrawlResponse, ScrapingResult
from crawl4ai.content_scraping_strategy import ContentScrapingStrategy
from .processor import NaivePDFProcessorStrategy  # Assuming your current PDF code is in pdf_processor.py


class PDFCrawlerStrategy(AsyncCrawlerStrategy):
    def __init__(self, logger: AsyncLogger = None):
        self.logger = logger

    async def crawl(self, url: str, **kwargs) -> AsyncCrawlResponse:
        # Just pass through with placeholder HTML - the scraper will handle the real work
        return AsyncCrawlResponse(
            html="Scraper will handle the real work",
            response_headers={"Content-Type": "application/pdf"},
            status_code=200
        )

    async def close(self):
        pass

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()


class PDFContentScrapingStrategy(ContentScrapingStrategy):
    """
    A content scraping strategy for PDF files.

    Attributes:
        save_images_locally (bool): Whether to save images locally.
        extract_images (bool): Whether to extract images from PDF.
        image_save_dir (str): Directory to save extracted images.
        logger (AsyncLogger): Logger instance for recording events and errors.

    Methods:
        scrap(url: str, html: str, **params) -> ScrapingResult:
            Scrap content from a PDF file.
        ascrap(url: str, html: str, **kwargs) -> ScrapingResult:
            Asynchronous version of scrap.

    Usage:
        strategy = PDFContentScrapingStrategy(
            save_images_locally=False,
            extract_images=False,
            image_save_dir=None,
            logger=logger
        )
    """

    def __init__(self,
                 save_images_locally: bool = False,
                 extract_images: bool = False,
                 image_save_dir: str = None,
                 batch_size: int = 4,
                 logger: AsyncLogger = None):
        self.logger = logger
        self.pdf_processor = NaivePDFProcessorStrategy(
            save_images_locally=save_images_locally,
            extract_images=extract_images,
            image_save_dir=image_save_dir,
            batch_size=batch_size
        )
        self._temp_files = []  # Track temp files for cleanup

    def scrap(self, url: str, html: str, **params) -> ScrapingResult:
        """
        Scrap content from a PDF file.

        Args:
            url (str): The URL of the PDF file.
            html (str): The HTML content of the page.
            **params: Additional parameters.

        Returns:
            ScrapingResult: The scraped content.
        """
        # Download if URL or use local path
        pdf_path = self._get_pdf_path(url)
        try:
            # Process PDF
            # result = self.pdf_processor.process(Path(pdf_path))
            result = self.pdf_processor.process_batch(Path(pdf_path))

            # Combine page HTML
            cleaned_html = f"""
            <html>
            <head><meta name="pdf-pages" content="{len(result.pages)}"></head>
            <body>
            {''.join(f'<div class="pdf-page" data-page="{i+1}">{page.html}</div>'
                     for i, page in enumerate(result.pages))}
            </body>
            </html>
            """

            # Accumulate media and links with page numbers
            media = {"images": []}
            links = {"urls": []}

            for page in result.pages:
                # Add page number to each image
                for img in page.images:
                    img["page"] = page.page_number
                    media["images"].append(img)

                # Add page number to each link
                for link in page.links:
                    links["urls"].append({
                        "url": link,
                        "page": page.page_number
                    })

            return ScrapingResult(
                cleaned_html=cleaned_html,
                success=True,
                media=media,
                links=links,
                metadata=asdict(result.metadata)
            )
        finally:
            # Cleanup temp file if downloaded
            if url.startswith(("http://", "https://")):
                try:
                    Path(pdf_path).unlink(missing_ok=True)
                    if pdf_path in self._temp_files:
                        self._temp_files.remove(pdf_path)
                except Exception as e:
                    if self.logger:
                        self.logger.warning(f"Failed to cleanup temp file {pdf_path}: {e}")

    async def ascrap(self, url: str, html: str, **kwargs) -> ScrapingResult:
        # For simple cases, delegate to the sync version in a worker thread
        return await asyncio.to_thread(self.scrap, url, html, **kwargs)
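A usage sketch wiring both strategies into a crawl, following the strategy-injection pattern used elsewhere in this changeset; that AsyncWebCrawler and CrawlerRunConfig accept them exactly as shown is our assumption:

```python
import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig
from crawl4ai.processors.pdf import PDFCrawlerStrategy, PDFContentScrapingStrategy

async def main():
    # Assumed wiring: the PDF crawler strategy skips browser work and the
    # scraping strategy parses the downloaded PDF.
    async with AsyncWebCrawler(crawler_strategy=PDFCrawlerStrategy()) as crawler:
        result = await crawler.arun(
            "https://example.com/whitepaper.pdf",
            config=CrawlerRunConfig(scraping_strategy=PDFContentScrapingStrategy()),
        )
        print(result.metadata)

asyncio.run(main())
```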
    def _get_pdf_path(self, url: str) -> str:
        if url.startswith(("http://", "https://")):
            import tempfile
            import requests

            # Create temp file with .pdf extension
            temp_file = tempfile.NamedTemporaryFile(suffix='.pdf', delete=False)
            self._temp_files.append(temp_file.name)

            try:
                if self.logger:
                    self.logger.info(f"Downloading PDF from {url}...")

                # Download PDF with streaming and timeouts
                # Connection timeout: 20s, read timeout: 600s (10 minutes for large PDFs)
                response = requests.get(url, stream=True, timeout=(20, 60 * 10))
                response.raise_for_status()

                # Get file size if available
                total_size = int(response.headers.get('content-length', 0))
                downloaded = 0

                # Write to temp file
                with open(temp_file.name, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)
                        downloaded += len(chunk)
                        if self.logger and total_size > 0:
                            progress = (downloaded / total_size) * 100
                            if progress % 10 < 0.1:  # Log roughly every 10%
                                self.logger.debug(f"PDF download progress: {progress:.0f}%")

                if self.logger:
                    self.logger.info(f"PDF downloaded successfully: {temp_file.name}")

                return temp_file.name

            except requests.exceptions.Timeout as e:
                # Clean up temp file if download fails
                Path(temp_file.name).unlink(missing_ok=True)
                self._temp_files.remove(temp_file.name)
                raise RuntimeError(f"Timeout downloading PDF from {url}: {str(e)}")
            except Exception as e:
                # Clean up temp file if download fails
                Path(temp_file.name).unlink(missing_ok=True)
                self._temp_files.remove(temp_file.name)
                raise RuntimeError(f"Failed to download PDF from {url}: {str(e)}")

        elif url.startswith("file://"):
            return url[7:]  # Strip file:// prefix

        return url  # Assume local path


__all__ = ["PDFCrawlerStrategy", "PDFContentScrapingStrategy"]
crawl4ai/processors/pdf/processor.py (new file, 487 lines)
@@ -0,0 +1,487 @@
import logging
import re
from abc import ABC, abstractmethod
from datetime import datetime
from pathlib import Path
from time import time
from dataclasses import dataclass, asdict, field
from typing import Dict, List, Optional, Any, Union
import base64
import tempfile
from .utils import *
from .utils import (
    apply_png_predictor,
    clean_pdf_text,
    clean_pdf_text_to_html,
)

# Remove direct PyPDF2 imports from the top
# import PyPDF2
# from PyPDF2 import PdfReader

logger = logging.getLogger(__name__)


@dataclass
class PDFMetadata:
    title: Optional[str] = None
    author: Optional[str] = None
    producer: Optional[str] = None
    created: Optional[datetime] = None
    modified: Optional[datetime] = None
    pages: int = 0
    encrypted: bool = False
    file_size: Optional[int] = None


@dataclass
class PDFPage:
    page_number: int
    raw_text: str = ""
    markdown: str = ""
    html: str = ""
    images: List[Dict] = field(default_factory=list)
    links: List[str] = field(default_factory=list)
    layout: List[Dict] = field(default_factory=list)


@dataclass
class PDFProcessResult:
    metadata: PDFMetadata
    pages: List[PDFPage]
    processing_time: float = 0.0
    version: str = "1.0"


class PDFProcessorStrategy(ABC):
    @abstractmethod
    def process(self, pdf_path: Path) -> PDFProcessResult:
        pass


class NaivePDFProcessorStrategy(PDFProcessorStrategy):
    def __init__(self, image_dpi: int = 144, image_quality: int = 85, extract_images: bool = True,
                 save_images_locally: bool = False, image_save_dir: Optional[Path] = None, batch_size: int = 4):
        # Import check at initialization time
        try:
            import PyPDF2
        except ImportError:
            raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        self.image_dpi = image_dpi
        self.image_quality = image_quality
        self.current_page_number = 0
        self.extract_images = extract_images
        self.save_images_locally = save_images_locally
        self.image_save_dir = image_save_dir
        self.batch_size = batch_size
        self._temp_dir = None

    def process(self, pdf_path: Path) -> PDFProcessResult:
        # Import inside method to allow dependency to be optional
        try:
            from PyPDF2 import PdfReader
        except ImportError:
            raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")

        start_time = time()
        result = PDFProcessResult(
            metadata=PDFMetadata(),
            pages=[],
            version="1.1"
        )

        try:
            with pdf_path.open('rb') as file:
                reader = PdfReader(file)
                result.metadata = self._extract_metadata(pdf_path, reader)

                # Handle image directory
                image_dir = None
                if self.extract_images and self.save_images_locally:
                    if self.image_save_dir:
                        image_dir = Path(self.image_save_dir)
                        image_dir.mkdir(exist_ok=True, parents=True)
                    else:
                        self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_')
                        image_dir = Path(self._temp_dir)

                for page_num, page in enumerate(reader.pages):
                    self.current_page_number = page_num + 1
                    pdf_page = self._process_page(page, image_dir)
                    result.pages.append(pdf_page)

        except Exception as e:
            logger.error(f"Failed to process PDF: {str(e)}")
|
||||
raise
|
||||
finally:
|
||||
# Cleanup temp directory if it was created
|
||||
if self._temp_dir and not self.image_save_dir:
|
||||
import shutil
|
||||
try:
|
||||
shutil.rmtree(self._temp_dir)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to cleanup temp directory: {str(e)}")
|
||||
|
||||
result.processing_time = time() - start_time
|
||||
return result
|
||||
|
||||
def process_batch(self, pdf_path: Path) -> PDFProcessResult:
|
||||
"""Like process() but processes PDF pages in parallel batches"""
|
||||
# Import inside method to allow dependency to be optional
|
||||
try:
|
||||
from PyPDF2 import PdfReader
|
||||
import PyPDF2 # For type checking
|
||||
except ImportError:
|
||||
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
|
||||
|
||||
import concurrent.futures
|
||||
import threading
|
||||
|
||||
# Initialize PyPDF2 thread support
|
||||
if not hasattr(threading.current_thread(), "_children"):
|
||||
threading.current_thread()._children = set()
|
||||
|
||||
start_time = time()
|
||||
result = PDFProcessResult(
|
||||
metadata=PDFMetadata(),
|
||||
pages=[],
|
||||
version="1.1"
|
||||
)
|
||||
|
||||
try:
|
||||
# Get metadata and page count from main thread
|
||||
with pdf_path.open('rb') as file:
|
||||
reader = PdfReader(file)
|
||||
result.metadata = self._extract_metadata(pdf_path, reader)
|
||||
total_pages = len(reader.pages)
|
||||
|
||||
# Handle image directory setup
|
||||
image_dir = None
|
||||
if self.extract_images and self.save_images_locally:
|
||||
if self.image_save_dir:
|
||||
image_dir = Path(self.image_save_dir)
|
||||
image_dir.mkdir(exist_ok=True, parents=True)
|
||||
else:
|
||||
self._temp_dir = tempfile.mkdtemp(prefix='pdf_images_')
|
||||
image_dir = Path(self._temp_dir)
|
||||
|
||||
def process_page_safely(page_num: int):
|
||||
# Each thread opens its own file handle
|
||||
with pdf_path.open('rb') as file:
|
||||
thread_reader = PdfReader(file)
|
||||
page = thread_reader.pages[page_num]
|
||||
self.current_page_number = page_num + 1
|
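||||
# NOTE: current_page_number is shared instance state; with batch_size > 1, a concurrent worker can overwrite it before _process_page reads it
|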
||||
return self._process_page(page, image_dir)
|
||||
|
||||
# Process pages in parallel batches
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=self.batch_size) as executor:
|
||||
futures = []
|
||||
for page_num in range(total_pages):
|
||||
future = executor.submit(process_page_safely, page_num)
|
||||
futures.append((page_num + 1, future))
|
||||
|
||||
# Collect results in order
|
||||
result.pages = [None] * total_pages
|
||||
for page_num, future in futures:
|
||||
try:
|
||||
pdf_page = future.result()
|
||||
result.pages[page_num - 1] = pdf_page
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to process page {page_num}: {str(e)}")
|
||||
raise
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to process PDF: {str(e)}")
|
||||
raise
|
||||
finally:
|
||||
# Cleanup temp directory if it was created
|
||||
if self._temp_dir and not self.image_save_dir:
|
||||
import shutil
|
||||
try:
|
||||
shutil.rmtree(self._temp_dir)
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to cleanup temp directory: {str(e)}")
|
||||
|
||||
result.processing_time = time() - start_time
|
||||
return result
|
||||
|
||||
def _process_page(self, page, image_dir: Optional[Path]) -> PDFPage:
|
||||
pdf_page = PDFPage(
|
||||
page_number=self.current_page_number,
|
||||
)
|
||||
|
||||
# Text and font extraction
|
||||
def visitor_text(text, cm, tm, font_dict, font_size):
|
||||
pdf_page.raw_text += text
|
||||
pdf_page.layout.append({
|
||||
"type": "text",
|
||||
"text": text,
|
||||
"x": tm[4],
|
||||
"y": tm[5],
|
||||
})
|
||||
|
||||
page.extract_text(visitor_text=visitor_text)
|
||||
|
||||
# Image extraction
|
||||
if self.extract_images:
|
||||
pdf_page.images = self._extract_images(page, image_dir)
|
||||
|
||||
# Link extraction
|
||||
pdf_page.links = self._extract_links(page)
|
||||
|
||||
# Add markdown content
|
||||
pdf_page.markdown = clean_pdf_text(self.current_page_number, pdf_page.raw_text)
|
||||
pdf_page.html = clean_pdf_text_to_html(self.current_page_number, pdf_page.raw_text)
|
||||
|
||||
return pdf_page
|
||||
|
||||
def _extract_images(self, page, image_dir: Optional[Path]) -> List[Dict]:
|
||||
# Import PyPDF2 for type checking only when needed
|
||||
try:
|
||||
import PyPDF2
|
||||
except ImportError:
|
||||
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
|
||||
|
||||
if not self.extract_images:
|
||||
return []
|
||||
|
||||
images = []
|
||||
try:
|
||||
resources = page.get("/Resources")
|
||||
if resources: # Check if resources exist
|
||||
resources = resources.get_object() # Resolve IndirectObject
|
||||
if '/XObject' in resources:
|
||||
xobjects = resources['/XObject'].get_object()
|
||||
img_count = 0
|
||||
for obj_name in xobjects:
|
||||
xobj = xobjects[obj_name]
|
||||
if hasattr(xobj, 'get_object') and callable(xobj.get_object):
|
||||
xobj = xobj.get_object()
|
||||
if xobj.get('/Subtype') == '/Image':
|
||||
try:
|
||||
img_count += 1
|
||||
img_filename = f"page_{self.current_page_number}_img_{img_count}"
|
||||
data = xobj.get_data()
|
||||
filters = xobj.get('/Filter', [])
|
||||
if not isinstance(filters, list):
|
||||
filters = [filters]
|
||||
|
||||
# Resolve IndirectObjects in properties
|
||||
width = xobj.get('/Width', 0)
|
||||
height = xobj.get('/Height', 0)
|
||||
color_space = xobj.get('/ColorSpace', '/DeviceRGB')
|
||||
if isinstance(color_space, PyPDF2.generic.IndirectObject):
|
||||
color_space = color_space.get_object()
|
||||
|
||||
# Handle different image encodings
|
||||
success = False
|
||||
image_format = 'bin'
|
||||
image_data = None
|
||||
|
||||
if '/FlateDecode' in filters:
|
||||
try:
|
||||
decode_parms = xobj.get('/DecodeParms', {})
|
||||
if isinstance(decode_parms, PyPDF2.generic.IndirectObject):
|
||||
decode_parms = decode_parms.get_object()
|
||||
|
||||
predictor = decode_parms.get('/Predictor', 1)
|
||||
bits = xobj.get('/BitsPerComponent', 8)
|
||||
colors = 3 if color_space == '/DeviceRGB' else 1
|
||||
|
||||
if predictor >= 10:
|
||||
data = apply_png_predictor(data, width, bits, colors)
|
||||
|
||||
# Create PIL Image
|
||||
from PIL import Image
|
||||
mode = 'RGB' if color_space == '/DeviceRGB' else 'L'
|
||||
img = Image.frombytes(mode, (width, height), data)
|
||||
|
||||
if self.save_images_locally:
|
||||
final_path = (image_dir / img_filename).with_suffix('.png')
|
||||
img.save(final_path)
|
||||
image_data = str(final_path)
|
||||
else:
|
||||
import io
|
||||
img_byte_arr = io.BytesIO()
|
||||
img.save(img_byte_arr, format='PNG')
|
||||
image_data = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')
|
||||
|
||||
success = True
|
||||
image_format = 'png'
|
||||
except Exception as e:
|
||||
logger.error(f"FlateDecode error: {str(e)}")
|
||||
|
||||
elif '/DCTDecode' in filters:
|
||||
# JPEG image
|
||||
try:
|
||||
if self.save_images_locally:
|
||||
final_path = (image_dir / img_filename).with_suffix('.jpg')
|
||||
with open(final_path, 'wb') as f:
|
||||
f.write(data)
|
||||
image_data = str(final_path)
|
||||
else:
|
||||
image_data = base64.b64encode(data).decode('utf-8')
|
||||
success = True
|
||||
image_format = 'jpeg'
|
||||
except Exception as e:
|
||||
logger.error(f"JPEG save error: {str(e)}")
|
||||
|
||||
elif '/CCITTFaxDecode' in filters:
|
||||
try:
|
||||
if data[:4] != b'II*\x00':
|
||||
# Add TIFF header if missing
|
||||
tiff_header = b'II*\x00\x08\x00\x00\x00\x0e\x00\x00\x01\x03\x00\x01\x00\x00\x00' + \
|
||||
width.to_bytes(4, 'little') + \
|
||||
b'\x01\x03\x00\x01\x00\x00\x00' + \
|
||||
height.to_bytes(4, 'little') + \
|
||||
b'\x01\x12\x00\x03\x00\x00\x00\x01\x00\x01\x00\x00\x01\x17\x00\x04\x00\x00\x00\x01\x00\x00\x00J\x01\x1B\x00\x05\x00\x00\x00\x01\x00\x00\x00R\x01\x28\x00\x03\x00\x00\x00\x01\x00\x02\x00\x00'
|
||||
data = tiff_header + data
|
||||
|
||||
if self.save_images_locally:
|
||||
final_path = (image_dir / img_filename).with_suffix('.tiff')
|
||||
with open(final_path, 'wb') as f:
|
||||
f.write(data)
|
||||
image_data = str(final_path)
|
||||
else:
|
||||
image_data = base64.b64encode(data).decode('utf-8')
|
||||
success = True
|
||||
image_format = 'tiff'
|
||||
except Exception as e:
|
||||
logger.error(f"CCITT save error: {str(e)}")
|
||||
|
||||
elif '/JPXDecode' in filters:
|
||||
# JPEG 2000
|
||||
try:
|
||||
if self.save_images_locally:
|
||||
final_path = (image_dir / img_filename).with_suffix('.jp2')
|
||||
with open(final_path, 'wb') as f:
|
||||
f.write(data)
|
||||
image_data = str(final_path)
|
||||
else:
|
||||
image_data = base64.b64encode(data).decode('utf-8')
|
||||
success = True
|
||||
image_format = 'jpeg2000'
|
||||
except Exception as e:
|
||||
logger.error(f"JPEG2000 save error: {str(e)}")
|
||||
|
||||
if success and image_data:
|
||||
image_info = {
|
||||
"format": image_format,
|
||||
"width": width,
|
||||
"height": height,
|
||||
"color_space": str(color_space),
|
||||
"bits_per_component": xobj.get('/BitsPerComponent', 1)
|
||||
}
|
||||
|
||||
if self.save_images_locally:
|
||||
image_info["path"] = image_data
|
||||
else:
|
||||
image_info["data"] = image_data
|
||||
|
||||
images.append(image_info)
|
||||
else:
|
||||
# Fallback: Save raw data
|
||||
if self.save_images_locally:
|
||||
final_path = (image_dir / img_filename).with_suffix('.bin')
|
||||
with open(final_path, 'wb') as f:
|
||||
f.write(data)
|
||||
logger.warning(f"Saved raw image data to {final_path}")
|
||||
else:
|
||||
image_data = base64.b64encode(data).decode('utf-8')
|
||||
images.append({
|
||||
"format": "bin",
|
||||
"width": width,
|
||||
"height": height,
|
||||
"color_space": str(color_space),
|
||||
"bits_per_component": xobj.get('/BitsPerComponent', 1),
|
||||
"data": image_data
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing image: {str(e)}")
|
||||
except Exception as e:
|
||||
logger.error(f"Image extraction error: {str(e)}")
|
||||
|
||||
return images
|
||||
|
||||
def _extract_links(self, page) -> List[str]:
|
||||
links = []
|
||||
if '/Annots' in page:
|
||||
try:
|
||||
for annot in page['/Annots']:
|
||||
a = annot.get_object()
|
||||
if '/A' in a and '/URI' in a['/A']:
|
||||
links.append(a['/A']['/URI'])
|
||||
except Exception as e:
|
||||
print(f"Link error: {str(e)}")
|
||||
return links
|
||||
|
||||
def _extract_metadata(self, pdf_path: Path, reader = None) -> PDFMetadata:
|
||||
# Import inside method to allow dependency to be optional
|
||||
if reader is None:
|
||||
try:
|
||||
from PyPDF2 import PdfReader
|
||||
reader = PdfReader(pdf_path)
|
||||
except ImportError:
|
||||
raise ImportError("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
|
||||
|
||||
meta = reader.metadata or {}
|
||||
created = self._parse_pdf_date(meta.get('/CreationDate', ''))
|
||||
modified = self._parse_pdf_date(meta.get('/ModDate', ''))
|
||||
|
||||
return PDFMetadata(
|
||||
title=meta.get('/Title'),
|
||||
author=meta.get('/Author'),
|
||||
producer=meta.get('/Producer'),
|
||||
created=created,
|
||||
modified=modified,
|
||||
pages=len(reader.pages),
|
||||
encrypted=reader.is_encrypted,
|
||||
file_size=pdf_path.stat().st_size
|
||||
)
|
||||
|
||||
def _parse_pdf_date(self, date_str: str) -> Optional[datetime]:
|
||||
try:
|
||||
match = re.match(r'D:(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})', date_str)
|
||||
if not match:
|
||||
return None
|
||||
|
||||
return datetime(
|
||||
year=int(match[1]),
|
||||
month=int(match[2]),
|
||||
day=int(match[3]),
|
||||
hour=int(match[4]),
|
||||
minute=int(match[5]),
|
||||
second=int(match[6])
|
||||
)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
# Usage example
|
||||
if __name__ == "__main__":
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
# Import PyPDF2 only when running the file directly
|
||||
import PyPDF2
|
||||
from PyPDF2 import PdfReader
|
||||
except ImportError:
|
||||
print("PyPDF2 is required for PDF processing. Install with 'pip install crawl4ai[pdf]'")
|
||||
exit(1)
|
||||
|
||||
current_dir = Path(__file__).resolve().parent
|
||||
pdf_path = f'{current_dir}/test.pdf'
|
||||
|
||||
strategy = NaivePDFProcessorStrategy()
|
||||
result = strategy.process(Path(pdf_path))
|
||||
|
||||
# Convert to JSON
|
||||
json_output = asdict(result)
|
||||
print(json.dumps(json_output, indent=2, default=str))
|
||||
|
||||
with open(f'{current_dir}/test.html', 'w') as f:
|
||||
for page in result.pages:
|
||||
f.write(f'<h1>Page {page.page_number}</h1>')
|
||||
f.write(page.html)
|
||||
with open(f'{current_dir}/test.md', 'w') as f:
|
||||
for page in result.pages:
|
||||
f.write(f'# Page {page.page_number}\n\n')
|
||||
f.write(clean_pdf_text(page.page_number, page.raw_text))
|
||||
f.write('\n\n')
|
||||
350
crawl4ai/processors/pdf/utils.py
Normal file
350
crawl4ai/processors/pdf/utils.py
Normal file
@@ -0,0 +1,350 @@
|
||||
import re
|
||||
|
||||
def apply_png_predictor(data, width, bits, color_channels):
|
||||
"""Decode PNG predictor (PDF 1.5+ filter)"""
|
||||
bytes_per_pixel = (bits * color_channels) // 8
|
||||
if (bits * color_channels) % 8 != 0:
|
||||
bytes_per_pixel += 1
|
||||
|
||||
stride = width * bytes_per_pixel
|
||||
scanline_length = stride + 1 # +1 for filter byte
|
||||
|
||||
if len(data) % scanline_length != 0:
|
||||
raise ValueError("Invalid scanline structure")
|
||||
|
||||
num_lines = len(data) // scanline_length
|
||||
output = bytearray()
|
||||
prev_line = b'\x00' * stride
|
||||
|
||||
for i in range(num_lines):
|
||||
line = data[i*scanline_length:(i+1)*scanline_length]
|
||||
filter_type = line[0]
|
||||
filtered = line[1:]
|
||||
|
||||
if filter_type == 0: # None
|
||||
decoded = filtered
|
||||
elif filter_type == 1: # Sub
|
||||
decoded = bytearray(filtered)
|
||||
for j in range(bytes_per_pixel, len(decoded)):
|
||||
decoded[j] = (decoded[j] + decoded[j - bytes_per_pixel]) % 256
|
||||
elif filter_type == 2: # Up
|
||||
decoded = bytearray([(filtered[j] + prev_line[j]) % 256
|
||||
for j in range(len(filtered))])
|
||||
elif filter_type == 3: # Average
|
||||
decoded = bytearray(filtered)
|
||||
for j in range(len(decoded)):
|
||||
left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
|
||||
up = prev_line[j]
|
||||
avg = (left + up) // 2
|
||||
decoded[j] = (decoded[j] + avg) % 256
|
||||
elif filter_type == 4: # Paeth
|
||||
decoded = bytearray(filtered)
|
||||
for j in range(len(decoded)):
|
||||
left = decoded[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
|
||||
up = prev_line[j]
|
||||
up_left = prev_line[j - bytes_per_pixel] if j >= bytes_per_pixel else 0
|
||||
paeth = paeth_predictor(left, up, up_left)
|
||||
decoded[j] = (decoded[j] + paeth) % 256
|
||||
else:
|
||||
raise ValueError(f"Unsupported filter type: {filter_type}")
|
||||
|
||||
output.extend(decoded)
|
||||
prev_line = decoded
|
||||
|
||||
return bytes(output)
|
||||
|
||||
def paeth_predictor(a, b, c):
|
||||
p = a + b - c
|
||||
pa = abs(p - a)
|
||||
pb = abs(p - b)
|
||||
pc = abs(p - c)
|
||||
if pa <= pb and pa <= pc:
|
||||
return a
|
||||
elif pb <= pc:
|
||||
return b
|
||||
else:
|
||||
return c
|
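To make the scanline layout concrete, here is a tiny sketch of apply_png_predictor on a 3-pixel-wide, two-row, 8-bit grayscale image; the byte values are invented for illustration:

```python
# Each scanline is one filter-type byte followed by `stride` filtered bytes.
raw = bytes(
    [1, 10, 5, 5]   # filter 1 (Sub): each byte adds the byte one pixel to the left
    + [2, 1, 1, 1]  # filter 2 (Up): each byte adds the byte directly above it
)

decoded = apply_png_predictor(raw, width=3, bits=8, color_channels=1)
assert decoded == bytes([10, 15, 20, 11, 16, 21])
```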
||||
|
||||
import html
|
||||
|
||||
def clean_pdf_text_to_html(page_number, text):
|
||||
# Decode Unicode escapes and handle surrogate pairs
|
||||
try:
|
||||
decoded = text.encode('latin-1').decode('unicode-escape')
|
||||
decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
|
||||
except Exception as e:
|
||||
decoded = text # Fallback if decoding fails
|
||||
|
||||
article_title_detected = False
|
||||
# decoded = re.sub(r'\.\n', '.\n\n', decoded)
|
||||
# decoded = re.sub(r'\.\n', '<|break|>', decoded)
|
||||
lines = decoded.split('\n')
|
||||
output = []
|
||||
current_paragraph = []
|
||||
in_header = False
|
||||
email_pattern = re.compile(r'\{.*?\}')
|
||||
affiliation_pattern = re.compile(r'^†')
|
||||
quote_pattern = re.compile(r'^["“]')
|
||||
author_pattern = re.compile(
|
||||
r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
|
||||
r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
|
||||
r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
|
||||
)
|
||||
|
||||
def flush_paragraph():
|
||||
if current_paragraph:
|
||||
para = ' '.join(current_paragraph)
|
||||
para = re.sub(r'\s+', ' ', para).strip()
|
||||
if para:
|
||||
# escaped_para = html.escape(para)
|
||||
escaped_para = para
|
||||
# escaped_para = re.sub(r'\.\n', '.\n\n', escaped_para)
|
||||
# Split the paragraph on '.\n\n' boundaries into sub-paragraphs
|
||||
escaped_para = escaped_para.split('.\n\n')
|
||||
# Wrap each part in <p> tag
|
||||
escaped_para = [f'<p>{part}</p>' for part in escaped_para]
|
||||
output.append(f'<div class="paragraph">{"".join(escaped_para)}</div><hr/>')
|
||||
current_paragraph.clear()
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
line = line.strip()
|
||||
|
||||
# Handle empty lines
|
||||
if not line:
|
||||
flush_paragraph()
|
||||
continue
|
||||
|
||||
# Detect article title (first line with reasonable length)
|
||||
if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and len(lines) > 1:
|
||||
flush_paragraph()
|
||||
escaped_line = html.escape(line)
|
||||
output.append(f'<h2>{escaped_line}</h2>')
|
||||
article_title_detected = True
|
||||
continue
|
||||
|
||||
# Detect numbered headers like "2.1 Background"
|
||||
numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
|
||||
if i > 0 and not lines[i-1].strip() and numbered_header:
|
||||
flush_paragraph()
|
||||
level = numbered_header.group(1).count('.') + 1
|
||||
header_text = numbered_header.group(2)
|
||||
md_level = min(level + 1, 6)
|
||||
escaped_header = html.escape(header_text)
|
||||
output.append(f'<h{md_level}>{escaped_header}</h{md_level}>')
|
||||
in_header = True
|
||||
continue
|
||||
|
||||
# Detect authors
|
||||
if page_number == 1 and author_pattern.match(line):
|
||||
authors = re.sub(r'[†â€]', '', line)
|
||||
authors = re.split(r', | and ', authors)
|
||||
formatted_authors = []
|
||||
for author in authors:
|
||||
if author.strip():
|
||||
parts = [p for p in author.strip().split() if p]
|
||||
formatted = ' '.join(parts)
|
||||
escaped_author = html.escape(formatted)
|
||||
formatted_authors.append(f'<strong>{escaped_author}</strong>')
|
||||
|
||||
if len(formatted_authors) > 1:
|
||||
joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
|
||||
else:
|
||||
joined = formatted_authors[0]
|
||||
|
||||
output.append(f'<p>{joined}</p>')
|
||||
continue
|
||||
|
||||
# Detect affiliation
|
||||
if affiliation_pattern.match(line):
|
||||
escaped_line = html.escape(line)
|
||||
output.append(f'<p><em>{escaped_line}</em></p>')
|
||||
continue
|
||||
|
||||
# Detect emails
|
||||
if email_pattern.match(line):
|
||||
escaped_line = html.escape(line)
|
||||
output.append(f'<p><code>{escaped_line}</code></p>')
|
||||
continue
|
||||
|
||||
# Detect section headers
|
||||
if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
|
||||
flush_paragraph()
|
||||
escaped_line = html.escape(line)
|
||||
output.append(f'<h2 class="section-header"><em>{escaped_line}</em></h2>')
|
||||
in_header = True
|
||||
continue
|
||||
|
||||
# Handle quotes
|
||||
if quote_pattern.match(line):
|
||||
flush_paragraph()
|
||||
escaped_line = html.escape(line)
|
||||
output.append(f'<blockquote><p>{escaped_line}</p></blockquote>')
|
||||
continue
|
||||
|
||||
# Handle hyphenated words
|
||||
if line.endswith('-'):
|
||||
current_paragraph.append(line[:-1].strip())
|
||||
else:
|
||||
current_paragraph.append(line)
|
||||
|
||||
# Handle paragraph breaks after headers
|
||||
if in_header and not line.endswith(('.', '!', '?')):
|
||||
flush_paragraph()
|
||||
in_header = False
|
||||
|
||||
flush_paragraph()
|
||||
|
||||
# Post-process HTML
|
||||
html_output = '\n'.join(output)
|
||||
|
||||
# Fix common citation patterns
|
||||
html_output = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'<cite>\1</cite>', html_output)
|
||||
|
||||
# Fix escaped characters
|
||||
html_output = html_output.replace('\\ud835', '').replace('\\u2020', '†')
|
||||
|
||||
# Remove leftover hyphens and fix spacing
|
||||
html_output = re.sub(r'\s+-\s+', '', html_output)
|
||||
html_output = re.sub(r'\s+([.,!?)])', r'\1', html_output)
|
||||
|
||||
return html_output
|
||||
|
||||
def clean_pdf_text(page_number, text):
|
||||
# Decode Unicode escapes and handle surrogate pairs
|
||||
try:
|
||||
decoded = text.encode('latin-1').decode('unicode-escape')
|
||||
decoded = decoded.encode('utf-16', 'surrogatepass').decode('utf-16')
|
||||
except Exception as e:
|
||||
decoded = text # Fallback if decoding fails
|
||||
|
||||
article_title_detected = False
|
||||
decoded = re.sub(r'\.\n', '.\n\n', decoded)
|
||||
lines = decoded.split('\n')
|
||||
output = []
|
||||
current_paragraph = []
|
||||
in_header = False
|
||||
email_pattern = re.compile(r'\{.*?\}')
|
||||
affiliation_pattern = re.compile(r'^†')
|
||||
quote_pattern = re.compile(r'^["“]')
|
||||
author_pattern = re.compile(
|
||||
r'^\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?'
|
||||
r'(?:,\s*[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)*'
|
||||
r'(?:,\s*(?:and|&)\s+[A-Z][a-zA-Z]+(?:\s+[A-Z][a-zA-Z]+)*\s*(?:[†*0-9]+)?)?\s*$'
|
||||
)
|
||||
|
||||
def flush_paragraph():
|
||||
if current_paragraph:
|
||||
para = ' '.join(current_paragraph)
|
||||
para = re.sub(r'\s+', ' ', para).strip()
|
||||
if para:
|
||||
output.append(para)
|
||||
current_paragraph.clear()
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
line = line.strip()
|
||||
|
||||
# Handle special patterns
|
||||
if not line:
|
||||
flush_paragraph()
|
||||
continue
|
||||
|
||||
# Detect headline (first line, reasonable length, surrounded by empty lines)
|
||||
if not article_title_detected and i == 0 and 3 <= len(line.split()) <= 8 and (len(lines) > 1):
|
||||
flush_paragraph()
|
||||
output.append(f'## {line}')
|
||||
continue
|
||||
|
||||
|
||||
# Detect numbered headers like "2.1 Background"
|
||||
numbered_header = re.match(r'^(\d+(?:\.\d+)*)\s+(.+)$', line)
|
||||
if i > 0 and not lines[i-1].strip() and numbered_header:
|
||||
flush_paragraph()
|
||||
level = numbered_header.group(1).count('.') + 1 # Convert 2.1 → level 2
|
||||
header_text = numbered_header.group(2)
|
||||
# Cap heading depth at ###### (h6)
|
||||
md_level = min(level + 1, 6) # 1 → ##, 2 → ###, 3 → #### etc
|
||||
output.append(f'{"#" * md_level} {header_text}')
|
||||
in_header = True
|
||||
continue
|
||||
|
||||
|
||||
# Detect authors
|
||||
if page_number == 1 and author_pattern.match(line):
|
||||
# Clean and format author names
|
||||
authors = re.sub(r'[†â€]', '', line) # Remove affiliation markers
|
||||
authors = re.split(r', | and ', authors)
|
||||
formatted_authors = []
|
||||
for author in authors:
|
||||
if author.strip():
|
||||
# Handle "First Last" formatting
|
||||
parts = [p for p in author.strip().split() if p]
|
||||
formatted = ' '.join(parts)
|
||||
formatted_authors.append(f'**{formatted}**')
|
||||
|
||||
# Join with commas and "and"
|
||||
if len(formatted_authors) > 1:
|
||||
joined = ', '.join(formatted_authors[:-1]) + ' and ' + formatted_authors[-1]
|
||||
else:
|
||||
joined = formatted_authors[0]
|
||||
|
||||
output.append(joined)
|
||||
continue
|
||||
|
||||
# Detect affiliation
|
||||
if affiliation_pattern.match(line):
|
||||
output.append(f'*{line}*')
|
||||
continue
|
||||
|
||||
# Detect emails
|
||||
if email_pattern.match(line):
|
||||
output.append(f'`{line}`')
|
||||
continue
|
||||
|
||||
# Detect section headers
|
||||
if re.match(r'^(Abstract|\d+\s+[A-Z]|References|Appendix|Figure|Table)', line):
|
||||
flush_paragraph()
|
||||
output.append(f'_[{line}]_')
|
||||
in_header = True
|
||||
continue
|
||||
|
||||
|
||||
# Handle quotes
|
||||
if quote_pattern.match(line):
|
||||
flush_paragraph()
|
||||
output.append(f'> {line}')
|
||||
continue
|
||||
|
||||
# Handle hyphenated words
|
||||
if line.endswith('-'):
|
||||
current_paragraph.append(line[:-1].strip())
|
||||
else:
|
||||
current_paragraph.append(line)
|
||||
|
||||
# Handle paragraph breaks after headers
|
||||
if in_header and not line.endswith(('.', '!', '?')):
|
||||
flush_paragraph()
|
||||
in_header = False
|
||||
|
||||
flush_paragraph()
|
||||
|
||||
# Post-processing
|
||||
markdown = '\n\n'.join(output)
|
||||
|
||||
# Fix common citation patterns
|
||||
markdown = re.sub(r'\(([A-Z][a-z]+ et al\. \d{4})\)', r'[\1]', markdown)
|
||||
|
||||
# Fix escaped characters
|
||||
markdown = markdown.replace('\\ud835', '').replace('\\u2020', '†')
|
||||
|
||||
# Remove leftover hyphens and fix spacing
|
||||
markdown = re.sub(r'\s+-\s+', '', markdown) # Join hyphenated words
|
||||
markdown = re.sub(r'\s+([.,!?)])', r'\1', markdown) # Fix punctuation spacing
|
||||
|
||||
|
||||
return markdown
|
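As a quick smoke test of the markdown cleaner, a hypothetical page-one snippet can be fed through clean_pdf_text; the output shown is indicative only, since the heuristics are sensitive to the exact line layout:

```python
raw = (
    "A Study Of PDF Extraction\n"  # 5 words: caught by the title heuristic
    "\n"
    "Jane Doe, John Smith\n"       # matches the page-1 author pattern
    "\n"
    "We describe a naive extraction\n"
    "pipeline for PDFs.\n"
)
print(clean_pdf_text(1, raw))
# Indicative output:
#   ## A Study Of PDF Extraction
#
#   **Jane Doe** and **John Smith**
#
#   We describe a naive extraction pipeline for PDFs.
```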
||||
1376
crawl4ai/prompts.py
1376
crawl4ai/prompts.py
File diff suppressed because it is too large
Load Diff
158
crawl4ai/proxy_strategy.py
Normal file
158
crawl4ai/proxy_strategy.py
Normal file
@@ -0,0 +1,158 @@
|
||||
from typing import List, Dict, Optional
|
||||
from abc import ABC, abstractmethod
|
||||
from itertools import cycle
|
||||
import os
|
||||
|
||||
|
||||
########### ATTENTION PEOPLE OF EARTH ###########
|
||||
# I have moved this config to async_configs.py and kept it here in case someone is still importing it; however,
|
||||
# be a dear and use `from crawl4ai import ProxyConfig` instead :)
|
||||
class ProxyConfig:
|
||||
def __init__(
|
||||
self,
|
||||
server: str,
|
||||
username: Optional[str] = None,
|
||||
password: Optional[str] = None,
|
||||
ip: Optional[str] = None,
|
||||
):
|
||||
"""Configuration class for a single proxy.
|
||||
|
||||
Args:
|
||||
server: Proxy server URL (e.g., "http://127.0.0.1:8080")
|
||||
username: Optional username for proxy authentication
|
||||
password: Optional password for proxy authentication
|
||||
ip: Optional IP address for verification purposes
|
||||
"""
|
||||
self.server = server
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
# Extract IP from server if not explicitly provided
|
||||
self.ip = ip or self._extract_ip_from_server()
|
||||
|
||||
def _extract_ip_from_server(self) -> Optional[str]:
|
||||
"""Extract IP address from server URL."""
|
||||
try:
|
||||
# Simple extraction assuming http://ip:port format
|
||||
if "://" in self.server:
|
||||
parts = self.server.split("://")[1].split(":")
|
||||
return parts[0]
|
||||
else:
|
||||
parts = self.server.split(":")
|
||||
return parts[0]
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def from_string(proxy_str: str) -> "ProxyConfig":
|
||||
"""Create a ProxyConfig from a string in the format 'ip:port:username:password'."""
|
||||
parts = proxy_str.split(":")
|
||||
if len(parts) == 4: # ip:port:username:password
|
||||
ip, port, username, password = parts
|
||||
return ProxyConfig(
|
||||
server=f"http://{ip}:{port}",
|
||||
username=username,
|
||||
password=password,
|
||||
ip=ip
|
||||
)
|
||||
elif len(parts) == 2: # ip:port only
|
||||
ip, port = parts
|
||||
return ProxyConfig(
|
||||
server=f"http://{ip}:{port}",
|
||||
ip=ip
|
||||
)
|
||||
else:
|
||||
raise ValueError(f"Invalid proxy string format: {proxy_str}")
|
||||
|
||||
@staticmethod
|
||||
def from_dict(proxy_dict: Dict) -> "ProxyConfig":
|
||||
"""Create a ProxyConfig from a dictionary."""
|
||||
return ProxyConfig(
|
||||
server=proxy_dict.get("server"),
|
||||
username=proxy_dict.get("username"),
|
||||
password=proxy_dict.get("password"),
|
||||
ip=proxy_dict.get("ip")
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def from_env(env_var: str = "PROXIES") -> List["ProxyConfig"]:
|
||||
"""Load proxies from environment variable.
|
||||
|
||||
Args:
|
||||
env_var: Name of environment variable containing comma-separated proxy strings
|
||||
|
||||
Returns:
|
||||
List of ProxyConfig objects
|
||||
"""
|
||||
proxies = []
|
||||
try:
|
||||
proxy_list = os.getenv(env_var, "").split(",")
|
||||
for proxy in proxy_list:
|
||||
if not proxy:
|
||||
continue
|
||||
proxies.append(ProxyConfig.from_string(proxy))
|
||||
except Exception as e:
|
||||
print(f"Error loading proxies from environment: {e}")
|
||||
return proxies
|
||||
|
||||
def to_dict(self) -> Dict:
|
||||
"""Convert to dictionary representation."""
|
||||
return {
|
||||
"server": self.server,
|
||||
"username": self.username,
|
||||
"password": self.password,
|
||||
"ip": self.ip
|
||||
}
|
||||
|
||||
def clone(self, **kwargs) -> "ProxyConfig":
|
||||
"""Create a copy of this configuration with updated values.
|
||||
|
||||
Args:
|
||||
**kwargs: Key-value pairs of configuration options to update
|
||||
|
||||
Returns:
|
||||
ProxyConfig: A new instance with the specified updates
|
||||
"""
|
||||
config_dict = self.to_dict()
|
||||
config_dict.update(kwargs)
|
||||
return ProxyConfig.from_dict(config_dict)
|
||||
|
||||
|
||||
class ProxyRotationStrategy(ABC):
|
||||
"""Base abstract class for proxy rotation strategies"""
|
||||
|
||||
@abstractmethod
|
||||
async def get_next_proxy(self) -> Optional[ProxyConfig]:
|
||||
"""Get next proxy configuration from the strategy"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def add_proxies(self, proxies: List[ProxyConfig]):
|
||||
"""Add proxy configurations to the strategy"""
|
||||
pass
|
||||
|
||||
class RoundRobinProxyStrategy(ProxyRotationStrategy):
|
||||
"""Simple round-robin proxy rotation strategy using ProxyConfig objects"""
|
||||
|
||||
def __init__(self, proxies: List[ProxyConfig] = None):
|
||||
"""
|
||||
Initialize with optional list of proxy configurations
|
||||
|
||||
Args:
|
||||
proxies: List of ProxyConfig objects
|
||||
"""
|
||||
self._proxies = []
|
||||
self._proxy_cycle = None
|
||||
if proxies:
|
||||
self.add_proxies(proxies)
|
||||
|
||||
def add_proxies(self, proxies: List[ProxyConfig]):
|
||||
"""Add new proxies to the rotation pool"""
|
||||
self._proxies.extend(proxies)
|
||||
self._proxy_cycle = cycle(self._proxies)
|
||||
|
||||
async def get_next_proxy(self) -> Optional[ProxyConfig]:
|
||||
"""Get next proxy in round-robin fashion"""
|
||||
if not self._proxy_cycle:
|
||||
return None
|
||||
return next(self._proxy_cycle)
|
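A minimal rotation sketch; the proxy addresses below are placeholders from the TEST-NET range:

```python
import asyncio

from crawl4ai.proxy_strategy import ProxyConfig, RoundRobinProxyStrategy


async def main():
    proxies = [
        ProxyConfig.from_string("203.0.113.10:8080:user:secret"),  # ip:port:username:password
        ProxyConfig.from_string("203.0.113.11:8080"),              # ip:port only
    ]
    strategy = RoundRobinProxyStrategy(proxies)

    for _ in range(3):
        proxy = await strategy.get_next_proxy()
        print(proxy.server, proxy.ip)  # cycles .10, .11, .10


asyncio.run(main())
```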
||||
35
crawl4ai/script/__init__.py
Normal file
35
crawl4ai/script/__init__.py
Normal file
@@ -0,0 +1,35 @@
|
||||
"""
|
||||
C4A-Script: A domain-specific language for web automation in Crawl4AI
|
||||
"""
|
||||
|
||||
from .c4a_compile import C4ACompiler, compile, validate, compile_file
|
||||
from .c4a_result import (
|
||||
CompilationResult,
|
||||
ValidationResult,
|
||||
ErrorDetail,
|
||||
WarningDetail,
|
||||
ErrorType,
|
||||
Severity,
|
||||
Suggestion
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# Main compiler
|
||||
"C4ACompiler",
|
||||
|
||||
# Convenience functions
|
||||
"compile",
|
||||
"validate",
|
||||
"compile_file",
|
||||
|
||||
# Result types
|
||||
"CompilationResult",
|
||||
"ValidationResult",
|
||||
"ErrorDetail",
|
||||
"WarningDetail",
|
||||
|
||||
# Enums
|
||||
"ErrorType",
|
||||
"Severity",
|
||||
"Suggestion"
|
||||
]
|
||||
398
crawl4ai/script/c4a_compile.py
Normal file
398
crawl4ai/script/c4a_compile.py
Normal file
@@ -0,0 +1,398 @@
|
||||
"""
|
||||
Clean C4A-Script API with Result pattern
|
||||
No exceptions - always returns results
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
import pathlib
|
||||
import re
|
||||
from typing import Union, List, Optional
|
||||
|
||||
# JSON_SCHEMA_BUILDER is still used elsewhere,
|
||||
# but we now also need the new script-builder prompt.
|
||||
from ..prompts import GENERATE_JS_SCRIPT_PROMPT, GENERATE_SCRIPT_PROMPT
|
||||
import logging
|
||||
|
||||
from .c4a_result import (
|
||||
CompilationResult, ValidationResult, ErrorDetail, WarningDetail,
|
||||
ErrorType, Severity, Suggestion
|
||||
)
|
||||
from .c4ai_script import Compiler
|
||||
from lark.exceptions import UnexpectedToken, UnexpectedCharacters, VisitError
|
||||
from ..async_configs import LLMConfig
|
||||
from ..utils import perform_completion_with_backoff
|
||||
|
||||
|
||||
class C4ACompiler:
|
||||
"""Main compiler with result-based API"""
|
||||
|
||||
# Error code mapping
|
||||
ERROR_CODES = {
|
||||
"missing_then": "E001",
|
||||
"missing_paren": "E002",
|
||||
"missing_comma": "E003",
|
||||
"missing_endproc": "E004",
|
||||
"undefined_proc": "E005",
|
||||
"missing_backticks": "E006",
|
||||
"invalid_command": "E007",
|
||||
"syntax_error": "E999"
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def compile(cls, script: Union[str, List[str]], root: Optional[pathlib.Path] = None) -> CompilationResult:
|
||||
"""
|
||||
Compile C4A-Script to JavaScript
|
||||
|
||||
Args:
|
||||
script: C4A-Script as string or list of lines
|
||||
root: Root directory for includes
|
||||
|
||||
Returns:
|
||||
CompilationResult with success status and JS code or errors
|
||||
"""
|
||||
# Normalize input
|
||||
if isinstance(script, list):
|
||||
script_text = '\n'.join(script)
|
||||
script_lines = script
|
||||
else:
|
||||
script_text = script
|
||||
script_lines = script.split('\n')
|
||||
|
||||
try:
|
||||
# Try compilation
|
||||
compiler = Compiler(root)
|
||||
js_code = compiler.compile(script_text)
|
||||
|
||||
# Success!
|
||||
result = CompilationResult(
|
||||
success=True,
|
||||
js_code=js_code,
|
||||
metadata={
|
||||
"lineCount": len(script_lines),
|
||||
"statementCount": len(js_code)
|
||||
}
|
||||
)
|
||||
|
||||
# Add any warnings (future feature)
|
||||
# result.warnings = cls._check_warnings(script_text)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
# Convert exception to ErrorDetail
|
||||
error = cls._exception_to_error(e, script_lines)
|
||||
return CompilationResult(
|
||||
success=False,
|
||||
errors=[error],
|
||||
metadata={
|
||||
"lineCount": len(script_lines)
|
||||
}
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def validate(cls, script: Union[str, List[str]]) -> ValidationResult:
|
||||
"""
|
||||
Validate script syntax without generating code
|
||||
|
||||
Args:
|
||||
script: C4A-Script to validate
|
||||
|
||||
Returns:
|
||||
ValidationResult with validity status and any errors
|
||||
"""
|
||||
result = cls.compile(script)
|
||||
|
||||
return ValidationResult(
|
||||
valid=result.success,
|
||||
errors=result.errors,
|
||||
warnings=result.warnings
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def compile_file(cls, path: Union[str, pathlib.Path]) -> CompilationResult:
|
||||
"""
|
||||
Compile a C4A-Script file
|
||||
|
||||
Args:
|
||||
path: Path to the file
|
||||
|
||||
Returns:
|
||||
CompilationResult
|
||||
"""
|
||||
path = pathlib.Path(path)
|
||||
|
||||
if not path.exists():
|
||||
error = ErrorDetail(
|
||||
type=ErrorType.RUNTIME,
|
||||
code="E100",
|
||||
severity=Severity.ERROR,
|
||||
message=f"File not found: {path}",
|
||||
line=0,
|
||||
column=0,
|
||||
source_line=""
|
||||
)
|
||||
return CompilationResult(success=False, errors=[error])
|
||||
|
||||
try:
|
||||
script = path.read_text()
|
||||
return cls.compile(script, root=path.parent)
|
||||
except Exception as e:
|
||||
error = ErrorDetail(
|
||||
type=ErrorType.RUNTIME,
|
||||
code="E101",
|
||||
severity=Severity.ERROR,
|
||||
message=f"Error reading file: {str(e)}",
|
||||
line=0,
|
||||
column=0,
|
||||
source_line=""
|
||||
)
|
||||
return CompilationResult(success=False, errors=[error])
|
||||
|
||||
@classmethod
|
||||
def _exception_to_error(cls, exc: Exception, script_lines: List[str]) -> ErrorDetail:
|
||||
"""Convert an exception to ErrorDetail"""
|
||||
|
||||
if isinstance(exc, UnexpectedToken):
|
||||
return cls._handle_unexpected_token(exc, script_lines)
|
||||
elif isinstance(exc, UnexpectedCharacters):
|
||||
return cls._handle_unexpected_chars(exc, script_lines)
|
||||
elif isinstance(exc, ValueError):
|
||||
return cls._handle_value_error(exc, script_lines)
|
||||
else:
|
||||
# Generic error
|
||||
return ErrorDetail(
|
||||
type=ErrorType.SYNTAX,
|
||||
code=cls.ERROR_CODES["syntax_error"],
|
||||
severity=Severity.ERROR,
|
||||
message=str(exc),
|
||||
line=1,
|
||||
column=1,
|
||||
source_line=script_lines[0] if script_lines else ""
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _handle_unexpected_token(cls, exc: UnexpectedToken, script_lines: List[str]) -> ErrorDetail:
|
||||
"""Handle UnexpectedToken errors"""
|
||||
line = exc.line
|
||||
column = exc.column
|
||||
|
||||
# Get context lines
|
||||
source_line = script_lines[line - 1] if 0 < line <= len(script_lines) else ""
|
||||
line_before = script_lines[line - 2] if line > 1 and line <= len(script_lines) + 1 else None
|
||||
line_after = script_lines[line] if 0 < line < len(script_lines) else None
|
||||
|
||||
# Determine error type and suggestions
|
||||
if exc.token.type == 'CLICK' and 'THEN' in str(exc.expected):
|
||||
code = cls.ERROR_CODES["missing_then"]
|
||||
message = "Missing 'THEN' keyword after IF condition"
|
||||
suggestions = [
|
||||
Suggestion(
|
||||
"Add 'THEN' after the condition",
|
||||
source_line.replace("CLICK", "THEN CLICK") if source_line else None
|
||||
)
|
||||
]
|
||||
elif exc.token.type == '$END':
|
||||
code = cls.ERROR_CODES["missing_endproc"]
|
||||
message = "Unexpected end of script"
|
||||
suggestions = [
|
||||
Suggestion("Check for missing ENDPROC"),
|
||||
Suggestion("Ensure all procedures are properly closed")
|
||||
]
|
||||
elif 'RPAR' in str(exc.expected):
|
||||
code = cls.ERROR_CODES["missing_paren"]
|
||||
message = "Missing closing parenthesis ')'"
|
||||
suggestions = [
|
||||
Suggestion("Add closing parenthesis at the end of the condition")
|
||||
]
|
||||
elif 'COMMA' in str(exc.expected):
|
||||
code = cls.ERROR_CODES["missing_comma"]
|
||||
message = "Missing comma ',' in command"
|
||||
suggestions = [
|
||||
Suggestion("Add comma between arguments")
|
||||
]
|
||||
else:
|
||||
# Check if this might be missing backticks
|
||||
if exc.token.type == 'NAME' and 'BACKTICK_STRING' in str(exc.expected):
|
||||
code = cls.ERROR_CODES["missing_backticks"]
|
||||
message = "Selector must be wrapped in backticks"
|
||||
suggestions = [
|
||||
Suggestion(
|
||||
"Wrap the selector in backticks",
|
||||
f"`{exc.token.value}`"
|
||||
)
|
||||
]
|
||||
else:
|
||||
code = cls.ERROR_CODES["syntax_error"]
|
||||
message = f"Unexpected '{exc.token.value}'"
|
||||
if exc.expected:
|
||||
expected_list = [str(e) for e in exc.expected if not str(e).startswith('_')][:3]
|
||||
if expected_list:
|
||||
message += f". Expected: {', '.join(expected_list)}"
|
||||
suggestions = []
|
||||
|
||||
return ErrorDetail(
|
||||
type=ErrorType.SYNTAX,
|
||||
code=code,
|
||||
severity=Severity.ERROR,
|
||||
message=message,
|
||||
line=line,
|
||||
column=column,
|
||||
source_line=source_line,
|
||||
line_before=line_before,
|
||||
line_after=line_after,
|
||||
suggestions=suggestions
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _handle_unexpected_chars(cls, exc: UnexpectedCharacters, script_lines: List[str]) -> ErrorDetail:
|
||||
"""Handle UnexpectedCharacters errors"""
|
||||
line = exc.line
|
||||
column = exc.column
|
||||
|
||||
source_line = script_lines[line - 1] if 0 < line <= len(script_lines) else ""
|
||||
|
||||
# Check for missing backticks
|
||||
if "CLICK" in source_line and column > source_line.find("CLICK"):
|
||||
code = cls.ERROR_CODES["missing_backticks"]
|
||||
message = "Selector must be wrapped in backticks"
|
||||
suggestions = [
|
||||
Suggestion(
|
||||
"Wrap the selector in backticks",
|
||||
re.sub(r'CLICK\s+([^\s]+)', r'CLICK `\1`', source_line)
|
||||
)
|
||||
]
|
||||
else:
|
||||
code = cls.ERROR_CODES["syntax_error"]
|
||||
message = f"Invalid character at position {column}"
|
||||
suggestions = []
|
||||
|
||||
return ErrorDetail(
|
||||
type=ErrorType.SYNTAX,
|
||||
code=code,
|
||||
severity=Severity.ERROR,
|
||||
message=message,
|
||||
line=line,
|
||||
column=column,
|
||||
source_line=source_line,
|
||||
suggestions=suggestions
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def _handle_value_error(cls, exc: ValueError, script_lines: List[str]) -> ErrorDetail:
|
||||
"""Handle ValueError (runtime errors)"""
|
||||
message = str(exc)
|
||||
|
||||
# Check for undefined procedure
|
||||
if "Unknown procedure" in message:
|
||||
proc_match = re.search(r"'([^']+)'", message)
|
||||
if proc_match:
|
||||
proc_name = proc_match.group(1)
|
||||
|
||||
# Find the line with the procedure call
|
||||
for i, line in enumerate(script_lines):
|
||||
if proc_name in line and not line.strip().startswith('PROC'):
|
||||
return ErrorDetail(
|
||||
type=ErrorType.RUNTIME,
|
||||
code=cls.ERROR_CODES["undefined_proc"],
|
||||
severity=Severity.ERROR,
|
||||
message=f"Undefined procedure '{proc_name}'",
|
||||
line=i + 1,
|
||||
column=line.find(proc_name) + 1,
|
||||
source_line=line,
|
||||
suggestions=[
|
||||
Suggestion(
|
||||
f"Define the procedure before using it",
|
||||
f"PROC {proc_name}\n # commands here\nENDPROC"
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
# Generic runtime error
|
||||
return ErrorDetail(
|
||||
type=ErrorType.RUNTIME,
|
||||
code="E999",
|
||||
severity=Severity.ERROR,
|
||||
message=message,
|
||||
line=1,
|
||||
column=1,
|
||||
source_line=script_lines[0] if script_lines else ""
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def generate_script(
|
||||
html: str,
|
||||
query: str | None = None,
|
||||
mode: str = "c4a",
|
||||
llm_config: LLMConfig | None = None,
|
||||
**completion_kwargs,
|
||||
) -> str:
|
||||
"""
|
||||
One-shot helper that calls the LLM exactly once to convert a
|
||||
natural-language goal + HTML snippet into either:
|
||||
|
||||
1. raw JavaScript (`mode="js"`)
|
||||
2. Crawl4ai DSL (`mode="c4a"`)
|
||||
|
||||
The returned string is guaranteed to be free of markdown wrappers
|
||||
or explanatory text, ready for direct execution.
|
||||
"""
|
||||
if llm_config is None:
|
||||
llm_config = LLMConfig() # falls back to env vars / defaults
|
||||
|
||||
# Build the user chunk
|
||||
user_prompt = "\n".join(
|
||||
[
|
||||
"## GOAL",
|
||||
"<<goael>>",
|
||||
(query or "Prepare the page for crawling."),
|
||||
"<</goal>>",
|
||||
"",
|
||||
"## HTML",
|
||||
"<<html>>",
|
||||
html[:100000], # guardrail against token blast
|
||||
"<</html>>",
|
||||
"",
|
||||
"## MODE",
|
||||
mode,
|
||||
]
|
||||
)
|
||||
|
||||
# Call the LLM with retry/back-off logic
|
||||
full_prompt = f"{GENERATE_SCRIPT_PROMPT}\n\n{user_prompt}" if mode == "c4a" else f"{GENERATE_JS_SCRIPT_PROMPT}\n\n{user_prompt}"
|
||||
|
||||
response = perform_completion_with_backoff(
|
||||
provider=llm_config.provider,
|
||||
prompt_with_variables=full_prompt,
|
||||
api_token=llm_config.api_token,
|
||||
json_response=False,
|
||||
base_url=getattr(llm_config, 'base_url', None),
|
||||
**completion_kwargs,
|
||||
)
|
||||
|
||||
# Extract content from the response
|
||||
raw_response = response.choices[0].message.content.strip()
|
||||
|
||||
# Strip accidental markdown fences (```js … ```)
|
||||
clean = re.sub(r"^```(?:[a-zA-Z0-9_-]+)?\s*|```$", "", raw_response, flags=re.MULTILINE).strip()
|
||||
|
||||
if not clean:
|
||||
raise RuntimeError("LLM returned empty script.")
|
||||
|
||||
return clean
|
||||
|
||||
|
||||
# Convenience functions for direct use
|
||||
def compile(script: Union[str, List[str]], root: Optional[pathlib.Path] = None) -> CompilationResult:
|
||||
"""Compile C4A-Script to JavaScript"""
|
||||
return C4ACompiler.compile(script, root)
|
||||
|
||||
|
||||
def validate(script: Union[str, List[str]]) -> ValidationResult:
|
||||
"""Validate C4A-Script syntax"""
|
||||
return C4ACompiler.validate(script)
|
||||
|
||||
|
||||
def compile_file(path: Union[str, pathlib.Path]) -> CompilationResult:
|
||||
"""Compile C4A-Script file"""
|
||||
return C4ACompiler.compile_file(path)
|
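A small end-to-end sketch of the result-based API; the commands follow the grammar defined in c4ai_script.py (GO, WAIT, CLICK with backtick selectors), but treat the script itself as illustrative:

```python
from crawl4ai.script import compile, validate

script = """
GO https://example.com
WAIT `#content` 5
CLICK `button.load-more`
"""

check = validate(script)
if not check.valid:
    # No exceptions anywhere: errors arrive as structured ErrorDetail objects
    print(check.first_error.formatted_message)
else:
    result = compile(script)
    print(result)                # e.g. "✓ Compilation successful - 3 statements generated"
    for stmt in result.js_code:  # js_code is a list of JS statements for CrawlerRunConfig(js_code=...)
        print(stmt)
```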
||||
219
crawl4ai/script/c4a_result.py
Normal file
219
crawl4ai/script/c4a_result.py
Normal file
@@ -0,0 +1,219 @@
|
||||
"""
|
||||
Result classes for C4A-Script compilation
|
||||
Clean API design with no exceptions
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from typing import List, Dict, Any, Optional
|
||||
import json
|
||||
|
||||
|
||||
class ErrorType(Enum):
|
||||
SYNTAX = "syntax"
|
||||
SEMANTIC = "semantic"
|
||||
RUNTIME = "runtime"
|
||||
|
||||
|
||||
class Severity(Enum):
|
||||
ERROR = "error"
|
||||
WARNING = "warning"
|
||||
INFO = "info"
|
||||
|
||||
|
||||
@dataclass
|
||||
class Suggestion:
|
||||
"""A suggestion for fixing an error"""
|
||||
message: str
|
||||
fix: Optional[str] = None
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"message": self.message,
|
||||
"fix": self.fix
|
||||
}
|
||||
|
||||
|
||||
@dataclass
|
||||
class ErrorDetail:
|
||||
"""Detailed information about a compilation error"""
|
||||
# Core info
|
||||
type: ErrorType
|
||||
code: str # E001, E002, etc.
|
||||
severity: Severity
|
||||
message: str
|
||||
|
||||
# Location
|
||||
line: int
|
||||
column: int
|
||||
|
||||
# Context
|
||||
source_line: str
|
||||
|
||||
# Optional fields with defaults
|
||||
end_line: Optional[int] = None
|
||||
end_column: Optional[int] = None
|
||||
line_before: Optional[str] = None
|
||||
line_after: Optional[str] = None
|
||||
|
||||
# Help
|
||||
suggestions: List[Suggestion] = field(default_factory=list)
|
||||
documentation_url: Optional[str] = None
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary for JSON serialization"""
|
||||
return {
|
||||
"type": self.type.value,
|
||||
"code": self.code,
|
||||
"severity": self.severity.value,
|
||||
"message": self.message,
|
||||
"location": {
|
||||
"line": self.line,
|
||||
"column": self.column,
|
||||
"endLine": self.end_line,
|
||||
"endColumn": self.end_column
|
||||
},
|
||||
"context": {
|
||||
"sourceLine": self.source_line,
|
||||
"lineBefore": self.line_before,
|
||||
"lineAfter": self.line_after,
|
||||
"marker": {
|
||||
"start": self.column - 1,
|
||||
"length": (self.end_column - self.column) if self.end_column else 1
|
||||
}
|
||||
},
|
||||
"suggestions": [s.to_dict() for s in self.suggestions],
|
||||
"documentationUrl": self.documentation_url
|
||||
}
|
||||
|
||||
def to_json(self) -> str:
|
||||
"""Convert to JSON string"""
|
||||
return json.dumps(self.to_dict(), indent=2)
|
||||
|
||||
@property
|
||||
def formatted_message(self) -> str:
|
||||
"""Returns the nice text format for terminals"""
|
||||
lines = []
|
||||
lines.append(f"\n{'='*60}")
|
||||
lines.append(f"{self.type.value.title()} Error [{self.code}]")
|
||||
lines.append(f"{'='*60}")
|
||||
lines.append(f"Location: Line {self.line}, Column {self.column}")
|
||||
lines.append(f"Error: {self.message}")
|
||||
|
||||
if self.source_line:
|
||||
marker = " " * (self.column - 1) + "^"
|
||||
if self.end_column:
|
||||
marker += "~" * (self.end_column - self.column - 1)
|
||||
lines.append(f"\nCode:")
|
||||
if self.line_before:
|
||||
lines.append(f" {self.line - 1: >3} | {self.line_before}")
|
||||
lines.append(f" {self.line: >3} | {self.source_line}")
|
||||
lines.append(f" | {marker}")
|
||||
if self.line_after:
|
||||
lines.append(f" {self.line + 1: >3} | {self.line_after}")
|
||||
|
||||
if self.suggestions:
|
||||
lines.append("\nSuggestions:")
|
||||
for i, suggestion in enumerate(self.suggestions, 1):
|
||||
lines.append(f" {i}. {suggestion.message}")
|
||||
if suggestion.fix:
|
||||
lines.append(f" Fix: {suggestion.fix}")
|
||||
|
||||
lines.append("="*60)
|
||||
return "\n".join(lines)
|
||||
|
||||
@property
|
||||
def simple_message(self) -> str:
|
||||
"""Returns just the error message without formatting"""
|
||||
return f"Line {self.line}: {self.message}"
|
||||
|
||||
|
||||
@dataclass
|
||||
class WarningDetail:
|
||||
"""Information about a compilation warning"""
|
||||
code: str
|
||||
message: str
|
||||
line: int
|
||||
column: int
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"code": self.code,
|
||||
"message": self.message,
|
||||
"line": self.line,
|
||||
"column": self.column
|
||||
}
|
||||
|
||||
|
||||
@dataclass
|
||||
class CompilationResult:
|
||||
"""Result of C4A-Script compilation"""
|
||||
success: bool
|
||||
js_code: Optional[List[str]] = None
|
||||
errors: List[ErrorDetail] = field(default_factory=list)
|
||||
warnings: List[WarningDetail] = field(default_factory=list)
|
||||
metadata: Dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert to dictionary for JSON serialization"""
|
||||
return {
|
||||
"success": self.success,
|
||||
"jsCode": self.js_code,
|
||||
"errors": [e.to_dict() for e in self.errors],
|
||||
"warnings": [w.to_dict() for w in self.warnings],
|
||||
"metadata": self.metadata
|
||||
}
|
||||
|
||||
def to_json(self) -> str:
|
||||
"""Convert to JSON string"""
|
||||
return json.dumps(self.to_dict(), indent=2)
|
||||
|
||||
@property
|
||||
def has_errors(self) -> bool:
|
||||
"""Check if there are any errors"""
|
||||
return len(self.errors) > 0
|
||||
|
||||
@property
|
||||
def has_warnings(self) -> bool:
|
||||
"""Check if there are any warnings"""
|
||||
return len(self.warnings) > 0
|
||||
|
||||
@property
|
||||
def first_error(self) -> Optional[ErrorDetail]:
|
||||
"""Get the first error if any"""
|
||||
return self.errors[0] if self.errors else None
|
||||
|
||||
def __str__(self) -> str:
|
||||
"""String representation for debugging"""
|
||||
if self.success:
|
||||
msg = f"✓ Compilation successful"
|
||||
if self.js_code:
|
||||
msg += f" - {len(self.js_code)} statements generated"
|
||||
if self.warnings:
|
||||
msg += f" ({len(self.warnings)} warnings)"
|
||||
return msg
|
||||
else:
|
||||
return f"✗ Compilation failed - {len(self.errors)} error(s)"
|
||||
|
||||
|
||||
@dataclass
|
||||
class ValidationResult:
|
||||
"""Result of script validation"""
|
||||
valid: bool
|
||||
errors: List[ErrorDetail] = field(default_factory=list)
|
||||
warnings: List[WarningDetail] = field(default_factory=list)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"valid": self.valid,
|
||||
"errors": [e.to_dict() for e in self.errors],
|
||||
"warnings": [w.to_dict() for w in self.warnings]
|
||||
}
|
||||
|
||||
def to_json(self) -> str:
|
||||
return json.dumps(self.to_dict(), indent=2)
|
||||
|
||||
@property
|
||||
def first_error(self) -> Optional[ErrorDetail]:
|
||||
return self.errors[0] if self.errors else None
|
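For reference, a hand-built ErrorDetail shows the three output surfaces these result objects expose; all values below are invented:

```python
from crawl4ai.script.c4a_result import ErrorDetail, ErrorType, Severity, Suggestion

err = ErrorDetail(
    type=ErrorType.SYNTAX,
    code="E006",
    severity=Severity.ERROR,
    message="Selector must be wrapped in backticks",
    line=2,
    column=7,
    source_line="CLICK button.submit",
    suggestions=[Suggestion("Wrap the selector in backticks", "CLICK `button.submit`")],
)

print(err.simple_message)     # "Line 2: Selector must be wrapped in backticks"
print(err.formatted_message)  # boxed terminal report with a ^ marker under column 7
print(err.to_json())          # structured payload for editors or LLM feedback loops
```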
||||
690
crawl4ai/script/c4ai_script.py
Normal file
690
crawl4ai/script/c4ai_script.py
Normal file
@@ -0,0 +1,690 @@
|
||||
"""
|
||||
2025-06-03
|
||||
By Unclecode:
|
||||
C4A-Script Language Documentation
|
||||
Feeds Crawl4AI via CrawlerRunConfig(js_code=[ ... ]) – no core modifications.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
import pathlib, re, sys, textwrap
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Dict, List, Union
|
||||
|
||||
from lark import Lark, Transformer, v_args
|
||||
from lark.exceptions import UnexpectedToken, UnexpectedCharacters, VisitError
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# Custom Error Classes
|
||||
# --------------------------------------------------------------------------- #
|
||||
class C4AScriptError(Exception):
|
||||
"""Custom error class for C4A-Script compilation errors"""
|
||||
|
||||
def __init__(self, message: str, line: int = None, column: int = None,
|
||||
error_type: str = "Syntax Error", details: str = None):
|
||||
self.message = message
|
||||
self.line = line
|
||||
self.column = column
|
||||
self.error_type = error_type
|
||||
self.details = details
|
||||
super().__init__(self._format_message())
|
||||
|
||||
def _format_message(self) -> str:
|
||||
"""Format a clear error message"""
|
||||
lines = [f"\n{'='*60}"]
|
||||
lines.append(f"C4A-Script {self.error_type}")
|
||||
lines.append(f"{'='*60}")
|
||||
|
||||
if self.line:
|
||||
lines.append(f"Location: Line {self.line}" + (f", Column {self.column}" if self.column else ""))
|
||||
|
||||
lines.append(f"Error: {self.message}")
|
||||
|
||||
if self.details:
|
||||
lines.append(f"\nDetails: {self.details}")
|
||||
|
||||
lines.append("="*60)
|
||||
return "\n".join(lines)
|
||||
|
||||
@classmethod
|
||||
def from_exception(cls, exc: Exception, script: Union[str, List[str]]) -> 'C4AScriptError':
|
||||
"""Create C4AScriptError from another exception"""
|
||||
script_text = script if isinstance(script, str) else '\n'.join(script)
|
||||
script_lines = script_text.split('\n')
|
||||
|
||||
if isinstance(exc, UnexpectedToken):
|
||||
# Extract line and column from UnexpectedToken
|
||||
line = exc.line
|
||||
column = exc.column
|
||||
|
||||
# Get the problematic line
|
||||
if 0 < line <= len(script_lines):
|
||||
problem_line = script_lines[line - 1]
|
||||
marker = " " * (column - 1) + "^"
|
||||
|
||||
details = f"\nCode:\n {problem_line}\n {marker}\n"
|
||||
|
||||
# Improve error message based on context
|
||||
if exc.token.type == 'CLICK' and 'THEN' in str(exc.expected):
|
||||
message = "Missing 'THEN' keyword after IF condition"
|
||||
elif exc.token.type == '$END':
|
||||
message = "Unexpected end of script. Check for missing ENDPROC or incomplete commands"
|
||||
elif 'RPAR' in str(exc.expected):
|
||||
message = "Missing closing parenthesis ')'"
|
||||
elif 'COMMA' in str(exc.expected):
|
||||
message = "Missing comma ',' in command"
|
||||
else:
|
||||
message = f"Unexpected '{exc.token}'"
|
||||
if exc.expected:
|
||||
expected_list = [str(e) for e in exc.expected if not e.startswith('_')]
|
||||
if expected_list:
|
||||
message += f". Expected: {', '.join(expected_list[:3])}"
|
||||
|
||||
details += f"Token: {exc.token.type} ('{exc.token.value}')"
|
||||
else:
|
||||
message = str(exc)
|
||||
details = None
|
||||
|
||||
return cls(message, line, column, "Syntax Error", details)
|
||||
|
||||
elif isinstance(exc, UnexpectedCharacters):
|
||||
# Extract line and column
|
||||
line = exc.line
|
||||
column = exc.column
|
||||
|
||||
if 0 < line <= len(script_lines):
|
||||
problem_line = script_lines[line - 1]
|
||||
marker = " " * (column - 1) + "^"
|
||||
|
||||
details = f"\nCode:\n {problem_line}\n {marker}\n"
|
||||
message = f"Invalid character or unexpected text at position {column}"
|
||||
else:
|
||||
message = str(exc)
|
||||
details = None
|
||||
|
||||
return cls(message, line, column, "Syntax Error", details)
|
||||
|
||||
elif isinstance(exc, ValueError):
|
||||
# Handle runtime errors like undefined procedures
|
||||
message = str(exc)
|
||||
|
||||
# Try to find which line caused the error
|
||||
if "Unknown procedure" in message:
|
||||
proc_name = re.search(r"'([^']+)'", message)
|
||||
if proc_name:
|
||||
proc_name = proc_name.group(1)
|
||||
for i, line in enumerate(script_lines, 1):
|
||||
if proc_name in line and not line.strip().startswith('PROC'):
|
||||
details = f"\nCode:\n {line.strip()}\n\nMake sure the procedure '{proc_name}' is defined with PROC...ENDPROC"
|
||||
return cls(f"Undefined procedure '{proc_name}'", i, None, "Runtime Error", details)
|
||||
|
||||
return cls(message, None, None, "Runtime Error", None)
|
||||
|
||||
else:
|
||||
# Generic error
|
||||
return cls(str(exc), None, None, "Compilation Error", None)
|
||||
|
||||
|
||||
# --------------------------------------------------------------------------- #
|
||||
# 1. Grammar
|
||||
# --------------------------------------------------------------------------- #
|
||||
GRAMMAR = r"""
|
||||
start : line*
|
||||
?line : command | proc_def | include | comment
|
||||
|
||||
command : wait | nav | click_cmd | double_click | right_click | move | drag | scroll
|
||||
| type | clear | set_input | press | key_down | key_up
|
||||
| eval_cmd | setvar | proc_call | if_cmd | repeat_cmd
|
||||
|
||||
wait : "WAIT" (ESCAPED_STRING|BACKTICK_STRING|NUMBER) NUMBER? -> wait_cmd
|
||||
nav : "GO" URL -> go
|
||||
| "RELOAD" -> reload
|
||||
| "BACK" -> back
|
||||
| "FORWARD" -> forward
|
||||
|
||||
click_cmd : "CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> click
|
||||
double_click : "DOUBLE_CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> double_click
|
||||
right_click : "RIGHT_CLICK" (BACKTICK_STRING|NUMBER NUMBER) -> right_click
|
||||
|
||||
move : "MOVE" coords -> move
|
||||
drag : "DRAG" coords coords -> drag
|
||||
scroll : "SCROLL" DIR NUMBER? -> scroll
|
||||
|
||||
type : "TYPE" (ESCAPED_STRING | NAME) -> type
|
||||
clear : "CLEAR" BACKTICK_STRING -> clear
|
||||
set_input : "SET" BACKTICK_STRING (ESCAPED_STRING | BACKTICK_STRING | NAME) -> set_input
|
||||
press : "PRESS" WORD -> press
|
||||
key_down : "KEY_DOWN" WORD -> key_down
|
||||
key_up : "KEY_UP" WORD -> key_up
|
||||
|
||||
eval_cmd : "EVAL" BACKTICK_STRING -> eval_cmd
|
||||
setvar : "SETVAR" NAME "=" value -> setvar
|
||||
proc_call : NAME -> proc_call
|
||||
proc_def : "PROC" NAME line* "ENDPROC" -> proc_def
|
||||
include : "USE" ESCAPED_STRING -> include
|
||||
comment : /#.*/ -> comment
|
||||
|
||||
if_cmd : "IF" "(" condition ")" "THEN" command ("ELSE" command)? -> if_cmd
|
||||
repeat_cmd : "REPEAT" "(" command "," repeat_count ")" -> repeat_cmd
|
||||
|
||||
condition : not_cond | exists_cond | js_cond
|
||||
not_cond : "NOT" condition -> not_cond
|
||||
exists_cond : "EXISTS" BACKTICK_STRING -> exists_cond
|
||||
js_cond : BACKTICK_STRING -> js_cond
|
||||
|
||||
repeat_count : NUMBER | BACKTICK_STRING
|
||||
|
||||
coords : NUMBER NUMBER
|
||||
value : ESCAPED_STRING | BACKTICK_STRING | NUMBER
|
||||
DIR : /(UP|DOWN|LEFT|RIGHT)/i
|
||||
REST : /[^\n]+/
|
||||
|
||||
URL : /(http|https):\/\/[^\s]+/
|
||||
NAME : /\$?[A-Za-z_][A-Za-z0-9_]*/
|
||||
WORD : /[A-Za-z0-9+]+/
|
||||
BACKTICK_STRING : /`[^`]*`/
|
||||
|
||||
%import common.NUMBER
|
||||
%import common.ESCAPED_STRING
|
||||
%import common.WS_INLINE
|
||||
%import common.NEWLINE
|
||||
%ignore WS_INLINE
|
||||
%ignore NEWLINE
|
||||
"""

# --------------------------------------------------------------------------- #
# 2. IR dataclasses
# --------------------------------------------------------------------------- #
@dataclass
class Cmd:
    op: str
    args: List[Any]

@dataclass
class Proc:
    name: str
    body: List[Cmd]

# --------------------------------------------------------------------------- #
# 3. AST → IR
# --------------------------------------------------------------------------- #
@v_args(inline=True)
class ASTBuilder(Transformer):
    # helpers
    def _strip(self, s):
        if s.startswith('"') and s.endswith('"'):
            return s[1:-1]
        elif s.startswith('`') and s.endswith('`'):
            return s[1:-1]
        return s

    def start(self, *i): return list(i)
    def line(self, i): return i
    def command(self, i): return i

    # WAIT
    def wait_cmd(self, rest, timeout=None):
        rest_str = str(rest)
        # Check if it's a number (including floats)
        try:
            num_val = float(rest_str)
            payload = (num_val, "seconds")
        except ValueError:
            if rest_str.startswith('"') and rest_str.endswith('"'):
                payload = (self._strip(rest_str), "text")
            elif rest_str.startswith('`') and rest_str.endswith('`'):
                payload = (self._strip(rest_str), "selector")
            else:
                payload = (rest_str, "selector")
        return Cmd("WAIT", [payload, int(timeout) if timeout else None])

    # NAV
    def go(self, u): return Cmd("GO", [str(u)])
    def reload(self): return Cmd("RELOAD", [])
    def back(self): return Cmd("BACK", [])
    def forward(self): return Cmd("FORWARD", [])

    # CLICK, DOUBLE_CLICK, RIGHT_CLICK
    def click(self, *args):
        return self._handle_click("CLICK", args)

    def double_click(self, *args):
        return self._handle_click("DBLCLICK", args)

    def right_click(self, *args):
        return self._handle_click("RIGHTCLICK", args)

    def _handle_click(self, op, args):
        if len(args) == 1:
            # Single argument - backtick string
            target = self._strip(str(args[0]))
            return Cmd(op, [("selector", target)])
        else:
            # Two arguments - coordinates
            x, y = args
            return Cmd(op, [("coords", int(x), int(y))])

    # MOVE / DRAG / SCROLL
    def coords(self, x, y): return ("coords", int(x), int(y))
    def move(self, c): return Cmd("MOVE", [c])
    def drag(self, c1, c2): return Cmd("DRAG", [c1, c2])
    def scroll(self, dir_tok, amt=None):
        return Cmd("SCROLL", [dir_tok.upper(), int(amt) if amt else 500])

    # KEYS
    def type(self, tok): return Cmd("TYPE", [self._strip(str(tok))])
    def clear(self, sel): return Cmd("CLEAR", [self._strip(str(sel))])
    def set_input(self, sel, val): return Cmd("SET", [self._strip(str(sel)), self._strip(str(val))])
    def press(self, w): return Cmd("PRESS", [str(w)])
    def key_down(self, w): return Cmd("KEYDOWN", [str(w)])
    def key_up(self, w): return Cmd("KEYUP", [str(w)])

    # FLOW
    def eval_cmd(self, txt): return Cmd("EVAL", [self._strip(str(txt))])
    def setvar(self, n, v):
        # v might be a Token or a Tree, extract value properly
        if hasattr(v, 'value'):
            value = v.value
        elif hasattr(v, 'children') and len(v.children) > 0:
            value = v.children[0].value
        else:
            value = str(v)
        return Cmd("SETVAR", [str(n), self._strip(value)])
    def proc_call(self, n): return Cmd("CALL", [str(n)])
    def proc_def(self, n, *body): return Proc(str(n), [b for b in body if isinstance(b, Cmd)])
    def include(self, p): return Cmd("INCLUDE", [self._strip(p)])
    def comment(self, *_): return Cmd("NOP", [])

    # IF-THEN-ELSE and EXISTS
    def if_cmd(self, condition, then_cmd, else_cmd=None):
        return Cmd("IF", [condition, then_cmd, else_cmd])

    def condition(self, cond):
        return cond

    def not_cond(self, cond):
        return ("NOT", cond)

    def exists_cond(self, selector):
        return ("EXISTS", self._strip(str(selector)))

    def js_cond(self, expr):
        return ("JS", self._strip(str(expr)))

    # REPEAT
    def repeat_cmd(self, cmd, count):
        return Cmd("REPEAT", [cmd, count])

    def repeat_count(self, value):
        return str(value)

# --------------------------------------------------------------------------- #
# 4. Compiler
# --------------------------------------------------------------------------- #
class Compiler:
    def __init__(self, root: pathlib.Path | None = None):
        self.parser = Lark(GRAMMAR, start="start", parser="lalr")
        self.root = pathlib.Path(root or ".").resolve()
        self.vars: Dict[str, Any] = {}
        self.procs: Dict[str, Proc] = {}

    def compile(self, text: Union[str, List[str]]) -> List[str]:
        # Handle list input by joining with newlines
        if isinstance(text, list):
            text = '\n'.join(text)

        ir = self._parse_with_includes(text)
        ir = self._collect_procs(ir)
        ir = self._inline_calls(ir)
        ir = self._apply_set_vars(ir)
        return [self._emit_js(c) for c in ir if isinstance(c, Cmd) and c.op != "NOP"]

    # passes
    def _parse_with_includes(self, txt, seen=None):
        seen = seen or set()
        cmds = ASTBuilder().transform(self.parser.parse(txt))
        out = []
        for c in cmds:
            if isinstance(c, Cmd) and c.op == "INCLUDE":
                p = (self.root / c.args[0]).resolve()
                if p in seen:
                    raise ValueError(f"Circular include {p}")
                seen.add(p)
                out += self._parse_with_includes(p.read_text(), seen)
            else:
                out.append(c)
        return out

    def _collect_procs(self, ir):
        out = []
        for i in ir:
            if isinstance(i, Proc):
                self.procs[i.name] = i
            else:
                out.append(i)
        return out

    def _inline_calls(self, ir):
        out = []
        for c in ir:
            if isinstance(c, Cmd) and c.op == "CALL":
                if c.args[0] not in self.procs:
                    raise ValueError(f"Unknown procedure {c.args[0]!r}")
                out += self._inline_calls(self.procs[c.args[0]].body)
            else:
                out.append(c)
        return out

    def _apply_set_vars(self, ir):
        def sub(s):
            return re.sub(r"\$(\w+)", lambda m: str(self.vars.get(m.group(1), m.group(0))), s) if isinstance(s, str) else s
        out = []
        for c in ir:
            if isinstance(c, Cmd):
                if c.op == "SETVAR":
                    # Store variable
                    self.vars[c.args[0].lstrip('$')] = c.args[1]
                else:
                    # Apply variable substitution to commands that use them
                    if c.op in ("TYPE", "EVAL", "SET"):
                        c.args = [sub(a) for a in c.args]
                    out.append(c)
        return out
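
    # Illustrative only: after the passes above, the script
    #
    #     SETVAR user = "tom"
    #     TYPE $user
    #
    # stores {"user": "tom"} in self.vars and rewrites the TYPE argument to
    # "tom"; the SETVAR command itself is consumed and emits no JavaScript.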

    # JS emitter
    def _emit_js(self, cmd: Cmd) -> str:
        op, a = cmd.op, cmd.args
        if op == "GO": return f"window.location.href = '{a[0]}';"
        if op == "RELOAD": return "window.location.reload();"
        if op == "BACK": return "window.history.back();"
        if op == "FORWARD": return "window.history.forward();"

        if op == "WAIT":
            arg, kind = a[0]
            timeout = a[1] or 10
            if kind == "seconds":
                return f"await new Promise(r=>setTimeout(r,{arg}*1000));"
            if kind == "selector":
                sel = arg.replace("\\", "\\\\").replace("'", "\\'")
                return textwrap.dedent(f"""
                    await new Promise((res,rej)=>{{
                        const max = {timeout*1000}, t0 = performance.now();
                        const id = setInterval(()=>{{
                            if(document.querySelector('{sel}')){{clearInterval(id);res();}}
                            else if(performance.now()-t0>max){{clearInterval(id);rej('WAIT selector timeout');}}
                        }},100);
                    }});
                """).strip()
            if kind == "text":
                txt = arg.replace('`', '\\`')
                return textwrap.dedent(f"""
                    await new Promise((res,rej)=>{{
                        const max={timeout*1000},t0=performance.now();
                        const id=setInterval(()=>{{
                            if(document.body.innerText.includes(`{txt}`)){{clearInterval(id);res();}}
                            else if(performance.now()-t0>max){{clearInterval(id);rej('WAIT text timeout');}}
                        }},100);
                    }});
                """).strip()

        # click-style helpers
        def _js_click(sel, evt="click", button=0, detail=1):
            sel = sel.replace("'", "\\'")
            return textwrap.dedent(f"""
                (()=>{{
                    const el=document.querySelector('{sel}');
                    if(el){{
                        el.focus&&el.focus();
                        el.dispatchEvent(new MouseEvent('{evt}',{{bubbles:true,button:{button},detail:{detail}}}));
                    }}
                }})();
            """).strip()

        def _js_click_xy(x, y, evt="click", button=0, detail=1):
            return textwrap.dedent(f"""
                (()=>{{
                    const el=document.elementFromPoint({x},{y});
                    if(el){{
                        el.focus&&el.focus();
                        el.dispatchEvent(new MouseEvent('{evt}',{{bubbles:true,button:{button},detail:{detail}}}));
                    }}
                }})();
            """).strip()

        if op in ("CLICK", "DBLCLICK", "RIGHTCLICK"):
            evt = {"CLICK": "click", "DBLCLICK": "dblclick", "RIGHTCLICK": "contextmenu"}[op]
            btn = 2 if op == "RIGHTCLICK" else 0
            det = 2 if op == "DBLCLICK" else 1
            kind, *rest = a[0]
            # forward event/button/detail for both selector and coordinate targets
            return _js_click_xy(*rest, evt, btn, det) if kind == "coords" else _js_click(rest[0], evt, btn, det)

        if op == "MOVE":
            _, x, y = a[0]
            return textwrap.dedent(f"""
                document.dispatchEvent(new MouseEvent('mousemove',{{clientX:{x},clientY:{y},bubbles:true}}));
            """).strip()

        if op == "DRAG":
            (_, x1, y1), (_, x2, y2) = a
            return textwrap.dedent(f"""
                (()=>{{
                    const s=document.elementFromPoint({x1},{y1});
                    if(!s) return;
                    s.dispatchEvent(new MouseEvent('mousedown',{{bubbles:true,clientX:{x1},clientY:{y1}}}));
                    document.dispatchEvent(new MouseEvent('mousemove',{{bubbles:true,clientX:{x2},clientY:{y2}}}));
                    document.dispatchEvent(new MouseEvent('mouseup', {{bubbles:true,clientX:{x2},clientY:{y2}}}));
                }})();
            """).strip()

        if op == "SCROLL":
            dir_, amt = a
            dx, dy = {"UP": (0, -amt), "DOWN": (0, amt), "LEFT": (-amt, 0), "RIGHT": (amt, 0)}[dir_]
            return f"window.scrollBy({dx},{dy});"

        if op == "TYPE":
            txt = a[0].replace("'", "\\'")
            return textwrap.dedent(f"""
                (()=>{{
                    const el=document.activeElement;
                    if(el){{
                        el.value += '{txt}';
                        el.dispatchEvent(new Event('input',{{bubbles:true}}));
                    }}
                }})();
            """).strip()

        if op == "CLEAR":
            sel = a[0].replace("'", "\\'")
            return textwrap.dedent(f"""
                (()=>{{
                    const el=document.querySelector('{sel}');
                    if(el && 'value' in el){{
                        el.value = '';
                        el.dispatchEvent(new Event('input',{{bubbles:true}}));
                        el.dispatchEvent(new Event('change',{{bubbles:true}}));
                    }}
                }})();
            """).strip()

        if op == "SET" and len(a) == 2:
            # This is SET for input fields (SET `#field` "value")
            sel = a[0].replace("'", "\\'")
            val = a[1].replace("'", "\\'")
            return textwrap.dedent(f"""
                (()=>{{
                    const el=document.querySelector('{sel}');
                    if(el && 'value' in el){{
                        el.value = '';
                        el.focus&&el.focus();
                        el.value = '{val}';
                        el.dispatchEvent(new Event('input',{{bubbles:true}}));
                        el.dispatchEvent(new Event('change',{{bubbles:true}}));
                    }}
                }})();
            """).strip()

        if op in ("PRESS", "KEYDOWN", "KEYUP"):
            key = a[0]
            evs = {"PRESS": ("keydown", "keyup"), "KEYDOWN": ("keydown",), "KEYUP": ("keyup",)}[op]
            return ";".join([f"document.dispatchEvent(new KeyboardEvent('{e}',{{key:'{key}',bubbles:true}}))" for e in evs]) + ";"

        if op == "EVAL":
            return textwrap.dedent(f"""
                (()=>{{
                    try {{
                        {a[0]};
                    }} catch (e) {{
                        console.error('C4A-Script EVAL error:', e);
                    }}
                }})();
            """).strip()

        if op == "IF":
            condition, then_cmd, else_cmd = a

            # Generate condition JavaScript
            js_condition = self._emit_condition(condition)

            # Generate commands - handle both regular commands and procedure calls
            then_js = self._handle_cmd_or_proc(then_cmd)
            else_js = self._handle_cmd_or_proc(else_cmd) if else_cmd else ""

            if else_cmd:
                return textwrap.dedent(f"""
                    if ({js_condition}) {{
                        {then_js}
                    }} else {{
                        {else_js}
                    }}
                """).strip()
            else:
                return textwrap.dedent(f"""
                    if ({js_condition}) {{
                        {then_js}
                    }}
                """).strip()

        if op == "REPEAT":
            cmd, count = a

            # Handle the count - could be number or JS expression
            if count.isdigit():
                # Simple number
                repeat_js = self._handle_cmd_or_proc(cmd)
                return textwrap.dedent(f"""
                    for (let _i = 0; _i < {count}; _i++) {{
                        {repeat_js}
                    }}
                """).strip()
            else:
                # JS expression (from backticks)
                count_expr = count[1:-1] if count.startswith('`') and count.endswith('`') else count
                repeat_js = self._handle_cmd_or_proc(cmd)
                return textwrap.dedent(f"""
                    (()=>{{
                        const _count = {count_expr};
                        if (typeof _count === 'number') {{
                            for (let _i = 0; _i < _count; _i++) {{
                                {repeat_js}
                            }}
                        }} else if (_count) {{
                            {repeat_js}
                        }}
                    }})();
                """).strip()

        raise ValueError(f"Unhandled op {op}")

    def _emit_condition(self, condition):
        """Convert a condition tuple to JavaScript"""
        cond_type = condition[0]

        if cond_type == "EXISTS":
            return f"!!document.querySelector('{condition[1]}')"
        elif cond_type == "NOT":
            # Recursively handle the negated condition
            inner_condition = self._emit_condition(condition[1])
            return f"!({inner_condition})"
        else:  # JS condition
            return condition[1]

    def _handle_cmd_or_proc(self, cmd):
        """Handle a command that might be a regular command or a procedure call"""
        if not cmd:
            return ""

        if isinstance(cmd, Cmd):
            if cmd.op == "CALL":
                # Inline the procedure
                if cmd.args[0] not in self.procs:
                    raise ValueError(f"Unknown procedure {cmd.args[0]!r}")
                proc_body = self.procs[cmd.args[0]].body
                return "\n".join([self._emit_js(c) for c in proc_body if c.op != "NOP"])
            else:
                return self._emit_js(cmd)
        return ""

# --------------------------------------------------------------------------- #
# 5. Helpers + demo
# --------------------------------------------------------------------------- #

def compile_string(script: Union[str, List[str]], *, root: Union[pathlib.Path, None] = None) -> List[str]:
    """Compile C4A-Script from string or list of strings to JavaScript.

    Args:
        script: C4A-Script as a string or list of command strings
        root: Root directory for resolving includes (optional)

    Returns:
        List of JavaScript command strings

    Raises:
        C4AScriptError: When compilation fails with detailed error information
    """
    try:
        return Compiler(root).compile(script)
    except Exception as e:
        # Wrap the error with better formatting
        raise C4AScriptError.from_exception(e, script)

def compile_file(path: pathlib.Path) -> List[str]:
    """Compile C4A-Script from file to JavaScript.

    Args:
        path: Path to C4A-Script file

    Returns:
        List of JavaScript command strings
    """
    return compile_string(path.read_text(), root=path.parent)

def compile_lines(lines: List[str], *, root: Union[pathlib.Path, None] = None) -> List[str]:
    """Compile C4A-Script from list of lines to JavaScript.

    Args:
        lines: List of C4A-Script command lines
        root: Root directory for resolving includes (optional)

    Returns:
        List of JavaScript command strings
    """
    return compile_string(lines, root=root)

DEMO = """
# quick sanity demo
PROC login
SET `input[name="username"]` $user
SET `input[name="password"]` $pass
CLICK `button.submit`
ENDPROC

SETVAR user = "tom@crawl4ai.com"
SETVAR pass = "hunter2"

GO https://example.com/login
WAIT `input[name="username"]` 10
login
WAIT 3
EVAL `console.log('logged in')`
"""

if __name__ == "__main__":
    if len(sys.argv) == 2:
        for js in compile_file(pathlib.Path(sys.argv[1])):
            print(js)
    else:
        print("=== DEMO ===")
        for js in compile_string(DEMO):
            print(js)
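
As the module docstring says, the compiled statements are meant to reach Crawl4AI through CrawlerRunConfig(js_code=[...]). A minimal sketch of that wiring (AsyncWebCrawler and CrawlerRunConfig are the usual crawl4ai entry points, not part of this file):

import asyncio
from crawl4ai import AsyncWebCrawler, CrawlerRunConfig

async def main():
    # compile_string is defined above; one JS statement per compiled command
    js = compile_string('CLICK `button.submit`\nWAIT 2')
    config = CrawlerRunConfig(js_code=js)
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun("https://example.com", config=config)
        print(result.success)

asyncio.run(main())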
@@ -9,173 +9,196 @@ from urllib.parse import urlparse
import OpenSSL.crypto
from pathlib import Path

# === Inherit from dict ===
class SSLCertificate(dict):
    """
    A class representing an SSL certificate, behaving like a dictionary
    for direct JSON serialization. It stores the certificate information internally
    and provides methods for export and property access.

class SSLCertificate:
    """
    A class representing an SSL certificate with methods to export in various formats.

    Attributes:
        cert_info (Dict[str, Any]): The certificate information.

    Methods:
        from_url(url: str, timeout: int = 10) -> Optional['SSLCertificate']: Create SSLCertificate instance from a URL.
        from_file(file_path: str) -> Optional['SSLCertificate']: Create SSLCertificate instance from a file.
        from_binary(binary_data: bytes) -> Optional['SSLCertificate']: Create SSLCertificate instance from binary data.
        export_as_pem() -> str: Export the certificate as PEM format.
        export_as_der() -> bytes: Export the certificate as DER format.
        export_as_json() -> Dict[str, Any]: Export the certificate as JSON format.
        export_as_text() -> str: Export the certificate as text format.
    Inherits from dict, so instances are directly JSON serializable.
    """

    # Use __slots__ for potential memory optimization if desired, though less common when inheriting dict
    # __slots__ = ("_cert_info",)  # If using slots, be careful with dict inheritance interaction

    def __init__(self, cert_info: Dict[str, Any]):
        self._cert_info = self._decode_cert_data(cert_info)
        """
        Initializes the SSLCertificate object.

    @staticmethod
    def from_url(url: str, timeout: int = 10) -> Optional['SSLCertificate']:
        """
        Create SSLCertificate instance from a URL.

        Args:
            url (str): URL of the website.
            timeout (int): Timeout for the connection (default: 10).

        Returns:
            Optional[SSLCertificate]: SSLCertificate instance if successful, None otherwise.
            cert_info (Dict[str, Any]): The raw certificate dictionary.
        """
        try:
            hostname = urlparse(url).netloc
            if ':' in hostname:
                hostname = hostname.split(':')[0]

            context = ssl.create_default_context()
            with socket.create_connection((hostname, 443), timeout=timeout) as sock:
                with context.wrap_socket(sock, server_hostname=hostname) as ssock:
                    cert_binary = ssock.getpeercert(binary_form=True)
                    x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_ASN1, cert_binary)

                    cert_info = {
                        "subject": dict(x509.get_subject().get_components()),
                        "issuer": dict(x509.get_issuer().get_components()),
                        "version": x509.get_version(),
                        "serial_number": hex(x509.get_serial_number()),
                        "not_before": x509.get_notBefore(),
                        "not_after": x509.get_notAfter(),
                        "fingerprint": x509.digest("sha256").hex(),
                        "signature_algorithm": x509.get_signature_algorithm(),
                        "raw_cert": base64.b64encode(cert_binary)
                    }

                    # Add extensions
                    extensions = []
                    for i in range(x509.get_extension_count()):
                        ext = x509.get_extension(i)
                        extensions.append({
                            "name": ext.get_short_name(),
                            "value": str(ext)
                        })
                    cert_info["extensions"] = extensions

                    return SSLCertificate(cert_info)

        except Exception as e:
            return None
        # 1. Decode the data (handle bytes -> str)
        decoded_info = self._decode_cert_data(cert_info)

        # 2. Store the decoded info internally (optional but good practice)
        # self._cert_info = decoded_info  # You can keep this if methods rely on it

        # 3. Initialize the dictionary part of the object with the decoded data
        super().__init__(decoded_info)

    @staticmethod
    def _decode_cert_data(data: Any) -> Any:
        """Helper method to decode bytes in certificate data."""
        if isinstance(data, bytes):
            return data.decode('utf-8')
            try:
                # Try UTF-8 first, fallback to latin-1 for arbitrary bytes
                return data.decode("utf-8")
            except UnicodeDecodeError:
                return data.decode("latin-1")  # Or handle as needed, maybe hex representation
        elif isinstance(data, dict):
            return {
                (k.decode('utf-8') if isinstance(k, bytes) else k): SSLCertificate._decode_cert_data(v)
                (
                    k.decode("utf-8") if isinstance(k, bytes) else k
                ): SSLCertificate._decode_cert_data(v)
                for k, v in data.items()
            }
        elif isinstance(data, list):
            return [SSLCertificate._decode_cert_data(item) for item in data]
        return data

    @staticmethod
    def from_url(url: str, timeout: int = 10) -> Optional["SSLCertificate"]:
        """
        Create SSLCertificate instance from a URL. Fetches cert info and initializes.
        (Fetching logic remains the same)
        """
        cert_info_raw = None  # Variable to hold the fetched dict
        try:
            hostname = urlparse(url).netloc
            if ":" in hostname:
                hostname = hostname.split(":")[0]

            context = ssl.create_default_context()
            # Set check_hostname to False and verify_mode to CERT_NONE temporarily
            # for potentially problematic certificates during fetch, but parse the result regardless.
            # context.check_hostname = False
            # context.verify_mode = ssl.CERT_NONE

            with socket.create_connection((hostname, 443), timeout=timeout) as sock:
                with context.wrap_socket(sock, server_hostname=hostname) as ssock:
                    cert_binary = ssock.getpeercert(binary_form=True)
                    if not cert_binary:
                        print(f"Warning: No certificate returned for {hostname}")
                        return None

                    x509 = OpenSSL.crypto.load_certificate(
                        OpenSSL.crypto.FILETYPE_ASN1, cert_binary
                    )

                    # Create the dictionary directly
                    cert_info_raw = {
                        "subject": dict(x509.get_subject().get_components()),
                        "issuer": dict(x509.get_issuer().get_components()),
                        "version": x509.get_version(),
                        "serial_number": hex(x509.get_serial_number()),
                        "not_before": x509.get_notBefore(),  # Keep as bytes initially, _decode handles it
                        "not_after": x509.get_notAfter(),  # Keep as bytes initially
                        "fingerprint": x509.digest("sha256").hex(),  # hex() is already string
                        "signature_algorithm": x509.get_signature_algorithm(),  # Keep as bytes
                        "raw_cert": base64.b64encode(cert_binary),  # Base64 is bytes, _decode handles it
                    }

                    # Add extensions
                    extensions = []
                    for i in range(x509.get_extension_count()):
                        ext = x509.get_extension(i)
                        # get_short_name() returns bytes, str(ext) handles value conversion
                        extensions.append(
                            {"name": ext.get_short_name(), "value": str(ext)}
                        )
                    cert_info_raw["extensions"] = extensions

        except ssl.SSLCertVerificationError as e:
            print(f"SSL Verification Error for {url}: {e}")
            # Decide if you want to proceed or return None based on your needs
            # You might try fetching without verification here if needed, but be cautious.
            return None
        except socket.gaierror:
            print(f"Could not resolve hostname: {hostname}")
            return None
        except socket.timeout:
            print(f"Connection timed out for {url}")
            return None
        except Exception as e:
            print(f"Error fetching/processing certificate for {url}: {e}")
            # Log the full error details if needed: logging.exception("Cert fetch error")
            return None

        # If successful, create the SSLCertificate instance from the dictionary
        if cert_info_raw:
            return SSLCertificate(cert_info_raw)
        else:
            return None


    # --- Properties now access the dictionary items directly via self[] ---
    @property
    def issuer(self) -> Dict[str, str]:
        return self.get("issuer", {})  # Use self.get for safety

    @property
    def subject(self) -> Dict[str, str]:
        return self.get("subject", {})

    @property
    def valid_from(self) -> str:
        return self.get("not_before", "")

    @property
    def valid_until(self) -> str:
        return self.get("not_after", "")

    @property
    def fingerprint(self) -> str:
        return self.get("fingerprint", "")

    # --- Export methods can use `self` directly as it is the dict ---
    def to_json(self, filepath: Optional[str] = None) -> Optional[str]:
        """
        Export certificate as JSON.

        Args:
            filepath (Optional[str]): Path to save the JSON file (default: None).

        Returns:
            Optional[str]: JSON string if successful, None otherwise.
        """
        json_str = json.dumps(self._cert_info, indent=2, ensure_ascii=False)
        """Export certificate as JSON."""
        # `self` is already the dictionary we want to serialize
        json_str = json.dumps(self, indent=2, ensure_ascii=False)
        if filepath:
            Path(filepath).write_text(json_str, encoding='utf-8')
            Path(filepath).write_text(json_str, encoding="utf-8")
            return None
        return json_str

    def to_pem(self, filepath: Optional[str] = None) -> Optional[str]:
        """
        Export certificate as PEM.

        Args:
            filepath (Optional[str]): Path to save the PEM file (default: None).

        Returns:
            Optional[str]: PEM string if successful, None otherwise.
        """
        """Export certificate as PEM."""
        try:
            # Decode the raw_cert (which should be string due to _decode)
            raw_cert_bytes = base64.b64decode(self.get("raw_cert", ""))
            x509 = OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                base64.b64decode(self._cert_info['raw_cert'])
                OpenSSL.crypto.FILETYPE_ASN1, raw_cert_bytes
            )
            pem_data = OpenSSL.crypto.dump_certificate(
                OpenSSL.crypto.FILETYPE_PEM,
                x509
            ).decode('utf-8')

                OpenSSL.crypto.FILETYPE_PEM, x509
            ).decode("utf-8")

            if filepath:
                Path(filepath).write_text(pem_data, encoding='utf-8')
                Path(filepath).write_text(pem_data, encoding="utf-8")
                return None
            return pem_data
        except Exception as e:
            return None
            print(f"Error converting to PEM: {e}")
            return None

    def to_der(self, filepath: Optional[str] = None) -> Optional[bytes]:
        """
        Export certificate as DER.

        Args:
            filepath (Optional[str]): Path to save the DER file (default: None).

        Returns:
            Optional[bytes]: DER bytes if successful, None otherwise.
        """
        """Export certificate as DER."""
        try:
            der_data = base64.b64decode(self._cert_info['raw_cert'])
            # Decode the raw_cert (which should be string due to _decode)
            der_data = base64.b64decode(self.get("raw_cert", ""))
            if filepath:
                Path(filepath).write_bytes(der_data)
                return None
            return der_data
        except Exception:
            return None
        except Exception as e:
            print(f"Error converting to DER: {e}")
            return None

    @property
    def issuer(self) -> Dict[str, str]:
        """Get certificate issuer information."""
        return self._cert_info.get('issuer', {})

    @property
    def subject(self) -> Dict[str, str]:
        """Get certificate subject information."""
        return self._cert_info.get('subject', {})

    @property
    def valid_from(self) -> str:
        """Get certificate validity start date."""
        return self._cert_info.get('not_before', '')

    @property
    def valid_until(self) -> str:
        """Get certificate validity end date."""
        return self._cert_info.get('not_after', '')

    @property
    def fingerprint(self) -> str:
        """Get certificate fingerprint."""
        return self._cert_info.get('fingerprint', '')
    # Optional: Add __repr__ for better debugging
    def __repr__(self) -> str:
        subject_cn = self.subject.get('CN', 'N/A')
        issuer_cn = self.issuer.get('CN', 'N/A')
        return f"<SSLCertificate Subject='{subject_cn}' Issuer='{issuer_cn}'>"
1396 crawl4ai/table_extraction.py Normal file
File diff suppressed because it is too large

195 crawl4ai/types.py Normal file
@@ -0,0 +1,195 @@
from typing import TYPE_CHECKING, Union

# Logger types
AsyncLoggerBase = Union['AsyncLoggerBaseType']
AsyncLogger = Union['AsyncLoggerType']

# Crawler core types
AsyncWebCrawler = Union['AsyncWebCrawlerType']
CacheMode = Union['CacheModeType']
CrawlResult = Union['CrawlResultType']
CrawlerHub = Union['CrawlerHubType']
BrowserProfiler = Union['BrowserProfilerType']
# NEW: Add AsyncUrlSeederType
AsyncUrlSeeder = Union['AsyncUrlSeederType']

# Configuration types
BrowserConfig = Union['BrowserConfigType']
CrawlerRunConfig = Union['CrawlerRunConfigType']
HTTPCrawlerConfig = Union['HTTPCrawlerConfigType']
LLMConfig = Union['LLMConfigType']
# NEW: Add SeedingConfigType
SeedingConfig = Union['SeedingConfigType']

# Content scraping types
ContentScrapingStrategy = Union['ContentScrapingStrategyType']
LXMLWebScrapingStrategy = Union['LXMLWebScrapingStrategyType']
# Backward compatibility alias
WebScrapingStrategy = Union['LXMLWebScrapingStrategyType']

# Proxy types
ProxyRotationStrategy = Union['ProxyRotationStrategyType']
RoundRobinProxyStrategy = Union['RoundRobinProxyStrategyType']

# Extraction types
ExtractionStrategy = Union['ExtractionStrategyType']
LLMExtractionStrategy = Union['LLMExtractionStrategyType']
CosineStrategy = Union['CosineStrategyType']
JsonCssExtractionStrategy = Union['JsonCssExtractionStrategyType']
JsonXPathExtractionStrategy = Union['JsonXPathExtractionStrategyType']

# Chunking types
ChunkingStrategy = Union['ChunkingStrategyType']
RegexChunking = Union['RegexChunkingType']

# Markdown generation types
DefaultMarkdownGenerator = Union['DefaultMarkdownGeneratorType']
MarkdownGenerationResult = Union['MarkdownGenerationResultType']

# Content filter types
RelevantContentFilter = Union['RelevantContentFilterType']
PruningContentFilter = Union['PruningContentFilterType']
BM25ContentFilter = Union['BM25ContentFilterType']
LLMContentFilter = Union['LLMContentFilterType']

# Dispatcher types
BaseDispatcher = Union['BaseDispatcherType']
MemoryAdaptiveDispatcher = Union['MemoryAdaptiveDispatcherType']
SemaphoreDispatcher = Union['SemaphoreDispatcherType']
RateLimiter = Union['RateLimiterType']
CrawlerMonitor = Union['CrawlerMonitorType']
DisplayMode = Union['DisplayModeType']
RunManyReturn = Union['RunManyReturnType']

# Docker client
Crawl4aiDockerClient = Union['Crawl4aiDockerClientType']

# Deep crawling types
DeepCrawlStrategy = Union['DeepCrawlStrategyType']
BFSDeepCrawlStrategy = Union['BFSDeepCrawlStrategyType']
FilterChain = Union['FilterChainType']
ContentTypeFilter = Union['ContentTypeFilterType']
DomainFilter = Union['DomainFilterType']
URLFilter = Union['URLFilterType']
FilterStats = Union['FilterStatsType']
SEOFilter = Union['SEOFilterType']
KeywordRelevanceScorer = Union['KeywordRelevanceScorerType']
URLScorer = Union['URLScorerType']
CompositeScorer = Union['CompositeScorerType']
DomainAuthorityScorer = Union['DomainAuthorityScorerType']
FreshnessScorer = Union['FreshnessScorerType']
PathDepthScorer = Union['PathDepthScorerType']
BestFirstCrawlingStrategy = Union['BestFirstCrawlingStrategyType']
DFSDeepCrawlStrategy = Union['DFSDeepCrawlStrategyType']
DeepCrawlDecorator = Union['DeepCrawlDecoratorType']
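
# Illustrative only: these aliases let user code annotate with the public
# names while the concrete classes stay lazily imported, e.g.
#
#     from crawl4ai.types import AsyncWebCrawler
#
#     def warm_up(crawler: AsyncWebCrawler) -> None: ...
#
# At runtime AsyncWebCrawler is just Union['AsyncWebCrawlerType'] (a forward
# reference); the real class is only resolved under TYPE_CHECKING below.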

# Only import types during type checking to avoid circular imports
if TYPE_CHECKING:
    # Logger imports
    from .async_logger import (
        AsyncLoggerBase as AsyncLoggerBaseType,
        AsyncLogger as AsyncLoggerType,
    )

    # Crawler core imports
    from .async_webcrawler import (
        AsyncWebCrawler as AsyncWebCrawlerType,
        CacheMode as CacheModeType,
    )
    from .models import CrawlResult as CrawlResultType
    from .hub import CrawlerHub as CrawlerHubType
    from .browser_profiler import BrowserProfiler as BrowserProfilerType
    # NEW: Import AsyncUrlSeeder for type checking
    from .async_url_seeder import AsyncUrlSeeder as AsyncUrlSeederType

    # Configuration imports
    from .async_configs import (
        BrowserConfig as BrowserConfigType,
        CrawlerRunConfig as CrawlerRunConfigType,
        HTTPCrawlerConfig as HTTPCrawlerConfigType,
        LLMConfig as LLMConfigType,
        # NEW: Import SeedingConfig for type checking
        SeedingConfig as SeedingConfigType,
    )

    # Content scraping imports
    from .content_scraping_strategy import (
        ContentScrapingStrategy as ContentScrapingStrategyType,
        LXMLWebScrapingStrategy as LXMLWebScrapingStrategyType,
    )

    # Proxy imports
    from .proxy_strategy import (
        ProxyRotationStrategy as ProxyRotationStrategyType,
        RoundRobinProxyStrategy as RoundRobinProxyStrategyType,
    )

    # Extraction imports
    from .extraction_strategy import (
        ExtractionStrategy as ExtractionStrategyType,
        LLMExtractionStrategy as LLMExtractionStrategyType,
        CosineStrategy as CosineStrategyType,
        JsonCssExtractionStrategy as JsonCssExtractionStrategyType,
        JsonXPathExtractionStrategy as JsonXPathExtractionStrategyType,
    )

    # Chunking imports
    from .chunking_strategy import (
        ChunkingStrategy as ChunkingStrategyType,
        RegexChunking as RegexChunkingType,
    )

    # Markdown generation imports
    from .markdown_generation_strategy import (
        DefaultMarkdownGenerator as DefaultMarkdownGeneratorType,
    )
    from .models import MarkdownGenerationResult as MarkdownGenerationResultType

    # Content filter imports
    from .content_filter_strategy import (
        RelevantContentFilter as RelevantContentFilterType,
        PruningContentFilter as PruningContentFilterType,
        BM25ContentFilter as BM25ContentFilterType,
        LLMContentFilter as LLMContentFilterType,
    )

    # Dispatcher imports
    from .async_dispatcher import (
        BaseDispatcher as BaseDispatcherType,
        MemoryAdaptiveDispatcher as MemoryAdaptiveDispatcherType,
        SemaphoreDispatcher as SemaphoreDispatcherType,
        RateLimiter as RateLimiterType,
        CrawlerMonitor as CrawlerMonitorType,
        DisplayMode as DisplayModeType,
        RunManyReturn as RunManyReturnType,
    )

    # Docker client
    from .docker_client import Crawl4aiDockerClient as Crawl4aiDockerClientType

    # Deep crawling imports
    from .deep_crawling import (
        DeepCrawlStrategy as DeepCrawlStrategyType,
        BFSDeepCrawlStrategy as BFSDeepCrawlStrategyType,
        FilterChain as FilterChainType,
        ContentTypeFilter as ContentTypeFilterType,
        DomainFilter as DomainFilterType,
        URLFilter as URLFilterType,
        FilterStats as FilterStatsType,
        SEOFilter as SEOFilterType,
        KeywordRelevanceScorer as KeywordRelevanceScorerType,
        URLScorer as URLScorerType,
        CompositeScorer as CompositeScorerType,
        DomainAuthorityScorer as DomainAuthorityScorerType,
        FreshnessScorer as FreshnessScorerType,
        PathDepthScorer as PathDepthScorerType,
        BestFirstCrawlingStrategy as BestFirstCrawlingStrategyType,
        DFSDeepCrawlStrategy as DFSDeepCrawlStrategyType,
        DeepCrawlDecorator as DeepCrawlDecoratorType,
    )


def create_llm_config(*args, **kwargs) -> 'LLMConfigType':
    from .async_configs import LLMConfig
    return LLMConfig(*args, **kwargs)
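
create_llm_config defers the real import to call time, so crawl4ai.types never imports async_configs at module load and the circular-import risk stays contained. A minimal usage sketch (the provider/api_token keyword names follow LLMConfig's usual signature and are an assumption here, as the class body is not shown in this diff):

from crawl4ai.types import create_llm_config

# Returns a concrete async_configs.LLMConfig at runtime, while type
# checkers see the 'LLMConfigType' forward reference.
llm_config = create_llm_config(provider="openai/gpt-4o-mini", api_token="...")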
@@ -2,11 +2,148 @@ import random
from typing import Optional, Literal, List, Dict, Tuple
import re

from abc import ABC, abstractmethod
from fake_useragent import UserAgent
import requests
from lxml import html
import json
from typing import Union

class UserAgentGenerator:
class UAGen(ABC):
    @abstractmethod
    def generate(self,
                 browsers: Optional[List[str]] = None,
                 os: Optional[Union[str, List[str]]] = None,
                 min_version: float = 0.0,
                 platforms: Optional[Union[str, List[str]]] = None,
                 pct_threshold: Optional[float] = None,
                 fallback: str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/116.0.0.0 Safari/537.36") -> Union[str, Dict]:
        pass

    @staticmethod
    def generate_client_hints(user_agent: str) -> str:
        """Generate Sec-CH-UA header value based on user agent string"""
        def _parse_user_agent(user_agent: str) -> Dict[str, str]:
            """Parse a user agent string to extract browser and version information"""
            browsers = {
                "chrome": r"Chrome/(\d+)",
                "edge": r"Edg/(\d+)",
                "safari": r"Version/(\d+)",
                "firefox": r"Firefox/(\d+)",
            }

            result = {}
            for browser, pattern in browsers.items():
                match = re.search(pattern, user_agent)
                if match:
                    result[browser] = match.group(1)

            return result
        browsers = _parse_user_agent(user_agent)

        # Client hints components
        hints = []

        # Handle different browser combinations
        if "chrome" in browsers:
            hints.append(f'"Chromium";v="{browsers["chrome"]}"')
            hints.append('"Not_A Brand";v="8"')

            if "edge" in browsers:
                hints.append(f'"Microsoft Edge";v="{browsers["edge"]}"')
            else:
                hints.append(f'"Google Chrome";v="{browsers["chrome"]}"')

        elif "firefox" in browsers:
            # Firefox doesn't typically send Sec-CH-UA
            return '""'

        elif "safari" in browsers:
            # Safari's format for client hints
            hints.append(f'"Safari";v="{browsers["safari"]}"')
            hints.append('"Not_A Brand";v="8"')

        return ", ".join(hints)

class ValidUAGenerator(UAGen):
    def __init__(self):
        self.ua = UserAgent()

    def generate(self,
                 browsers: Optional[List[str]] = None,
                 os: Optional[Union[str, List[str]]] = None,
                 min_version: float = 0.0,
                 platforms: Optional[Union[str, List[str]]] = None,
                 pct_threshold: Optional[float] = None,
                 fallback: str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/116.0.0.0 Safari/537.36") -> str:

        self.ua = UserAgent(
            browsers=browsers or ['Chrome', 'Firefox', 'Edge'],
            os=os or ['Windows', 'Mac OS X'],
            min_version=min_version,
            platforms=platforms or ['desktop'],
            fallback=fallback
        )
        return self.ua.random

class OnlineUAGenerator(UAGen):
    def __init__(self):
        self.agents = []
        self._fetch_agents()

    def _fetch_agents(self):
        try:
            response = requests.get(
                'https://www.useragents.me/',
                timeout=5,
                headers={'Accept': 'text/html,application/xhtml+xml'}
            )
            response.raise_for_status()

            tree = html.fromstring(response.content)
            json_text = tree.cssselect('#most-common-desktop-useragents-json-csv > div:nth-child(1) > textarea')[0].text
            self.agents = json.loads(json_text)
        except Exception as e:
            print(f"Error fetching agents: {e}")

    def generate(self,
                 browsers: Optional[List[str]] = None,
                 os: Optional[Union[str, List[str]]] = None,
                 min_version: float = 0.0,
                 platforms: Optional[Union[str, List[str]]] = None,
                 pct_threshold: Optional[float] = None,
                 fallback: str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 Chrome/116.0.0.0 Safari/537.36") -> Dict:

        if not self.agents:
            self._fetch_agents()

        filtered_agents = self.agents

        if pct_threshold:
            filtered_agents = [a for a in filtered_agents if a['pct'] >= pct_threshold]

        if browsers:
            filtered_agents = [a for a in filtered_agents
                               if any(b.lower() in a['ua'].lower() for b in browsers)]

        if os:
            os_list = [os] if isinstance(os, str) else os
            filtered_agents = [a for a in filtered_agents
                               if any(o.lower() in a['ua'].lower() for o in os_list)]

        if platforms:
            platform_list = [platforms] if isinstance(platforms, str) else platforms
            filtered_agents = [a for a in filtered_agents
                               if any(p.lower() in a['ua'].lower() for p in platform_list)]

        return filtered_agents[0] if filtered_agents else {'ua': fallback, 'pct': 0}



class UserAgentGenerator():
    """
    Generate random user agents with specified constraints.

    Attributes:
        desktop_platforms (dict): A dictionary of possible desktop platforms and their corresponding user agent strings.
        mobile_platforms (dict): A dictionary of possible mobile platforms and their corresponding user agent strings.
@@ -18,7 +155,7 @@ class UserAgentGenerator:
        safari_versions (list): A list of possible Safari browser versions.
        ios_versions (list): A list of possible iOS browser versions.
        android_versions (list): A list of possible Android browser versions.

    Methods:
        generate_user_agent(
            platform: Literal["desktop", "mobile"] = "desktop",
@@ -30,8 +167,9 @@ class UserAgentGenerator:
            safari_version: Optional[str] = None,
            ios_version: Optional[str] = None,
            android_version: Optional[str] = None
        ): Generates a random user agent string based on the specified parameters.
        ): Generates a random user agent string based on the specified parameters.
    """

    def __init__(self):
        # Previous platform definitions remain the same...
        self.desktop_platforms = {
@@ -47,7 +185,7 @@ class UserAgentGenerator:
                "generic": "(X11; Linux x86_64)",
                "ubuntu": "(X11; Ubuntu; Linux x86_64)",
                "chrome_os": "(X11; CrOS x86_64 14541.0.0)",
            }
            },
        }

        self.mobile_platforms = {
@@ -60,26 +198,14 @@ class UserAgentGenerator:
            "ios": {
                "iphone": "(iPhone; CPU iPhone OS 16_5 like Mac OS X)",
                "ipad": "(iPad; CPU OS 16_5 like Mac OS X)",
            }
            },
        }

        # Browser Combinations
        self.browser_combinations = {
            1: [
                ["chrome"],
                ["firefox"],
                ["safari"],
                ["edge"]
            ],
            2: [
                ["gecko", "firefox"],
                ["chrome", "safari"],
                ["webkit", "safari"]
            ],
            3: [
                ["chrome", "safari", "edge"],
                ["webkit", "chrome", "safari"]
            ]
            1: [["chrome"], ["firefox"], ["safari"], ["edge"]],
            2: [["gecko", "firefox"], ["chrome", "safari"], ["webkit", "safari"]],
            3: [["chrome", "safari", "edge"], ["webkit", "chrome", "safari"]],
        }

        # Rendering Engines with versions
@@ -90,7 +216,7 @@ class UserAgentGenerator:
            "Gecko/20100101",
            "Gecko/20100101",  # Firefox usually uses this constant version
            "Gecko/2010010",
        ]
        ],
        }

        # Browser Versions
@@ -135,25 +261,25 @@ class UserAgentGenerator:
    def get_browser_stack(self, num_browsers: int = 1) -> List[str]:
        """
        Get a valid combination of browser versions.

        How it works:
        1. Check if the number of browsers is supported.
        2. Randomly choose a combination of browsers.
        3. Iterate through the combination and add browser versions.
        4. Return the browser stack.

        Args:
            num_browsers: Number of browser specifications (1-3)

        Returns:
            List[str]: A list of browser versions.
        """
        if num_browsers not in self.browser_combinations:
            raise ValueError(f"Unsupported number of browsers: {num_browsers}")

        combination = random.choice(self.browser_combinations[num_browsers])
        browser_stack = []

        for browser in combination:
            if browser == "chrome":
                browser_stack.append(random.choice(self.chrome_versions))
@@ -167,18 +293,20 @@ class UserAgentGenerator:
                browser_stack.append(random.choice(self.rendering_engines["gecko"]))
            elif browser == "webkit":
                browser_stack.append(self.rendering_engines["chrome_webkit"])

        return browser_stack

    def generate(self,
                 device_type: Optional[Literal['desktop', 'mobile']] = None,
                 os_type: Optional[str] = None,
                 device_brand: Optional[str] = None,
                 browser_type: Optional[Literal['chrome', 'edge', 'safari', 'firefox']] = None,
                 num_browsers: int = 3) -> str:
    def generate(
        self,
        device_type: Optional[Literal["desktop", "mobile"]] = None,
        os_type: Optional[str] = None,
        device_brand: Optional[str] = None,
        browser_type: Optional[Literal["chrome", "edge", "safari", "firefox"]] = None,
        num_browsers: int = 3,
    ) -> str:
        """
        Generate a random user agent with specified constraints.

        Args:
            device_type: 'desktop' or 'mobile'
            os_type: 'windows', 'macos', 'linux', 'android', 'ios'
@@ -188,23 +316,29 @@ class UserAgentGenerator:
        """
        # Get platform string
        platform = self.get_random_platform(device_type, os_type, device_brand)

        # Start with Mozilla
        components = ["Mozilla/5.0", platform]

        # Add browser stack
        browser_stack = self.get_browser_stack(num_browsers)

        # Add appropriate legacy token based on browser stack
        if "Firefox" in str(browser_stack):
        if "Firefox" in str(browser_stack) or browser_type == "firefox":
            components.append(random.choice(self.rendering_engines["gecko"]))
        elif "Chrome" in str(browser_stack) or "Safari" in str(browser_stack):
        elif "Chrome" in str(browser_stack) or "Safari" in str(browser_stack) or browser_type == "chrome":
            components.append(self.rendering_engines["chrome_webkit"])
            components.append("(KHTML, like Gecko)")

        elif "Edge" in str(browser_stack) or browser_type == "edge":
            components.append(self.rendering_engines["safari_webkit"])
            components.append("(KHTML, like Gecko)")
        elif "Safari" in str(browser_stack) or browser_type == "safari":
            components.append(self.rendering_engines["chrome_webkit"])
            components.append("(KHTML, like Gecko)")

        # Add browser versions
        components.extend(browser_stack)

        return " ".join(components)

    def generate_with_client_hints(self, **kwargs) -> Tuple[str, str]:
@@ -215,16 +349,20 @@ class UserAgentGenerator:

    def get_random_platform(self, device_type, os_type, device_brand):
        """Helper method to get random platform based on constraints"""
        platforms = self.desktop_platforms if device_type == 'desktop' else \
                    self.mobile_platforms if device_type == 'mobile' else \
                    {**self.desktop_platforms, **self.mobile_platforms}

        platforms = (
            self.desktop_platforms
            if device_type == "desktop"
            else self.mobile_platforms
            if device_type == "mobile"
            else {**self.desktop_platforms, **self.mobile_platforms}
        )

        if os_type:
            for platform_group in [self.desktop_platforms, self.mobile_platforms]:
                if os_type in platform_group:
                    platforms = {os_type: platform_group[os_type]}
                    break

        os_key = random.choice(list(platforms.keys()))
        if device_brand and device_brand in platforms[os_key]:
            return platforms[os_key][device_brand]
@@ -233,73 +371,58 @@ class UserAgentGenerator:
    def parse_user_agent(self, user_agent: str) -> Dict[str, str]:
        """Parse a user agent string to extract browser and version information"""
        browsers = {
            'chrome': r'Chrome/(\d+)',
            'edge': r'Edg/(\d+)',
            'safari': r'Version/(\d+)',
            'firefox': r'Firefox/(\d+)'
            "chrome": r"Chrome/(\d+)",
            "edge": r"Edg/(\d+)",
            "safari": r"Version/(\d+)",
            "firefox": r"Firefox/(\d+)",
        }

        result = {}
        for browser, pattern in browsers.items():
            match = re.search(pattern, user_agent)
            if match:
                result[browser] = match.group(1)

        return result

    def generate_client_hints(self, user_agent: str) -> str:
        """Generate Sec-CH-UA header value based on user agent string"""
        browsers = self.parse_user_agent(user_agent)

        # Client hints components
        hints = []

        # Handle different browser combinations
        if 'chrome' in browsers:
        if "chrome" in browsers:
            hints.append(f'"Chromium";v="{browsers["chrome"]}"')
            hints.append('"Not_A Brand";v="8"')

            if 'edge' in browsers:

            if "edge" in browsers:
                hints.append(f'"Microsoft Edge";v="{browsers["edge"]}"')
            else:
                hints.append(f'"Google Chrome";v="{browsers["chrome"]}"')

        elif 'firefox' in browsers:

        elif "firefox" in browsers:
            # Firefox doesn't typically send Sec-CH-UA
            return '""'

        elif 'safari' in browsers:

        elif "safari" in browsers:
            # Safari's format for client hints
            hints.append(f'"Safari";v="{browsers["safari"]}"')
            hints.append('"Not_A Brand";v="8"')

        return ', '.join(hints)

        return ", ".join(hints)


# Example usage:
if __name__ == "__main__":
    generator = UserAgentGenerator()
    print(generator.generate())

    print("\nSingle browser (Chrome):")
    print(generator.generate(num_browsers=1, browser_type='chrome'))
# Usage example:
generator = ValidUAGenerator()
ua = generator.generate()
print(ua)

    print("\nTwo browsers (Gecko/Firefox):")
    print(generator.generate(num_browsers=2))

    print("\nThree browsers (Chrome/Safari/Edge):")
    print(generator.generate(num_browsers=3))

    print("\nFirefox on Linux:")
    print(generator.generate(
        device_type='desktop',
        os_type='linux',
        browser_type='firefox',
        num_browsers=2
    ))

    print("\nChrome/Safari/Edge on Windows:")
    print(generator.generate(
        device_type='desktop',
        os_type='windows',
        num_browsers=3
    ))
generator = OnlineUAGenerator()
ua = generator.generate()
print(ua)
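
generate_with_client_hints (declared above with a Tuple[str, str] return) pairs a UA string with a matching Sec-CH-UA value, which suggests request headers can be built in one call. A hedged sketch, assuming the tuple is (user_agent, client_hints) and that the keyword arguments are forwarded to generate():

gen = UserAgentGenerator()
user_agent, client_hints = gen.generate_with_client_hints(device_type='desktop', os_type='windows')
headers = {
    "User-Agent": user_agent,
    "sec-ch-ua": client_hints,
}
print(headers)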

3060 crawl4ai/utils.py
File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff.