2025-05-11 18:58:43 +02:00
parent d5a73f342e
commit f3f3045ebf
48 changed files with 686 additions and 243 deletions

41
Bomtoon_JS/bomtoon.js Normal file
View File

@@ -0,0 +1,41 @@
async function downloadImages(blobUrls) {
for (let i = 0; i < blobUrls.length; i++) {
let response = await fetch(blobUrls[i]);
let blob = await response.blob();
let blobUrlObject = URL.createObjectURL(blob);
let indexStr = String(i).padStart(3, "0"); // zero-pad the index to 3 digits
let filename = `${indexStr}.webp`; // e.g., 001.webp
let a = document.createElement("a");
a.href = blobUrlObject;
a.download = filename;
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
URL.revokeObjectURL(blobUrlObject);
console.log(`Downloaded: ${filename}`);
await new Promise(resolve => setTimeout(resolve, 500)); // throttle to avoid Chrome's multiple-download limit
}
const div = document.querySelector('div.printView > div:not(style)');
const div2 = Array.from(div.children).filter(el => el.tagName.toLowerCase() === 'div')[1];
const div2_1_1 = div2.querySelector('div:nth-of-type(1) > div:nth-of-type(1)');
const count = Array.from(div2_1_1.children).filter(el => {
return el.tagName.toLowerCase() === 'div' &&
el.hasAttribute('width') &&
el.hasAttribute('height');
}).length;
console.log("div2.1.1 下的 <div> 数量为:", count);
console.log(document.title);
}
const blobs = [...document.querySelectorAll("img")]
.map(el => el.src)
.filter(src => src.startsWith("blob:"));
downloadImages(blobs);

View File

@@ -0,0 +1,53 @@
async function downloadCanvasImages() {
let seenCanvases = new Set(); // tracks canvases that have already been downloaded, to avoid duplicates
let lastScrollTop = 0;
while (true) {
console.log("🔽 正在下载当前屏幕的所有 Canvas...");
// 获取所有 <canvas> 并下载
document.querySelectorAll("canvas").forEach((canvas, index) => {
if (!seenCanvases.has(canvas)) { // skip canvases that were already downloaded
seenCanvases.add(canvas);
let imgData = canvas.toDataURL("image/webp"); // 转为 Base64 webp
let a = document.createElement("a");
a.href = imgData;
a.download = `${String(seenCanvases.size).padStart(3, "0")}.webp`; // name files 001, 002, ...
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
console.log(`✅ Downloaded: ${a.download}`);
}
});
// remember the scroll position before scrolling
lastScrollTop = window.scrollY;
// scroll down by one viewport height
window.scrollBy(0, window.innerHeight);
await new Promise(resolve => setTimeout(resolve, 3000)); // wait 3 seconds for new canvases to load
// stop once we can no longer scroll
if (window.scrollY === lastScrollTop) {
console.log("🎉 Reached the bottom, all canvases downloaded!");
break;
}
}
const div = document.querySelector('div.printView > div:not(style)');
const div2 = Array.from(div.children).filter(el => el.tagName.toLowerCase() === 'div')[1];
const div2_1_1_1 = div2.querySelector('div:nth-of-type(1) > div:nth-of-type(1) > div:nth-of-type(1)');
const count = Array.from(div2_1_1_1.children).filter(el => {
return el.tagName.toLowerCase() === 'div' &&
el.hasAttribute('width') &&
el.hasAttribute('height');
}).length;
console.log("div2.1.1.1 下的 <div> 数量为:", count);
console.log(document.title);
}
// run the script
downloadCanvasImages();

View File

@@ -0,0 +1,14 @@
const divs = document.querySelectorAll('div[class^="CanvasViewer__Container"]');
console.log("符合条件的 div 总数:", divs.length);
const div = document.querySelector('div.printView > div:not(style)');
const div2 = Array.from(div.children).filter(el => el.tagName.toLowerCase() === 'div')[1];
const div2_1_1_1 = div2.querySelector('div:nth-of-type(1) > div:nth-of-type(1) > div:nth-of-type(1)');
const count = Array.from(div2_1_1_1.children).filter(el => {
return el.tagName.toLowerCase() === 'div' &&
el.hasAttribute('width') &&
el.hasAttribute('height');
}).length;
console.log("div2.1.1.1 下的 <div> 数量为:", count);
console.log(document.title);

24
Bomtoon_JS/get_count.js Normal file
View File

@@ -0,0 +1,24 @@
(() => {
const count = document.querySelectorAll('div[class^="ImageContainer__Container"]').length;
const existing = localStorage.getItem("divCountList") || "";
localStorage.setItem("divCountList", existing + count + "\n");
console.log("✅ Count saved:", count);
})();
(() => {
const data = localStorage.getItem("divCountList") || "";
const blob = new Blob([data], { type: "text/plain" });
const link = document.createElement("a");
link.href = URL.createObjectURL(blob);
link.download = "div_counts.txt";
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
console.log("📦 File downloaded as div_counts.txt");
})();

29
Bomtoon_JS/search_tag.js Normal file
View File

@@ -0,0 +1,29 @@
const container = document.querySelector('.fmxVRH');
if (container) {
// collect every div with size="16" under the container
const targetDivs = container.querySelectorAll('div[size="16"]');
// extract the text content of each div
const contents = Array.from(targetDivs).map(div => div.textContent.trim());
const contentString = contents.join('\n');
// save the result to localStorage
localStorage.setItem('searchTag', contentString);
} else {
console.log("没有找到 class 为 fmxVRH 的元素");
}
(() => {
const data = localStorage.getItem("searchTag") || "";
const blob = new Blob([data], { type: "text/plain" });
const link = document.createElement("a");
link.href = URL.createObjectURL(blob);
link.download = "tag_results.txt";
document.body.appendChild(link);
link.click();
document.body.removeChild(link);
console.log("📦 File downloaded as tag_results.txt");
})();
localStorage.removeItem('searchTag');

31
KakaoPage_JS/kakaopage.js Normal file
View File

@@ -0,0 +1,31 @@
async function downloadImages(urls) {
for (let i = 0; i < urls.length; i++) {
const url = urls[i];
try {
// try to fetch the image and convert it to a Blob
const response = await fetch(url, { mode: 'cors' });
const blob = await response.blob();
// create a temporary link and force a download
const blobUrl = URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = blobUrl;
a.download = `image_${i + 1}.jpg`;
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
URL.revokeObjectURL(blobUrl); // release the object URL
console.log(`✅ Downloaded: ${url}`);
} catch (err) {
console.error(`❌ Download failed: ${url}`, err);
}
}
}
const links = Array.from(document.querySelectorAll('img'))
.map(img => img.src)
.filter(src => src.startsWith('https://page-edge.kakao.com/sdownload/resource?kid='));
downloadImages(links);

Binary file not shown.

72
bomtoon.py Normal file
View File

@@ -0,0 +1,72 @@
from pathlib import Path
import shutil
BOMTOON = {
# "漫画名": (判断目录是否不为temp起始index修正index)
"鄰居是公會成員": (True, 0, 1),
"PAYBACK": (True, 0, 0),
"1995青春報告": (True, 0, 0),
"Unsleep": (True, 0, 1),
"Backlight": (True, 0, 1),
"鬼夜曲": (True, 65, -3),
"披薩外送員與黃金宮": (True, 0, -1),
"No Moral": (True, 87, 0),
# "No Moral": (True, 0, 1),
"易地思之": (True, 0, 1),
"監禁倉庫": (True, 0, 1),
"棋子的世界": (True, 0, 1),
"夢龍傳": (True, 0, 1),
"融冰曲線": (True, 0, 1),
}
# current = "鄰居是公會成員"
# current = "Unsleep"
# current = "Backlight"
# current = "1995青春報告"
current = "鬼夜曲"
# current = "No Moral"
# current = "易地思之"
# current = "監禁倉庫"
# current = "披薩外送員與黃金宮"
# current = "PAYBACK"
# current = "夢龍傳"
# current = "棋子的世界"
# current = "融冰曲線"
bomtoon_path = Path('E:/') / 'Webtoon' / current if BOMTOON[current][0] else Path('E:/') / 'Temp_Webtoon' / current
def find_next_index(index_list, start_index):
if len(index_list) == 0:
return 0
index_list = sorted(set(index_list)) # dedupe and sort first
for i in range(index_list[0], index_list[-1]): # scan from the smallest to the largest value
if i not in index_list and i > start_index:
return i # return the first missing number
return index_list[-1] + 1
def create_dir(index, bomtoon_name):
name = str(index) + '.' + '' + str(index + BOMTOON[bomtoon_name][2]) + ''
# name = str(index) + '.' + '外傳 第' + str(index - 86) + '話'
path = Path(bomtoon_path) / name
path.mkdir(parents=True, exist_ok=True)
print(f"create {path}")
return path
def move_all_webps(dest_dir: Path):
download_dir = Path('C:/') / 'Users' / 'ithil' / 'Downloads'
if not dest_dir.exists():
dest_dir.mkdir(parents=True) # create the destination directory if it does not exist
for file in download_dir.iterdir():
if file.is_file() and file.suffix.lower() == ".webp": # only move .webp files
shutil.move(str(file), str(dest_dir)) # move the file into the new episode directory
index_list = []
for first_level_path in bomtoon_path.iterdir():
if first_level_path.is_dir():
index_list.append(int(first_level_path.name.split(".")[0]))
index = find_next_index(index_list, BOMTOON[current][1])
new_dir = create_dir(index, current)
move_all_webps(new_dir)
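For reference, a quick sanity check of find_next_index with hypothetical folder indices (it assumes the function defined above is in scope; these values are illustrative, not taken from the repository):

assert find_next_index([0, 1, 2, 4], 0) == 3      # first gap above the start index
assert find_next_index([0, 1, 2, 3, 4], 0) == 5   # no gap -> max + 1
assert find_next_index([], 0) == 0                # empty directory starts at 0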

25
bomtoon_search_tag.py Normal file
View File

@@ -0,0 +1,25 @@
from pathlib import Path
txt1 = Path('E:/') / "basic.txt"
txt2 = Path('E:/') / "bad.txt"
with txt1.open("r", encoding="utf-8") as f:
titles1 = [line.strip() for line in f if line.strip()]
with txt2.open("r", encoding="utf-8") as f:
titles2 = [line.strip() for line in f if line.strip()]
result = []
for title in titles1:
if title not in titles2:
result.append(title)
txt3 = Path('E:/') / "good.txt"
with txt3.open("r", encoding="utf-8") as f:
titles3 = [line.strip() for line in f if line.strip()]
result_new = []
for title in result:
if title in titles3:
result_new.append(title)
print(result_new)

50
check_raw.py Normal file
View File

@@ -0,0 +1,50 @@
from pathlib import Path
import os
def get_all_sub_paths(path: Path):
if not path.exists() or not path.is_dir():
print(f"路径 {path} 无效或不是一个目录")
return
sub_path_list = []
for travel_dir_path in path.iterdir():
if travel_dir_path.is_dir():
sub_path_list.append(travel_dir_path)
for first_level_path in travel_dir_path.iterdir():
if first_level_path.is_dir():
sub_path_list.append(first_level_path)
for second_level_path in first_level_path.iterdir():
if second_level_path.is_dir():
sub_path_list.append(second_level_path)
print("second level dir")
return sub_path_list
def traverse_directory(paths):
for path in paths:
dir_name = str(path).replace("D:\\Photo\\", "")
network_path = Path(r"\\TRUENAS\Media\Photo\Photos") / dir_name
file_count_local = 0
file_count_network = 0
for sub_local in path.iterdir():
if sub_local.is_file():
file_count_local +=1
for sub_network in network_path.iterdir():
if sub_network.is_file():
file_count_network +=1
print(f"目录: {path.name}")
if file_count_local == file_count_network:
print(f"文件总数: {file_count_local}")
else:
print("!!!!!!!!文件数量不一致!!!!!!!!")
# path = Path('D:/') / 'Photo'
# path = Path('//TRUENAS') / 'Media' / 'Photo' / 'Photos'
path = r"\\TRUENAS\Media\Photo\Photos"
all_paths = get_all_sub_paths(Path(path))
# for path in all_paths:
# print(path)
traverse_directory(all_paths)

34
compare_bomtoon_count.py Normal file
View File

@@ -0,0 +1,34 @@
from pathlib import Path
BOMTOON = "Unsleep"
PATH = Path('E:/') / 'Webtoon' / BOMTOON
folders = [
(int(p.name.split('.')[0]), p) # use the numeric prefix for sorting
for p in PATH.iterdir()
if p.is_dir() and p.name.split('.')[0].isdigit()
]
folders.sort(key=lambda x: x[0])
webp_counts = []
for index, path in folders:
count = len([f for f in path.iterdir() if f.is_file() and f.suffix.lower() == ".webp"])
webp_counts.append(count)
# print the final result
print(webp_counts)
txt = PATH / "div_counts.txt"
with txt.open("r") as f:
counts = [int(line.strip()) for line in f if line.strip()]
print(counts)
for i, (a, b) in enumerate(zip(counts, webp_counts)):
if a != b + 1:
print(f"❌ 第 {i} 项不同: list1 = {a}, list2 + 1 = {b + 1}")
else:
print(f"✅ 第 {i} 项相同: list1 = {a}, list2 + 1 = {b + 1}")

Binary file not shown.

Binary file not shown.

View File

@@ -3,25 +3,28 @@ from pathlib import Path
import shutil
from PIL import Image
from data.path_constant import ANDROID_ASSETS, DOWNLOAD_DIR, NETWORK_DIR
from data.special_list import BOMTOON
class WebtoonConverter:
def __init__(self, webtoon_path: Path):
self.webtoon_path = webtoon_path
self.webtoon_path_network = NETWORK_DIR / webtoon_path.name
self.thumbnail = ''
self.img_extensions = {'.png', '.jpg', '.jpeg', '.webp'}
def do_convert(self):
def do_convert(self):
if self.webtoon_path.is_dir() and self.has_new_episode():
print(self.webtoon_path)
self.copy_information()
self.copy_information()
for item_path in self.webtoon_path.iterdir():
if item_path.is_dir():
episode_path = item_path
if self.is_new_episode(episode_path):
if self.is_new_episode(episode_path) and self.is_not_empty(episode_path):
print(f"new episode: {episode_path}")
self.delete_over_width_image(episode_path)
if self.webtoon_path.name not in BOMTOON:
self.delete_over_width_image(episode_path)
self.concat_images(episode_path)
elif item_path.suffix.lower() in self.img_extensions:
elif item_path.name == self.thumbnail:
thumbnail_path = item_path
self.copy_thumbnail(thumbnail_path)
@@ -46,13 +49,15 @@ class WebtoonConverter:
local_information = json.load(json_file)
with open(info_path_network, "r", encoding='utf-8') as json_file:
network_information = json.load(json_file)
self.thumbnail = local_information["thumbnail"]
if (
local_information["title"] == network_information["title"] and
local_information["author"] == network_information["author"] and
local_information["tag"] == network_information["tag"] and
local_information["description"] == network_information["description"] and
local_information["thumbnail_name"] == network_information["thumbnail_name"]
local_information["thumbnail"] == network_information["thumbnail"]
):
copy_necessary = False
@@ -104,7 +109,7 @@ class WebtoonConverter:
"author": existing_information["author"],
"tag": tag,
"description": existing_information["description"],
"thumbnail_name": existing_information["thumbnail_name"]
"thumbnail": existing_information["thumbnail"]
}
with open(path, 'w', encoding='utf-8') as json_file:
@@ -121,15 +126,23 @@ class WebtoonConverter:
print(f"Source file '{thumbnail_path}' not found.")
def delete_over_width_image(self, episode_path: Path):
if self.webtoon_path.name != "地下城見聞錄[UO]":
for img_path in episode_path.iterdir():
if self._is_image_800(img_path):
img_path.unlink()
print(f"delete {img_path}")
def delete_bomtoon_000(self, episode_path: Path):
for img_path in episode_path.iterdir():
if self._is_image_800(img_path):
if img_path.name == "000.webp":
img_path.unlink()
print(f"delete {img_path}")
def _is_image_800(self, image_path: Path) -> bool:
try:
with Image.open(image_path) as img:
return img.width >= 800
return img.width >= 800 and img.width < 1080
except Exception as e:
print(f"Error opening image {image_path}: {e}")
return False
@@ -137,6 +150,9 @@ class WebtoonConverter:
def is_new_episode(self, episode_path: Path) -> bool:
episode_path_network = self.webtoon_path_network / episode_path.name
return not episode_path_network.exists()
def is_not_empty(self, episode_path: Path) -> bool:
return any(episode_path.iterdir())
def concat_images(self, episode_path: Path):
@@ -153,7 +169,7 @@ class WebtoonConverter:
with open(img_path, 'rb') as img_file:
img = Image.open(img_file)
img.load()
if total_height + img.height > 28800:
if total_height + img.height > 20000:
self.save_concatenated_image(result_images, episode_path_network, result_index)
result_index += 1
result_images = []
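The hunk above lowers the per-strip height cap from 28800 px to 20000 px before save_concatenated_image is called. Since concat_images is not shown in full in this diff, here is a minimal, hypothetical sketch of the same height-capped vertical stitching with Pillow; stitch_vertically and _save_strip are illustrative names, not the repository's methods.

from pathlib import Path
from PIL import Image

MAX_STRIP_HEIGHT = 20000  # the cap the diff above switches to

def stitch_vertically(image_paths: list[Path], out_dir: Path) -> None:
    """Stack images top-to-bottom, starting a new strip once the cap would be exceeded."""
    out_dir.mkdir(parents=True, exist_ok=True)
    batch, total_height, strip_index = [], 0, 0
    for path in sorted(image_paths):
        img = Image.open(path)
        img.load()
        if batch and total_height + img.height > MAX_STRIP_HEIGHT:
            _save_strip(batch, out_dir / f"{strip_index:03d}.webp")
            strip_index += 1
            batch, total_height = [], 0
        batch.append(img)
        total_height += img.height
    if batch:
        _save_strip(batch, out_dir / f"{strip_index:03d}.webp")

def _save_strip(batch: list[Image.Image], out_path: Path) -> None:
    # paste the batch onto one tall canvas and write it out
    width = max(img.width for img in batch)
    canvas = Image.new("RGB", (width, sum(img.height for img in batch)), "white")
    y = 0
    for img in batch:
        canvas.paste(img, (0, y))
        y += img.height
    canvas.save(out_path, quality=90)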

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,6 +1,7 @@
from dataclasses import dataclass
CLIENT_ID = 2155768539
# CLIENT_ID = 2155768539
CLIENT_ID = 3437754656
@dataclass
class Cookie:
@@ -8,18 +9,17 @@ class Cookie:
userID: str
ant: str
COOKIES = [
Cookie(name="ithi", userID="twnu7577d258564215", ant="MFivJ2uk0eyBd7G28D0_4WSk3QXdpHXxp1rkDaNXdCU~"),#ok
Cookie(name="ym", userID="twnu18c780bce30104", ant=""),
Cookie(name="83", userID="twnud41942de09830d", ant=""),
Cookie(name="bjl", userID="twnuf8429dee79c3d3", ant=""), #ok
Cookie(name="yy", userID="twnucbb3bdfce95b85", ant=""),
Cookie(name="hk", userID="twnuf622dd45e496ea", ant="ypc2JaDoKwfgghdheiFRCJvBjWid78M9djJooqOeMnY~"),
Cookie(name="aa", userID="twnuc0728a46c25738", ant=""), #ok
Cookie(name="bb", userID="twnu407ef7f1a046fd", ant="pSQPuFHTEVSztUuDcP4eboMqyY5La0Hb5JRWYILj1z8~"),
Cookie(name="wn", userID="twnu7322f207fb75ab", ant="4q3ArCVX_yx5fTq0kWWCanc60SXEnUU3QyuF0wys8Hc~")
]
COOKIE = Cookie(name="ithi", userID="koru685bfca187016d", ant="6omfFxw3u2ksAbZDZmLY8sFsQcGARzqc1lrRv-fiblg~")
COOKIE_NAME = 'ithi'
URL_TYPE = '1' # 1, 3, 7, m, p
TASK_TYPE = 'dc' # d, c
# search Network -> JS -> _app-... -> open in Sources panel
# case 0:
# if (r = t.userId,
# o = t.episodeId,
# i = t.timestamp,
# s = t.nonce,
# u = t.aid,
# l = t.zid,
# window.crypto && window.crypto.subtle) {
# e.next = 4;
# break
# }

View File

@@ -10,20 +10,20 @@ class KakaoRequest:
def get_episode_headers(self, ant):
return {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zht",
"Accept-Encoding": "gzip, deflate, br, zstd",
"Accept-Language": "ko",
"Cache-Control": "no-cache",
"Cookie": f"theme=dark; _kp_collector={self.app_id}; atn={ant}",
"Dnt": "1",
"Origin": "https://tw.kakaowebtoon.com",
"Origin": "https://webtoon.kakao.com/",
"Pragma": "no-cache",
"Referer": "https://tw.kakaowebtoon.com/",
"Referer": "https://webtoon.kakao.com/",
"Sec-Ch-Ua": '"Not A(Brand";v="99", "Microsoft Edge";v="121", "Chromium";v="121"',
"Sec-Ch-Ua-Mobile": "?0",
"Sec-Ch-Ua-Platform": '"Windows"',
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-site",
"Sec-Fetch-Dest": "script",
"Sec-Fetch-Mode": "no-cors",
"Sec-Fetch-Site": "cross-site",
"Sec-Gpc": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
}

View File

@@ -3,6 +3,7 @@ from pathlib import Path
DOWNLOAD_DIR = Path('E:/') / 'Webtoon'
NETWORK_DIR = Path('//TRUENAS') / 'Media' / 'Webtoon'
NETWORK_DIR = Path(r'\\TRUENAS\Media\Webtoon')
TEMP_DOWNLOAD_DIR = Path('E:/') / 'Temp_Webtoon'
DOWNLOAD_LIST_TXT = Path(DOWNLOAD_DIR) / 'download.txt'

View File

@@ -1,62 +1,94 @@
WEBTOON_NOT_PROCESSED = [
'陷阱', # completed
'情侶破壞者', # completed
'我獨自升級', # completed
'8級魔法師再臨', # completed
'婚姻這門生意', # completed
'婚姻這門生意[18+]', # completed
'守護女主角哥哥的方法', # completed on KakaoTW
'轉生後變成天才', # completed on KakaoTW
'兩個繼承人', # completed on KakaoTW
'患上不出道就會死的病', # completed on KakaoTW
'無法品味的男人', # completed on KakaoTW
'摘下偽善男主角的面具', # completed on KakaoTW
'皇家婚姻', # completed on KakaoTW
'鐵血家族獵犬的重生', # completed on KakaoTW
'重生百次的最強玩家', # completed on KakaoTW
'我獨自升級', # completed on KakaoTW
'結局創造者', # no longer updating
'黑影之夜', # season break
'狂魔重生記', # season break
'在魔法學院偽裝教師', # season break
'兔子與黑豹的共生關係', # paid / season break
'成為家人的方法', # download the rest via YDS
'惡女重生', # main story completed 2394
'試著改變故事類型吧', # main story completed 2494
'皇家婚姻', # main story completed 2952
'關於你的愛', # completed
'Netkama Punch!!!', # completed
'守護女主角哥哥的方法', # cancelled
'唯一的希望', # season break
'轉生後變成天才', # season break
"地下城見聞錄", # season break
"符文之子", # season break 4129
'兩個繼承人', # completed on KakaoTW 두 명의 상속인
'成為家人的方法', # serialized on kakaopage 가족이 되는 방법
'同情的形態', # 동정의 형태
]
WEBTOON_18_BONUS = [
'婚姻這門生意[18+]'
]
KAKAO_ONLY_MAIN_ACCOUNT = [
'152', # 骷髏士兵卷土重來
'167', # 試著改變故事類型吧 P
'222', # 成為我筆下男主角的妻子
'247', # 領主夫人罷工中
'322', # 婚姻這門生意 P
'330', # 同情的形態 P
'399', # 噬魔法師
'424', # 地下城見聞錄
'587', # Pickmeup
'591', # 武當奇俠
'736', # Boss大人請振作
'784', # 永遠的謊言
'787', # 魔法師的校園生存法則
'862', # 符文之子
]
KAKAO_TO_TW = {
"나 혼자만 레벨업": "我獨自升級",
"해골병사는 던전을 지키지 못했다":"骷髏士兵卷土重來",
"악역의 엔딩은 죽음뿐": "反派角色只有死亡結局",
"악녀는 두 번 산다": "惡女重生",
"장르를 바꿔보도록 하겠습니다": "試著改變故事類型吧",
"무당기협": "武當奇俠",
"내 남자 주인공의 아내가 되었다": "成為我筆下男主角的妻子",
"빌어먹을 환생":"轉生後變成天才",
"로열 메리지": "皇家婚姻",
"상냥한 남자주인공의 가면을 벗기면":"摘下偽善男主角的面具",
"맛볼 수 없는 남자": "無法品味的男人",
"데뷔 못 하면 죽는 병 걸림":"患上不出道就會死的病",
"여보, 나 파업할게요": "領主夫人罷工中",
"던전 견문록": "地下城見聞錄",
"픽 미 업!": "Pick me up!",
"철혈검가 사냥개의 회귀": "鐵血家族獵犬的重生",
"만렙 플레이어의 100번째 회귀": "重生百次的最強玩家",
"룬의 아이들": "符文之子"
}
KAKAO_1 = [
'41'
]
KAKAO_1 = {
"2358", # 骷髏士兵卷土重來
}
KAKAO_3 = [
'303', # 天才詐欺犯的雙重身分
]
KAKAO_3 = {
"2830", # 成為我筆下男主角的妻子
}
KAKAO_7 = [
'41', # 反派角色只有死亡結局
'116', # 惡女重生
'200', # 暴君就該配惡女
'233', # 少女賭神愛黛兒
]
KAKAO_C = {
}
KAKAO_PAY = [
'230', # 兔子與黑豹的共生關係
'516', # 結局創造者
]
KAKAO_W = {
"2383", # 反派角色只有死亡結局
"2499", # 武當奇俠
"2977", # 摘下偽善男主角的面具
"3008", # 患上不出道就會死的病
"3205", # Pick me up!
"3455", # 鐵血家族獵犬的重生
"3786", # 重生百次的最強玩家
}
KAKAO_P = {
"2998", # 無法品味的男人
"3024", # 領主夫人罷工中
}
BOMTOON = {
"Netkama Punch!!!", # done
"關於你的愛", # done
"PAYBACK", # every 10 days
"披薩外送員與黃金宮", # every 10 days
"No Moral", # every 10 days
"1995青春報告", # weekly
"Backlight", # weekly
"Unsleep", # weekly
"鄰居是公會成員", # weekly
"鬼夜曲", # weekly
"監禁倉庫", # pending
"棋子的世界", # pending
"易地思之", # pending
"夢龍傳", # pending
"融冰曲線", # pending
}
BOMTOON_TEMP = {
}

View File

@@ -21,19 +21,3 @@ def get_webtoon_headers():
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
}
def get_bomtoon_headers():
return {
"Accept": "image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, br, zstd",
"Accept-Language": "de-DE,de;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6,zh;q=0.5",
"Priority": "i",
"Referer": "https://www.bomtoon.tw/",
"Sec-Ch-Ua": '"Not_A Brand";v="8", "Chromium";v="126", "Google Chrome";v="126"',
"Sec-Ch-Ua-Mobile": "?0",
"Sec-Ch-Ua-Platform": '"Windows"',
"Sec-Fetch-Dest": "image",
"Sec-Fetch-Mode": "no-cors",
"Sec-Fetch-Site": "cross-site",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -1,62 +0,0 @@
from pathlib import Path
from typing import TYPE_CHECKING
from bs4 import BeautifulSoup
import httpx
import requests
from data.path_constant import DOWNLOAD_DIR
from data.webtoon_request import get_bomtoon_headers
from downloaders.downloader import Downloader
class Bomtoon(Downloader):
def __init__(self, webtoon_id):
super().__init__(webtoon_id)
self.headers = get_bomtoon_headers()
def _fetch_information(self, url):
res = requests.get(url, headers=self.headers)
if res.status_code == 200:
soup = BeautifulSoup(res.content, 'html.parser')
title = soup.find('title')
if title:
self.title = title.get_text().split('-')[0].strip()
author = soup.find('meta', attrs={'name': 'author'})
if author:
self.author = author.get('content')
description = soup.find('meta', attrs={'property': 'og:description'})
if description:
self.description = description.get('content')
tags = soup.find('meta', attrs={'name': 'keywords'})
if tags:
tags_list = tags.get('content').split(',')
if '連載' in tags_list[0]:
self.tag = tags_list[1]
else:
self.tag = tags_list[0]
self.thumbnail_url = ""
self.thumbnail_name = self.webtoon_id + '.jpg'
else:
print(f"fetch_information: {res.status_code}")
def _fetch_episode_information(self):
pass
def _get_episode_image_urls(self, episode_index) -> list[str]:
pass
async def _download_image(
self,
episode_path: Path,
url: str,
image_no: int
) -> None:
pass

View File

@@ -1,8 +1,7 @@
import base64
import hashlib
from contextlib import suppress
from WebtoonScraper.exceptions import MissingOptionalDependencyError
# from Cryptodome.Cipher import AES
class Decrypt :
def __init__(self, aid, episodeId, timestamp, nonce, userId, zid):
@@ -18,7 +17,7 @@ class Decrypt :
with suppress(AttributeError):
return cls.AES
try:
from Cryptodome.Cipher import AES
from Crypto.Cipher import AES
except ImportError:
raise ImportError("Missing optional dependency 'pycryptodomex'. Please install it to use this functionality.")

View File

@@ -2,7 +2,7 @@ import asyncio
import html
import json
from pathlib import Path
import pyfilename as pf
#import pyfilename as pf
import shutil
import time
from httpx import AsyncClient
@@ -53,11 +53,13 @@ class Downloader:
if information_path.exists():
with open(information_path, "r", encoding='utf-8') as json_file:
existing_information = json.load(json_file)
if (self.author == ""):
save_necessary = False
if (
existing_information["title"] == self.title and
existing_information["author"] == self.author and
existing_information["description"] == self.description and
existing_information["thumbnail_name"] == self.thumbnail_name
existing_information["thumbnail"] == self.thumbnail_name
):
save_necessary = False
if (save_necessary):
@@ -66,7 +68,7 @@ class Downloader:
"author": self.author,
"tag": self.tag,
"description": self.description,
"thumbnail_name": self.thumbnail_name
"thumbnail": self.thumbnail_name
}
with open(information_path, 'w', encoding='utf-8') as json_file:
@@ -91,10 +93,15 @@ class Downloader:
def _get_unobtained_episodes(self) -> list[int]:
downloaded_episodes = []
for dir in self.webtoon_path.glob('*'):
if dir.is_dir():
downloaded_episodes.append(int(dir.name.split('.')[0]))
if self.title == "反派角色只有死亡結局":
downloaded_episodes = [i - 2 for i in downloaded_episodes]
if self.title == "成為我筆下男主角的妻子" or self.title == "領主夫人罷工中":
downloaded_episodes = [i - 1 for i in downloaded_episodes]
if self.title in WEBTOON_18_BONUS:
count = len(self.readablities_index_list) - len(downloaded_episodes)
@@ -114,6 +121,10 @@ class Downloader:
for episode_index in episode_index_list:
episode_name = self.episode_titles[episode_index]
episode_title = self._get_safe_file_name(episode_index, episode_name)
if self.title == "反派角色只有死亡結局":
episode_title = self._get_safe_file_name(episode_index + 2, episode_name)
if self.title == "成為我筆下男主角的妻子" or self.title == "領主夫人罷工中":
episode_title = self._get_safe_file_name(episode_index + 1, episode_name)
# episode_title = self._get_safe_file_name(f"{episode_index}.{self.episode_titles[episode_index]}")
print(episode_title)
episode_path = self.webtoon_path / episode_title
@@ -165,4 +176,4 @@ class Downloader:
episode_title = f"{episode_index}.{episode_name}"
return pf.convert(html.unescape(episode_title))
return html.unescape(episode_title)

View File

@@ -10,6 +10,7 @@ import requests
from data.path_constant import DOWNLOAD_DIR, DOWNLOAD_LIST_TXT
from data.kakao_cookie import Cookie
from data.kakao_request import KakaoRequest
from data.special_list import KAKAO_TO_TW
from downloaders.decrypt import Decrypt
from downloaders.downloader import Downloader
@@ -27,7 +28,7 @@ class KakaoWebtoon(Downloader):
self.post_headers = self.kakaoRequest.get_post_headers(self.cookie.ant)
def verify_cookie(self) -> bool:
url = f"https://gateway.tw.kakaowebtoon.com/episode/v2/views/content-home/contents/{self.webtoon_id}/episodes?sort=-NO&offset=0&limit=30"
url = f"https://gateway.webtoon.kakao.com/episode/v2/views/content-home/contents/{self.webtoon_id}/episodes?sort=-NO&offset=0&limit=30"
res = requests.get(url, headers=self.episode_headers)
return res.status_code == 200
@@ -39,15 +40,22 @@ class KakaoWebtoon(Downloader):
description = soup.find('meta', attrs={'name': 'description'})
if description:
self.description = description.get('content')
self.description = ""
thumbnail_url = soup.find('meta', attrs={'property': 'og:image'})
if thumbnail_url:
self.thumbnail_url = thumbnail_url.get('content')
all_p = soup.find_all('p')
self.title = all_p[0].get_text()
self.author = all_p[1].get_text()
self.tag = all_p[2].get_text()
title = all_p[0].get_text()
if title in KAKAO_TO_TW:
self.title = KAKAO_TO_TW.get(title)
self.author = ""
self.tag = ""
else:
self.title = title
self.author = all_p[1].get_text()
self.tag = all_p[2].get_text()
self.thumbnail_name = self.webtoon_id + '.' + self.thumbnail_url.split('.')[-1]
def _fetch_episode_information(self):
@@ -56,7 +64,7 @@ class KakaoWebtoon(Downloader):
is_last: bool = False
webtoon_episodes_data = []
while not is_last:
url = f"https://gateway.tw.kakaowebtoon.com/episode/v2/views/content-home/contents/{self.webtoon_id}/episodes?sort=-NO&offset={offset}&limit={limit}"
url = f"https://gateway-kw.kakao.com/episode/v2/views/content-home/contents/{self.webtoon_id}/episodes?sort=-NO&offset={offset}&limit={limit}"
res = requests.get(url, headers=self.episode_headers)
if res.status_code == 200:
json_data = res.json()
@@ -88,15 +96,15 @@ class KakaoWebtoon(Downloader):
self.episode_ids = episode_ids
self.seo_ids = seo_ids
self.episode_titles = episode_titles
self.readablities_index_list = [index for index, value in enumerate(readablities) if value == True]
self.readablities_index_list = [index for index, value in enumerate(readablities) if value == True]
def _get_episode_image_urls(self, episode_index) -> list[tuple[str, bytes, bytes]] | None:
episode_id = self.episode_ids[episode_index]
url = f"https://gateway.tw.kakaowebtoon.com/episode/v1/views/viewer/episodes/{episode_id}/media-resources"
url = f"https://gateway-kw.kakao.com/episode/v1/views/viewer/episodes/{episode_id}/media-resources"
payload = self.kakaoRequest.get_payload(episode_id)
res = requests.post(url, headers=self.post_headers, json=payload)
data = res.json()["data"]
aid = data["media"]["aid"]

58
dungeon_odyssey.py Normal file
View File

@@ -0,0 +1,58 @@
from pathlib import Path
import json
from data.path_constant import DOWNLOAD_DIR, NETWORK_DIR, TEMP_DOWNLOAD_DIR
# DUNGEON_HOME = Path('E:/') / 'Webtoon' / '地下城見聞錄'
# for i in range (114, 115):
# name = str(i) + '.' + '第' + str(i + 1) + '话'
# path = Path(DUNGEON_HOME) / name
# path.mkdir(parents=True, exist_ok=True)
# for first_level_path in TEMP_DOWNLOAD_DIR.iterdir():
# if first_level_path.is_dir():
# data_path = first_level_path / 'information.json'
# with open(data_path, 'r', encoding='utf-8') as file:
# data = json.load(file)
# # 2. rename the key
# if 'thumbnail_name' in data:
# data['thumbnail'] = data.pop('thumbnail_name')
# # 3. save the modified JSON file
# with open(data_path, 'w', encoding='utf-8') as file:
# json.dump(data, file, ensure_ascii=False, indent=4)
# print("Key renamed and saved!")
# for first_level_path in DOWNLOAD_DIR.iterdir():
# if first_level_path.is_dir():
# print(first_level_path.name)
# HOME = Path('E:/') / 'Webtoon' / '鄰居是公會成員' / '65.第66話'
# for img_path in HOME.iterdir():
# print(img_path.name)
# for file in HOME.glob("*.webp"): # 仅遍历 .webp 文件
# new_name = f"{file.stem.split(' ')[0]}{file.suffix}"
# # new_name = f"{(int(file.stem) - 1):03d}{file.suffix}" # 转换为 3 位数
# new_path = file.with_name(new_name) # 生成新路径
# file.rename(new_path) # 重命名文件
# print(f"重命名: {file.name} → {new_name}")
# print("所有文件已重命名完毕!")
# HOME = Path('E:/') / 'Webtoon' / '鬼夜曲'
HOME = NETWORK_DIR / '鬼夜曲'
for episode in HOME.iterdir():
if episode.is_dir():
index = int(episode.name.split(".")[0])
if index > 65:
new_name = f"{index}.第{index - 3}"
new_path = HOME / new_name
episode.rename(new_path)

View File

@@ -2,7 +2,7 @@ from pathlib import Path
from data.path_constant import DOWNLOAD_DIR, NETWORK_DIR, TEMP_DOWNLOAD_DIR
from helper.missing_episode import get_missing_episodes
from helper.missing_images import get_missing_images, resize_and_overwrite
from prerequisite import delete_all_empty_episodes
from helper.prerequisite import delete_all_empty_episodes
# delete_all_empty_episodes(DOWNLOAD_DIR)
# delete_all_empty_episodes(NETWORK_DIR)

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

18
helper/get_kakao_list.py Normal file
View File

@@ -0,0 +1,18 @@
from data.special_list import KAKAO_1, KAKAO_3, KAKAO_C, KAKAO_P, KAKAO_W
def get_kakao_id_list(input):
kakao_list = []
if "1" in input:
kakao_list.extend(KAKAO_1)
if "3" in input:
kakao_list.extend(KAKAO_3)
if "c" in input:
kakao_list.extend(KAKAO_C)
if "w" in input:
kakao_list.extend(KAKAO_W)
if "p" in input:
kakao_list.extend(KAKAO_P)
return kakao_list
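A small usage illustration of get_kakao_id_list, using values from the KAKAO_* sets added in data/special_list.py above (ordering inside each group is unspecified because they are sets):

ids = get_kakao_id_list("1w")        # pulls in KAKAO_1 and KAKAO_W
# -> contains "2358" plus the KAKAO_W ids such as "2383" and "2499"
assert get_kakao_id_list("") == []   # no flags selected -> empty list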

View File

@@ -27,7 +27,7 @@ def delete_all_webtoons_without_episodes():
def get_download_list(path: Path):
url_list = []
try:
with open(path, 'r') as file:
with open(path, 'r', encoding='utf-8') as file:
for url in file:
if 'https://' in url:
url_list.append(url.strip())

101
main.py
View File

@@ -1,65 +1,50 @@
import argparse
from converter.converter import WebtoonConverter
from data.kakao_cookie import COOKIE_NAME, COOKIES, TASK_TYPE, URL_TYPE
from data.special_list import KAKAO_1, KAKAO_3, KAKAO_7, KAKAO_PAY, WEBTOON_NOT_PROCESSED, KAKAO_ONLY_MAIN_ACCOUNT
from data.path_constant import DOWNLOAD_DIR, DOWNLOAD_LIST_TXT
from downloaders.bomtoon import Bomtoon
from data.kakao_cookie import COOKIE
from data.special_list import WEBTOON_NOT_PROCESSED
from data.path_constant import DOWNLOAD_DIR, DOWNLOAD_LIST_TXT, TEMP_DOWNLOAD_DIR, TEMP_DOWNLOAD_LIST_TXT
from downloaders.kakao_webtoon import KakaoWebtoon
from prerequisite import get_download_list
from helper.get_kakao_list import get_kakao_id_list
from helper.prerequisite import get_download_list
from downloaders.webtoon_com import Webtoon
DOWNLOAD_WEBTOON = True
DOWNLOAD_WEBTOON = False
CONVERT_ALL = False
task = 'dc'
kakao_list = ""
kakao_ids = get_kakao_id_list(kakao_list)
valid_cookies = []
new_webtoons = []
def set_valid_cookie():
global valid_cookies
for cookie in COOKIES:
if cookie.name == COOKIE_NAME:
print(cookie.name)
valid_cookies.append(cookie)
def get_kakao_urls(inputs):
result = []
if '1' in inputs:
result += KAKAO_1
if '3' in inputs:
result += KAKAO_3
if '7' in inputs:
result += KAKAO_7
if 'm' in inputs:
result += KAKAO_ONLY_MAIN_ACCOUNT
if 'p' in inputs:
result += KAKAO_PAY
return result
def download():
if len(valid_cookies) > 0:
url_list = get_download_list(DOWNLOAD_LIST_TXT)
for url in url_list:
webtoon = None
if 'tw.kakaowebtoon.com' in url:
webtoon_id = url.split('/')[-1]
for cookie in valid_cookies:
if webtoon_id in get_kakao_urls(URL_TYPE):
webtoon = KakaoWebtoon(webtoon_id, cookie)
webtoon.download_webtoon(url, DOWNLOAD_DIR)
elif DOWNLOAD_WEBTOON and 'www.webtoons.com' in url:
webtoon_id = url.split('=')[1]
webtoon = Webtoon(webtoon_id)
webtoon.download_webtoon(url, DOWNLOAD_DIR)
elif 'www.bomtoon.tw' in url:
webtoon_id = url.split('/')[-1]
webtoon = Bomtoon(webtoon_id)
webtoon.download_webtoon(url, DOWNLOAD_DIR)
if webtoon is not None and webtoon.new_webtoon != "":
new_webtoons.append(webtoon.new_webtoon)
url_list = get_download_list(DOWNLOAD_LIST_TXT)
for url in url_list:
webtoon = None
if 'webtoon.kakao.com' in url:
webtoon_id = url.split('/')[-1]
if webtoon_id in kakao_ids:
webtoon = KakaoWebtoon(webtoon_id, COOKIE)
webtoon.download_webtoon(url, DOWNLOAD_DIR)
elif DOWNLOAD_WEBTOON and 'www.webtoons.com' in url:
webtoon_id = url.split('=')[1]
webtoon = Webtoon(webtoon_id)
webtoon.download_webtoon(url, DOWNLOAD_DIR)
if webtoon is not None and webtoon.new_webtoon != "":
new_webtoons.append(webtoon.new_webtoon)
print(new_webtoons)
# temp_url_list = get_download_list(TEMP_DOWNLOAD_LIST_TXT)
# for temp_url in temp_url_list:
# if 'webtoon.kakao.com' in temp_url:
# webtoon_id = temp_url.split('/')[-1]
# webtoon = KakaoWebtoon(webtoon_id, COOKIE)
# webtoon.download_webtoon(temp_url, TEMP_DOWNLOAD_DIR)
def convert():
for webtoon_path in DOWNLOAD_DIR.iterdir():
if len(new_webtoons) > 0:
@@ -87,13 +72,21 @@ def main():
convert()
if __name__ == "__main__":
set_valid_cookie()
task = TASK_TYPE
if 'd' in task:
download()
if 'c' in task:
# new_webtoons.append('1995青春報告')
# new_webtoons.append('Unsleep')
# new_webtoons.append('鬼夜曲')
# new_webtoons.append('Backlight')
# new_webtoons.append('鄰居是公會成員')
# new_webtoons.append('No Moral')
# new_webtoons.append('披薩外送員與黃金宮')
# new_webtoons.append('PAYBACK')
# new_webtoons.append('融冰曲線')
# new_webtoons.append('夢龍傳')
# new_webtoons.append('棋子的世界')
# new_webtoons.append('監禁倉庫')
# new_webtoons.append('易地思之')
convert()
print('MyWebtoon')
print('MyWebtoon')

View File

@@ -1,17 +1,29 @@
import os
from pathlib import Path
from data.path_constant import DOWNLOAD_DIR, NETWORK_DIR
from prerequisite import rename_episodes
from helper.prerequisite import rename_episodes
rename_episodes(DOWNLOAD_DIR)
rename_episodes(NETWORK_DIR)
# rename_episodes(DOWNLOAD_DIR)
# rename_episodes(NETWORK_DIR)
for first_level_path in NETWORK_DIR.iterdir():
if first_level_path.name == '怪力亂神':
for second_level_path in first_level_path.iterdir():
if "話." in second_level_path.name:
episode_name = second_level_path.name.replace("話.", "")
# for first_level_path in NETWORK_DIR.iterdir():
# if first_level_path.name == '怪力亂神':
# for second_level_path in first_level_path.iterdir():
# if "話." in second_level_path.name:
# episode_name = second_level_path.name.replace("話.", "話 ")
new_path = first_level_path / episode_name
print(second_level_path)
print(new_path)
os.rename(second_level_path, new_path)
# new_path = first_level_path / episode_name
# print(second_level_path)
# print(new_path)
# os.rename(second_level_path, new_path)
download_dir = Path('C:/') / 'Users' / 'ithil' / 'Downloads'
download_dir = Path('E:/') / 'Webtoon' / 'PAYBACK' / '0.序章'
for image in download_dir.iterdir():
name = image.name
# new_name = str(int(name.split('.')[0]) - 10) + '.webp'
new_name = f"{(int(image.stem) -10):03d}{image.suffix}"
new_path = image.with_name(new_name) # build the new path
image.rename(new_path) # rename the file
print(f"Renamed: {image.name} → {new_name}")