Spaces:
Sleeping
Sleeping
Create fetchYoutubeSubtitle.py
Browse files- fetchYoutubeSubtitle.py +102 -0
fetchYoutubeSubtitle.py
ADDED
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
import requests
|
4 |
+
|
5 |
+
|
6 |
+
from langchain.text_splitter import RecursiveCharacterTextSplitter, TokenTextSplitter
|
7 |
+
from typing import List, Dict
|
8 |
+
|
9 |
+
# Base URL of the third-party subtitle-extraction service; the extract API call
# and the per-track subtitle download paths are both resolved against this host.
SUBTITLE_DOWNLOADER_URL = 'https://savesubs.com'
|
10 |
+
|
11 |
+
def fetchYoutubeSubtitleUrls(video_id):
    """Ask the savesubs.com extraction endpoint for a video's subtitle tracks.

    Args:
        video_id: The YouTube video id (the ``v=`` query-string parameter).

    Returns:
        A dict with keys:
            'title':        the video title, or None on failure.
            'subtitleList': the list of subtitle-format dicts reported by
                            savesubs ('formats'), or None on failure.
            'error':        an error-message string, or None on success.
    """
    logger = logging.getLogger(__name__)
    headers = {
        'accept': 'application/json, text/plain, */*',
        'accept-language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
        'cache-control': 'no-cache',
        'Content-Type': 'application/json; charset=UTF-8',
        'pragma': 'no-cache',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
        # Per-session auth token issued by savesubs; supplied via the environment.
        'x-auth-token': os.environ.get('SAVESUBS_X_AUTH_TOKEN', ''),
        'x-requested-domain': 'savesubs.com',
        'X-requested-with': 'xmlhttprequest',
        'sec-ch-ua': '"Google Chrome";v="111", "Not(A:Brand";v="8", "Chromium";v="111"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': 'Linux',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'authority': 'savesubs.com',
        'origin': 'https://savesubs.com',
        'referer': f'https://savesubs.com/process?url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3D{video_id}'
    }
    data = {
        'data': {'url': f'https://www.youtube.com/watch?v={video_id}'}
    }
    session = requests.Session()

    # Optional outbound proxy (e.g. when YouTube/savesubs is unreachable directly).
    proxy = os.environ.get('PROXY', None)
    if proxy:
        session.proxies = {
            "http": proxy,
            "https": proxy,
        }

    response = session.post(SUBTITLE_DOWNLOADER_URL + '/action/extract', json=data, headers=headers)
    if not response.ok:
        # Log both sides' headers to help diagnose auth/blocking issues.
        # BUG FIX: `logger` was never defined, and requests has no
        # `headers.raw()` or `statusText` (those are JS fetch APIs); use
        # dict(response.headers) and response.reason instead.
        logger.error("request headers: %s", headers)
        logger.error("response headers: %s", dict(response.headers))
        return {'title': None, 'subtitleList': None, 'error': response.reason}

    try:
        # BUG FIX: don't shadow the stdlib `json` module with the parsed body.
        payload = response.json().get('response', {})
        logger.info("subtitle url payload: %s", payload)
        # Include 'error' on success too, so callers can read it unconditionally.
        return {'title': payload.get('title'), 'subtitleList': payload.get('formats'), 'error': None}
    except Exception as error:
        logger.error("failed to parse savesubs response: %s", error)
        return {'title': None, 'subtitleList': None, 'error': str(error)}
|
57 |
+
|
58 |
+
|
59 |
+
async def find(subtitleList: List[Dict], args: Dict) -> Dict:
    """Return the first entry of *subtitleList* whose value for the (single)
    key given in *args* equals the corresponding value, or None if no entry
    matches."""
    key, wanted = next(iter(args.items()))
    for entry in subtitleList:
        if entry[key] == wanted:
            return entry
    return None
|
62 |
+
|
63 |
+
async def fetchYoutubeSubtitle(videoId: str) -> Dict:
    """Fetch, download and token-chunk the subtitles of a YouTube video.

    Picks the best subtitle track (human English, then auto-generated
    English, then zh-CN, then the first track available), downloads it in
    SRT format, and splits it into ~2048-token documents.

    Args:
        videoId: The YouTube video id.

    Returns:
        {'title': <video title>, 'docs': <split documents>} on success;
        {'title': <title or None>, 'docs': None, 'error': <message>} when no
        subtitle track could be obtained.
    """
    # BUG FIX: fetchYoutubeSubtitleUrls is a plain function; awaiting it
    # raised "object dict can't be used in 'await' expression".
    result = fetchYoutubeSubtitleUrls(videoId)
    title = result["title"]
    subtitleList = result["subtitleList"]
    # BUG FIX: the success path of fetchYoutubeSubtitleUrls may omit 'error';
    # use .get() so a successful fetch does not raise KeyError.
    error = result.get("error", "")

    if not subtitleList:
        return {"title": title, "docs": None, "error": error}

    # Track preference order; "English (auto" is a deliberate prefix match for
    # savesubs' "English (auto-generated)" label — reproduced as-is.
    betterSubtitle = (
        await find(subtitleList, {"quality": "English"})
        or await find(subtitleList, {"quality": "English (auto"})
        or await find(subtitleList, {"quality": "zh-CN"})
        or subtitleList[0]
    )

    subtitleUrl = f"{SUBTITLE_DOWNLOADER_URL}{betterSubtitle['url']}?ext=srt"

    session = requests.Session()
    proxy = os.environ.get('PROXY', None)
    if proxy:
        session.proxies = {
            "http": proxy,
            "https": proxy,
        }
    # BUG FIX: the original referenced an undefined name `url`.
    response = session.get(subtitleUrl)

    # BUG FIX: requests responses have no .blob() (that is the JS fetch API),
    # and SRTLoader expects a file path — persist the SRT payload to a
    # temporary file first, and clean it up afterwards.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".srt", delete=False, encoding="utf-8") as srt_file:
        srt_file.write(response.text)
        srt_path = srt_file.name

    try:
        loader = SRTLoader(srt_path)
        # BUG FIX: the Python TokenTextSplitter uses snake_case kwargs
        # (encoding_name / chunk_size), not the JS camelCase ones.
        splitter = TokenTextSplitter(
            encoding_name="cl100k_base",
            chunk_size=2048,
        )
        # BUG FIX: the Python API is the synchronous load_and_split(),
        # not an awaitable loadAndSplit().
        docs = loader.load_and_split(splitter)
    finally:
        os.remove(srt_path)

    return {"title": title, "docs": docs}
|