diff --git a/README.md b/README.md
index 312df8bece..c3b1ce625a 100644
--- a/README.md
+++ b/README.md
@@ -85,6 +85,10 @@
 - [🔗 最新结果](#最新结果)
 - [⚙️ 配置参数](#配置)
 - [🚀 快速上手](#快速上手)
+  - [工作流](#工作流)
+  - [命令行](#命令行)
+  - [GUI软件](#GUI-软件)
+  - [Docker](#Docker)
 - [📖 详细教程](./docs/tutorial.md)
 - [🗓️ 更新日志](./CHANGELOG.md)
 - [❤️ 赞赏](#赞赏)
@@ -188,17 +192,16 @@ https://cdn.jsdelivr.net/gh/Guovin/iptv-api@gd/source.json
 | source_file | 模板文件路径 | config/demo.txt |
 | speed_weight | 速率权重值(所有权重值总和应为 1) | 0.5 |
 | subscribe_num | 结果中偏好的订阅源接口数量 | 3 |
-| subscribe_urls | 订阅源,请输入订阅链接(支持 txt 与 m3u 链接),多个链接以逗号分隔 | |
 | url_keywords_blacklist | 接口关键字黑名单,用于过滤含特定字符的接口 | |
 | urls_limit | 单个频道接口数量 | 10 |

 ## 快速上手

-### 方式一:工作流
+### 工作流

 Fork 本项目并开启工作流更新,具体步骤请见[详细教程](./docs/tutorial.md)

-### 方式二:命令行
+### 命令行

 ```shell
 pip install pipenv
@@ -220,7 +223,7 @@ pipenv run dev
 pipenv run service
 ```

-### 方式三:GUI 软件
+### GUI 软件

 1. 下载[IPTV-API 更新软件](https://github.com/Guovin/iptv-api/releases),打开软件,点击更新,即可完成更新
@@ -232,7 +235,7 @@ pipenv run ui
 IPTV-API更新软件

-### 方式四:Docker
+### Docker

 - iptv-api(完整版本):性能要求较高,更新速度较慢,稳定性、成功率高;修改配置 open_driver = False 可切换到 Lite 版本运行模式(推荐酒店源、组播源、关键字搜索使用此版本)
diff --git a/README_en.md b/README_en.md
index c96520445c..8cc38408d8 100644
--- a/README_en.md
+++ b/README_en.md
@@ -85,6 +85,10 @@
 - [🔗 Latest results](#latest-results)
 - [⚙️ Config parameter](#Config)
 - [🚀 Quick Start](#quick-start)
+  - [Workflow](#workflow)
+  - [Command Line](#command-line)
+  - [GUI Software](#gui-software)
+  - [Docker](#docker)
 - [📖 Detailed Tutorial](./docs/tutorial_en.md)
 - [🗓️ Changelog](./CHANGELOG.md)
 - [❤️ Appreciate](#appreciate)
@@ -188,18 +192,17 @@ https://cdn.jsdelivr.net/gh/Guovin/iptv-api@gd/source.json
 | source_file | Template file path | config/demo.txt |
 | speed_weight | Speed weight value (the sum of all weight values should be 1) | 0.5 |
 | subscribe_num | The number of preferred subscribe source interfaces in the results | 3 |
-| subscribe_urls | Subscription source, please enter the subscription link (supports txt and m3u links), multiple links should be separated by commas | |
 | url_keywords_blacklist | Interface keyword blacklist, used to filter out interfaces containing specific characters | |
 | urls_limit | Number of interfaces per channel | 10 |

 ## Quick Start

-### Method 1: Workflow
+### Workflow

 Fork this project and initiate workflow updates, detailed steps are available at [Detailed Tutorial](./docs/tutorial_en.md)

-### Method 2: Command Line
+### Command Line

 ```shell
 pip install pipenv
@@ -221,7 +224,7 @@ Start service:
 pipenv run service
 ```

-### Method 3: GUI Software
+### GUI Software

 1. Download [IPTV-API update software](https://github.com/Guovin/iptv-api/releases), open the software, click update to complete the update
@@ -234,7 +237,7 @@ pipenv run ui
 IPTV-API update software

-### Method 4: Docker
+### Docker

 - iptv-api (Full version): Higher performance requirements, slower update speed, high stability and success rate. Set open_driver = False to switch to the lite running mode (recommended for hotel sources, multicast sources, and online search)
diff --git a/config/config.ini b/config/config.ini
index 409596804f..6be19b1807 100644
--- a/config/config.ini
+++ b/config/config.ini
@@ -28,7 +28,6 @@ ipv6_num = 5
 open_m3u_result = True
 url_keywords_blacklist = epg.pw,skype.serv00.net,iptv.yjxfz.com,live-hls-web-ajb.getaj.net,live.goodiptv.club,hc73k3dhwo5gfkt.wcetv.com,stream1.freetv.fun,zw9999.cnstream.top,zsntlqj.xicp.net
 open_subscribe = True
-subscribe_urls = https://iptv.b2og.com/txt/fmml_ipv6.txt,https://ghp.ci/raw.githubusercontent.com/suxuang/myIPTV/main/ipv6.m3u,https://live.zbds.top/tv/iptv6.txt,https://live.zbds.top/tv/iptv4.txt,https://live.fanmingming.com/tv/m3u/ipv6.m3u,https://ghp.ci/https://raw.githubusercontent.com/joevess/IPTV/main/home.m3u8,https://aktv.top/live.txt,http://175.178.251.183:6689/live.txt,https://ghp.ci/https://raw.githubusercontent.com/kimwang1978/collect-tv-txt/main/merged_output.txt,https://m3u.ibert.me/txt/fmml_dv6.txt,https://m3u.ibert.me/txt/o_cn.txt,https://m3u.ibert.me/txt/j_iptv.txt,https://ghp.ci/https://raw.githubusercontent.com/xzw832/cmys/main/S_CCTV.txt,https://ghp.ci/https://raw.githubusercontent.com/xzw832/cmys/main/S_weishi.txt,https://ghp.ci//https://raw.githubusercontent.com/asdjkl6/tv/tv/.m3u/整套直播源/测试/整套直播源/l.txt,https://ghp.ci//https://raw.githubusercontent.com/asdjkl6/tv/tv/.m3u/整套直播源/测试/整套直播源/kk.txt
 open_multicast = True
 open_multicast_foodie = True
 open_multicast_fofa = True
diff --git a/config/subscribe.txt b/config/subscribe.txt
new file mode 100644
index 0000000000..81ddce1127
--- /dev/null
+++ b/config/subscribe.txt
@@ -0,0 +1,19 @@
+# 这是订阅源列表,每行一个订阅地址
+# This is a list of subscription sources, with one subscription address per line
+
+https://iptv.b2og.com/txt/fmml_ipv6.txt
+https://ghp.ci/raw.githubusercontent.com/suxuang/myIPTV/main/ipv6.m3u
+https://live.zbds.top/tv/iptv6.txt
+https://live.zbds.top/tv/iptv4.txt
+https://live.fanmingming.com/tv/m3u/ipv6.m3u
+https://ghp.ci/https://raw.githubusercontent.com/joevess/IPTV/main/home.m3u8
+https://aktv.top/live.txt
+http://175.178.251.183:6689/live.txt
+https://ghp.ci/https://raw.githubusercontent.com/kimwang1978/collect-tv-txt/main/merged_output.txt
+https://m3u.ibert.me/txt/fmml_dv6.txt
+https://m3u.ibert.me/txt/o_cn.txt
+https://m3u.ibert.me/txt/j_iptv.txt
+https://ghp.ci/https://raw.githubusercontent.com/xzw832/cmys/main/S_CCTV.txt
+https://ghp.ci/https://raw.githubusercontent.com/xzw832/cmys/main/S_weishi.txt
+https://ghp.ci//https://raw.githubusercontent.com/asdjkl6/tv/tv/.m3u/整套直播源/测试/整套直播源/l.txt
+https://ghp.ci//https://raw.githubusercontent.com/asdjkl6/tv/tv/.m3u/整套直播源/测试/整套直播源/kk.txt
diff --git a/docs/config.md b/docs/config.md
index 903c16c88a..7a38a5b6f7 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -48,6 +48,5 @@
 | source_file | 模板文件路径 | config/demo.txt |
 | speed_weight | 速率权重值(所有权重值总和应为 1) | 0.5 |
 | subscribe_num | 结果中偏好的订阅源接口数量 | 3 |
-| subscribe_urls | 订阅源,请输入订阅链接(支持 txt 与 m3u 链接),多个链接以逗号分隔 | |
 | url_keywords_blacklist | 接口关键字黑名单,用于过滤含特定字符的接口 | |
 | urls_limit | 单个频道接口数量 | 10 |
\ No newline at end of file
diff --git a/docs/config_en.md b/docs/config_en.md
index 181916b800..dca62473f4 100644
--- a/docs/config_en.md
+++ b/docs/config_en.md
@@ -48,6 +48,5 @@
 | source_file | Template file path | config/demo.txt |
 | speed_weight | Speed weight value (the sum of all weight values should be 1) | 0.5 |
 | subscribe_num | The number of preferred subscribe source interfaces in the results | 3 |
-| subscribe_urls | Subscription source, please enter the subscription link (supports txt and m3u links), multiple links should be separated by commas | |
 | url_keywords_blacklist | Interface keyword blacklist, used to filter out interfaces containing specific characters | |
 | urls_limit | Number of interfaces per channel | 10 |
\ No newline at end of file
diff --git a/main.py b/main.py
index d3ba78fcac..477a63a127 100644
--- a/main.py
+++ b/main.py
@@ -30,7 +30,7 @@
     format_interval,
     check_ipv6_support,
     resource_path,
-    get_whitelist_urls
+    get_urls_from_file
 )


@@ -71,8 +71,8 @@ async def visit_page(self, channel_names=None):
                 continue
             if config.open_method[setting]:
                 if setting == "subscribe":
-                    subscribe_urls = config.subscribe_urls
-                    whitelist_urls = get_whitelist_urls()
+                    subscribe_urls = get_urls_from_file(constants.subscribe_path)
+                    whitelist_urls = get_urls_from_file(constants.whitelist_path)
                     task = asyncio.create_task(
                         task_func(subscribe_urls, whitelist=whitelist_urls, callback=self.update_progress)
                     )
@@ -107,8 +107,9 @@ def get_urls_len(self, filter=False):

     async def main(self):
         try:
+            user_final_file = config.final_file
+            main_start_time = time()
             if config.open_update:
-                main_start_time = time()
                 self.channel_items = get_channel_items()
                 channel_names = [
                     name
@@ -157,7 +158,6 @@
                     callback=lambda: self.pbar_update(name="写入结果"),
                 )
                 self.pbar.close()
-                user_final_file = config.final_file
                 update_file(user_final_file, constants.result_path)
             if config.open_use_old_result:
                 if open_sort:
@@ -170,9 +170,8 @@
                 ) as file:
                     pickle.dump(channel_data_cache, file)
                 convert_to_m3u()
-                total_time = format_interval(time() - main_start_time)
                 print(
-                    f"🥳 Update completed! Total time spent: {total_time}. Please check the {user_final_file} file!"
+                    f"🥳 Update completed! Total time spent: {format_interval(time() - main_start_time)}. Please check the {user_final_file} file!"
                 )
             if self.run_ui:
                 open_service = config.open_service
@@ -180,7 +179,7 @@
                 tip = (
                     f"✅ 服务启动成功{service_tip}"
                     if open_service and config.open_update == False
-                    else f"🥳 更新完成, 耗时: {total_time}, 请检查{user_final_file}文件{service_tip}"
+                    else f"🥳 更新完成, 耗时: {format_interval(time() - main_start_time)}, 请检查{user_final_file}文件{service_tip}"
                 )
                 self.update_progress(
                     tip,
diff --git a/tkinter_ui/subscribe.py b/tkinter_ui/subscribe.py
index b79305579b..323c857a91 100644
--- a/tkinter_ui/subscribe.py
+++ b/tkinter_ui/subscribe.py
@@ -1,7 +1,10 @@
 import tkinter as tk
+from tkinter import scrolledtext
 from tkinter import ttk
+
+import utils.constants as constants
 from utils.config import config
-from tkinter import scrolledtext
+from utils.tools import get_urls_from_file


 class SubscribeUI:
@@ -39,7 +42,7 @@ def init_ui(self, root):
         self.subscribe_urls_text.pack(
             side=tk.LEFT, padx=4, pady=8, expand=True, fill=tk.BOTH
         )
-        self.subscribe_urls_text.insert(tk.END, ",".join(config.subscribe_urls))
+        self.subscribe_urls_text.insert(tk.END, ",".join(get_urls_from_file(constants.subscribe_path)))
         self.subscribe_urls_text.bind("<KeyRelease>", self.update_subscribe_urls)

     def update_open_subscribe(self):
diff --git a/updates/subscribe/request.py b/updates/subscribe/request.py
index 9c12a3dfc2..c6a9620ea7 100644
--- a/updates/subscribe/request.py
+++ b/updates/subscribe/request.py
@@ -31,7 +31,7 @@ async def get_channels_by_subscribe_urls(
     Get the channels by subscribe urls
     """
     subscribe_results = {}
-    subscribe_urls_len = len(urls if urls else config.subscribe_urls)
+    subscribe_urls_len = len(urls)
     pbar = tqdm_asyncio(
         total=subscribe_urls_len,
         desc=f"Processing subscribe {'for multicast' if multicast else ''}",
@@ -134,7 +134,7 @@ def process_subscribe_channels(subscribe_info):
     with ThreadPoolExecutor(max_workers=100) as executor:
         futures = [
             executor.submit(process_subscribe_channels, subscribe_url)
-            for subscribe_url in (urls if urls else config.subscribe_urls)
+            for subscribe_url in urls
         ]
         for future in futures:
             subscribe_results = merge_objects(subscribe_results, future.result())
diff --git a/utils/channel.py b/utils/channel.py
index ab62f9694c..5178fc4caa 100644
--- a/utils/channel.py
+++ b/utils/channel.py
@@ -26,8 +26,8 @@
     remove_cache_info,
     resource_path,
     write_content_into_txt,
-    get_whitelist_urls,
-    get_whitelist_name_urls,
+    get_urls_from_file,
+    get_name_urls_from_file,
     get_logger,
 )

@@ -72,7 +72,7 @@ def get_channel_items():
     """
     user_source_file = resource_path(config.source_file)
     channels = defaultdict(lambda: defaultdict(list))
-    whitelist = get_whitelist_name_urls()
+    whitelist = get_name_urls_from_file(constants.whitelist_path)
     whitelist_len = len(list(whitelist.keys()))
     if whitelist_len:
         print(f"Found {whitelist_len} channel in whitelist")
@@ -549,7 +549,7 @@ async def process_sort_channel_list(data, ipv6=False, callback=None):
     """
     ipv6_proxy = None if (not config.open_ipv6 or ipv6) else constants.ipv6_proxy
     need_sort_data = copy.deepcopy(data)
-    whitelist_urls = get_whitelist_urls()
+    whitelist_urls = get_urls_from_file(constants.whitelist_path)
     if whitelist_urls:
         print(f"Found {len(whitelist_urls)} whitelist urls")
     process_nested_dict(need_sort_data, seen=set(whitelist_urls), flag=r"cache:(.*)", force_str="!")
diff --git a/utils/config.py b/utils/config.py
index 416c42a794..93c22202b1 100644
--- a/utils/config.py
+++ b/utils/config.py
@@ -305,16 +305,6 @@ def multicast_page_num(self):
     def online_search_page_num(self):
         return config.getint("Settings", "online_search_page_num", fallback=1)

-    @property
-    def subscribe_urls(self):
-        return [
-            url.strip()
-            for url in self.config.get("Settings", "subscribe_urls", fallback="").split(
-                ","
-            )
-            if url.strip()
-        ]
-
     @property
     def delay_weight(self):
         return self.config.getfloat("Settings", "delay_weight", fallback=0.5)
diff --git a/utils/constants.py b/utils/constants.py
index 2ca97d4477..bf6463dde6 100644
--- a/utils/constants.py
+++ b/utils/constants.py
@@ -6,6 +6,8 @@

 whitelist_path = os.path.join(config_path, "whitelist.txt")

+subscribe_path = os.path.join(config_path, "subscribe.txt")
+
 result_path = os.path.join(output_path, "result_new.txt")

 cache_path = os.path.join(output_path, "cache.pkl")
diff --git a/utils/tools.py b/utils/tools.py
index 260c84b781..8b38b4fedc 100644
--- a/utils/tools.py
+++ b/utils/tools.py
@@ -542,15 +542,25 @@ def get_name_url(content, pattern, multiline=False, check_url=True):
     return channels


-def get_whitelist_urls():
+def get_real_path(path) -> str:
     """
-    Get the whitelist urls
+    Get the real path
     """
-    whitelist_file = resource_path(constants.whitelist_path)
+    dir_path, file = os.path.split(path)
+    user_real_path = os.path.join(dir_path, 'user_' + file)
+    real_path = user_real_path if os.path.exists(user_real_path) else path
+    return real_path
+
+
+def get_urls_from_file(path: str) -> list:
+    """
+    Get the urls from file
+    """
+    real_path = get_real_path(resource_path(path))
     urls = []
     url_pattern = constants.url_pattern
-    if os.path.exists(whitelist_file):
-        with open(whitelist_file, "r", encoding="utf-8") as f:
+    if os.path.exists(real_path):
+        with open(real_path, "r", encoding="utf-8") as f:
             for line in f:
                 match = re.search(url_pattern, line)
                 if match:
@@ -558,15 +568,15 @@ def get_whitelist_urls():
     return urls


-def get_whitelist_name_urls():
+def get_name_urls_from_file(path: str) -> dict[str, list]:
     """
-    Get the whitelist name urls
+    Get the name and urls from file
     """
-    whitelist_file = resource_path(constants.whitelist_path)
+    real_path = get_real_path(resource_path(path))
     name_urls = defaultdict(list)
     txt_pattern = constants.txt_pattern
-    if os.path.exists(whitelist_file):
-        with open(whitelist_file, "r", encoding="utf-8") as f:
+    if os.path.exists(real_path):
+        with open(real_path, "r", encoding="utf-8") as f:
             for line in f:
                 name_url = get_name_url(line, pattern=txt_pattern)
                 if name_url and name_url[0]:
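
For reference, the patch moves the subscription list from the comma-separated subscribe_urls key into config/subscribe.txt (one address per line) and reads it through the new get_real_path and get_urls_from_file helpers in utils/tools.py, so a user_ prefixed copy of the file takes precedence when it exists. The Python sketch below mirrors that lookup so the override behaviour can be tried in isolation; it is a simplified illustration, not the project's code: the URL regex is a stand-in for constants.url_pattern, and the resource_path resolution step is omitted.

import os
import re

# Stand-in for constants.url_pattern; an assumption for this sketch only.
URL_PATTERN = re.compile(r"https?://[^\s,，]+")


def get_real_path(path: str) -> str:
    # Mirrors utils/tools.get_real_path: prefer a "user_" prefixed sibling file when it exists.
    dir_path, file = os.path.split(path)
    user_real_path = os.path.join(dir_path, "user_" + file)
    return user_real_path if os.path.exists(user_real_path) else path


def get_urls_from_file(path: str) -> list[str]:
    # Mirrors utils/tools.get_urls_from_file: collect the first URL found on each line;
    # comment and blank lines never match the pattern and are skipped.
    real_path = get_real_path(path)
    urls = []
    if os.path.exists(real_path):
        with open(real_path, "r", encoding="utf-8") as f:
            for line in f:
                match = URL_PATTERN.search(line)
                if match:
                    urls.append(match.group())
    return urls


if __name__ == "__main__":
    # config/user_subscribe.txt, if present, silently overrides config/subscribe.txt.
    print(get_urls_from_file(os.path.join("config", "subscribe.txt")))

Run from the repository root this prints the parsed source list; dropping a config/user_subscribe.txt alongside config/subscribe.txt overrides the tracked defaults without editing them, which is the same mechanism the whitelist loader now shares.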