diff --git a/RVC_CLI.ipynb b/RVC_CLI.ipynb
index 0f9ab92..8d4fd00 100644
--- a/RVC_CLI.ipynb
+++ b/RVC_CLI.ipynb
@@ -134,13 +134,14 @@
 "\n",
 "input_path = \"/content/drive/MyDrive/vocals.wav\" #@param {type:\"string\"}\n",
 "output_path = \"/content/output.wav\"\n",
-"f0method = \"rmvpe\" #@param [\"pm\", \"dio\", \"crepe\", \"crepe-tiny\", \"harvest\", \"rmvpe\"] {allow-input: false}\n",
+"f0method = \"rmvpe\" #@param [\"pm\", \"dio\", \"crepe\", \"crepe-tiny\", \"harvest\", \"rmvpe\", \"hybrid[rmvpe+fcpe]\"] {allow-input: false}\n",
 "f0up_key = 0 #@param {type:\"slider\", min:-24, max:24, step:0}\n",
 "filter_radius = 0 #@param {type:\"slider\", min:0, max:10, step:0}\n",
 "index_rate = 0.0 #@param {type:\"slider\", min:0.0, max:1.0, step:0.1}\n",
 "hop_length = 1 # @param {type:\"slider\", min:1, max:512, step:0}\n",
 "split_audio = False #@param{type:\"boolean\"}\n",
-"!python main.py infer {f0up_key} {filter_radius} {index_rate} {hop_length} {f0method} \"{input_path}\" \"{output_path}\" \"{pth_file}\" \"{index_file}\" {split_audio}\n",
+"autotune = False #@param{type:\"boolean\"}\n",
+"!python main.py infer {f0up_key} {filter_radius} {index_rate} {hop_length} {f0method} \"{input_path}\" \"{output_path}\" \"{pth_file}\" \"{index_file}\" {split_audio} {autotune}\n",
 "\n",
 "from IPython.display import Audio, display, clear_output\n",
 "clear_output()\n",
diff --git a/requirements.txt b/requirements.txt
index 8912a64..4e481f6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -41,5 +41,6 @@ tensorboard
 # Miscellaneous
 ffmpy==0.3.1
 git+https://github.com/lanpa/tensorboardX
+git+https://github.com/IAHispano/gdown
 edge-tts==6.1.9
 flask
diff --git a/rvc/lib/tools/gdown.py b/rvc/lib/tools/gdown.py
deleted file mode 100644
index 5f13a56..0000000
--- a/rvc/lib/tools/gdown.py
+++ /dev/null
@@ -1,402 +0,0 @@
-from __future__ import print_function
-
-import json
-import os
-import os.path as osp
-import re
-import warnings
-from six.moves import urllib_parse
-import shutil
-import sys
-import tempfile
-import textwrap
-import time
-
-import requests
-import six
-import tqdm
-
-
-def indent(text, prefix):
-    def prefixed_lines():
-        for line in text.splitlines(True):
-            yield (prefix + line if line.strip() else line)
-
-    return "".join(prefixed_lines())
-
-
-class FileURLRetrievalError(Exception):
-    pass
-
-
-class FolderContentsMaximumLimitError(Exception):
-    pass
-
-
-def parse_url(url, warning=True):
-    """Parse URLs especially for Google Drive links.
-
-    file_id: ID of file on Google Drive.
-    is_download_link: Flag if it is download link of Google Drive.
- """ - parsed = urllib_parse.urlparse(url) - query = urllib_parse.parse_qs(parsed.query) - is_gdrive = parsed.hostname in ["drive.google.com", "docs.google.com"] - is_download_link = parsed.path.endswith("/uc") - - if not is_gdrive: - return is_gdrive, is_download_link - - file_id = None - if "id" in query: - file_ids = query["id"] - if len(file_ids) == 1: - file_id = file_ids[0] - else: - patterns = [ - r"^/file/d/(.*?)/(edit|view)$", - r"^/file/u/[0-9]+/d/(.*?)/(edit|view)$", - r"^/document/d/(.*?)/(edit|htmlview|view)$", - r"^/document/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$", - r"^/presentation/d/(.*?)/(edit|htmlview|view)$", - r"^/presentation/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$", - r"^/spreadsheets/d/(.*?)/(edit|htmlview|view)$", - r"^/spreadsheets/u/[0-9]+/d/(.*?)/(edit|htmlview|view)$", - ] - for pattern in patterns: - match = re.match(pattern, parsed.path) - if match: - file_id = match.groups()[0] - break - - if warning and not is_download_link: - warnings.warn( - "You specified a Google Drive link that is not the correct link " - "to download a file. You might want to try `--fuzzy` option " - "or the following url: {url}".format( - url="https://drive.google.com/uc?id={}".format(file_id) - ) - ) - - return file_id, is_download_link - - -CHUNK_SIZE = 512 * 1024 # 512KB -home = osp.expanduser("~") - - -def get_url_from_gdrive_confirmation(contents): - url = "" - m = re.search(r'href="(\/uc\?export=download[^"]+)', contents) - if m: - url = "https://docs.google.com" + m.groups()[0] - url = url.replace("&", "&") - return url - - m = re.search(r'href="/open\?id=([^"]+)"', contents) - if m: - url = m.groups()[0] - uuid = re.search(r'(.*)
-    if m:
-        error = m.groups()[0]
-        raise FileURLRetrievalError(error)
-
-    raise FileURLRetrievalError(
-        "Cannot retrieve the public link of the file. "
-        "You may need to change the permission to "
-        "'Anyone with the link', or have had many accesses."
-    )
-
-
-def _get_session(proxy, use_cookies, return_cookies_file=False):
-    sess = requests.session()
-
-    sess.headers.update(
-        {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6)"}
-    )
-
-    if proxy is not None:
-        sess.proxies = {"http": proxy, "https": proxy}
-        print("Using proxy:", proxy, file=sys.stderr)
-
-    # Load cookies if exists
-    cookies_file = osp.join(home, ".cache/gdown/cookies.json")
-    if osp.exists(cookies_file) and use_cookies:
-        with open(cookies_file) as f:
-            cookies = json.load(f)
-        for k, v in cookies:
-            sess.cookies[k] = v
-
-    if return_cookies_file:
-        return sess, cookies_file
-    else:
-        return sess
-
-
-def download(
-    url=None,
-    output=None,
-    quiet=False,
-    proxy=None,
-    speed=None,
-    use_cookies=True,
-    verify=True,
-    id=None,
-    fuzzy=True,
-    resume=False,
-    format=None,
-):
-    """Download file from URL.
-
-    Parameters
-    ----------
-    url: str
-        URL. Google Drive URL is also supported.
-    output: str
-        Output filename. Default is basename of URL.
-    quiet: bool
-        Suppress terminal output. Default is False.
-    proxy: str
-        Proxy.
-    speed: float
-        Download byte size per second (e.g., 256KB/s = 256 * 1024).
-    use_cookies: bool
-        Flag to use cookies. Default is True.
-    verify: bool or string
-        Either a bool, in which case it controls whether the server's TLS
-        certificate is verified, or a string, in which case it must be a path
-        to a CA bundle to use. Default is True.
-    id: str
-        Google Drive's file ID.
-    fuzzy: bool
-        Fuzzy extraction of Google Drive's file Id. Default is False.
-    resume: bool
-        Resume the download from existing tmp file if possible.
-        Default is False.
-    format: str, optional
-        Format of Google Docs, Spreadsheets and Slides. Default is:
-            - Google Docs: 'docx'
-            - Google Spreadsheet: 'xlsx'
-            - Google Slides: 'pptx'
-
-    Returns
-    -------
-    output: str
-        Output filename.
-    """
-    if not (id is None) ^ (url is None):
-        raise ValueError("Either url or id has to be specified")
-    if id is not None:
-        url = "https://drive.google.com/uc?id={id}".format(id=id)
-
-    url_origin = url
-
-    sess, cookies_file = _get_session(
-        proxy=proxy, use_cookies=use_cookies, return_cookies_file=True
-    )
-
-    gdrive_file_id, is_gdrive_download_link = parse_url(url, warning=not fuzzy)
-
-    if fuzzy and gdrive_file_id:
-        # overwrite the url with fuzzy match of a file id
-        url = "https://drive.google.com/uc?id={id}".format(id=gdrive_file_id)
-        url_origin = url
-        is_gdrive_download_link = True
-
-    while True:
-        res = sess.get(url, stream=True, verify=verify)
-
-        if url == url_origin and res.status_code == 500:
-            # The file could be Google Docs or Spreadsheets.
-            url = "https://drive.google.com/open?id={id}".format(
-                id=gdrive_file_id
-            )
-            continue
-
-        if res.headers["Content-Type"].startswith("text/html"):
-            m = re.search("