diff --git a/AutoCoverTool/online/monitor_worker.py b/AutoCoverTool/online/monitor_worker.py
new file mode 100644
index 0000000..7635d26
--- /dev/null
+++ b/AutoCoverTool/online/monitor_worker.py
@@ -0,0 +1,65 @@
+"""
+Monitor worker.
+If the most recent row whose state was set to 1 was updated more than 5 minutes ago,
+the consumer process is assumed to have failed and an alarm must be raised.
+"""
+import time
+import json
+import requests
+
+from online.common import *
+
+
+def feishu_send(feishu_url, msg, users):
+    data = {
+        "mobiles": users,
+        "msg": msg
+    }
+    ret = requests.post(feishu_url, json.dumps(data))
+
+
+def alarm(msg):
+    tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+    msg = "tm:{}|msg:{}".format(tm, msg)
+    feishu_url = "http://sg-prod-songbook-webmp-1:8000/api/feishu/people"
+    users = ["18810833785"]
+    feishu_send(feishu_url, msg, users)
+
+
+def monitor_check():
+    sql = "select update_time, song_id from av_db.svc_queue_table where state=1 order by update_time desc limit 1"
+    data = get_data_by_mysql(sql, banned_user_map)
+    if len(data) == 0:
+        return 1
+    up_time = int(data[0][0])
+    cur_time = int(time.time())
+    # The most recently claimed task was taken more than 5 minutes ago
+    if cur_time - up_time > 5 * 60:
+        return 2
+    return 0
+
+
+def monitor_process():
+    err_msg = {
+        1: "Alert! No task in the pitch-shift queue has been processed; please check immediately!",
+        2: "Alert! No task in the pitch-shift queue has been consumed for too long; please check immediately!"
+    }
+    while True:
+        print("\n\n-----------start------------------->>>>>")
+        time.sleep(5)
+        err = monitor_check()
+        if err != 0:
+            # Re-check a few times before alarming, to filter out transient states
+            for i in range(0, 3):
+                time.sleep(5)
+                err = monitor_check()
+            if err != 0:
+                alarm(err_msg[err])
+                print("-------alarm--err={}-------------->>>>>".format(err))
+                # Cool down for 5 minutes after sending an alarm
+                time.sleep(5 * 60)
+        print("-------success---------------->>>>>")
+
+
+if __name__ == '__main__':
+    monitor_process()
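The whole watchdog reduces to one staleness comparison against the queue's latest state=1 timestamp. A minimal sketch of that check in isolation; the 5-minute threshold comes from monitor_check() above, while is_queue_stale is a hypothetical helper, not part of the repo:

import time

STALE_AFTER_SEC = 5 * 60  # same threshold as monitor_check()

def is_queue_stale(latest_claim_ts, now=None):
    # True when the most recently claimed task is older than the threshold.
    now = int(time.time()) if now is None else now
    return now - int(latest_claim_ts) > STALE_AFTER_SEC

# A task claimed 6 minutes ago trips the alarm condition (return code 2 above).
assert is_queue_stale(int(time.time()) - 6 * 60)
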
diff --git a/AutoCoverTool/ref/so_vits_svc/train.py b/AutoCoverTool/ref/so_vits_svc/train.py
index ace0928..75e99cd 100644
--- a/AutoCoverTool/ref/so_vits_svc/train.py
+++ b/AutoCoverTool/ref/so_vits_svc/train.py
@@ -1,305 +1,306 @@
 import logging
 
 logging.getLogger('matplotlib').setLevel(logging.WARNING)
+logging.getLogger('numba').setLevel(logging.WARNING)
 
 import os
 import json
 import argparse
 import itertools
 import math
 import torch
 from torch import nn, optim
 from torch.nn import functional as F
 from torch.utils.data import DataLoader
 from torch.utils.tensorboard import SummaryWriter
 import torch.multiprocessing as mp
 import torch.distributed as dist
 from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.cuda.amp import autocast, GradScaler
 
 import commons
 import utils
 from data_utils import TextAudioSpeakerLoader, EvalDataLoader
 from models import (
     SynthesizerTrn,
     MultiPeriodDiscriminator,
 )
 from losses import (
     kl_loss,
     generator_loss, discriminator_loss, feature_loss
 )
 
 from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
 
 torch.backends.cudnn.benchmark = True
 global_step = 0
 
 
 # os.environ['TORCH_DISTRIBUTED_DEBUG'] = 'INFO'
 
 
 def main():
     """Assume Single Node Multi GPUs Training Only"""
     assert torch.cuda.is_available(), "CPU training is not allowed."
 
     hps = utils.get_hparams()
     n_gpus = torch.cuda.device_count()
     os.environ['MASTER_ADDR'] = 'localhost'
     os.environ['MASTER_PORT'] = hps.train.port
 
     mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps,))
 
 
 def run(rank, n_gpus, hps):
     print("CurRank:===>", rank)
     global global_step
     if rank == 0:
         logger = utils.get_logger(hps.model_dir)
         logger.info(hps)
         utils.check_git_hash(hps.model_dir)
         writer = SummaryWriter(log_dir=hps.model_dir)
         writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
 
     dist.init_process_group(backend='nccl', init_method='env://', world_size=n_gpus, rank=rank)
     torch.manual_seed(hps.train.seed)
     torch.cuda.set_device(rank)
 
     # Features taken from each audio segment:
     # hubert features, f0, magnitude spectrogram, the matching waveform (384 * hop_length),
     # speaker id [0]; each item covers 3840 ms of features
     train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps)
     train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True,
                               batch_size=hps.train.batch_size)
     if rank == 0:
         eval_dataset = EvalDataLoader(hps.data.validation_files, hps)
         eval_loader = DataLoader(eval_dataset, num_workers=1, shuffle=False,
                                  batch_size=1, pin_memory=False,
                                  drop_last=False)
 
     net_g = SynthesizerTrn(
         hps.data.filter_length // 2 + 1,
         hps.train.segment_size // hps.data.hop_length,
         **hps.model).cuda(rank)
     net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank)
     optim_g = torch.optim.AdamW(
         net_g.parameters(),
         hps.train.learning_rate,
         betas=hps.train.betas,
         eps=hps.train.eps)
     optim_d = torch.optim.AdamW(
         net_d.parameters(),
         hps.train.learning_rate,
         betas=hps.train.betas,
         eps=hps.train.eps)
 
     net_g = DDP(net_g, device_ids=[rank])  # , find_unused_parameters=True)
     net_d = DDP(net_d, device_ids=[rank])
 
     try:
         _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g,
                                                    optim_g)
         _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d,
                                                    optim_d)
         global_step = (epoch_str - 1) * len(train_loader)
         print("load checkpoint ok !")
     except:
         epoch_str = 1
         global_step = 0
 
     scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
     scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
 
     scaler = GradScaler(enabled=hps.train.fp16_run)
 
     for epoch in range(epoch_str, hps.train.epochs + 1):
         if rank == 0:
             train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d],
                                scaler, [train_loader, eval_loader], logger, [writer, writer_eval])
         else:
             train_and_evaluate(rank, epoch, hps, [net_g, net_d], [optim_g, optim_d], [scheduler_g, scheduler_d],
                                scaler, [train_loader, None], None, None)
         scheduler_g.step()
         scheduler_d.step()
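One subtlety in run() above: PyTorch schedulers count epochs from 0 and treat last_epoch=-1 as "not started", while epoch_str here is 1-based (the next epoch to run). Hence last_epoch=epoch_str - 2: for a fresh run (epoch_str == 1) this is the default -1, and on resume it is the 0-based index of the last completed epoch. A small sketch of the resume case; note that resuming requires initial_lr in the optimizer's param groups, which a checkpoint load normally restores with the optimizer state:

import torch

param = torch.nn.Parameter(torch.zeros(1))
optim = torch.optim.AdamW([param], lr=1e-4)

epoch_str = 11  # resuming: epochs 1..10 are done, epoch 11 runs next
for group in optim.param_groups:
    # Normally restored via optimizer.load_state_dict() when resuming.
    group.setdefault('initial_lr', group['lr'])

sched = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.999,
                                               last_epoch=epoch_str - 2)
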
 
 
 def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers):
     net_g, net_d = nets
     optim_g, optim_d = optims
     scheduler_g, scheduler_d = schedulers
     train_loader, eval_loader = loaders
     if writers is not None:
         writer, writer_eval = writers
 
     # train_loader.batch_sampler.set_epoch(epoch)
     global global_step
 
     net_g.train()
     net_d.train()
     for batch_idx, items in enumerate(train_loader):
         # hubert features, f0, magnitude spectrogram, waveform segment (384 * hop_length), speaker id [0]
         c, f0, spec, y, spk = items
         g = spk.cuda(rank, non_blocking=True)
         spec, y = spec.cuda(rank, non_blocking=True), y.cuda(rank, non_blocking=True)
         c = c.cuda(rank, non_blocking=True)
         f0 = f0.cuda(rank, non_blocking=True)
         """
         "sampling_rate": 32000,
         "filter_length": 1280,
         "hop_length": 320,
         "win_length": 1280,
         "n_mel_channels": 80,
         "mel_fmin": 0.0,
         "mel_fmax": null
         """
         mel = spec_to_mel_torch(
             spec,
             hps.data.filter_length,
             hps.data.n_mel_channels,
             hps.data.sampling_rate,
             hps.data.mel_fmin,
             hps.data.mel_fmax)
 
         with autocast(enabled=hps.train.fp16_run):
             # net_g inputs: hubert features, f0, magnitude spectrogram, speaker id, mel spectrogram
             # net_g outputs:
             # the raw waveform, the index of each sampled frame in the batch, the valid spectrogram
             # frame positions in the batch, z randomly sampled from the Gaussian encoded from the
             # spectrogram, z_p = z after the normalizing flow, the mean of the Gaussian from the
             # hubert branch (m_p) and its log-std (logs_p), and the mean (m_q) and log-std (logs_q)
             # obtained from the spectrogram plus voice information
             y_hat, ids_slice, z_mask, \
             (z, z_p, m_p, logs_p, m_q, logs_q) = net_g(c, f0, spec, g=g, mel=mel)
 
             y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length)
             y_hat_mel = mel_spectrogram_torch(
                 y_hat.squeeze(1),
                 hps.data.filter_length,
                 hps.data.n_mel_channels,
                 hps.data.sampling_rate,
                 hps.data.hop_length,
                 hps.data.win_length,
                 hps.data.mel_fmin,
                 hps.data.mel_fmax
             )
             y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size)  # slice
 
             # Discriminator
             y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach())
 
             with autocast(enabled=False):
                 loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g)
                 loss_disc_all = loss_disc
 
         optim_d.zero_grad()
         scaler.scale(loss_disc_all).backward()
         scaler.unscale_(optim_d)
         grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None)
         scaler.step(optim_d)
 
         with autocast(enabled=hps.train.fp16_run):
             # Generator
             y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat)
             with autocast(enabled=False):
                 # L1 loss between mel spectrograms, weighted by c_mel; the smaller the better
                 loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
                 # KL divergence. z_p: the spectrogram-side sample after the normalizing flow;
                 # logs_q: spectrogram-side log-std; m_p: hubert-side mean; logs_p: hubert-side
                 # log-std; z_mask: valid spectrogram frame positions in the batch
                 loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
                 # Feature matching: L1 distance between every discriminator feature map of y and y_hat
                 loss_fm = feature_loss(fmap_r, fmap_g)
                 loss_gen, losses_gen = generator_loss(y_d_hat_g)
                 loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl
 
         optim_g.zero_grad()
         scaler.scale(loss_gen_all).backward()
         scaler.unscale_(optim_g)
         grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None)
         scaler.step(optim_g)
         scaler.update()
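For reference, the kl_loss call above follows the VITS-style masked KL between the flow-mapped posterior sample and the prior predicted from the hubert branch. A hedged sketch of that computation, not a copy of this repo's losses.py, whose exact implementation may differ:

import torch

def kl_loss_sketch(z_p, logs_q, m_p, logs_p, z_mask):
    # KL(q || p) for diagonal Gaussians, evaluated at the flow output z_p:
    # log(sigma_p) - log(sigma_q) - 1/2 + (z_p - m_p)^2 / (2 * sigma_p^2)
    kl = logs_p - logs_q - 0.5
    kl += 0.5 * (z_p - m_p) ** 2 * torch.exp(-2.0 * logs_p)
    # Sum over valid (unpadded) frames only, then normalize by their count.
    return torch.sum(kl * z_mask) / torch.sum(z_mask)
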
 
         if rank == 0:
             if global_step % hps.train.log_interval == 0:
                 lr = optim_g.param_groups[0]['lr']
                 losses = [loss_disc, loss_gen, loss_fm, loss_mel, loss_kl]
                 logger.info('Train Epoch: {} [{:.0f}%]'.format(
                     epoch,
                     100. * batch_idx / len(train_loader)))
                 logger.info([x.item() for x in losses] + [global_step, lr])
 
                 scalar_dict = {"loss/g/total": loss_gen_all, "loss/d/total": loss_disc_all, "learning_rate": lr,
                                "grad_norm_d": grad_norm_d, "grad_norm_g": grad_norm_g}
                 scalar_dict.update({"loss/g/fm": loss_fm, "loss/g/mel": loss_mel, "loss/g/kl": loss_kl})
 
                 scalar_dict.update({"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)})
                 scalar_dict.update({"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)})
                 scalar_dict.update({"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)})
                 image_dict = {
                     "slice/mel_org": utils.plot_spectrogram_to_numpy(y_mel[0].data.cpu().numpy()),
                     "slice/mel_gen": utils.plot_spectrogram_to_numpy(y_hat_mel[0].data.cpu().numpy()),
                     "all/mel": utils.plot_spectrogram_to_numpy(mel[0].data.cpu().numpy()),
                 }
 
                 utils.summarize(
                     writer=writer,
                     global_step=global_step,
                     images=image_dict,
                     scalars=scalar_dict
                 )
 
             if global_step % hps.train.eval_interval == 0:
                 evaluate(hps, net_g, eval_loader, writer_eval)
                 utils.save_checkpoint(net_g, optim_g, hps.train.learning_rate, epoch,
                                       os.path.join(hps.model_dir, "G_{}.pth".format(global_step)))
                 utils.save_checkpoint(net_d, optim_d, hps.train.learning_rate, epoch,
                                       os.path.join(hps.model_dir, "D_{}.pth".format(global_step)))
         global_step += 1
 
     if rank == 0:
         logger.info('====> Epoch: {},{}'.format(epoch, global_step))
 
 
 def evaluate(hps, generator, eval_loader, writer_eval):
     generator.eval()
     image_dict = {}
     audio_dict = {}
     with torch.no_grad():
         for batch_idx, items in enumerate(eval_loader):
             c, f0, spec, y, spk = items
             g = spk[:1].cuda(0)
             spec, y = spec[:1].cuda(0), y[:1].cuda(0)
             c = c[:1].cuda(0)
             f0 = f0[:1].cuda(0)
             mel = spec_to_mel_torch(
                 spec,
                 hps.data.filter_length,
                 hps.data.n_mel_channels,
                 hps.data.sampling_rate,
                 hps.data.mel_fmin,
                 hps.data.mel_fmax)
             y_hat = generator.module.infer(c, f0, g=g, mel=mel)
 
             y_hat_mel = mel_spectrogram_torch(
                 y_hat.squeeze(1).float(),
                 hps.data.filter_length,
                 hps.data.n_mel_channels,
                 hps.data.sampling_rate,
                 hps.data.hop_length,
                 hps.data.win_length,
                 hps.data.mel_fmin,
                 hps.data.mel_fmax
             )
 
             audio_dict.update({
                 f"gen/audio_{batch_idx}": y_hat[0],
                 f"gt/audio_{batch_idx}": y[0]
             })
             image_dict.update({
                 "gen/mel": utils.plot_spectrogram_to_numpy(y_hat_mel[0].cpu().numpy()),
                 "gt/mel": utils.plot_spectrogram_to_numpy(mel[0].cpu().numpy())
             })
     utils.summarize(
         writer=writer_eval,
         global_step=global_step,
         images=image_dict,
         audios=audio_dict,
         audio_sampling_rate=hps.data.sampling_rate
     )
     generator.train()
 
 
 if __name__ == "__main__":
     main()
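The training step above uses the standard torch.cuda.amp recipe for alternating GAN updates: scale each loss, backward, unscale before gradient clipping, step each optimizer, then call scaler.update() once per iteration. A condensed, repo-independent sketch of that pattern; the LSGAN-style losses and single-output discriminator are purely illustrative:

import torch
from torch.cuda.amp import autocast, GradScaler

def gan_step(net_g, net_d, optim_g, optim_d, scaler, x, y, fp16=True):
    with autocast(enabled=fp16):
        y_hat = net_g(x)
        # Discriminator sees the generator output detached from the graph.
        loss_d = ((net_d(y) - 1) ** 2).mean() + (net_d(y_hat.detach()) ** 2).mean()
    optim_d.zero_grad()
    scaler.scale(loss_d).backward()
    scaler.unscale_(optim_d)  # so clipping operates on true-scale gradients
    torch.nn.utils.clip_grad_norm_(net_d.parameters(), 1.0)
    scaler.step(optim_d)

    with autocast(enabled=fp16):
        # Generator update flows through the freshly updated discriminator.
        loss_g = ((net_d(y_hat) - 1) ** 2).mean()
    optim_g.zero_grad()
    scaler.scale(loss_g).backward()
    scaler.unscale_(optim_g)
    torch.nn.utils.clip_grad_norm_(net_g.parameters(), 1.0)
    scaler.step(optim_g)
    scaler.update()  # exactly once per iteration, after both optimizer steps
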
"611752105030773582", - "611752105030558704", - "611752105030773578", - "611752105028990750", - "611752105028477539", - "611752105030598726", - "611752105030485212", - "611752105027484914", - "611752105028450908", - "611752105027460090", - "611752105030773576", - "611752105030517080", - "611752105030588074", - "611752105030611002", - "611752105028413629", - "611752105030773572", - "611752105028778443", - "611752105026976871", - "611752105028102453", - "611752105030773569", - "611752105030588011", - "611752105028800798", - "611752105028477538", - "611752105030629642", - "611752105030487027", - "611752105029054058", - "611752105028497813", - "611752105028510639", - "611752105030548037", - "611752105030548041", - "611752105030486911", - "611752105030548021", - "611752105030553757", - "611752105029558659", - "611752105030487494", - "611752105029381727", - "611752105028975151", - "611752105030534119", - "611752105028427015", - "611752105030532828", - "611752105030501864", - "611752105028408815", - "611752105030486255", - "611752105029646790", - "611752105028932362", - "611752105030502067", - "611752105028975148", - "611752105030517536", - "611752105030494683", - "611752105030630667", - "611752105030739634", - "611752105030547919", - "611752105030577557", - "611752105030547903", - "611752105030547881", - "611752105030739633", - "611752105030630646", - "611752105030630615", - "611752105030488624", - "611752105030739630", - "611752105030630518", - "611752105030602720", - "611752105030739631", - "611752105030538997", - "611752105030739628", - "611752105030739625", - "611752105030739626", - "611752105030739623", - "611752105030630616", - "611752105030630498", - "611752105030739620", - "611752105030739842", - "611752105030739840", - "611752105030739834", - "611752105030739836", - "611752105030739827", - "611752105030700227", - "611752105030739828", - "611752105030589605", - "611752105030679611", - "611752105030739826", - "611752105030739821", - "611752105030739824", - "611752105030739820", - "611752105030737424", - "611752105030739814", - "611752105030737420", - "611752105030735716", - "611752105030739816", - "611752105030739813", - "611752105030713239", - "611752105030723574", - "611752105030739809", - "611752105030662427", - "611752105030739807", - "611752105030737399", - "611752105028778437", - "611752105030739801", - "611752105030739804", - "611752105030739797", - "611752105030739684", - "611752105030739677", - "611752105030739671", - "611752105030739666", - "611752105030739655", - "611752105030577995", - "611752105030739636", - "611752105030631344", - "611752105030547885", - "611752105030631311", - "611752105030486286", - "611752105030488567", - "611752105030494668", - "611752105030532592", - "611752105030478368", - "611752105030487499", - "611752105030486028", - "611752105030486182", - "611752105030486156", - "611752105030738611", - "611752105030704793", - "611752105030494860", - "611752105030631392", - "611752105030631373", - "611752105030547882", - "611752105030534733", - "611752105030494745", - "611752105028848562", - "611752105030570248", - "611752105030550279", - "611752105030591221", - "611752105030485676", - "611752105030630876", - "611752105030674722", - "611752105030484183", - "611752105030630867", - "611752105030630871", - "611752105030580027", - "611752105030644708", - "611752105030630863", - "611752105030600361", - "611752105030600311", - "611752105030630857", - "611752105030630861", - "611752105030629310", - "611752105030630823", - "611752105030630460", - "611752105030484117", - 
"611752105030630853", - "611752105030630856", - "611752105030630851", - "611752105030630850", - "611752105030630845", - "611752105030630842", - "611752105030630843", - "611752105030630457", - "611752105030630453", - "611752105030630132", - "611752105030630805", - "611752105030630128", - "611752105030630477", - "611752105030630547", - "611752105030588820", - "611752105030483580", - "611752105030630449", - "611752105030630542", - "611752105030630445", - "611752105030630478", - "611752105030484394", - "611752105030630448", - "611752105030576611", - "611752105030598794", - "611752105030630835", - "611752105030588851", - "611752105030630348", - "611752105030630349", - "611752105030630126", - "611752105030544749", - "611752105030553750", - "611752105030553746", - "611752105030553736", - "611752105030553743", - "611752105030484121", - "611752105030544745", - "611752105030747660", - "611752105030591221", - "611752105030485517", - "611752105030478344", - "611752105030478343", - "611752105030750819", - "611752105030750817", - "611752105030750818", - "611752105030750816", - "611752105030554115", - "611752105030629748", - "611752105030750815", - "611752105030750878", - "611752105030746530", - "611752105026577483", - "611752105030550263", - "611752105030750881", - "611752105025034371", - "611752105030750876", - "611752105030750875", - "611752105030750390", - "611752105030717472", - "611752105030750873", - "611752105030750872", - "611752105028876417", - "611752105025741780", - "611752105030750870", - "611752105030750871", - "611752105022973103", - "611752105030750867", - "611752105022840505", - "611752105030750868", - "611752105022832271", - "611752105022759233", - "611752105022765990", - "611752105022833335", - "611752105030548385", - "611752105030750768", - "611752105029648557", - "611752105030554122", - "611752105030750767", - "611752105030487077", - "611752105022842161", - "611752105022756613", - "611752105030568505", - "611752105022740047", - "611752105022763524", - "611752105022741741", - "611752105030640738", - "611752105030533166", - "611752105030554085", - "611752105030532771", - "611752105022808705", - "611752105030750824", - "611752105030750823", - "611752105022779893", - "611752105030750825", - "611752105024973490", - "611752105030477418", - "611752105030750269", - "611752105022876788", - "611752105022772534", - "611752105022783545", - "611752105030750223", - "611752105028899742", - "611752105030630148", - "611752105030629674", - "611752105030593891", - "611752105030554114", - "611752105024546871", - "611752105030630393", - "611752105030719536", - "611752105030630396", - "611752105026827284", - "611752105030517863", - "611752105030719833", - "611752105022745668", - "611752105030749987", - "611752105026630782", - "611752105030749799", - "611752105025618640", - "611752105022783154", - "611752105022783707", - "611752105022767188", - "611752105022615726", - "611752105022759619", - "611752105030750135", - "611752105022785717", - "611752105022759615", - "611752105030517508", - "611752105030593875", - "611752105022849099", - "611752105030534533", - "611752105030546630", - "611752105024973489", - "611752105022735483", - "611752105030535129", - "611752105025453401", - "611752105030747599", - "611752105027626963", - "611752105030739531", - "611752105030535838", - "611752105022838265", - "611752105024748258", - "611752105024608148", - "611752105022824550", - "611752105022768795", - "611752105022784953", - "611752105030555729", - "611752105025979439", - "611752105030485353", - "611752105030485358", - 
"611752105030485344", - "611752105030485349", - "611752105030485351", - "611752105030485336", - "611752105030485342", - "611752105030485329", - "611752105030485338", - "611752105030485325", - "611752105030485331", - "611752105030485322", - "611752105030485316", - "611752105030485319", - "611752105030485317", - "611752105030485313", - "611752105030485309", - "611752105030485308", - "611752105030485307", - "611752105030776031", - "611752105030598568", - "611752105030485410", - "611752105030485397", - "611752105030478370", - "611752105030485402", - "611752105030485404", - "611752105030485399", - "611752105030485379", - "611752105030485383", - "611752105030485368", - "611752105030485395", - "611752105030485387", - "611752105030485385", - "611752105030776030", - "611752105030776028", - "611752105030485389", - "611752105030485378", - "611752105030485392", - "611752105030775926", - "611752105030775928", - "611752105030775920", - "611752105030775915", - "611752105030590328", - "611752105030775906", - "611752105030775902", - "611752105030775710", - "611752105030775707", - "611752105030775705", - "611752105030775695", - "611752105030775692", - "611752105030775685", - "611752105030775684", - "611752105030775683", - "611752105030775680", - "611752105030775677", - "611752105030775984", - "611752105030775980", - "611752105030775976", - "611752105030775969", - "611752105030775965", - "611752105030775966", - "611752105030775962", - "611752105030775959", - "611752105030775956", - "611752105030775951", - "611752105030775948", - "611752105030775947", - "611752105030775938", - "611752105030775934", - "611752105025482876", - "611752105030775932", - "611752105030776026", - "611752105030776015", - "611752105030776012", - "611752105030776009", - "611752105030776005", - "611752105030775998", - "611752105030775995", - "611752105030775990", - "611752105030775900", - "611752105022616654", - "611752105030775889", - "611752105030775884", - "611752105029290545", - "611752105022615521", - "611752105030775881", - "611752105029399015", - "611752105022613888", - "611752105022616835", - "611752105029381793", - "611752105030775864", - "611752105030579638", - "611752105030775853", - "611752105030775850", - "611752105026149436", - "611752105030775837", - "611752105030775836", - "611752105030775817", - "611752105030775813", - "611752105030775815", - "611752105030775809", - "611752105030775806", - "611752105030775802", - "611752105030775795", - "611752105030775790", - "611752105030775785", - "611752105030775782", - "611752105030775780", - "611752105030775775", - "611752105030775766", - "611752105030775768", - "611752105023363347", - "611752105030775764", - "611752105030775756", - "611752105022615516", - "611752105030706110", - "611752105030775751", - "611752105030775750", - "611752105030775746", - "611752105030775744", - "611752105030775741", - "611752105030775736", - "611752105022770764", - "611752105030775732", - "611752105030775734", - "611752105030775730", - "611752105030560609", - "611752105030775725", - "611752105030560604", - "611752105030775713" + "611752105030770437", + "611752105022704186", + "611752105030770446", + "611752105030660018", + "611752105023142842", + "611752105025980261", + "611752105030770459", + "611752105030770462", + "611752105030770468", + "611752105025957813", + "611752105030595983", + "611752105030770482", + "611752105030770497", + "611752105030659982", + "611752105030770510", + "611752105030770516", + "611752105030770523", + "611752105030770545", + "611752105030770549", + "611752105030770552", + 
"611752105030770557", + "611752105030770569", + "611752105030770577", + "611752105030770581", + "611752105030770585", + "611752105030770588", + "611752105030770591", + "611752105030770660", + "611752105022647066", + "611752105030770665", + "611752105030770669", + "611752105030770675", + "611752105030770681", + "611752105030563555", + "611752105030770688", + "611752105030770694", + "611752105030770697", + "611752105030770701", + "611752105030770706", + "611752105030776532", + "611752105030776552", + "611752105030776562", + "611752105030776580", + "611752105030563422", + "611752105030776597", + "611752105030776600", + "611752105030776606", + "611752105030776616", + "611752105030776624", + "611752105030776627", + "611752105030780547", + "611752105030780562", + "611752105030780604", + "611752105030780617", + "611752105030780621", + "611752105030780630", + "611752105030780636", + "611752105030780640", + "611752105030780645", + "611752105030780648", + "611752105030780650", + "611752105030780655", + "611752105030780657", + "611752105030780664", + "611752105030780667", + "611752105030780676", + "611752105030780750", + "611752105030780760", + "611752105030780766", + "611752105030780787", + "611752105030780795", + "611752105030780801", + "611752105030780805", + "611752105030780806", + "611752105030780814", + "611752105030780833", + "611752105030780840", + "611752105030596072", + "611752105030780846", + "611752105030596128", ] ban = deepcopy(banned_user_map) ban["db"] = "av_db" for sid in arr: url = get_url_by_song_id(sid) if url is not None: print("out,{},{}".format(url, sid)) # 只要没有对外输出过,均可以向其中填充 sql = "select song_id from svc_queue_table where song_id={} and (song_src in (3, 4, 5) and state=2)".format(sid) data = get_data_by_mysql(sql, ban) if len(data) == 0: tm = int(time.time()) - sql = "replace INTO svc_queue_table (song_id, url, create_time, update_time, song_src) VALUES ({}, \"{}\",{}, {}, 4)" \ + sql = "replace INTO svc_queue_table (song_id, url, create_time, update_time, song_src) VALUES ({}, \"{}\",{}, {}, 5)" \ .format(sid, url, tm, tm) update_db(sql, ban) def get_data_from_song(): sql = """ select tb1.song_id, tb1.recording_count from ( select song_id,recording_count from starmaker.song where song_src in (108,109) and song_status = 2 order by recording_count desc ) as tb1 left join ( select song_id from av_db.svc_queue_table ) as tb2 on tb1.song_id = tb2.song_id where tb2.song_id is null order by tb1.recording_count desc limit 5000 """ ban = deepcopy(banned_user_map) ban_v1 = deepcopy(banned_user_map) ban["db"] = "starmaker_musicbook" ban_v1["db"] = "av_db" data = get_data_by_mysql(sql, ban) for dt in data: sid = dt[0] url = get_url_by_song_id(sid) if url is not None: print("out,{},{}".format(url, sid)) tm = int(time.time()) sql = "insert INTO svc_queue_table (song_id, url, create_time, update_time, song_src) VALUES ({}, \"{}\", {}, {}, 3)" \ .format(sid, url, tm, tm) update_db(sql, ban_v1) if __name__ == '__main__': # get_diff_song() # get_data_from_song() process()