
diff --git a/AutoCoverTool/ref/online/voice_class_online.py b/AutoCoverTool/ref/online/voice_class_online.py
index 6041c94..eab43a7 100644
--- a/AutoCoverTool/ref/online/voice_class_online.py
+++ b/AutoCoverTool/ref/online/voice_class_online.py
@@ -1,420 +1,420 @@
"""
男女声分类在线工具
1 转码为16bit单声道
2 均衡化
3 模型分类
"""
import os
import sys
import librosa
import shutil
import logging
import time
import torch.nn.functional as F
import numpy as np
from model import *
# from common import bind_kernel
logging.basicConfig(level=logging.INFO)
os.environ["LRU_CACHE_CAPACITY"] = "1"
# torch.set_num_threads(1)
# bind_kernel(1)
"""
临时用一下,全局使用的变量
"""
transcode_time = 0
vb_time = 0
mfcc_time = 0
predict_time = 0
"""
错误码
"""
ERR_CODE_SUCCESS = 0 # success
ERR_CODE_NO_FILE = -1 # file does not exist
ERR_CODE_TRANSCODE = -2 # transcoding failed
ERR_CODE_VOLUME_BALANCED = -3 # loudness equalization failed
ERR_CODE_FEATURE_TOO_SHORT = -4 # feature sequence too short
"""
常量
"""
FRAME_LEN = 128
MFCC_LEN = 80
-EBUR128_BIN = "/opt/soft/bin/standard_audio_no_cut"
+EBUR128_BIN = "/data/gpu_env_common/res/av_svc/bin/standard_audio_no_cut"
# EBUR128_BIN = "/Users/yangjianli/linux/opt/soft/bin/standard_audio_no_cut"
GENDER_FEMALE = 0
GENDER_MALE = 1
GENDER_OTHER = 2
"""
通用函数
"""
def exec_cmd(cmd):
ret = os.system(cmd)
if ret != 0:
return False
return True
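# For reference only (not part of this change): an equivalent helper built on
# subprocess, which also returns True only when the command exits with code 0.
# def exec_cmd_subprocess(cmd):
#     import subprocess
#     return subprocess.call(cmd, shell=True) == 0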
"""
业务需要的函数
"""
def get_one_mfcc(file_url):
st = time.time()
data, sr = librosa.load(file_url, sr=16000)
if len(data) < 512:
return []
mfcc = librosa.feature.mfcc(y=data, sr=sr, n_fft=512, hop_length=256, n_mfcc=MFCC_LEN)
mfcc = mfcc.transpose()
print("get_one_mfcc:spend_time={}".format(time.time() - st))
global mfcc_time
mfcc_time += time.time() - st
return mfcc
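# Worked example of the resulting feature rate (follows from the parameters
# above): sr=16000 with hop_length=256 gives 16000 / 256 = 62.5 MFCC frames
# per second, so one FRAME_LEN=128 window covers about 128 / 62.5 ≈ 2.05 s
# of audio, and each frame carries MFCC_LEN=80 coefficients.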
def volume_balanced(src, dst):
st = time.time()
cmd = "{} {} {}".format(EBUR128_BIN, src, dst)
logging.info(cmd)
exec_cmd(cmd)
if not os.path.exists(dst):
logging.error("volume_balanced:cmd={}".format(cmd))
print("volume_balanced:spend_time={}".format(time.time() - st))
global vb_time
vb_time += time.time() - st
return os.path.exists(dst)
def transcode(src, dst):
st = time.time()
cmd = "ffmpeg -loglevel quiet -i {} -ar 16000 -ac 1 {}".format(src, dst)
logging.info(cmd)
exec_cmd(cmd)
if not os.path.exists(dst):
logging.error("transcode:cmd={}".format(cmd))
print("transcode:spend_time={}".format(time.time() - st))
global transcode_time
transcode_time += time.time() - st
return os.path.exists(dst)
class VoiceClass:
def __init__(self, music_voice_pure_model, music_voice_no_pure_model, gender_pure_model, gender_no_pure_model):
"""
四个模型
:param music_voice_pure_model: 分辨纯净人声/其他
:param music_voice_no_pure_model: 分辨有人声/其他
:param gender_pure_model: 纯净人声分辨男女
:param gender_no_pure_model: 有人声分辨男女
"""
st = time.time()
self.device = "cpu"
self.batch_size = 256
self.music_voice_pure_model = load_model(MusicVoiceV5Model, music_voice_pure_model, self.device)
self.music_voice_no_pure_model = load_model(MusicVoiceV5Model, music_voice_no_pure_model, self.device)
self.gender_pure_model = load_model(MobileNetV2Gender, gender_pure_model, self.device)
self.gender_no_pure_model = load_model(MobileNetV2Gender, gender_no_pure_model, self.device)
logging.info("load model ok ! spend_time={}".format(time.time() - st))
def batch_predict(self, model, features):
st = time.time()
scores = []
with torch.no_grad():
for i in range(0, len(features), self.batch_size):
cur_data = features[i:i + self.batch_size].to(self.device)
predicts = model(cur_data)
predicts_score = F.softmax(predicts, dim=1)
scores.extend(predicts_score.cpu().numpy())
ret = np.array(scores)
global predict_time
predict_time += time.time() - st
return ret
def predict_pure(self, filename, features):
scores = self.batch_predict(self.music_voice_pure_model, features)
new_features = []
for idx, score in enumerate(scores):
if score[0] > 0.5: # non-vocal
continue
new_features.append(features[idx].numpy())
# Too few vocal segments; cannot proceed
# These thresholds can be tuned
new_feature_len = len(new_features)
new_feature_rate = len(new_features) / len(features)
if new_feature_len < 4 or new_feature_rate < 0.4:
logging.warning(
"filename={}|predict_pure|other|len={}|rate={}".format(filename, new_feature_len, new_feature_rate)
)
return GENDER_OTHER, -1
new_features = torch.from_numpy(np.array(new_features))
scores = self.batch_predict(self.gender_pure_model, new_features)
f_avg = sum(scores[:, 0]) / len(scores)
m_avg = sum(scores[:, 1]) / len(scores)
female_rate = f_avg / (f_avg + m_avg)
if female_rate > 0.65:
return GENDER_FEMALE, female_rate
if female_rate < 0.12:
return GENDER_MALE, female_rate
logging.warning(
"filename={}|predict_pure|other|len={}|rate={}".format(filename, new_feature_len, new_feature_rate)
)
return GENDER_OTHER, female_rate
def predict_no_pure(self, filename, features):
scores = self.batch_predict(self.music_voice_no_pure_model, features)
new_features = []
for idx, score in enumerate(scores):
if score[0] > 0.5: # non-vocal
continue
new_features.append(features[idx].numpy())
# Too few vocal segments; cannot proceed
# These thresholds can be tuned
new_feature_len = len(new_features)
new_feature_rate = len(new_features) / len(features)
if new_feature_len < 4 or new_feature_rate < 0.4:
logging.warning(
"filename={}|predict_no_pure|other|len={}|rate={}".format(filename, new_feature_len, new_feature_rate)
)
return GENDER_OTHER, -1
new_features = torch.from_numpy(np.array(new_features))
scores = self.batch_predict(self.gender_no_pure_model, new_features)
f_avg = sum(scores[:, 0]) / len(scores)
m_avg = sum(scores[:, 1]) / len(scores)
female_rate = f_avg / (f_avg + m_avg)
if female_rate > 0.75:
return GENDER_FEMALE, female_rate
if female_rate < 0.1:
return GENDER_MALE, female_rate
logging.warning(
"filename={}|predict_no_pure|other|len={}|rate={}".format(filename, new_feature_len, new_feature_rate)
)
return GENDER_OTHER, female_rate
def predict(self, filename, features):
st = time.time()
new_features = []
for i in range(FRAME_LEN, len(features), FRAME_LEN):
new_features.append(features[i - FRAME_LEN: i])
new_features = torch.from_numpy(np.array(new_features))
gender, rate = self.predict_pure(filename, new_features)
if gender == GENDER_OTHER:
logging.info("start no pure process...")
return self.predict_no_pure(filename, new_features)
print("predict|spend_time={}".format(time.time() - st))
return gender, rate
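# Worked example of the decision rule above (numbers are illustrative): if the
# pure-vocal gender model yields column means f_avg=0.30 and m_avg=0.10, then
# female_rate = 0.30 / (0.30 + 0.10) = 0.75 > 0.65, so predict_pure returns
# (GENDER_FEMALE, 0.75). A female_rate between 0.12 and 0.65 makes predict_pure
# return GENDER_OTHER, in which case predict() falls back to the no-pure models,
# whose thresholds are 0.75 / 0.1.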
def process_one_logic(self, filename, file_path, cache_dir):
tmp_wav = os.path.join(cache_dir, "tmp.wav")
tmp_vb_wav = os.path.join(cache_dir, "tmp_vb.wav")
if not transcode(file_path, tmp_wav):
return ERR_CODE_TRANSCODE
if not volume_balanced(tmp_wav, tmp_vb_wav):
return ERR_CODE_VOLUME_BALANCED
features = get_one_mfcc(tmp_vb_wav)
if len(features) < FRAME_LEN:
logging.error("feature too short|file_path={}".format(file_path))
return ERR_CODE_FEATURE_TOO_SHORT
return self.predict(filename, features)
def process_one(self, file_path):
base_dir = os.path.dirname(file_path)
filename = os.path.splitext(file_path)[0]
cache_dir = os.path.join(base_dir, filename + "_cache")
if os.path.exists(cache_dir):
shutil.rmtree(cache_dir)
os.makedirs(cache_dir)
ret = self.process_one_logic(filename, file_path, cache_dir)
shutil.rmtree(cache_dir)
return ret
def process(self, file_path):
gender, female_rate = self.process_one(file_path)
logging.info("{}|gender={}|female_rate={}".format(file_path, gender, female_rate))
return gender, female_rate
def process_by_feature(self, feature_file):
"""
直接处理特征文件
:param feature_file:
:return:
"""
filename = os.path.splitext(feature_file)[0]
features = np.load(feature_file)
gender, female_rate = self.predict(filename, features)
return gender, female_rate
def test_all_feature():
import glob
base_dir = "/data/datasets/music_voice_dataset_full/feature_online_data_v3"
female = glob.glob(os.path.join(base_dir, "female/*feature.npy"))
male = glob.glob(os.path.join(base_dir, "male/*feature.npy"))
other = glob.glob(os.path.join(base_dir, "other/*feature.npy"))
model_path = "/data/jianli.yang/voice_classification/online/models"
music_voice_pure_model = os.path.join(model_path, "voice_005_rec_v5.pth")
music_voice_no_pure_model = os.path.join(model_path, "voice_10_v5.pth")
gender_pure_model = os.path.join(model_path, "gender_8k_ratev5_v6_adam.pth")
gender_no_pure_model = os.path.join(model_path, "gender_8k_v6_adam.pth")
vc = VoiceClass(music_voice_pure_model, music_voice_no_pure_model, gender_pure_model, gender_no_pure_model)
tot_st = time.time()
ret_map = {
0: {0: 0, 1: 0, 2: 0},
1: {0: 0, 1: 0, 2: 0},
2: {0: 0, 1: 0, 2: 0}
}
for file in female:
st = time.time()
print("------------------------------>>>>>")
gender, female_score = vc.process_by_feature(file)
ret_map[0][gender] += 1
if gender != 0:
print("err:female->{}|{}|{}".format(gender, file, female_score))
print("process|spend_tm=={}".format(time.time() - st))
for file in male:
st = time.time()
print("------------------------------>>>>>")
gender, female_score = vc.process_by_feature(file)
ret_map[1][gender] += 1
if gender != 1:
print("err:male->{}|{}|{}".format(gender, file, female_score))
print("process|spend_tm=={}".format(time.time() - st))
for file in other:
st = time.time()
print("------------------------------>>>>>")
gender, female_score = vc.process_by_feature(file)
ret_map[2][gender] += 1
if gender != 2:
print("err:other->{}|{}|{}".format(gender, file, female_score))
print("process|spend_tm=={}".format(time.time() - st))
global transcode_time, vb_time, mfcc_time, predict_time
print("spend_time:tot={}|transcode={}|vb={}|gen_feature={}|predict={}".format(time.time() - tot_st, transcode_time,
vb_time, mfcc_time, predict_time))
f_f = ret_map[0][0]
f_m = ret_map[0][1]
f_o = ret_map[0][2]
m_f = ret_map[1][0]
m_m = ret_map[1][1]
m_o = ret_map[1][2]
o_f = ret_map[2][0]
o_m = ret_map[2][1]
o_o = ret_map[2][2]
print("ff:{},fm:{},fo:{}".format(f_f, f_m, f_o))
print("mm:{},mf:{},mo:{}".format(m_m, m_f, m_o))
print("om:{},of:{},oo:{}".format(o_m, o_f, o_o))
# Female precision and recall
f_acc = f_f / (f_f + m_f + o_f)
f_recall = f_f / (f_f + f_m + f_o)
# Male precision and recall
m_acc = m_m / (m_m + f_m + o_m)
m_recall = m_m / (m_m + m_f + m_o)
print("female: acc={}|recall={}".format(f_acc, f_recall))
print("male: acc={}|recall={}".format(m_acc, m_recall))
def test_all():
import glob
base_dir = "/data/datasets/music_voice_dataset_full/online_data_v3_top200"
female = glob.glob(os.path.join(base_dir, "female/*mp4"))
male = glob.glob(os.path.join(base_dir, "male/*mp4"))
other = glob.glob(os.path.join(base_dir, "other/*mp4"))
model_path = "/data/jianli.yang/voice_classification/online/models"
music_voice_pure_model = os.path.join(model_path, "voice_005_rec_v5.pth")
music_voice_no_pure_model = os.path.join(model_path, "voice_10_v5.pth")
gender_pure_model = os.path.join(model_path, "gender_8k_ratev5_v6_adam.pth")
gender_no_pure_model = os.path.join(model_path, "gender_8k_v6_adam.pth")
vc = VoiceClass(music_voice_pure_model, music_voice_no_pure_model, gender_pure_model, gender_no_pure_model)
tot_st = time.time()
ret_map = {
0: {0: 0, 1: 0, 2: 0},
1: {0: 0, 1: 0, 2: 0},
2: {0: 0, 1: 0, 2: 0}
}
for file in female:
st = time.time()
print("------------------------------>>>>>")
gender, female_score = vc.process(file)
ret_map[0][gender] += 1
if gender != 0:
print("err:female->{}|{}|{}".format(gender, file, female_score))
print("process|spend_tm=={}".format(time.time() - st))
for file in male:
st = time.time()
print("------------------------------>>>>>")
gender, female_score = vc.process(file)
ret_map[1][gender] += 1
if gender != 1:
print("err:male->{}|{}|{}".format(gender, file, female_score))
print("process|spend_tm=={}".format(time.time() - st))
for file in other:
st = time.time()
print("------------------------------>>>>>")
gender, female_score = vc.process(file)
ret_map[2][gender] += 1
if gender != 2:
print("err:other->{}|{}|{}".format(gender, file, female_score))
print("process|spend_tm=={}".format(time.time() - st))
global transcode_time, vb_time, mfcc_time, predict_time
print("spend_time:tot={}|transcode={}|vb={}|gen_feature={}|predict={}".format(time.time() - tot_st, transcode_time,
vb_time, mfcc_time, predict_time))
f_f = ret_map[0][0]
f_m = ret_map[0][1]
f_o = ret_map[0][2]
m_f = ret_map[1][0]
m_m = ret_map[1][1]
m_o = ret_map[1][2]
o_f = ret_map[2][0]
o_m = ret_map[2][1]
o_o = ret_map[2][2]
print("ff:{},fm:{},fo:{}".format(f_f, f_m, f_o))
print("mm:{},mf:{},mo:{}".format(m_m, m_f, m_o))
print("om:{},of:{},oo:{}".format(o_m, o_f, o_o))
# Female precision and recall
f_acc = f_f / (f_f + m_f + o_f)
f_recall = f_f / (f_f + f_m + f_o)
# Male precision and recall
m_acc = m_m / (m_m + f_m + o_m)
m_recall = m_m / (m_m + m_f + m_o)
print("female: acc={}|recall={}".format(f_acc, f_recall))
print("male: acc={}|recall={}".format(m_acc, m_recall))
if __name__ == "__main__":
# test_all()
# test_all_feature()
model_path = sys.argv[1]
voice_path = sys.argv[2]
music_voice_pure_model = os.path.join(model_path, "voice_005_rec_v5.pth")
music_voice_no_pure_model = os.path.join(model_path, "voice_10_v5.pth")
gender_pure_model = os.path.join(model_path, "gender_8k_ratev5_v6_adam.pth")
gender_no_pure_model = os.path.join(model_path, "gender_8k_v6_adam.pth")
vc = VoiceClass(music_voice_pure_model, music_voice_no_pure_model, gender_pure_model, gender_no_pure_model)
for i in range(0, 1):
st = time.time()
print("------------------------------>>>>>")
vc.process(voice_path)
print("process|spend_tm=={}".format(time.time() - st))
diff --git a/AutoCoverTool/script/get_song_url.py b/AutoCoverTool/script/get_song_url.py
index 2635982..9b42cdf 100644
--- a/AutoCoverTool/script/get_song_url.py
+++ b/AutoCoverTool/script/get_song_url.py
@@ -1,546 +1,128 @@
"""
获取歌曲的地址
# song_src=2 是来源108和109的歌曲,未被洗过的
# song_src=1 是曲库给的
# song_src=3 # 用于轻变调的
"""
from script.common import *
from copy import deepcopy
from online.common import update_db
def get_url_by_song_id(song_id):
sql = "select task_url,starmaker_songid from silence where starmaker_songid = {} order by task_id desc limit 1".format(
song_id)
ban = deepcopy(banned_user_map)
ban["db"] = "starmaker_musicbook"
data = get_data_by_mysql(sql, ban)
if len(data) > 0:
return data[0][0]
return None
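# For a hypothetical song_id such as 611752105030534669, the rendered query
# would look like this (illustrative only):
#   select task_url,starmaker_songid from silence
#   where starmaker_songid = 611752105030534669
#   order by task_id desc limit 1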
def process():
arr = [
- "611752105030534669",
- "611752105030534671",
- "611752105029291860",
- "611752105030534665",
- "611752105030534667",
- "611752105027734182",
- "611752105030532701",
- "611752105027626957",
- "611752105030532696",
- "611752105030517855",
- "611752105025184103",
- "611752105030517856",
- "611752105024164143",
- "611752105030517852",
- "611752105026681421",
- "611752105024571437",
- "611752105022779865",
- "611752105030517845",
- "611752105030493464",
- "611752105030517843",
- "611752105030517018",
- "611752105030517014",
- "611752105022838003",
- "611752105024118499",
- "611752105030517015",
- "611752105030517012",
- "611752105025587378",
- "611752105023644389",
- "611752105023616289",
- "611752105025502433",
- "611752105030517008",
- "611752105024199100",
- "611752105030517003",
- "611752105026614996",
- "611752105029087255",
- "611752105030517518",
- "611752105030517840",
- "611752105030517844",
- "611752105030517859",
- "611752105022777749",
- "611752105030532705",
- "611752105026265975",
- "611752105030532703",
- "611752105030487368",
- "611752105030483712",
- "611752105030517850",
- "611752105022782535",
- "611752105030517021",
- "611752105030517854",
- "611752105023541359",
- "611752105029792918",
- "611752105024199117",
- "611752105029673594",
- "611752105023674624",
- "611752105028990732",
- "611752105028487811",
- "611752105024194923",
- "611752105023763921",
- "611752105023434554",
- "611752105022838184",
- "611752105030553757",
- "611752105027326100",
- "611752105023977089",
- "611752105023674617",
- "611752105023620980",
- "611752105023536537",
- "611752105023301456",
- "611752105022842491",
- "611752105022841266",
- "611752105030548398",
- "611752105022839768",
- "611752105022838572",
- "611752105022781228",
- "611752105029598755",
- "611752105030517531",
- "611752105023541369",
- "611752105023678576",
- "611752105023346238",
- "611752105027648094",
- "611752105027326090",
- "611752105027832575",
- "611752105030478339",
- "611752105027795229",
- "611752105027734187",
- "611752105022614618",
- "611752105023329571",
- "611752105023234496",
- "611752105026134338",
- "611752105030554109",
- "611752105030517489",
- "611752105029836690",
- "611752105030555726",
- "611752105030555723",
- "611752105030553598",
- "611752105030555722",
- "611752105029656131",
- "611752105030555720",
- "611752105023674607",
- "611752105023478557",
- "611752105030555716",
- "611752105030555717",
- "611752105022784360",
- "611752105022836164",
- "611752105029711726",
- "611752105022783553",
- "611752105030555714",
- "611752105022824550",
- "611752105022838413",
- "611752105022783797",
- "611752105022781620",
- "611752105022775924",
- "611752105022774759",
- "611752105022759877",
- "611752105022785234",
- "611752105030555710",
- "611752105030555712",
- "611752105022768451",
- "611752105022767499",
- "611752105030555707",
- "611752105022755443",
- "611752105022777511",
- "611752105022775921",
- "611752105022841864",
- "611752105022766831",
- "611752105022768795",
- "611752105022779062",
- "611752105030555708",
- "611752105030555700",
- "611752105030517183",
- "611752105030555703",
- "611752105030555697",
- "611752105022768285",
- "611752105020419088",
- "611752105022747795",
- "611752105022774521",
- "611752105030555698",
- "611752105030555694",
- "611752105030555696",
- "611752105030555689",
- "611752105022749162",
- "611752105022785417",
- "611752105022784953",
- "611752105022616389",
- "611752105030555690",
- "611752105030555687",
- "611752105022838918",
- "611752105022775551",
- "611752105022728634",
- "611752105022773679",
- "611752105030555686",
- "611752105022758328",
- "611752105030555684",
- "611752105030555681",
- "611752105030555683",
- "611752105022754068",
- "611752105030516960",
- "611752105022764207",
- "611752105022743905",
- "611752105022757953",
- "611752105030516957",
- "611752105030516959",
- "611752105020332899",
- "611752105022841838",
- "611752105029665047",
- "611752105030516953",
- "611752105030490291",
- "611752105022762732",
- "611752105030516955",
- "611752105022762600",
- "611752105022774589",
- "611752105022754286",
- "611752105030516949",
- "611752105022774560",
- "611752105030516946",
- "611752105030487061",
- "611752105030516947",
- "611752105030516933",
- "611752105030516940",
- "611752105030555748",
- "611752105030555742",
- "611752105030555740",
- "611752105030555741",
- "611752105030555737",
- "611752105030553605",
- "611752105030555739",
- "611752105030555733",
- "611752105030555729",
- "611752105030544017",
- "611752105030555727",
- "611752105030486334",
- "611752105030544028",
- "611752105022647043",
- "611752105024402503",
- "611752105022704184",
- "611752105027532732",
- "611752105028858105",
- "611752105027532750",
- "611752105022778279",
- "611752105030517318",
- "611752105026752362",
- "611752105023636284",
- "611752105030517315",
- "611752105022742205",
- "611752105022754485",
- "611752105030517314",
- "611752105023462684",
- "611752105022728585",
- "611752105023751007",
- "611752105030484779",
- "611752105030550189",
- "611752105022729202",
- "611752105030486059",
- "611752105029292588",
- "611752105022647103",
- "611752105027273004",
- "611752105027460081",
- "611752105026900908",
- "611752105022647046",
- "611752105029648513",
- "611752105028032107",
+ "611752105029372457",
+ "611752105029432778",
+ "611752105029443799",
+ "611752105029460983",
+ "611752105029443805",
+ "611752105029395215",
+ "611752105029290706",
+ "611752105029047776",
+ "611752105028820642",
+ "611752105028820632",
+ "611752105027781516",
+ "611752105022617127",
+ "611752105022613728",
+ "611752105028533644",
+ "611752105020286444",
+ "611752105028960419",
+ "611752105020286452",
+ "611752105020417686",
+ "611752105025184112",
"611752105026452639",
"611752105024996266",
- "611752105024728131",
+ "611752105026536915",
+ "611752105025063752",
+ "611752105024356022",
"611752105027832575",
- "611752105030483919",
- "611752105030484774",
- "611752105027228696",
- "611752105022728126",
- "611752105022840114",
- "611752105026982813",
- "611752105022741687",
- "611752105030486054",
- "611752105020352152",
- "611752105020352156",
- "611752105020336897",
- "611752105020256286",
- "611752105030765622",
+ "611752105028820607",
+ "611752105024766043",
+ "611752105027734198",
+ "611752105024766044",
+ "611752105028922725",
+ "611752105024247386",
+ "611752105028820634",
+ "611752105029461031",
+ "611752105029054060",
+ "611752105024938926",
"611752105028820609",
- "611752105030488595",
- "611752105030517536",
- "611752105030501857",
- "611752105030478339",
- "611752105025957389",
- "611752105024415490",
- "611752105027854244",
- "611752105029527187",
- "611752105028444597",
- "611752105027903168",
- "611752105028778353",
- "611752105028906605",
- "611752105027781526",
- "611752105027877887",
- "611752105027795229",
- "611752105027734187",
- "611752105028820612",
- "611752105027626964",
- "611752105027460080",
- "611752105027507932",
- "611752105027611342",
- "611752105027435127",
- "611752105029648514",
- "611752105026874730",
- "611752105030591117",
"611752105025541483",
"611752105026536913",
- "611752105022647044",
- "611752105023434557",
- "611752105023440333",
- "611752105023460357",
- "611752105023510939",
- "611752105022842387",
- "611752105023674599",
- "611752105023160140",
- "611752105022647074",
- "611752105022615220",
- "611752105028408822",
- "611752105022772279",
- "611752105022614618",
- "611752105020417684",
- "611752105024608150",
- "611752105030499232",
- "611752105030485430",
- "611752105023683357",
- "611752105023301455",
- "611752105023458990",
- "611752105027228689",
- "611752105026437878",
+ "611752105027326105",
+ "611752105024628049",
+ "611752105029461035",
+ "611752105024728134",
"611752105027460089",
- "611752105029570157",
- "611752105022700847",
- "611752105029006303",
- "611752105028820629",
- "611752105023134539",
- "611752105022647087",
- "611752105027326104",
- "611752105022652047",
- "611752105022839468",
- "611752105028944645",
- "611752105022911042",
- "611752105020348944",
- "611752105020348945",
- "611752105020332345",
- "611752105027484925",
- "611752105027484915",
- "611752105028870536",
- "611752105027877846",
- "611752105027307631",
- "611752105026437853",
- "611752105023604729",
- "611752105024230229",
- "611752105022816170",
- "611752105028523417",
- "611752105022652046",
- "611752105022782720",
- "611752105024380150",
- "611752105022839949",
- "611752105022761851",
- "611752105022741054",
- "611752105022756250",
- "611752105030533676",
- "611752105022756563",
- "611752105022728598",
- "611752105022760225",
- "611752105025034424",
- "611752105022776389",
- "611752105022767205",
- "611752105030534929",
- "611752105022775308",
- "611752105030502738",
- "611752105022615626",
- "611752105030502742",
- "611752105022741814",
- "611752105029648552",
- "611752105022742136",
- "611752105030486351",
- "611752105022770959",
- "611752105022765477",
- "611752105022751650",
- "611752105022742577",
- "611752105030534505",
- "611752105022973044",
- "611752105029649153",
- "611752105030549651",
- "611752105025494340",
- "611752105020343697",
- "611752105020283852",
- "611752105020373961",
- "611752105022729294",
- "611752105020409111",
- "611752105024938882",
- "611752105029443802",
- "611752105027903154",
- "611752105022614626",
- "611752105020308424",
- "611752105030629613",
- "611752105030534740",
- "611752105030534855",
- "611752105022782724",
- "611752105027781516",
- "611752105029648513",
- "611752105020417686",
- "611752105024996266",
- "611752105024728131",
- "611752105027832575",
- "611752105029646791",
- "611752105027228696",
- "611752105022614749",
- "611752105020336084",
- "611752105026982813",
- "611752105022783586",
- "611752105022741687",
- "611752105023870020",
- "611752105022729203",
- "611752105028143469",
- "611752105030486054",
- "611752105022729411",
- "611752105020336950",
- "611752105020256284",
- "611752105030548045",
- "611752105030503007",
- "611752105029047774",
- "611752105026792339",
- "611752105026449363",
- "611752105026736869",
- "611752105022614727",
- "611752105022615372",
- "611752105022780524",
- "611752105022769594",
- "611752105022758407",
- "611752105022746664",
- "611752105022763120",
- "611752105022745603",
- "611752105030487351",
- "611752105022747491",
- "611752105022728209",
- "611752105022884087",
- "611752105022890433",
- "611752105022741836",
- "611752105022728574",
- "611752105022728612",
- "611752105022739185",
- "611752105030532703",
- "611752105024118493",
- "611752105030535938",
- "611752105030487366",
- "611752105030487368",
- "611752105030487365",
- "611752105022843075",
- "611752105022790159",
- "611752105022778099",
- "611752105022776703",
- "611752105022776364",
- "611752105022774641",
- "611752105022770768",
- "611752105022770226",
- "611752105022769617",
- "611752105022769056",
- "611752105029780685",
- "611752105030477448",
- "611752105022767219",
- "611752105022754490",
- "611752105022760812",
- "611752105030487360",
- "611752105022749100",
- "611752105022728481",
- "611752105022769181",
- "611752105030487358",
- "611752105022739209",
- "611752105022774610",
- "611752105022728721",
- "611752105022741064",
- "611752105022775968",
- "611752105030487354",
- "611752105022771053",
- "611752105022779825",
- "611752105022744563",
- "611752105022744436",
- "611752105030487355",
- "611752105022771161",
- "611752105022748598",
- "611752105022766486",
- "611752105022814952",
- "611752105022728118",
- "611752105022778616",
- "611752105022778275",
- "611752105022614337",
- "611752105022774253",
- "611752105022762324",
- "611752105026299314",
- "611752105022784079",
- "611752105022774583",
- "611752105022770293",
- "611752105030487359",
- "611752105022765790",
- "611752105030535451",
- "611752105030517752",
- "611752105030487362",
- "611752105022775916",
- "611752105022776159",
- "611752105029292588",
- "611752105022838005",
- "611752105023541359",
- "611752105020336965",
- "611752105020293286"
+ "611752105029432787",
+ "611752105028820629"
]
ban = deepcopy(banned_user_map)
ban["db"] = "av_db"
for sid in arr:
url = get_url_by_song_id(sid)
if url is not None:
print("out,{},{}".format(url, sid))
# As long as it has not been output externally, it can still be inserted here
- sql = "select song_id from svc_queue_table where song_id={} and (song_src in (3, 4, 5) and state=2)".format(sid)
+ sql = "select song_id from svc_queue_table where song_id={} and (song_src in (3, 4, 5) and state=2)".format(
+ sid)
data = get_data_by_mysql(sql, ban)
if len(data) == 0:
tm = int(time.time())
- sql = "replace INTO svc_queue_table (song_id, url, create_time, update_time, song_src) VALUES ({}, \"{}\",{}, {}, 4)" \
+ sql = "replace INTO svc_queue_table (song_id, url, create_time, update_time, song_src) VALUES ({}, \"{}\",{}, {}, 5)" \
.format(sid, url, tm, tm)
update_db(sql, ban)
def get_data_from_song():
sql = """
select tb1.song_id, tb1.recording_count
from (
select song_id,recording_count
from starmaker.song
where song_src in (108,109) and song_status = 2
order by recording_count desc
) as tb1
left join
(
select song_id
from av_db.svc_queue_table
) as tb2
on tb1.song_id = tb2.song_id
where tb2.song_id is null
order by tb1.recording_count desc limit 5000
"""
ban = deepcopy(banned_user_map)
ban_v1 = deepcopy(banned_user_map)
ban["db"] = "starmaker_musicbook"
ban_v1["db"] = "av_db"
data = get_data_by_mysql(sql, ban)
for dt in data:
sid = dt[0]
url = get_url_by_song_id(sid)
if url is not None:
print("out,{},{}".format(url, sid))
tm = int(time.time())
sql = "insert INTO svc_queue_table (song_id, url, create_time, update_time, song_src) VALUES ({}, \"{}\", {}, {}, 3)" \
.format(sid, url, tm, tm)
update_db(sql, ban_v1)
if __name__ == '__main__':
# get_diff_song()
# get_data_from_song()
process()
