[{"data":1,"prerenderedAt":473},["ShallowReactive",2],{"content-query-eBsIqHcCJF":3},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":8,"description":9,"date":10,"cover":11,"type":12,"body":13,"_type":467,"_id":468,"_source":469,"_file":470,"_stem":471,"_extension":472},"/news/zh/3058","zh",false,"","快来获取你的“知音”——MusicGen in MindSpore NLP","要说最近一两周AIGC领域最被关注的内容，非Suno AI莫属。","2024-04-11","https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/04/12/1cb30ecb8c5f485287d7e16a21e9e69b.png","news",{"type":14,"children":15,"toc":460},"root",[16,24,29,34,42,47,52,57,62,67,78,85,92,97,109,114,118,150,154,158,163,168,188,193,198,208,213,218,223,228,233,244,249,257,262,270,283,288,296,301,313,318,326,331,343,348,353,372,377,384,389,394,402,414,426,437,448],{"type":17,"tag":18,"props":19,"children":21},"element","h1",{"id":20},"快来获取你的知音musicgen-in-mindspore-nlp",[22],{"type":23,"value":8},"text",{"type":17,"tag":25,"props":26,"children":27},"p",{},[28],{"type":23,"value":9},{"type":17,"tag":25,"props":30,"children":31},{},[32],{"type":23,"value":33},"作为“音乐界的ChatGPT”，Suno AI最新推出的V3模型以其生成广播质量级别音乐的非凡能力火速引爆了音乐圈，网友们也纷开始放飞自我，在AI音乐生成的二创道路上越走越远。",{"type":17,"tag":25,"props":35,"children":36},{},[37],{"type":17,"tag":38,"props":39,"children":41},"img",{"alt":7,"src":40},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/04/12/1ea12d020c54417c9bc78465ecabfa76.png",[],{"type":17,"tag":25,"props":43,"children":44},{},[45],{"type":23,"value":46},"B站网友Mr_Mr_han[1]和一玄青基于Suno AI的二创[2]",{"type":17,"tag":25,"props":48,"children":49},{},[50],{"type":23,"value":51},"对AI开发者与爱好者来说，目前最大的遗憾可能就是Suno V3还没有开源，也没有任何训练的细节，但没有关系，我们现在可以通过MindSpore NLP玩转音乐生成。",{"type":17,"tag":25,"props":53,"children":54},{},[55],{"type":23,"value":56},"MindSpore 
NLP已支持Meta推出的人工智能音乐生成器——MusicGen！！！它是目前在AI音乐领域难得的开源且可以充当商用训练基础的模型。支持随机生成一段音乐，也可以通过你输入的文本生成指定风格的音乐。",{"type":17,"tag":25,"props":58,"children":59},{},[60],{"type":23,"value":61},"先来试听几段由MusicGen生成的音乐：",{"type":17,"tag":25,"props":63,"children":64},{},[65],{"type":23,"value":66},"MusicGen随机生成的音乐：",{"type":17,"tag":25,"props":68,"children":69},{},[70,76],{"type":17,"tag":71,"props":72,"children":73},"strong",{},[74],{"type":23,"value":75},"musicgen_out_unconditional",{"type":23,"value":77},",昇思MindSpore,15秒",{"type":17,"tag":25,"props":79,"children":80},{},[81],{"type":17,"tag":38,"props":82,"children":84},{"alt":7,"src":83},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/04/12/99f7721ad6ba4e88815fb57f5001a4c6.png",[],{"type":17,"tag":25,"props":86,"children":87},{},[88],{"type":17,"tag":38,"props":89,"children":91},{"alt":7,"src":90},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/04/12/2ddf204305524e1da4e935aaf2617796.png",[],{"type":17,"tag":25,"props":93,"children":94},{},[95],{"type":23,"value":96},"如果你想生成属于自己的专属音乐，快来MindSpore 
NLP体验属于你自己的“知音”吧！MusicGen链接：",{"type":17,"tag":25,"props":98,"children":99},{},[100],{"type":17,"tag":101,"props":102,"children":106},"a",{"href":103,"rel":104},"https://github.com/mindspore-lab/mindnlp/blob/master/llm/inference/musicgen/run%5C_musicgen.ipynb",[105],"nofollow",[107],{"type":23,"value":108},"https://github.com/mindspore-lab/mindnlp/blob/master/llm/inference/musicgen/run\\_musicgen.ipynb",{"type":17,"tag":25,"props":110,"children":111},{},[112],{"type":23,"value":113},"**",{"type":17,"tag":25,"props":115,"children":116},{},[117],{"type":23,"value":113},{"type":17,"tag":25,"props":119,"children":120},{},[121],{"type":17,"tag":71,"props":122,"children":123},{},[124],{"type":17,"tag":71,"props":125,"children":126},{},[127],{"type":17,"tag":71,"props":128,"children":129},{},[130],{"type":17,"tag":71,"props":131,"children":132},{},[133],{"type":17,"tag":71,"props":134,"children":135},{},[136],{"type":17,"tag":71,"props":137,"children":138},{},[139],{"type":17,"tag":71,"props":140,"children":141},{},[142],{"type":17,"tag":71,"props":143,"children":144},{},[145],{"type":17,"tag":71,"props":146,"children":147},{},[148],{"type":23,"value":149},"基于MindSpore NLP+MusicGen生成自己的个性化音乐",{"type":17,"tag":25,"props":151,"children":152},{},[153],{"type":23,"value":113},{"type":17,"tag":25,"props":155,"children":156},{},[157],{"type":23,"value":113},{"type":17,"tag":25,"props":159,"children":160},{},[161],{"type":23,"value":162},"MindSpore NLP如今已全面拥抱HuggingFace，基于动态图实现一天完成热点SOTA模型快速适配。熟悉开发的小伙伴可能会发现基于MindSpore 
NLP实现的接口和HuggingFace并没有太多的差别，接下来我们借音乐生成的案例来进行阐明。",{"type":17,"tag":25,"props":164,"children":165},{},[166],{"type":23,"value":167},"音乐生成的方式有以下3种：",{"type":17,"tag":169,"props":170,"children":171},"ul",{},[172,178,183],{"type":17,"tag":173,"props":174,"children":175},"li",{},[176],{"type":23,"value":177},"无提示生成：让模型“自力更生”，随机生成一段音乐，体验开音乐盲盒的惊喜。",{"type":17,"tag":173,"props":179,"children":180},{},[181],{"type":23,"value":182},"文本提示生成：通过文本指定生成音乐的风格，如：生成一段80s的朋克风音乐，需要有很密集的鼓点。",{"type":17,"tag":173,"props":184,"children":185},{},[186],{"type":23,"value":187},"音频提示生成：通过音频提示来生成音乐。",{"type":17,"tag":25,"props":189,"children":190},{},[191],{"type":23,"value":192},"在此之前，需要完成一点点“准备工作”。",{"type":17,"tag":25,"props":194,"children":195},{},[196],{"type":23,"value":197},"首先，我们将本次生成音乐用到的MusicGen-small模型进行实例化。",{"type":17,"tag":199,"props":200,"children":202},"pre",{"code":201},"from mindnlp.transformers import MusicgenForConditionalGeneration\n\nmodel = MusicgenForConditionalGeneration.from_pretrained(\"facebook/musicgen-small\")\n",[203],{"type":17,"tag":204,"props":205,"children":206},"code",{"__ignoreMap":7},[207],{"type":23,"value":201},{"type":17,"tag":25,"props":209,"children":210},{},[211],{"type":23,"value":212},"MusicGen提供了small、medium和big三种规格的预训练权重文件，本次指南默认使用small规格的权重，生成的音频质量较低，但是生成的速度是最快的。感兴趣的小伙伴也可以尝试其他规格的MusicGen。",{"type":17,"tag":25,"props":214,"children":215},{},[216],{"type":23,"value":217},"接下来，就到了生成音乐的正式环节了。",{"type":17,"tag":25,"props":219,"children":220},{},[221],{"type":23,"value":222},"MusicGen都是通过调用model.generate接口，但可以通过变换不同的输入（即代码中的inputs）来实现不同方式的音乐生成。",{"type":17,"tag":25,"props":224,"children":225},{},[226],{"type":23,"value":227},"补充一点，MusicGen支持贪心（greedy）和采样（sampling）两种生成模式。在实际执行过程中，采样模式得到的结果要显著优于贪心模式。因此我们默认启用采样模式，并且可以在调用MusicgenForConditionalGeneration.generate时设置do_sample=True来显式指定使用采样模式。",{"type":17,"tag":25,"props":229,"children":230},{},[231],{"type":23,"value":232},"在了解了音乐生成的整体思路后，我们就可以尝试上述的三种不同的音乐生成方式了。",{"type":17,"tag":25,"props":234,"children"
:235},{},[236],{"type":17,"tag":71,"props":237,"children":238},{},[239],{"type":17,"tag":71,"props":240,"children":241},{},[242],{"type":23,"value":243},"方式1：无提示生成",{"type":17,"tag":25,"props":245,"children":246},{},[247],{"type":23,"value":248},"我们可以通过方法 `MusicgenForConditionalGeneration.get_unconditional_inputs` 获得网络的随机输入，然后使用 `.generate` 方法进行自回归生成，指定 `do_sample=True` 来启用采样模式：",{"type":17,"tag":199,"props":250,"children":252},{"code":251},"unconditional_inputs = model.get_unconditional_inputs(num_samples=1)\n\naudio_values = model.generate(**unconditional_inputs, do_sample=True, max_new_tokens=768)\n",[253],{"type":17,"tag":204,"props":254,"children":255},{"__ignoreMap":7},[256],{"type":23,"value":251},{"type":17,"tag":25,"props":258,"children":259},{},[260],{"type":23,"value":261},"接下来，我们需要使用第三方库`scipy`将输出的音频保存为`musicgen_out_unconditional.wav` 文件。",{"type":17,"tag":199,"props":263,"children":265},{"code":264},"import scipy\n\nsampling_rate = model.config.audio_encoder.sampling_rate\n\nscipy.io.wavfile.write(\"musicgen_out_unconditional.wav\", rate=sampling_rate, data=audio_values[0, 0].asnumpy())\n",[266],{"type":17,"tag":204,"props":267,"children":268},{"__ignoreMap":7},[269],{"type":23,"value":264},{"type":17,"tag":271,"props":272,"children":274},"h2",{"id":273},"方式2文本提示生成",[275],{"type":17,"tag":71,"props":276,"children":277},{},[278],{"type":17,"tag":71,"props":279,"children":280},{},[281],{"type":23,"value":282},"方式2：文本提示生成",{"type":17,"tag":25,"props":284,"children":285},{},[286],{"type":23,"value":287},"首先通过AutoProcessor对输入进行预处理，基于文本提示生成音频样本。然后，可以将预处理后的输入传递给.generate方法以生成文本条件音频样本。最后，我们将生成出来的音频文件保存为`musicgen_out_text.wav`。",{"type":17,"tag":199,"props":289,"children":291},{"code":290},"from mindnlp.transformers import AutoProcessor\n\n\n\nprocessor = AutoProcessor.from_pretrained(\"facebook/musicgen-small\")\n\ninputs = processor(\n\n    text=[\"80s pop track with bassy drums and synth\", \"90s rock song with loud guitars and heavy drums\"],\n\n    
padding=True,\n\n    return_tensors=\"ms\",\n\n)\n\naudio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=768)\n\nscipy.io.wavfile.write(\"musicgen_out_text.wav\", rate=sampling_rate, data=audio_values[0, 0].asnumpy())\n",[292],{"type":17,"tag":204,"props":293,"children":294},{"__ignoreMap":7},[295],{"type":23,"value":290},{"type":17,"tag":25,"props":297,"children":298},{},[299],{"type":23,"value":300},"大家可以通过修改processor中的文本描述，定制自己个人风格的音乐。",{"type":17,"tag":271,"props":302,"children":304},{"id":303},"方式3音频提示生成",[305],{"type":17,"tag":71,"props":306,"children":307},{},[308],{"type":17,"tag":71,"props":309,"children":310},{},[311],{"type":23,"value":312},"方式3：音频提示生成",{"type":17,"tag":25,"props":314,"children":315},{},[316],{"type":23,"value":317},"AutoProcessor同样可以对用于音频预测的音频提示进行预处理。在以下示例中，我们首先加载音频文件，然后进行预处理，并将输入给到网络模型来进行音频生成。最后，我们将生成出来的音频文件保存为` musicgen_out_audio.wav `。",{"type":17,"tag":199,"props":319,"children":321},{"code":320},"from datasets import load_dataset\n\n\n\ndataset = load_dataset(\"sanchit-gandhi/gtzan\", split=\"train\", streaming=True)\n\nsample = next(iter(dataset))[\"audio\"]\n\nsample[\"array\"] = sample[\"array\"][: len(sample[\"array\"]) // 2]\n\n\n\ninputs = processor(\n\n    audio=sample[\"array\"],\n\n    sampling_rate=sample[\"sampling_rate\"],\n\n    text=[\"80s blues track with groovy saxophone\"],\n\n    padding=True,\n\n    return_tensors=\"ms\",\n\n)\n\n\n\naudio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256)\n\nscipy.io.wavfile.write(\"musicgen_out_audio.wav\", rate=sampling_rate, data=audio_values[0, 
0].asnumpy())\n",[322],{"type":17,"tag":204,"props":323,"children":324},{"__ignoreMap":7},[325],{"type":23,"value":320},{"type":17,"tag":25,"props":327,"children":328},{},[329],{"type":23,"value":330},"大家如有其他的音频文件，也可以通过这种方式来引导模型输出。",{"type":17,"tag":18,"props":332,"children":334},{"id":333},"附录musicgen是什么",[335],{"type":17,"tag":71,"props":336,"children":337},{},[338],{"type":17,"tag":71,"props":339,"children":340},{},[341],{"type":23,"value":342},"附录：MusicGen是什么？",{"type":17,"tag":25,"props":344,"children":345},{},[346],{"type":23,"value":347},"MusicGen是来自Meta AI的Jade Copet等人提出的基于单个语言模型（LM）的音乐生成模型，能够根据文本描述或音频提示生成高质量的音乐样本，相关研究成果参考论文《Simple and Controllable Music Generation》[3]。",{"type":17,"tag":25,"props":349,"children":350},{},[351],{"type":23,"value":352},"MusicGen模型基于Transformer结构，可以分解为三个不同的阶段：",{"type":17,"tag":354,"props":355,"children":356},"ol",{},[357,362,367],{"type":17,"tag":173,"props":358,"children":359},{},[360],{"type":23,"value":361},"用户输入的文本描述作为输入传递给一个固定的文本编码器模型，以获得一系列隐形状态表示。",{"type":17,"tag":173,"props":363,"children":364},{},[365],{"type":23,"value":366},"训练MusicGen解码器来预测离散的隐形状态音频token。",{"type":17,"tag":173,"props":368,"children":369},{},[370],{"type":23,"value":371},"对这些音频token使用音频压缩模型（如EnCodec）进行解码，以恢复音频波形。",{"type":17,"tag":25,"props":373,"children":374},{},[375],{"type":23,"value":376},"与传统方法不同，MusicGen采用单个stage的Transformer LM结合高效的token交织模式，取消了多层级的多个模型结构，例如分层或上采样，这使得MusicGen能够生成单声道和立体声的高质量音乐样本，同时提供更好的生成输出控制。MusicGen不仅能够生成符合文本描述的音乐，还能够通过旋律条件控制生成的音调结构。",{"type":17,"tag":25,"props":378,"children":379},{},[380],{"type":17,"tag":38,"props":381,"children":383},{"alt":7,"src":382},"https://obs-mindspore-file.obs.cn-north-4.myhuaweicloud.com/file/2024/04/12/fc0dbb336fbc4034a022651ea5c11828.png",[],{"type":17,"tag":25,"props":385,"children":386},{},[387],{"type":23,"value":388},"图表1 MusicGen使用的码本延迟模式",{"type":17,"tag":25,"props":390,"children":391},{},[392],{"type":23,"value":393},"MusicGen直接使用谷歌的t5-base[4]及其权重作为文本编码器模型，并使用EnCodec 
32kHz[5]及其权重作为音频压缩模型。MusicGen解码器是一个语言模型架构，针对音乐生成任务从零开始进行训练。",{"type":17,"tag":271,"props":395,"children":397},{"id":396},"参考链接",[398],{"type":17,"tag":71,"props":399,"children":400},{},[401],{"type":23,"value":396},{"type":17,"tag":25,"props":403,"children":404},{},[405,407],{"type":23,"value":406},"[1]",{"type":17,"tag":101,"props":408,"children":411},{"href":409,"rel":410},"https://www.bilibili.com/video/BV1w1421D7tQ/?spm%5C_id%5C_from=333.337.search-card.all.click&vd%5C_source=71c01b2a505751311824aae7033733ad",[105],[412],{"type":23,"value":413},"https://www.bilibili.com/video/BV1w1421D7tQ/?spm\\_id\\_from=333.337.search-card.all.click&vd\\_source=71c01b2a505751311824aae7033733ad",{"type":17,"tag":25,"props":415,"children":416},{},[417,419],{"type":23,"value":418},"[2]",{"type":17,"tag":101,"props":420,"children":423},{"href":421,"rel":422},"https://www.bilibili.com/video/BV1Cx421S7fD/?spm%5C_id%5C_from=333.337.search-card.all.click&vd%5C_source=71c01b2a505751311824aae7033733ad",[105],[424],{"type":23,"value":425},"https://www.bilibili.com/video/BV1Cx421S7fD/?spm\\_id\\_from=333.337.search-card.all.click&vd\\_source=71c01b2a505751311824aae7033733ad",{"type":17,"tag":25,"props":427,"children":428},{},[429,431],{"type":23,"value":430},"[3]",{"type":17,"tag":101,"props":432,"children":435},{"href":433,"rel":434},"https://arxiv.org/abs/2306.05284",[105],[436],{"type":23,"value":433},{"type":17,"tag":25,"props":438,"children":439},{},[440,442],{"type":23,"value":441},"[4]",{"type":17,"tag":101,"props":443,"children":446},{"href":444,"rel":445},"https://huggingface.co/t5-base",[105],[447],{"type":23,"value":444},{"type":17,"tag":25,"props":449,"children":450},{},[451,453],{"type":23,"value":452},"[5]",{"type":17,"tag":101,"props":454,"children":457},{"href":455,"rel":456},"https://huggingface.co/facebook/encodec%5C_32khz",[105],[458],{"type":23,"value":459},"https://huggingface.co/facebook/encodec\\_32khz",{"title":7,"searchDepth":461,"depth":461,"links":462},4,[463,46
5,466],{"id":273,"depth":464,"text":282},2,{"id":303,"depth":464,"text":312},{"id":396,"depth":464,"text":396},"markdown","content:news:zh:3058.md","content","news/zh/3058.md","news/zh/3058","md",1776506079093]