import time
import os
import pickle
import sys
sys.path.append('/')
from src.music.pipeline.music_pipeline import encode_music
from src.music2cocktailrep.pipeline.music2cocktailrep import music2cocktailrep, setup_translation_models, debug_translation
from src.cocktails.pipeline.cocktailrep2recipe import cocktailrep2recipe
from src.debugger import Debugger
from datetime import datetime
from shutil import copy
import streamlit as st
from src.music.config import AUDIO_PATH, MIDI_PATH
from pretty_midi import PrettyMIDI
import numpy as np
import pydub
from PIL import Image
import pytube

st.set_page_config(
    page_title="TastyPiano",
    page_icon="🎹",
)
st.title('TastyPiano')

synestesia_path = 'data/synesthesia/'
debugger = Debugger(verbose=False)
np.random.seed(0)


def centered_module(func, text, args=None):
    _, col2, _ = st.columns([1, 2, 1])
    with col2:
        return func(text)


def centered_button(func, text, args=None):
    _, _, _, col3, _, _, _ = st.columns([1, 1, 1, 1, 1, 1, 1])
    with col3:
        return func(text)


def setup_streamlite():
    # Workaround: patch a problematic escaped character in pytube's cipher.py (line index 286).
    path = '/'.join(pytube.__file__.split('/')[:-1]) + '/cipher.py'
    with open(path, 'r') as f:
        cipher = f.read().split('\n')
    cipher[286] = cipher[286].replace('\\', '')
    with open(path, 'w') as f:
        f.write('\n'.join(cipher))
    assert os.path.exists(path)
    setup_translation_models()

    image = Image.open('./data/pianocktail.jpg')
    st.image(image, caption='Pianocktail by Compagnie la Rumeur')
    st.subheader("Ready to taste music?")
    st.markdown("TastyPiano generates music--taste synesthetic experiences by letting you turn any piano song into a cocktail. It is inspired by the"
                " *Pianocktail*, a literary invention from the French novelist, jazz musician, singer and engineer Boris Vian. I see TastyPiano as a digital member of the "
                "Pianocktail species, along with [other](https://www.youtube.com/watch?v=5bdX5i0nAWw) "
                "[wonderful](https://www.youtube.com/watch?v=pzsDOH-xtrs&list=PLC196AA37A2D1C066&index=3) [machines](https://www.youtube.com/watch?v=y0RJg7I2x34).")
    st.markdown("But TastyPiano is different from existing pianocktails. While existing versions merely map played notes to drops of corresponding ingredients, "
                "TastyPiano listens to the song, analyzes the music, and hallucinates matching tastes and flavors, then generates cocktail recipes "
                "until it finds the one matching its gustatory hallucination. Check my [blog post](https://ccolas.github.io/project/pianocktail) for more information."
                " Hear and taste it for yourself!")
    st.subheader("How to use it?")
    st.markdown("Provide a piano solo input and click on the **Taste it!** button.\n"
                "\nYou can input either: \n* a YouTube url **or**\n* an audio file (.mp3) **or**\n* a midi file (.mid)\n"
                "\nAll of these should be **piano only**, no other instrument.\n"
                "Note that audio sources are cropped to the first 40s because the process of "
                "converting them to midi is rather slow (about 1 sec of processing per sec of audio). Midi inputs are taken whole. "
                "Please report any bug / buggy url in the community tab.")

    st.subheader("Prepare")
    col1, col2, col3 = st.columns(3)
    generate_audio_from_midi = False
    with col1:
        st.markdown('**YouTube url**')
        url = st.text_area('Type it below', 'https://www.youtube.com/watch?v=UGK70IkP830', height=160)
    with col2:
        st.markdown('**Audio file**')
        audio = st.file_uploader("Upload it here (.mp3)", type=['.mp3'])
    with col3:
        st.markdown('**Midi file**')
        midi = st.file_uploader("Upload it here (.mid)", type=['.mid'])
        generate_audio_from_midi = st.checkbox('Generate audio? \nUntick if the song is too long (>10min)', value=True)
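        # If ticked, the uploaded midi is synthesized to audio in setup_and_run
        # (via pretty_midi's fluidsynth) so it can be played back in the app.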
    # url = "https://www.youtube.com/watch?v=UGK70IkP830"
    # unit = 'mL'

    def run(unit):
        setup_and_run(unit=unit, url=url, midi=midi, audio=audio, generate_audio_from_midi=generate_audio_from_midi, extra_code=None)

    # run(unit)
    st.markdown('##')
    unit = st.radio('Pick the units (before pressing "Taste it!", default mL)', ['mL', 'oz'], index=0)
    button = centered_button(st.button, 'Taste it!')
    # print(url)
    if button:
        run(unit)


def pianocktail(unit='mL', record=False, url=None, midi=None, audio=None, processed=None, crop=40, verbose=False, debug=False, level=0):
    assert url is not None or midi is not None or audio is not None or processed is not None
    if verbose:
        print('------\nNew synesthetic exploration!')
    init_time = time.time()
    error = 'unknown error'  # default message in case encode_music raises before returning one
    try:
        with st.spinner("Listening to the song (~1min).."):
            music_ai_rep, music_handcoded_rep, all_paths, error = encode_music(record=record, url=url, audio_path=audio, midi_path=midi, nb_aug=0,
                                                                               noise_injection=False, augmentation=False, processed_path=processed,
                                                                               crop=crop, apply_filtering=False, verbose=verbose, level=level+2)
        if music_ai_rep is not None:
            with st.spinner(text="Thinking about corresponding flavors.."):
                cocktail_rep, affective_cluster_id, affect = music2cocktailrep(music_ai_rep, music_handcoded_rep, verbose=verbose, level=level+2)
            with st.spinner("Trying recipes (15s).."):
                cocktail_recipes, scores = cocktailrep2recipe(cocktail_rep, unit=unit, target_affective_cluster=affective_cluster_id,
                                                              verbose=verbose, full_verbose=verbose, level=level+2)
            cocktail_recipe = cocktail_recipes[0]
            recipe_score = scores[0]
            if debug:
                music_reconstruction = debug_translation(music_ai_rep)
                debugger.extract_info(all_paths, affective_cluster_id, affect, cocktail_rep, music_reconstruction, recipe_score, verbose=verbose, level=level+2)
                debug_info = debugger.debug_dict
            else:
                debug_info = None
            if verbose:
                print(cocktail_recipe.replace('Recipe', ' ' * (level + 2) + 'Generated recipe:').replace('None ()', ''))
                debugger.print_debug(level=level+2)
                print(f'\nEnd of synesthetic exploration ({int(time.time() - init_time)} secs).\n------')
            st.success('Recipe found!')
        else:
            st.error('Error in listening. Is the url valid? the audio an .mp3? \nthe midi a .mid?')
            cocktail_recipe = None
            debug_info = None
    except:
        st.error('Error: ' + error)
        cocktail_recipe = None
        debug_info = None
    return cocktail_recipe, debug_info


def setup_and_run(unit='mL', url=None, midi=None, audio=None, generate_audio_from_midi=False, verbose=True, debug=True, extra_code=None):
    if url is None and midi is None and audio is None:
        st.error('Please enter a piano input.')
        assert False
    st.subheader('Synesthesia')

    # Create a time-stamped folder to store the artifacts of this run (recipe, debug info, inputs).
    now = datetime.now()
    folder_name = f'date_{now.year}_{now.month}_{now.day}_time_{now.hour}_{now.minute}_{now.second}'
    folder_path = synestesia_path + folder_name
    if extra_code is not None:
        folder_path += '_' + extra_code
    if os.path.exists(folder_path):
        folder_path += '_2'
    folder_path += '/'
    os.makedirs(folder_path, exist_ok=True)

    if midi is not None:
        st.write(f' \tReading from midi file: {midi.name}')
        midi_path = MIDI_PATH + 'from_url_midi/' + midi.name[:-4] + '_midi.mid'
        audio_path = AUDIO_PATH + 'from_url/' + midi.name.replace('.mid', '.mp3')
        with open(midi_path, "wb") as f:
            f.write(midi.getbuffer())
        midi = midi_path
        if generate_audio_from_midi:
            # Synthesize the midi to audio so it can be played back in the app.
            midi_data = PrettyMIDI(midi_path)
            audio_data = midi_data.fluidsynth(fs=44100)
            y = np.int16(audio_data * 2 ** 15)
            song = pydub.AudioSegment(y.tobytes(), frame_rate=44100, sample_width=2, channels=1)
            song.export(audio_path, format="mp3", bitrate="320k")
            # st.write(audio_data)
            st.audio(audio_path, format='audio/mp3')
        url = None
    elif audio is not None:
        st.write(f' \tReading from audio file: {audio.name}')
        audio_path = AUDIO_PATH + 'from_url/' + audio.name
        with open(audio_path, "wb") as f:
            f.write(audio.getbuffer())
        audio = audio_path
        audio_file = open(audio, 'rb')
        audio_bytes = audio_file.read()
        st.audio(audio_bytes, format='audio/mp3')
        url = None
    else:
        st.write(f' \tReading from YouTube url: {url}')
        st.video(url)

    _, col2, _ = st.columns([1, 1, 1])
    with col2:
        st.markdown('##')
        recipe, debug = pianocktail(unit=unit, url=url, midi=midi, audio=audio, verbose=verbose, debug=debug)
        with open(folder_path + 'debug.pk', 'wb') as f:
            pickle.dump(debug, f)
        with open(folder_path + 'recipe.txt', 'w') as f:
            f.write(recipe)
        paths = debug['all_paths']
        if paths['url'] is not None:
            with open(folder_path + 'url.txt', 'w') as f:
                f.write(paths['url'])
        for k in ['audio_path', 'midi_path']:
            origin = paths[k]
            if origin is not None:
                copy(origin, folder_path + origin.split('/')[-1])
        st.subheader('Recipe')
        recipe = recipe.replace(' Enjoy!', ' \nEnjoy!').replace('\n', ' \n')
        st.text(recipe)
        st.markdown('**About this synesthesia**')
        closest_songs = [debug['nn_music'][i][:-26].split('structured_')[1].replace('_', ' ') for i in range(3)]
        str_songs = 'These are the closest songs I know: '
        str_songs += ' '.join([f' \n* {closest_songs[i]}' for i in range(3)])
        st.markdown(str_songs + '.')
        str_cocktails = 'These are existing cocktails that are close to the taste of this song:'
        str_cocktails += ' '.join([f' \n* {cocktail_name}: {cocktail_url}'
                                   for cocktail_name, cocktail_url in zip(debug['nearest_cocktail_names'][:3], debug['nearest_cocktail_urls'][:3])])
        st.markdown(str_cocktails + '.')


if __name__ == '__main__':
    setup_streamlite()
    # urls = ["https://www.youtube.com/watch?v=PLFVGwGQcB0",
    #         "https://www.youtube.com/watch?v=VQmuAr93OlI",
    #         "https://www.youtube.com/watch?v=Nv2GgV34qIg&list=PLO9E3V4rGLD8_iWrCioJRWZXJJE3Fzu_J&index=4",
    #         "https://www.youtube.com/watch?v=qAEIjWYdoYc&list=PLO9E3V4rGLD8_iWrCioJRWZXJJE3Fzu_J&index=1",
    #         "https://www.youtube.com/watch?v=M73x3O7dhmg&list=PLO9E3V4rGLD8_iWrCioJRWZXJJE3Fzu_J&index=5"]
    # setup_translation_models()
    # setup_and_run(url=urls[0], verbose=True, debug=True)
    # recipes = []
    # for url in urls:
    #     recipe = pianocktail(url=url, verbose=True, debug=True)[0]
    #     recipes.append(recipe)
    # stop = 1
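    # To launch the app locally (assuming this file is the Streamlit entry point, e.g. app.py):
    #     streamlit run app.py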