Dataset Preparation
In [ ]:
Copied!
!pip install pytube --user
!pip install requests
!pip install pandas
!pip install numpy
!pip install pytube --user
!pip install requests
!pip install pandas
!pip install numpy
Testing video download¶
In [ ]:
Copied!
import os
import json
from pytube import YouTube
import re
def remove_special_characters(input_string):
# Using regex to keep only alphanumeric characters and spaces
clean_string = re.sub(r'[^a-zA-Z0-9\s]', '', input_string)
return clean_string
def download_video_info(video_url, output_directory='downloads'):
try:
# Create a YouTube object
yt = YouTube(video_url)
# Create a directory for downloads if it doesn't exist
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# Get the highest resolution audio stream
audio_stream = yt.streams.filter(only_audio=True).first()
# Download the audio stream
file_name = remove_special_characters(yt.title)
file_name = file_name.replace(" ","_")
file_name = file_name.replace("…","_")
file_name = file_name.replace(",","_")
audio_stream.download(output_directory,filename=f'{file_name}.wav')
audio_path = os.path.join(f"{output_directory}/{file_name}.wav")
print(f"Downloading audio to {audio_path}...")
# Collect video information
video_info = {
'title': yt.title,
'duration': yt.length,
'author': yt.author,
'views': yt.views,
'description': yt.description,
'audio_path': audio_path
}
return video_info
except Exception as e:
print(f"An error occurred: {str(e)}")
return None
def process_video_links(file_path):
with open(file_path, 'r') as file:
video_links = file.readlines()
video_data_list = []
# video_links = ["https://www.youtube.com/watch?v=CgruI1RjH_c"]
for video_link in video_links:
video_link = video_link.strip()
video_info = download_video_info(video_link)
if video_info:
video_data_list.append(video_info)
# Save video data to a JSON file
output_json_path = 'video_data.json'
with open(output_json_path, 'w') as json_file:
json.dump(video_data_list, json_file, indent=2)
print(f'Video data saved to {output_json_path}')
# Replace 'YOUR_TEXT_FILE_PATH' with the path to your text file containing video links
text_file_path = "./downloads/Fireship_clone/@Fireship-shorts.txt"
# Process video links and save data to JSON
process_video_links(text_file_path)
import os
import json
from pytube import YouTube
import re
def remove_special_characters(input_string):
# Using regex to keep only alphanumeric characters and spaces
clean_string = re.sub(r'[^a-zA-Z0-9\s]', '', input_string)
return clean_string
def download_video_info(video_url, output_directory='downloads'):
try:
# Create a YouTube object
yt = YouTube(video_url)
# Create a directory for downloads if it doesn't exist
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# Get the highest resolution audio stream
audio_stream = yt.streams.filter(only_audio=True).first()
# Download the audio stream
file_name = remove_special_characters(yt.title)
file_name = file_name.replace(" ","_")
file_name = file_name.replace("…","_")
file_name = file_name.replace(",","_")
audio_stream.download(output_directory,filename=f'{file_name}.wav')
audio_path = os.path.join(f"{output_directory}/{file_name}.wav")
print(f"Downloading audio to {audio_path}...")
# Collect video information
video_info = {
'title': yt.title,
'duration': yt.length,
'author': yt.author,
'views': yt.views,
'description': yt.description,
'audio_path': audio_path
}
return video_info
except Exception as e:
print(f"An error occurred: {str(e)}")
return None
def process_video_links(file_path):
with open(file_path, 'r') as file:
video_links = file.readlines()
video_data_list = []
# video_links = ["https://www.youtube.com/watch?v=CgruI1RjH_c"]
for video_link in video_links:
video_link = video_link.strip()
video_info = download_video_info(video_link)
if video_info:
video_data_list.append(video_info)
# Save video data to a JSON file
output_json_path = 'video_data.json'
with open(output_json_path, 'w') as json_file:
json.dump(video_data_list, json_file, indent=2)
print(f'Video data saved to {output_json_path}')
# Replace 'YOUR_TEXT_FILE_PATH' with the path to your text file containing video links
text_file_path = "./downloads/Fireship_clone/@Fireship-shorts.txt"
# Process video links and save data to JSON
process_video_links(text_file_path)
Testing Audio Transcription api¶
In [2]:
Copied!
import json
from deepgram import DeepgramClient, PrerecordedOptions
def transcribe_audio(audio_file_path):
# Your Deepgram API Key
DEEPGRAM_API_KEY = ''
# Initialize the Deepgram SDK
deepgram = DeepgramClient(DEEPGRAM_API_KEY)
# Call the transcribe_file method on the prerecorded class
with open(audio_file_path, "rb") as file:
buffer_data = file.read()
payload = {
"buffer": buffer_data,
}
options = PrerecordedOptions(
model="nova-2",
language="en",
smart_format=True,
punctuate=True,
paragraphs=True,
diarize=True,
summarize="v2",
detect_topics=True,
filler_words=True,
)
file_response = deepgram.listen.prerecorded.v("1").transcribe_file(payload, options)
file_response = file_response.to_json()
json_final = json.loads(file_response)
with open(f"test.json", "w") as file:
json.dump(json_final, file, indent=4)
return json_final
# # Example usage:
# audio_file_path = "./downloads/Fireship_clone/100+_Computer_Science_Concepts_Explained.wav"
# transcribe_audio(audio_file_path)
# print("Transcribing completed successfully")
import json
from deepgram import DeepgramClient, PrerecordedOptions
def transcribe_audio(audio_file_path):
# Your Deepgram API Key
DEEPGRAM_API_KEY = ''
# Initialize the Deepgram SDK
deepgram = DeepgramClient(DEEPGRAM_API_KEY)
# Call the transcribe_file method on the prerecorded class
with open(audio_file_path, "rb") as file:
buffer_data = file.read()
payload = {
"buffer": buffer_data,
}
options = PrerecordedOptions(
model="nova-2",
language="en",
smart_format=True,
punctuate=True,
paragraphs=True,
diarize=True,
summarize="v2",
detect_topics=True,
filler_words=True,
)
file_response = deepgram.listen.prerecorded.v("1").transcribe_file(payload, options)
file_response = file_response.to_json()
json_final = json.loads(file_response)
with open(f"test.json", "w") as file:
json.dump(json_final, file, indent=4)
return json_final
# # Example usage:
# audio_file_path = "./downloads/Fireship_clone/100+_Computer_Science_Concepts_Explained.wav"
# transcribe_audio(audio_file_path)
# print("Transcribing completed successfully")
Download Youtube video transcribe it and save the transcribe¶
In [ ]:
Copied!
import os
import json
from pytube import YouTube
from tqdm import tqdm
from deepgram import DeepgramClient, PrerecordedOptions
def download_and_transcribe_video(video_url, output_directory='downloads'):
try:
# Create a YouTube object
yt = YouTube(video_url)
# Create a directory for downloads if it doesn't exist
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# Get the highest resolution audio stream
audio_stream = yt.streams.filter(only_audio=True).first()
# Download the audio stream with tqdm progress bar
file_name = remove_special_characters(yt.title)
file_name = file_name.replace(" ","_")
file_name = file_name.replace("…","_")
file_name = file_name.replace(",","_")
audio_path = os.path.join(output_directory, f'{file_name}.wav')
print(f"Downloading audio to {audio_path}...")
# with tqdm(total=audio_stream.filesize, unit='B', unit_scale=True, desc=f'Downloading {file_name}') as bar:
# def on_progress(chunk, _):
# bar.update(len(chunk))
audio_stream.download(output_directory, filename=f'{file_name}.wav')
transcript = transcribe_audio(audio_path)
# Collect video information
video_info = {
'link': video_url,
'title': yt.title,
'duration': yt.length,
'author': yt.author,
'views': yt.views,
'description': yt.description,
'audio_path': audio_path,
'transcript': transcript
}
save_transcript_to_json(video_info, f'{output_directory}/{file_name}_transcript.json')
append_transcript_to_json(video_info, f'final_json_transcript_final.json')
return video_info
except Exception as e:
print(f"An error occurred: {str(e)}")
return None
def save_transcript_to_json(transcript, json_path):
with open(json_path, 'w') as file:
json.dump(transcript, file, indent=4)
print(f'Transcript saved to {json_path}')
def append_transcript_to_json(transcript, json_path):
# Create an empty list if the file doesn't exist yet
if not os.path.exists(json_path):
with open(json_path, 'w') as file:
json.dump([], file)
# Load existing data from the file
with open(json_path, 'r') as file:
data = json.load(file)
# Append the new transcript to the list
data.append(transcript)
# Save the updated list to the file
with open(json_path, 'w') as file:
json.dump(data, file, indent=4)
print(f'Transcript appended to {json_path}')
def process_video_links(file_path):
with open(file_path, 'r') as file:
video_list = file.readlines()
video_data_list = []
videos_to_process = [video_line.strip().split(",") for video_line in video_list if video_line.strip().endswith(',0')]
# for video_link in tqdm(video_list, desc='Processing videos', unit='video'):
# for idx, video_line in enumerate(tqdm(video_list, desc='Processing videos', unit='video')):
for video_link, progress in tqdm(videos_to_process, desc='Processing videos', unit='video'):
# video_link, progress = video_line.split(",")
if int(progress) == 0:
video_link = video_link.strip()
print(f'\nDownloading and transcribing: {video_link}')
try:
video_info = download_and_transcribe_video(video_link)
video_data_list.append(video_info)
idx = video_list.index(f'{video_link},0\n')
video_list[idx] = f'{video_link},1\n'
except:
# save failed video links in a text file
print(f'\nError processing video',video_link)
with open("logs_file.txt", 'a') as log_file:
log_file.write(video_link)
else:
print("Video already downloaded and processed")
with open(file_path, "w") as file:
file.writelines(video_list)
# Replace 'YOUR_TEXT_FILE_PATH' with the path to your text file containing video links
text_file_path = "./downloads/Fireship_clone_2/@Fireship-videos-remaining.txt"
# Process video links and save data to JSON
process_video_links(text_file_path)
import os
import json
from pytube import YouTube
from tqdm import tqdm
from deepgram import DeepgramClient, PrerecordedOptions
def download_and_transcribe_video(video_url, output_directory='downloads'):
try:
# Create a YouTube object
yt = YouTube(video_url)
# Create a directory for downloads if it doesn't exist
if not os.path.exists(output_directory):
os.makedirs(output_directory)
# Get the highest resolution audio stream
audio_stream = yt.streams.filter(only_audio=True).first()
# Download the audio stream with tqdm progress bar
file_name = remove_special_characters(yt.title)
file_name = file_name.replace(" ","_")
file_name = file_name.replace("…","_")
file_name = file_name.replace(",","_")
audio_path = os.path.join(output_directory, f'{file_name}.wav')
print(f"Downloading audio to {audio_path}...")
# with tqdm(total=audio_stream.filesize, unit='B', unit_scale=True, desc=f'Downloading {file_name}') as bar:
# def on_progress(chunk, _):
# bar.update(len(chunk))
audio_stream.download(output_directory, filename=f'{file_name}.wav')
transcript = transcribe_audio(audio_path)
# Collect video information
video_info = {
'link': video_url,
'title': yt.title,
'duration': yt.length,
'author': yt.author,
'views': yt.views,
'description': yt.description,
'audio_path': audio_path,
'transcript': transcript
}
save_transcript_to_json(video_info, f'{output_directory}/{file_name}_transcript.json')
append_transcript_to_json(video_info, f'final_json_transcript_final.json')
return video_info
except Exception as e:
print(f"An error occurred: {str(e)}")
return None
def save_transcript_to_json(transcript, json_path):
with open(json_path, 'w') as file:
json.dump(transcript, file, indent=4)
print(f'Transcript saved to {json_path}')
def append_transcript_to_json(transcript, json_path):
# Create an empty list if the file doesn't exist yet
if not os.path.exists(json_path):
with open(json_path, 'w') as file:
json.dump([], file)
# Load existing data from the file
with open(json_path, 'r') as file:
data = json.load(file)
# Append the new transcript to the list
data.append(transcript)
# Save the updated list to the file
with open(json_path, 'w') as file:
json.dump(data, file, indent=4)
print(f'Transcript appended to {json_path}')
def process_video_links(file_path):
with open(file_path, 'r') as file:
video_list = file.readlines()
video_data_list = []
videos_to_process = [video_line.strip().split(",") for video_line in video_list if video_line.strip().endswith(',0')]
# for video_link in tqdm(video_list, desc='Processing videos', unit='video'):
# for idx, video_line in enumerate(tqdm(video_list, desc='Processing videos', unit='video')):
for video_link, progress in tqdm(videos_to_process, desc='Processing videos', unit='video'):
# video_link, progress = video_line.split(",")
if int(progress) == 0:
video_link = video_link.strip()
print(f'\nDownloading and transcribing: {video_link}')
try:
video_info = download_and_transcribe_video(video_link)
video_data_list.append(video_info)
idx = video_list.index(f'{video_link},0\n')
video_list[idx] = f'{video_link},1\n'
except:
# save failed video links in a text file
print(f'\nError processing video',video_link)
with open("logs_file.txt", 'a') as log_file:
log_file.write(video_link)
else:
print("Video already downloaded and processed")
with open(file_path, "w") as file:
file.writelines(video_list)
# Replace 'YOUR_TEXT_FILE_PATH' with the path to your text file containing video links
text_file_path = "./downloads/Fireship_clone_2/@Fireship-videos-remaining.txt"
# Process video links and save data to JSON
process_video_links(text_file_path)
In [4]:
Copied!
# combine all the json files into a single file
import os
import json
def combine_json_files(directory_name, output_file='combined.json'):
combined_data = []
# Check if the directory exists
if not os.path.exists(directory_name) or not os.path.isdir(directory_name):
print(f"Error: {directory_name} is not a valid directory.")
return
# Loop through all files in the directory
for filename in os.listdir(directory_name):
file_path = os.path.join(directory_name, filename)
# Check if the file is a JSON file
if os.path.isfile(file_path) and filename.endswith('.json'):
with open(file_path, 'r') as file:
try:
# Load JSON data from the file
json_data = json.load(file)
# Append the loaded data to the combined_data list
combined_data.append(json_data)
except json.JSONDecodeError as e:
print(f"Error decoding JSON in file {filename}: {e}")
# Write the combined_data to a new JSON file
with open(output_file, 'w') as output_file:
json.dump(combined_data, output_file, indent=2)
print(f"Combined JSON data saved to {output_file.name}")
# Example usage:
directory_name = './downloads/'
combine_json_files(directory_name)
# combine all the json files into a single file
import os
import json
def combine_json_files(directory_name, output_file='combined.json'):
combined_data = []
# Check if the directory exists
if not os.path.exists(directory_name) or not os.path.isdir(directory_name):
print(f"Error: {directory_name} is not a valid directory.")
return
# Loop through all files in the directory
for filename in os.listdir(directory_name):
file_path = os.path.join(directory_name, filename)
# Check if the file is a JSON file
if os.path.isfile(file_path) and filename.endswith('.json'):
with open(file_path, 'r') as file:
try:
# Load JSON data from the file
json_data = json.load(file)
# Append the loaded data to the combined_data list
combined_data.append(json_data)
except json.JSONDecodeError as e:
print(f"Error decoding JSON in file {filename}: {e}")
# Write the combined_data to a new JSON file
with open(output_file, 'w') as output_file:
json.dump(combined_data, output_file, indent=2)
print(f"Combined JSON data saved to {output_file.name}")
# Example usage:
directory_name = './downloads/'
combine_json_files(directory_name)
Combined JSON data saved to combined.json
Huggingface Dataset prepraration¶
In [ ]:
Copied!
from datasets import load_dataset
dataset = load_dataset("json", data_files="./combined.json")
# dataset2 = load_dataset("json", data_files="./final_json_transcript_final.json")
from datasets import load_dataset
dataset = load_dataset("json", data_files="./combined.json")
# dataset2 = load_dataset("json", data_files="./final_json_transcript_final.json")
In [6]:
Copied!
dataset
dataset
Out[6]:
DatasetDict({ train: Dataset({ features: ['author', 'duration', 'description', 'transcript', 'audio_path', 'link', 'title', 'views'], num_rows: 522 }) })
In [87]:
Copied!
# Code to see which videos links have failed to download and transcribe
# Assuming you have the text file named 'input.txt' and the list of video links
# named 'video_link_list'
input_file_path = './downloads/Fireship_clone_2/@Fireship-videos.txt'
output_file_path = 'output.txt'
# Read the existing links from the text file
with open(input_file_path, 'r') as file:
existing_links = [line.split(',')[0] for line in file]
# Filter out the links that are not in video_link_list
new_links = [link for link in existing_links if link not in dataset["train"]["link"]]
# Write the new links to the output file
with open(output_file_path, 'w') as output_file:
for link in new_links:
output_file.write(f"{link},0\n")
print(f"New links written to {output_file_path}")
# Code to see which videos links have failed to download and transcribe
# Assuming you have the text file named 'input.txt' and the list of video links
# named 'video_link_list'
input_file_path = './downloads/Fireship_clone_2/@Fireship-videos.txt'
output_file_path = 'output.txt'
# Read the existing links from the text file
with open(input_file_path, 'r') as file:
existing_links = [line.split(',')[0] for line in file]
# Filter out the links that are not in video_link_list
new_links = [link for link in existing_links if link not in dataset["train"]["link"]]
# Write the new links to the output file
with open(output_file_path, 'w') as output_file:
for link in new_links:
output_file.write(f"{link},0\n")
print(f"New links written to {output_file_path}")
New links written to output.txt
In [ ]:
Copied!
from huggingface_hub import notebook_login
notebook_login()
from huggingface_hub import notebook_login
notebook_login()
In [9]:
Copied!
dataset
dataset
Out[9]:
DatasetDict({ train: Dataset({ features: ['author', 'duration', 'description', 'transcript', 'audio_path', 'link', 'title', 'views'], num_rows: 522 }) })
In [10]:
Copied!
import pandas as pd
df = pd.DataFrame(dataset['train'])
import pandas as pd
df = pd.DataFrame(dataset['train'])
In [11]:
Copied!
df.head()
df.head()
Out[11]:
author | duration | description | transcript | audio_path | link | title | views | |
---|---|---|---|---|---|---|---|---|
0 | Fireship | 787 | Learn the fundamentals of Computer Science wit... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100+_Computer_Science_Concepts_Expla... | https://www.youtube.com/watch?v=-uleG_Vecis | 100+ Computer Science Concepts Explained | 2110216 |
1 | Fireship | 743 | The ultimate 10 minute JavaScript course that ... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100+_JavaScript_Concepts_you_Need_to... | https://www.youtube.com/watch?v=lkIFF4maKMU | 100+ JavaScript Concepts you Need to Know | 1642938 |
2 | Fireship | 798 | WebDev 101 is a complete introduction into the... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100+_Web_Development_Things_you_Shou... | https://www.youtube.com/watch?v=erEgovG9WBs | 100+ Web Development Things you Should Know | 1296840 |
3 | Fireship | 1471 | Top 100 Firebase Pro Tips 🔥💯. Optimize your ap... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100_Firebase_Tips,_Tricks,_and_Screw... | https://www.youtube.com/watch?v=iWEgpdVSZyg | 100 Firebase Tips, Tricks, and Screw-ups | 177364 |
4 | Fireship | 246 | Google made a ton of exciting announcements at... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\10_crazy_announcements_from_Google_I... | https://www.youtube.com/watch?v=nmfRDRNjCnM | 10 crazy announcements from Google I/O | 968111 |
In [12]:
Copied!
df.rename(columns={'transcript': 'transcript_json'}, inplace=True)
df.head()
df.rename(columns={'transcript': 'transcript_json'}, inplace=True)
df.head()
Out[12]:
author | duration | description | transcript_json | audio_path | link | title | views | |
---|---|---|---|---|---|---|---|---|
0 | Fireship | 787 | Learn the fundamentals of Computer Science wit... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100+_Computer_Science_Concepts_Expla... | https://www.youtube.com/watch?v=-uleG_Vecis | 100+ Computer Science Concepts Explained | 2110216 |
1 | Fireship | 743 | The ultimate 10 minute JavaScript course that ... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100+_JavaScript_Concepts_you_Need_to... | https://www.youtube.com/watch?v=lkIFF4maKMU | 100+ JavaScript Concepts you Need to Know | 1642938 |
2 | Fireship | 798 | WebDev 101 is a complete introduction into the... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100+_Web_Development_Things_you_Shou... | https://www.youtube.com/watch?v=erEgovG9WBs | 100+ Web Development Things you Should Know | 1296840 |
3 | Fireship | 1471 | Top 100 Firebase Pro Tips 🔥💯. Optimize your ap... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100_Firebase_Tips,_Tricks,_and_Screw... | https://www.youtube.com/watch?v=iWEgpdVSZyg | 100 Firebase Tips, Tricks, and Screw-ups | 177364 |
4 | Fireship | 246 | Google made a ton of exciting announcements at... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\10_crazy_announcements_from_Google_I... | https://www.youtube.com/watch?v=nmfRDRNjCnM | 10 crazy announcements from Google I/O | 968111 |
In [13]:
Copied!
json_string = df.loc[0, 'transcript_json']
# Display the loaded JSON object
print(json_string["results"]["channels"][0]["alternatives"][0]["transcript"])
json_string = df.loc[0, 'transcript_json']
# Display the loaded JSON object
print(json_string["results"]["channels"][0]["alternatives"][0]["transcript"])
What's the first thing you should do when your code throws an error? Obviously, you should change nothing and try to run it again a few times. If that doesn't work, you're gonna need a computer science degree. The awesome thing about software engineering is that you can learn to code and get a high paying job, while literally having no idea how anything actually works. It all just feels like magic. Like a pilot driving a giant metal tube in the sky while knowing nothing about aerodynamics. Mother of God, no. Holy shit. Shit. Welcome to computer science 101. In today's video, you'll learn the science behind the garbage code you've been writing by learning 101 different computer science terms and concepts. This is a computer. It's just a piece of tape that holds ones and zeros along with a device that can read and write to it. It's called a Turing machine and in theory, it can compute anything, like the graphics in this video or the algorithm that recommended that you watch it. At the core of modern computers, we have the central processing unit. If we crack rack it open, we find a piece of silicon that contains billions of tiny transistors, which are like microscopic on off switches. The value at one of these switches is called a bit end is the smallest piece of information a computer can use. However, 1 bit by itself is not very useful, so they come in a package of 8 called a byte. 1 byte can represent 250 6 different values, like all the characters that you type on your keyboard. In fact, when you type into your keyboard, the character produced is actually mapped to a binary value in a character encoding like ASCII or utf8 binary is just a system for counting, like the base ten system you normally use when counting seeing on your fingers, but it only has 2 characters, 1 and 0. Humans have a hard time reading binary, so most often it's represented in a hexadecimal base 16 format, where ten numbers and 6 letters can represent a 4 bit group called a nibble. As a developer, when you write code in a programming language, it will actually be converted into machine code, which is a binary format that can be decoded and executed by the CPU. What it doesn't do though is store data for your applications. For that, computers have random access memory or RAM. It's like a neighborhood, and inside of every house lives a byte. Every location has a memory address, which the CPU can read and write too. You can think of the CPU and RAM as the brain of the computer. But in order for a computer to be useful, it needs to handle input and output. An input device might be the keyboard and mouse, while an output device might be your monitor. Luckily, most developers don't need to worry about how this hardware fits together because we have operating system kernels, like Linux, Mac, and Windows that control all hardware resources via device drivers. Now, to start hacking on the operating system, your first entry point is the shell, which is a program that is the operating system to the end user. It's called a shell because it wraps the kernel. It takes a line of text as input and produces an output. At this is called a command line interface. Not only can it connect to your own computer, but with the secure shell protocol, it can also connect to remote computers a network. Now that you have access to the mainframe, it's time to pick a programming language, which is a tool that uses the abstraction principle to make computers practical to work with for humans by simplifying different systems layer by layer. Some languages like Python are interpreted. That means there's a program called an interpreter that will execute each line of code 1 by 1. Other languages like c plus plus are compiled. They use a compiler to convert the entire program into machine code in advance before the CPU attempts to execute it. This results in an executable file that can be run by the operating system without any extra dependencies. Now every every programming language has a variety of built in data types to represent the data we're working with in our code. Instead of bytes, we work with more human friendly things select characters and numbers. Now, the most fundamental way to use data in your application is to declare a variable. This attaches a name to a data point, allowing you to reuse it somewhere else in your code. Python is a dynamically typed language, which means we don't need to tell the program exactly which data type is assigned to a variable. It just figures it out automatically. However, other languages like C are statically typed, and that means you need to specify the data type of a variable in your code. When you define a variable, its value is stored somewhere in memory on the hardware, and you may need to allocate and free up memory already throughout the program. A pointer is a variable whose value is the memory address of another variable, which can be used for low level memory control. Many languages don't want to deal with low level memory management and instead implement a garbage collector, which automatically allocates and deallocates memory when an object is no longer referenced in the program. Carpet day. No. Now, the data types available are different in every programming language, but typically you'll find int to represent whole numbers, switch may or may not be signed or unsigned to represent negative numbers as well. When numbers require a decimal point, they typically use the floating point type. It's called a float because there's only enough memory to represent a certain range of numbers at a certain precision, and is basically a form of scientific notation to make computers faster. If you need more range or precision, many languages also have a double that doubles the amount of memory used for the number. Now when it comes to characters, you'll typically find the char data type to represent a single character or more commonly a string to represent multiple characters together. Ultimately, these characters triggers get stored in a memory address somewhere, but they need to be stored in a certain order. When the order starts with the most significant byte and the smallest memory address, it's called big endian or vice versa, if the least significant byte is stored in the smallest address, it's called little endian. When it comes to practical software for engineering. One of the most fundamental things we do is organize data into data structures. The most useful data structure is probably the array or list. Just like a shopping list. It organizes multiple data points and order. However, it also maintains an index of integers that starts at 0 and goes up for every new item in the list. That can be useful, but you don't actually need an index to create a list of items. Another option is a link list where each item has a pointer to the next item in front of Another option is a stack that follows the last in first out principle. It's like stacking a set of plates, then when you want to access data, you pop the last one off the top. The inverse option is a q, which is first in first out. Just like when you get into the breadline, the first person there is the first one to be fed. Now, another extremely useful data structure is the hash, which might also be called a map or dictionary. It's like an array, but instead of an index of integers, you define the keys that point to each individual item, giving you a collection of key value pairs. In many cases though, it's not efficient to organize data in a linear way. To address that problem, we have trees, which organize nodes together in a hierarchy that can often be traversed more quickly. This can sometimes be too rigid of data structure though. So instead, a graph can be created to connect multiple nodes together in a virtually unlimited number of ways. A graph has a node for the data and an edge for the relationship between the data points. Data structures are essential, but they don't do anything by themselves. To do something useful, you'll need to code up an algorithm, which is just code that solves a problem. I took the initiative in creating the Internet. In our code, we have several mechanisms for implementing algorithms. The most fundamental of which is a function, which is a block of code that takes an input then does something and returns an output. Like a variable, a function has a name and it can called from other parts of your code with different input parameters called arguments. One thing you might do in the function body is compare one value to another. Every language has a variety of built in operators like equality, greater than, and less than that you can use to compare 2 values. If a is greater than b, then it forms a value of true, but if b is greater than a, then the value is false. True false is what's known as a boolean data type and whenever your code produces value like this, it's known as an expression, but not all code will produce a value. Sometimes your code will simply do something which is known as a statement. A good example is the if statement which handles conditional logic. For example, if the condition is true, it will execute this code, otherwise it will short circuit and run the code inside of the else block. Another very common type of statement is a loop. A while loop will run this block of code over and over again until the condition in the parenthesis becomes false. That can be useful, but more often than not, you'll want to loop over an iterable data type like an array. Most languages have a for loop that can run some code for every object in the array or iterable data structure. Now in some cases, a function may not have an output, which is generally called a void function. An interesting thing about functions is that they can call themselves. When a function calls itself, it's called recursion because when done like this by default, it will recurse forever creating an infinite loop. That happens because when you call a function, the programming language will put it into memory on what's known as the call stack, which is a short term chunk of memory for executing your code. When a function keeps calling itself, the language will keep pushing frames onto the call stack until you get a stack overflow error. To avoid this, your algorithm needs a base condition so it knows when to terminate the loop. Now, when you write an algorithm, you'll need to determine if it's any good, and the system for doing that is called big o notation. It's a standard format for approximating the performance have an algorithm at scale. It may reference time complexity, which is how fast your algorithm will run, and space complexity, which deals with how much memory is required to run it. Developers have many different algorithm types at their disposal. The most crude option is brute force, where you might loop over every possible combination to hack somebody's credit card pin. A more sophisticated approach might be divide and conquer, like binary search where you cut the problem in half multiple times until you find what you're looking for. Another option is dynamic programming algorithms, where a problem is broken down into multiple smaller sub problems and the result of each computation is stored for later use using a technique called memoization. That means if a function has already been called, it will use the existing value instead of recomputing it again from scratch. Then we have greedy algorithms that will make the choice that is most beneficial in the short term without considering the problem as a whole. One example of this is Dijkstra's shortest path algorithm. On the flip side, we have backtracking algorithms, which take a more incremental approach by looking at all the possible options, like a rat in a maze exploring all the different potential paths. Now, when it comes to implementing your code, there are always multiple ways to get the job done. One aiming paradigm is declarative, where your code describes what the program does and the outcome, but doesn't care about things like control flow. This style of programming is often associated with functional languages like Haskell. The other paradigm is imperative programming, where your code uses statements like if and while, providing explicit instructions about how to produce an outcome. It's associated with procedural languages like C. Today, most general purpose languages like Python, JavaScript, Hotline, Swift, and so on are multi paradigm, which means they support all these options at the same time, in addition to object oriented programming. The idea behind OOP is that you use classes to write a blueprint for the data or objects in your code. A class can encapsulate variables, which are commonly called properties, as well as functions, which are usually called methods in this context. It's a common way to organize and reuse code because classes can share behaviors between each other through inheritance, where a subclass can extend and override the behaviors of the parent class. And it opens the door to all kinds have other ideas called design patterns. Now, a class by itself doesn't actually do anything. Instead, it's used to instantiate objects, which are actual chunks of data that live in your computer's memory. Often, you'll want to reference the same object over and over again in your code. When data is long lived, it can't go in the call stack. Instead, most languages have a separate area of memory hold the heap, which unlike the call stack can grow and shrink based on how your application is used. It also allows you to pass objects by reference, which means you can use the same object in multiple variables without increasing the memory footprint because it always points to the same chunk of memory in the heap. Now, what's interesting is that if we go back to the CPU that we talked about in the beginning, you'll notice that it contains multiple threads. A thread takes the physical CPU core in breaks into virtual cores that allow it to run code simultaneously. There are some programming languages that support parallelism where you can write code that literally executes on 2 different threads at the same time. However, many languages out there are only single threaded, but that doesn't mean they can't do 2 things at the same time. Instead, they implement concurrency models like an event loop or coroutines that can pause or delay the normal execution of code to handle multiple job's on a single thread at the same time. Now, in modern computing, we're rarely working with the bare metal CPU and RAM. Instead, we work in the cloud with a virtual machine, which is just a piece set software that simulates hardware that allows us to take really big computers and split them up into a bunch of smaller virtual computers. These machines are the backbone of the Internet and are connected via the Internet protocol. Each machine has a unique IP address to identify it on the network. Work. That IP address is usually alias to a URL that is registered in a global database called the domain name service. Now to establish a connection, in. The 2 computers will perform a TCP handshake, which will allow them to exchange messages called packets. On top of that, there's usually security layer like SSL to encrypt and decrypt the messages over the network. Now the 2 computers can securely share data with the hypertext transfer for protocol. The client may request a web page, then the server will respond with some HTML. Modern servers provide a standardized way for a client to request data, which is called an application programming interface or API. The most common architecture is REST, where URLs are mapped to different data entities available on the server. And that brings us to our final topic, mother effing printers. You're gonna need to learn how these things work inside fighting out, because every time you go to grandma's house, she's going to ask you to fix it, which shouldn't be a problem for a computer scientist like you. Thanks for watching, and I will see you in the next one.
In [14]:
Copied!
import pandas as pd
import json
# Assuming your DataFrame is named df
def parse_json(row):
try:
transcript_json = row['transcript_json']
if transcript_json["results"]["summary"]["result"] == "success":
transcript = str(transcript_json["results"]["channels"][0]["alternatives"][0]["transcript"])
summary = str(transcript_json["results"]["summary"]["short"])
return transcript, summary
else:
print("an error occurred")
return None, None
except (json.JSONDecodeError, KeyError):
print("an exception occurred")
return None, None
# Apply the custom function to each row
df[['transcript', 'summary']] = df.apply(parse_json, axis=1, result_type='expand')
# Display the updated DataFrame
# print(df.head())
import pandas as pd
import json
# Assuming your DataFrame is named df
def parse_json(row):
try:
transcript_json = row['transcript_json']
if transcript_json["results"]["summary"]["result"] == "success":
transcript = str(transcript_json["results"]["channels"][0]["alternatives"][0]["transcript"])
summary = str(transcript_json["results"]["summary"]["short"])
return transcript, summary
else:
print("an error occurred")
return None, None
except (json.JSONDecodeError, KeyError):
print("an exception occurred")
return None, None
# Apply the custom function to each row
df[['transcript', 'summary']] = df.apply(parse_json, axis=1, result_type='expand')
# Display the updated DataFrame
# print(df.head())
In [15]:
Copied!
from datasets import Dataset
import pandas as pd
final_dataset = Dataset.from_pandas(df)
from datasets import Dataset
import pandas as pd
final_dataset = Dataset.from_pandas(df)
In [16]:
Copied!
final_dataset
final_dataset
Out[16]:
Dataset({ features: ['author', 'duration', 'description', 'transcript_json', 'audio_path', 'link', 'title', 'views', 'transcript', 'summary'], num_rows: 522 })
In [ ]:
Copied!
final_dataset.push_to_hub("Huggingface-userId/FS_transcribe_summary")
final_dataset.push_to_hub("Huggingface-userId/FS_transcribe_summary")
Prompt formatting to the following format¶
[INST]
You are youtuber called {author} you make engaging high-intensity and entertaining coding tutorials and tech news.
you covers a wide range of topics relevant to programmers, aiming to help them learn and improve their skills quickly.
Given the title of the video : {title}
and a small summary : {video_summary}
[/INST]
Generate the video : {video_transcript}
In [20]:
Copied!
import pandas as pd
import json
# Assuming your DataFrame is named df
def create_prompt(row):
try:
author = row["author"]
title = row["title"]
video_transcript = row["transcript"]
video_summary = row["summary"]
# transcript_json = row['transcript_json']
text = f"""
[INST]
You are youtuber called {author} you make engaging high-intensity and entertaining coding tutorials and tech news.
you covers a wide range of topics relevant to programmers, aiming to help them learn and improve their skills quickly.
Given the title of the video : {title}
and a small summary : {video_summary}
[/INST]
Generate the video : {video_transcript}
"""
return text
except (json.JSONDecodeError, KeyError):
print("an exception occurred")
return None
# Apply the custom function to each row
df['text'] = df.apply(create_prompt, axis=1, result_type='expand')
# Display the updated DataFrame
df.head()
import pandas as pd
import json
# Assuming your DataFrame is named df
def create_prompt(row):
try:
author = row["author"]
title = row["title"]
video_transcript = row["transcript"]
video_summary = row["summary"]
# transcript_json = row['transcript_json']
text = f"""
[INST]
You are youtuber called {author} you make engaging high-intensity and entertaining coding tutorials and tech news.
you covers a wide range of topics relevant to programmers, aiming to help them learn and improve their skills quickly.
Given the title of the video : {title}
and a small summary : {video_summary}
[/INST]
Generate the video : {video_transcript}
"""
return text
except (json.JSONDecodeError, KeyError):
print("an exception occurred")
return None
# Apply the custom function to each row
df['text'] = df.apply(create_prompt, axis=1, result_type='expand')
# Display the updated DataFrame
df.head()
Out[20]:
author | duration | description | transcript_json | audio_path | link | title | views | transcript | summary | text | |
---|---|---|---|---|---|---|---|---|---|---|---|
0 | Fireship | 787 | Learn the fundamentals of Computer Science wit... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100+_Computer_Science_Concepts_Expla... | https://www.youtube.com/watch?v=-uleG_Vecis | 100+ Computer Science Concepts Explained | 2110216 | What's the first thing you should do when your... | The importance of hardware and memory for a co... | \n [INST]\n You are youtuber cal... |
1 | Fireship | 743 | The ultimate 10 minute JavaScript course that ... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100+_JavaScript_Concepts_you_Need_to... | https://www.youtube.com/watch?v=lkIFF4maKMU | 100+ JavaScript Concepts you Need to Know | 1642938 | JavaScript. It's a wonderful programming langu... | The speaker explains that JavaScript is a prog... | \n [INST]\n You are youtuber cal... |
2 | Fireship | 798 | WebDev 101 is a complete introduction into the... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100+_Web_Development_Things_you_Shou... | https://www.youtube.com/watch?v=erEgovG9WBs | 100+ Web Development Things you Should Know | 1296840 | Web development is the best job in the world. ... | The internet is a collection of machines conne... | \n [INST]\n You are youtuber cal... |
3 | Fireship | 1471 | Top 100 Firebase Pro Tips 🔥💯. Optimize your ap... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\100_Firebase_Tips,_Tricks,_and_Screw... | https://www.youtube.com/watch?v=iWEgpdVSZyg | 100 Firebase Tips, Tricks, and Screw-ups | 177364 | Welcome to my top 10 Firebase tips. Welcome to... | The speakers discuss how to build successful r... | \n [INST]\n You are youtuber cal... |
4 | Fireship | 246 | Google made a ton of exciting announcements at... | {'metadata': {'channels': 1, 'created': '2024-... | downloads\10_crazy_announcements_from_Google_I... | https://www.youtube.com/watch?v=nmfRDRNjCnM | 10 crazy announcements from Google I/O | 968111 | It is May 11, 2023, and you're watching the Co... | In this video, the speakers discuss Google's u... | \n [INST]\n You are youtuber cal... |
In [21]:
Copied!
from datasets import Dataset
import pandas as pd
final_dataset = Dataset.from_pandas(df)
from datasets import Dataset
import pandas as pd
final_dataset = Dataset.from_pandas(df)
In [ ]:
Copied!
final_dataset.push_to_hub("Huggingface-userId/FS_transcribe_summary_prompt")
final_dataset.push_to_hub("Huggingface-userId/FS_transcribe_summary_prompt")
Parsing operation for the api response from deepgram¶
In [31]:
Copied!
with open("dummy.json","r") as f:
transcribe_json_list = json.load(f)
with open("dummy.json","r") as f:
transcribe_json_list = json.load(f)
In [36]:
Copied!
transcribe_json_list[0]["results"]["channels"][0]["alternatives"][0]["transcript"]
transcribe_json_list[0]["results"]["channels"][0]["alternatives"][0]["transcript"]
Out[36]:
"Have you ever woken up in the middle of the night in a panic wondering how to extract a polygonal mesh of an isosurface from a 3 dimensional discrete scalar field? Yeah. I didn't think so. But back in 87, 2 programmers at General Electric did. They created and patented the marching cubes algorithm, an algorithm that has likely saved countless lives by allowing doctors to visualize data from CT and MRI scans. Whenever you instruct a machine to solve a problem with code, you're creating an algorithm, a procedure for rearranging ones and zeros that can make animals talk and vacuums walk. Most algorithms belong in a dumpster, but some are fast, skin. Some are beautiful and some are so weird, they're indistinguishable from magic. Today, we'll look at 10 of the most interesting algorithms ever engineered sphere, and how they're used to solve very interesting problems in the real world. 1st on the list, we have wave function collapse. One of the weirdest things in all of science is the double slit experiment, where particles behave like a wave when you're not looking, but when you look, they suddenly collapse down to a particle. It seems counterintuitive, but it makes total sense when you realize we're living in a simulation and the universe wrote algorithm to cut down on its AWS build. It's an interesting concept to think about philosophically, but the general idea behind wave function collapse can also be implemented programmatically. Imagine we have a map for a video game, but what if this is a side scrolling game that can go on for eternity? We can't just make a bigger map, we need an algorithm to procedurally generate it on the fly. What's so weird is that we can take this initial map and think of it as being in the initial superposition of all possibilities. It's the wave function. Then upon observation, it collapses into a particle. Or in other words, it selects a random map tile but follows a consistent set of rules, like in this case, making sure that the roads are always connected, providing a random yet cohesive result and doesn't rely on any sort of modern generative AI. Speaking of which, AI is weird as hell. Diffusion is a machine learning algorithm originally developed at OpenAI and is the magic behind image generators like DALL E and Stable Diffusion. But the concept of diffusion actually comes from thermodynamics, where particles spread from areas of higher concentration to lower concentration. In artificial intelligence, the process is reversed. The algorithm starts by generating random noise, which would be like high entropy and thermodynamics, and gradually refines it to a structured image, which would be lower entropy. But first, you'll need to train model that can do this well. The diffusion algorithm works in 2 phases. In the forward phase, it gradually adds noise to an image step by step until it becomes completely random. In the second phase, the algorithm reverses this process, reconstructing it back into a coherent image. When the algorithm runs over millions of labeled images, we get a collection of weights that can be used to generate new images out of thin air, allowing us to build an infinite army OnlyFans models. It's highly compute intensive, but also works well on audio. And the next frontier is diffusion for video generation. But now Now let's talk about simulated annealing. One frustrating thing about programming is that for many problems, there's not just one solution, but many solutions. Like an Amazon warehouse has many different ways to organize its inventory, but some ways are more efficient than others. Annealing is a word that comes from metallurgy, where metals are skid heated up and cooled down over and over again to remove defects. The same idea is used in optimization algorithms to find the best answer or n a c of good answers. Imagine trying to find the highest point in a mountain range full of peaks and valleys. A simple hill climb algorithm won't work because there are many local peaks. Initially, the temperature sky allowing the algorithm to explore freely. As time goes on though, the temperature is lowered, which decreases the probability of accepting a worse solution. The off here is exploration versus exploitation. But the reason I included this algorithm is because it's also a good way for beginners to learn how to code. Initially, you start out exploring all kinds of different technologies and frameworks, then eventually you find one specific area to exploit and specialize in. But we can't talk about algorithms without talking about sorting. And the most ingenious sorting algorithm of all time is without a doubt, sleep sort. The majority of sane sorting algorithms out there use strategies like divide conquer to break up an array into subarrays where it can be sorted more efficiently. However, some random genius on 4 chan found a better way, but it's a bit unconventional. Here's what the code looks like in bash. It's incredibly simple. It loops over the array, and then for each element, it opens up a new thread that sleeps for the amount of time proportional to the value of its element. Then finally, after waking up, it prints that element. It's genius because it delegates the sorting to the CPU scheduler. It's also dumb and useless because it delegates sorting to the CPU scheduler. Speaking of which, you might be familiar with another useless sorting algorithm, BOGO sort, which tries to sort an array by randomly guessing over and over again. It's like playing the lottery. But what if we apply the same algorithm with quantum mechanics to the multiverse? If we're to trust multiverse science, we know that all possible outcomes exist in separate parallel universes. That means as a developer, if you find yourself with an unsorted array, there's or some other parallel universe where it is sorted. The technology isn't quite there yet, but if we could randomly observe these other universes to find the sorted array, we could then use a portal gun to travel to that universe, which would make our lives much easier. Although, we would obviously have to kill the version of ourself in that universe, but if it's a large array, quantum Bogosort might be worth it. That's purely hypothetical, but one of the most practical and goaded algorithms of all time is SCAR SA, a public key cryptosystem. It's essential for digital security, allowing people on the Internet to lock their mailboxes and sign their letters with a unique signature. But it's based on one simple mathematical reality. Multiplying large numbers to find 2 original large prime numbers is extremely difficult and time like it take your laptop 300,000,000,000,000 years to brute force. Unless quantum computers become a thing, and we can start leveraging Shor's algorithm, which can solve the integer factorization problem exponentially faster than any classical algorithm. Prime factoring is pretty simple, but how this algorithm does it is where things get weird. It relies on concepts like cubits, skits superposition and entanglement to perform massive amounts of calculations in parallel. The algorithm is legit, but so far, the biggest number ever factored is 21. Even IBM's state of the art Q System 1 fails when trying to factor the number 35. However, just recently, skin. The Chinese factored this big ass number with a quantum computer, but it uses a different algorithm that doesn't scale very well for large numbers unlike Shor's algorithm. Everything is safe for now, but when someone figures out how to make quantum computers work, expect all hell to break loose in the cybersecurity world. At the beginning of this video, I mentioned the marching cubes algorithm, but it deserves a closer look. So first, we start with a 3 d scalar field, which might represent data from an MRI machine. Each point in the 3 d space is represented by a single number or a scalar. The algorithm starts with a single point, then takes its 8 neighboring locations to form an cube, but treats the eight values as a bit in an 8 bit integer. This results in 256 different possibilities, which point to a precalculated array of polygons. The algorithm marches through each point to create a 3 d mesh that can be used in 3 d software. And at the time, this was really cool because as MRI machines produce slices of data that can now be rendered in 3 d. In modern times, though, we're often dealing with distributed systems in the cloud, And that brings us to the Byzantine generals problem. Imagine you're a general in the Byzantine army. You're camped around a city with a few other generals with plans to attack it the next morning. But what if one of the generals skitrunk and wakes up too hungover to attack. The entire system could collapse. Computers have the same problem. Sometimes they might fail or be infiltrated by hackers, and you never know when or where that's going to happen. Luckily, algorithms like PBFT are designed to solve this. They can keep a distributed network working properly even if up to 1 third of its nodes go haywire. It works by having a node broadcast a pre prepare message to other nodes, indicating its readiness to execute some code that will change the system. The other nodes will respond back in agreement. Then after a certain threshold, a consensus is formed. Once there's a consensus, The original node sends back a commit message to all the other nodes, which can then all execute the changes, making the entire state of the system consistent. Algorithms like this are essential for blockchain technology and things like distributed cloud databases. What's really cool about algorithms, though, is that they can also reflect nature, like Boyd's artificial life program. It was created back in 86 and simulates the flocking behavior of birds. What's so cool about it is that it demonstrates the emergent complexity or beauty that we get out of just a few simple rules. In this case, the birdoids have three rules. They steer to avoid crowding, they steer towards the average heading of the flock, and they steer towards the center of mass of their local flockmates. The end result are these intricate patterns that weren't programmed directly, but just emerge naturally. But finally, that brings us to an old algorithm that blew my mind just the other day and inspired this video, Boyer Moore string search. It's weird because it becomes faster and more efficient as the string it's searching becomes bigger. That seems impossible, but it makes sense when you understand the algorithm. It scans skin's text from right to left, then has two rules. When it encounters a bad character not found in the search pattern, it jumps past it based on an estimation made in a preprocess table. Scale. Likewise, if it finds a partial match, then a mismatch occurs. It has a separate pre calculated table that maximizes the number of characters it can safely skip. These rules are called heuristics, which are like functions that are not guaranteed to be perfect, but are far more practical than looping over every single character. In this case, the algorithm gets faster with more text because it's able to skip a higher proportion of characters. And if you've ever wondered why GREP is so fast, you have this algorithm to thank."
In [38]:
Copied!
transcribe_json_list[0]["results"]["summary"]["result"]
transcribe_json_list[0]["results"]["summary"]["result"]
Out[38]:
'success'
In [39]:
Copied!
transcribe_json_list[0]["results"]["summary"]["short"]
transcribe_json_list[0]["results"]["summary"]["short"]
Out[39]:
'The speakers discuss the use of algorithms in scientific research, including random random algorithms like BOGO sort and BOGO sort to solve problems in scientific research, and the potential uses of these algorithms in optimizing algorithms and algorithms for algorithms. They also touch on the use of quantum algorithms in machine design and the future of digital security, including the use of random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random random'
In [ ]:
Copied!
from tqdm import tqdm
final_dataset = []
# for video_link in tqdm(video_links, desc='Processing videos', unit='video'):
for transcribe_json in tqdm(transcribe_json_list,desc='Processing transcribe'):
transcribe = transcribe_json["results"]["channels"][0]["alternatives"][0]["transcript"]
if transcribe_json["results"]["summary"]["result"]=="success":
summary = transcribe_json["results"]["summary"]["short"]
final_json = {
"transcribe": transcribe,
"summary": summary
}
final_dataset.append(final_json)
with open("transcribe_data_final_processed.json", "w") as output:
json.dump(final_dataset, output)
from tqdm import tqdm
final_dataset = []
# for video_link in tqdm(video_links, desc='Processing videos', unit='video'):
for transcribe_json in tqdm(transcribe_json_list,desc='Processing transcribe'):
transcribe = transcribe_json["results"]["channels"][0]["alternatives"][0]["transcript"]
if transcribe_json["results"]["summary"]["result"]=="success":
summary = transcribe_json["results"]["summary"]["short"]
final_json = {
"transcribe": transcribe,
"summary": summary
}
final_dataset.append(final_json)
with open("transcribe_data_final_processed.json", "w") as output:
json.dump(final_dataset, output)
In [ ]:
Copied!
transcribe_json_list[0]["channels"]
transcribe_json_list[0]["channels"]
In [ ]:
Copied!
final_dataset_transcribe = load_dataset("json",data_files="./transcribe_data_final.json")
final_dataset_transcribe = load_dataset("json",data_files="./transcribe_data_final.json")
In [ ]:
Copied!
final_dataset_transcribe.push_to_hub("Huggingface-userId/FS_transcribe_summary")
final_dataset_transcribe.push_to_hub("Huggingface-userId/FS_transcribe_summary")
In [ ]:
Copied!
final_dataset_transcribe
final_dataset_transcribe
In [ ]:
Copied!
final_dataset_transcribe["train"][1]
final_dataset_transcribe["train"][1]
In [ ]:
Copied!
import json
with open("./video_data_and_transcripts.json") as F:
json_data = json.load(F)
import json
with open("./video_data_and_transcripts.json") as F:
json_data = json.load(F)
In [ ]:
Copied!
len(json_data)
len(json_data)