Maldewar committed on
Commit
103fa88
·
1 Parent(s): a8c7c73
Files changed (6)
  1. app.py +658 -0
  2. deeppunkt.py +73 -0
  3. lexrank.py +93 -0
  4. metrics.py +69 -0
  5. mysheet.py +41 -0
  6. yt_stats.py +160 -0
app.py ADDED
@@ -0,0 +1,658 @@
1
+ from youtube_transcript_api import YouTubeTranscriptApi as yta
2
+ from youtube_transcript_api import NoTranscriptFound, TranscriptsDisabled
3
+ import streamlit as st
4
+ from yt_stats import YTstats
5
+ from datetime import datetime
6
+ import isodate
7
+ import pandas as pd
8
+ import deeppunkt
9
+ import time
10
+ import lexrank
11
+ import mysheet
12
+
13
+ def time_it(func):
14
+ def wrapper(*args, **kwargs):
15
+ start = time.time()
16
+ result = func(*args, **kwargs)
17
+ end = time.time()
18
+ elapsed = end - start
19
+ #st.write(f"Elapsed time: {end - start}")
20
+ st.write('Load time: '+str(round(elapsed,1))+' sec')
21
+ return result
22
+ return wrapper
23
+
24
+ def reset_session():
25
+ if 'punkt' in st.session_state:
26
+ del st.session_state.punkt
27
+ if 'extract' in st.session_state:
28
+ del st.session_state.extract
29
+ if 'channel_id' in st.session_state:
30
+ del st.session_state.channel_id
31
+
32
+ def update_param_example():
33
+ #st.session_state.url_vid = st.session_state.ex_vid
34
+ video_id = get_id_from_link(st.session_state.ex_vid)
35
+ st.experimental_set_query_params(vid=video_id)
36
+ reset_session()
37
+
38
+ def update_param_textinput():
39
+ #st.session_state.url_vid = st.session_state.ti_vid
40
+ video_id = get_id_from_link(st.session_state.ti_vid)
41
+ st.experimental_set_query_params(vid=video_id)
42
+ reset_session()
43
+
44
+ def get_link_from_id(video_id):
45
+ if "v=" not in video_id:
46
+ return 'https://www.youtube.com/watch?v='+video_id
47
+ else:
48
+ return video_id
49
+
50
+
51
+ def get_id_from_link(link):
52
+ if "v=" in link:
53
+ return link.split("v=")[1].split("&")[0]
54
+ elif len(link)==11:
55
+ return link
56
+ else:
57
+ return "Error: Invalid Link."
58
+
59
+ # @st.cache(allow_output_mutation=True, suppress_st_warning=True)
60
+ # def retry_access_yt_object(url, max_retries=5, interval_secs=5, on_progress_callback=None):
61
+ # """
62
+ # Retries creating a YouTube object with the given URL and accessing its title several times
63
+ # with a given interval in seconds, until it succeeds or the maximum number of attempts is reached.
64
+ # If the object still cannot be created or the title cannot be accessed after the maximum number
65
+ # of attempts, the last exception is raised.
66
+ # """
67
+ # last_exception = None
68
+ # for i in range(max_retries):
69
+ # try:
70
+ # yt = YouTube(url, on_progress_callback=on_progress_callback)
71
+ # #title = yt.title # Access the title of the YouTube object.
72
+ # #views = yt.views
73
+ # return yt # Return the YouTube object if successful.
74
+ # except Exception as err:
75
+ # last_exception = err # Keep track of the last exception raised.
76
+ # st.write(f"Failed to create YouTube object or access title. Retrying... ({i+1}/{max_retries})")
77
+ # time.sleep(interval_secs) # Wait for the specified interval before retrying.
78
+
79
+ # # If the YouTube object still cannot be created or the title cannot be accessed after the maximum number of attempts, raise the last exception.
80
+ # raise last_exception
81
+
82
+ @st.cache_data()
83
+ def get_video_data(_yt, video_id):
84
+
85
+ yt_img = f'http://img.youtube.com/vi/{video_id}/mqdefault.jpg'
86
+ yt_img_html = '<img src='+yt_img+' width="250" height="150" />'
87
+ yt_img_html_link = '<a href='+get_link_from_id(video_id)+'>'+yt_img_html+'</a>'
88
+
89
+ snippet = _yt._get_single_video_data(video_id,'snippet')
90
+ yt_publish_date = snippet['publishedAt']
91
+ yt_title = snippet['title']
92
+ yt_author = snippet['channelTitle']
93
+ yt_channel_id = snippet['channelId']
94
+
95
+ try:
96
+ yt_keywords = snippet['tags']
97
+ except KeyError:
98
+ yt_keywords = []
99
+
100
+
101
+ statistics = _yt._get_single_video_data(video_id,'statistics')
102
+ yt_views = statistics['viewCount']
103
+ contentDetails = _yt._get_single_video_data(video_id,'contentDetails')
104
+ yt_length = contentDetails['duration']
105
+ yt_length_isodate = isodate.parse_duration(yt_length)
106
+ yt_length_isoformat = isodate.duration_isoformat(yt_length_isodate, "%H:%M:%S")[1:]
107
+
108
+ data = {'Video':[yt_img_html_link],
109
+ 'Author': [yt_author],
110
+ 'Title': [yt_title],
111
+ 'Published': [datetime.strptime(yt_publish_date, '%Y-%m-%dT%H:%M:%SZ').strftime('%B %d, %Y')],
112
+ 'Views':[format(int(yt_views), ",").replace(",", "'")],
113
+ 'Length':[yt_length_isoformat]}
114
+
115
+ return data, yt_keywords, yt_channel_id
116
+
117
+
118
+ @st.cache_data()
119
+ def get_video_data_from_gsheed(df, video_id):
120
+
121
+ yt_img_html_link = df.loc[df["ID"] == video_id]['Video'].to_list()[0]
122
+ yt_author = df.loc[df["ID"] == video_id]['Author'].to_list()[0]
123
+ yt_title = df.loc[df["ID"] == video_id]['Title'].to_list()[0]
124
+ yt_publish_date = df.loc[df["ID"] == video_id]['Published'].to_list()[0]
125
+ yt_views = df.loc[df["ID"] == video_id]['Views'].to_list()[0]
126
+ yt_length_isoformat = df.loc[df["ID"] == video_id]['Length'].to_list()[0]
127
+ yt_keywords = df.loc[df["ID"] == video_id]['Keywords'].to_list()[0].split(';')
128
+ yt_channel_id = df.loc[df["ID"] == video_id]['Channel'].to_list()[0]
129
+
130
+ data = {'Video':[yt_img_html_link],
131
+ 'Author': [yt_author],
132
+ 'Title': [yt_title],
133
+ 'Published': [yt_publish_date],
134
+ 'Views':[yt_views],
135
+ 'Length':[yt_length_isoformat]}
136
+
137
+ return data, yt_keywords, yt_channel_id
138
+
139
+ @time_it
140
+ def get_punctuated_text(raw_text):
141
+ response = deeppunkt.predict('sentences',raw_text)
142
+ st.session_state['punkt'] = response
143
+
144
+
145
+ def get_punctuated_text_to_dict(raw_text):
146
+ #st.session_state['punkt'] = {'data':[raw_text,0,0,0,0], 'duration':0}
147
+ st.session_state['punkt'] = [raw_text,0,0,0,0]
148
+
149
+
150
+ @time_it
151
+ def get_extracted_text(raw_text):
152
+
153
+ response = lexrank.summarize(raw_text)
154
+ st.session_state['extract'] = response
155
+
156
+ def get_extracted_text_to_dict(raw_text):
157
+ st.session_state['extract'] = [raw_text,0,0,0,0]
158
+
159
+ def get_videos_from_yt(yt):
160
+
161
+ vids_thumbnails = []
162
+ vids_videoIds = []
163
+ vids_titles = []
164
+ vids_lengths = []
165
+ vids_published= []
166
+ vids_views= []
167
+ item=0
168
+ for video in yt.video_data:
169
+ if item == item_limit:
170
+ break
171
+ item = item+1
172
+
173
+ vids_video_id = video
174
+ vids_url = 'https://www.youtube.com/watch?v='+vids_video_id
175
+
176
+ yt_img = f'http://img.youtube.com/vi/{vids_video_id}/mqdefault.jpg'
177
+ yt_img_html = '<img src='+yt_img+' width="250" height="150" />'
178
+ yt_img_html_link = '<a href='+vids_url+'>'+yt_img_html+'</a>'
179
+ vids_thumbnails.append(yt_img_html_link)
180
+
181
+ vids_video_id_link = '<a target="_self" href="/?vid='+vids_video_id+'">'+vids_video_id+'</a>'
182
+ vids_videoIds.append(vids_video_id_link)
183
+
184
+ vids_titles.append(yt.video_data[video]['title'])
185
+
186
+ yt_length = yt.video_data[video]['duration']
187
+ yt_length_isodate = isodate.parse_duration(yt_length)
188
+ yt_length_isoformat = isodate.duration_isoformat(yt_length_isodate, "%H:%M:%S")[1:]
189
+ vids_lengths.append(yt_length_isoformat)
190
+
191
+ yt_publish_date = yt.video_data[video]['publishedAt']
192
+ yt_publish_date_formatted = datetime.strptime(yt_publish_date, '%Y-%m-%dT%H:%M:%SZ').strftime('%B %d, %Y')
193
+ vids_published.append(yt_publish_date_formatted)
194
+
195
+ yt_views = yt.video_data[video]['viewCount']
196
+ yt_views_formatted = format(int(yt_views), ",").replace(",", "'")
197
+ vids_views.append(yt_views_formatted)
198
+
199
+ df_videos = {'Video': vids_thumbnails,
200
+ 'Video ID':vids_videoIds,
201
+ 'Title':vids_titles,
202
+ 'Published':vids_published,
203
+ 'Views':vids_views,
204
+ 'Length':vids_lengths}
205
+
206
+ return df_videos
207
+
208
+ def get_transcript(video_id):
209
+
210
+ # transcript_list = yta.list_transcripts(video_id)
211
+ # # iterate over all available transcripts
212
+ # for transcript in transcript_list:
213
+ # # the Transcript object provides metadata properties
214
+ # st.write(
215
+ # transcript.video_id,
216
+ # transcript.language,
217
+ # transcript.language_code,
218
+ # # whether it has been manually created or generated by YouTube
219
+ # transcript.is_generated,
220
+ # # whether this transcript can be translated or not
221
+ # transcript.is_translatable,
222
+ # # a list of languages the transcript can be translated to
223
+ # transcript.translation_languages,
224
+ # )
225
+
226
+ transcript_raw = None
227
+ try:
228
+ transcript_list = yta.list_transcripts(video_id)
229
+ transcript_item = transcript_list.find_transcript(['en'])
230
+ except (NoTranscriptFound, TranscriptsDisabled) as e:
231
+ transcript_item = 'No Transcript available.'
232
+ transcript_text = 'No Transcript available.'
233
+ transcript_item_is_generated = False
234
+ return transcript_text, transcript_item_is_generated
235
+
236
+ transcript_item_is_generated = transcript_item.is_generated
237
+ transcript_raw = transcript_item.fetch()
238
+
239
+ if transcript_raw is None:
240
+ return 'No Transcript available.', transcript_item_is_generated
241
+
242
+ transcript_text = '\n'.join([i['text'].replace('\n',' ') for i in transcript_raw])
243
+
244
+ return transcript_text, transcript_item_is_generated
245
+
246
+ def get_meta_info(video_id, url):
247
+
248
+ lextext = st.session_state.extract[0]
249
+ gpt_sum = '0'
250
+ gpt_title = '0'
251
+ title_sim = '0'
252
+ if len(lextext) < 10:
253
+ gpt_sum = 'NA'
254
+ gpt_title = 'NA'
255
+ title_sim = 'NA'
256
+
257
+ yt_img = f'http://img.youtube.com/vi/{video_id}/mqdefault.jpg'
258
+ yt_img_html = '<img src='+yt_img+' width="250" height="150" />'
259
+ yt_img_html_link = '<a href='+url+'>'+yt_img_html+'</a>'
260
+ video_info = {'ID': [video_id],
261
+ 'Video':[yt_img_html_link],
262
+ 'Author': [st.session_state["video_data"]["Author"][0]],
263
+ 'Channel':[st.session_state["channel_id"]],
264
+ 'Title': [st.session_state["video_data"]["Title"][0]],
265
+ 'Published': [st.session_state["video_data"]["Published"][0]],
266
+ 'Views':[st.session_state["video_data"]["Views"][0]],
267
+ 'Length':[st.session_state["video_data"]["Length"][0]],
268
+ 'Keywords':['; '.join(st.session_state["keywords"])]}
269
+
270
+ transcript_info = {'Words':[int(st.session_state.extract[1])],
271
+ 'Sentences': [int(st.session_state.extract[2])],
272
+ 'Characters': [int(st.session_state.extract[3])],
273
+ 'Tokens':[int(st.session_state.extract[4])],
274
+ 'Lextext':[st.session_state.extract[0]],
275
+ 'GPTSummary':[gpt_sum],
276
+ 'GPTTitle':[gpt_title],
277
+ 'Titlesim':[title_sim]}
278
+ df_current_ts = pd.DataFrame({**video_info,**transcript_info})
279
+
280
+ return df_current_ts
281
+
282
+
283
+ #######################################################################################
284
+ # Application Start
285
+ #######################################################################################
286
+
287
+
288
+ st.title("Transcriptifier")
289
+ st.subheader("Youtube Transcript Downloader")
290
+
291
+ example_urls = [
292
+ 'https://www.youtube.com/watch?v=8uQDDUfGNPA', # blog
293
+ 'https://www.youtube.com/watch?v=ofZEo0Rzo5s', # h-educate
294
+ 'https://www.youtube.com/watch?v=ReHGSGwV4-A', #wholesale ted
295
+ 'https://www.youtube.com/watch?v=n8JHnLgodRI', #kevindavid
296
+ 'https://www.youtube.com/watch?v=6MI0f6YjJIk', # Nicholas
297
+ 'https://www.youtube.com/watch?v=nr4kmlTr9xw', # Linus
298
+ 'https://www.youtube.com/watch?v=64Izfm24FKA', # Yannic
299
+ 'https://www.youtube.com/watch?v=Mt1P7p9HmkU', # Fogarty
300
+ 'https://www.youtube.com/watch?v=bj9snrsSook', #Geldschnurrbart
301
+ 'https://www.youtube.com/watch?v=0kJz0q0pvgQ', # fcc
302
+ 'https://www.youtube.com/watch?v=gNRGkMeITVU', # iman
303
+ 'https://www.youtube.com/watch?v=vAuQuL8dlXo', #ghiorghiu
304
+ 'https://www.youtube.com/watch?v=5scEDopRAi0', #infohaus
305
+ 'https://www.youtube.com/watch?v=lCnHfTHkhbE', #fcc tutorial
306
+ 'https://www.youtube.com/watch?v=QI2okshNv_4'
307
+ ]
308
+
309
+
310
+ par_vid = st.experimental_get_query_params().get("vid")
311
+ if par_vid:
312
+ par_url = par_vid[0]
313
+ else:
314
+ par_url = None
315
+
316
+ select_examples = st.selectbox(label="Choose an example",options=example_urls, key='ex_vid', on_change=update_param_example)
317
+ url = st.text_input("Or Enter the YouTube video URL or ID:", value=par_url if par_url else select_examples, key='ti_vid', on_change=update_param_textinput)
318
+
319
+
320
+ ########################
321
+ # Load the data for a given video
322
+ ########################
323
+
324
+
325
+ API_KEY = st.secrets["api_key"]
326
+ yt = YTstats(API_KEY)
327
+ #yt = retry_access_yt_object(get_link_from_id(url))
328
+
329
+ if url:
330
+ video_id = get_id_from_link(url)
331
+
332
+ if 'gsheed' not in st.session_state:
333
+ df = mysheet.read_gspread()
334
+ st.session_state.gsheed = df
335
+ #st.write("reading spradsheet")
336
+ else:
337
+ df = st.session_state.gsheed
338
+ #st.write("getting spreadsheed from session_state")
339
+
340
+ gslist=[]
341
+ try:
342
+ gslist = df.ID.to_list()
343
+ except AttributeError:
344
+ st.write('no items available.')
345
+
346
+ if video_id in gslist:
347
+ #st.write(df.loc[df["ID"] == video_id])
348
+ st.write("reading from sheet")
349
+ #transcript_item_is_generated = False
350
+ #transcript_text = df.loc[df["ID"] == video_id]['Punkttext'].to_list()[0]
351
+ #get_punctuated_text_to_dict(transcript_text)
352
+ extracted_text = df.loc[df["ID"] == video_id]['Lextext'].to_list()[0]
353
+ get_extracted_text_to_dict(extracted_text)
354
+
355
+ video_data, yt_keywords, yt_channel_id = get_video_data_from_gsheed(df, video_id)
356
+ else:
357
+ st.write("reading from api")
358
+ video_data, yt_keywords, yt_channel_id = get_video_data(yt, video_id)
359
+
360
+ st.session_state["video_data"] = video_data
361
+ st.session_state["keywords"] = yt_keywords
362
+ st.session_state["channel_id"] = yt_channel_id
363
+
364
+
365
+ df = pd.DataFrame(st.session_state["video_data"])
366
+ st.markdown(df.style.hide(axis="index").to_html(), unsafe_allow_html=True)
367
+ st.write("")
368
+
369
+ ###########################
370
+ # Load Transcript
371
+ ###########################
372
+
373
+ transcript_text, transcript_item_is_generated = get_transcript(video_id)
374
+
375
+ #if transcript_text is None:
376
+ # st.error("No transcript available.")
377
+ # st.stop()
378
+
379
+ ########################
380
+ # Load the author's keywords, which are not normally visible to viewers
381
+ ########################
382
+
383
+ keywords_data = {"Author's Keywords":yt_keywords}
384
+ st.table(keywords_data)
385
+ st.write("")
386
+
387
+ # TODO
388
+ # For this video (bj9snrsSook), transcripts are available in the following languages:
389
+
390
+ # (MANUALLY CREATED)
391
+ # None
392
+
393
+ # (GENERATED)
394
+ # - de ("Deutsch (automatisch erzeugt)")[TRANSLATABLE]
395
+
396
+ # (TRANSLATION LANGUAGES)
397
+ # - af ("Afrikaans")
398
+
399
+
400
+ ########################
401
+ # Display the transcript along with the download button
402
+ ########################
403
+
404
+ with st.expander('Preview Transcript'):
405
+ st.code(transcript_text, language=None)
406
+ st.download_button('Download Transcript', transcript_text)
407
+
408
+ ########################
409
+ # API Call to deeppunkt-gr
410
+ ########################
411
+
412
+
413
+ st.subheader("Restore Punctuations of Transcript")
414
+ if not transcript_item_is_generated:
415
+ st.write("Transcript is punctuated by author.")
416
+ # TODO
417
+ #check if the transcript contains more than 5 sentences
418
+
419
+ if st.button('Load Punctuated Transcript'):
420
+ with st.spinner('Loading Punctuation...'):
421
+ if 'punkt' not in st.session_state:
422
+ # first figure out if transcript is already punctuated
423
+ if transcript_item_is_generated:
424
+ get_punctuated_text(transcript_text)
425
+ else:
426
+ get_punctuated_text_to_dict(transcript_text)
427
+ #st.write('Load time: '+str(round(st.session_state.punkt['duration'],1))+' sec')
428
+ metrics_data = {'Words':[int(st.session_state.punkt[1])],
429
+ 'Sentences': [int(st.session_state.punkt[2])],
430
+ 'Characters': [int(st.session_state.punkt[3])],
431
+ 'Tokens':[int(st.session_state.punkt[4])]}
432
+ df = pd.DataFrame(metrics_data)
433
+ st.markdown(df.style.hide(axis="index").to_html(), unsafe_allow_html=True)
434
+ st.write("")
435
+ with st.expander('Preview Transcript'):
436
+ st.code(st.session_state.punkt[0], language=None)
437
+
438
+ ########################
439
+ # Call to lexrank-gr
440
+ ########################
441
+
442
+ st.subheader("Extract Core Sentences from Transcript")
443
+
444
+ if st.button('Extract Sentences'):
445
+ # decide whether the extract is already available; if not, the text has to be punctuated first
446
+ with st.spinner('Loading Extractions ...'):
447
+ if 'extract' not in st.session_state:
448
+ with st.spinner('Loading Punctuation for Extraction ...'):
449
+ if 'punkt' not in st.session_state:
450
+ # first figure out if transcript is already punctuated
451
+ if transcript_item_is_generated:
452
+ get_punctuated_text(transcript_text)
453
+ else:
454
+ get_punctuated_text_to_dict(transcript_text)
455
+
456
+ get_extracted_text(st.session_state.punkt[0])
457
+
458
+ metrics_data = {'Words':[int(st.session_state.extract[1])],
459
+ 'Sentences': [int(st.session_state.extract[2])],
460
+ 'Characters': [int(st.session_state.extract[3])],
461
+ 'Tokens':[int(st.session_state.extract[4])]}
462
+
463
+ df = pd.DataFrame(metrics_data)
464
+ st.markdown(df.style.hide(axis="index").to_html(), unsafe_allow_html=True)
465
+ st.write("")
466
+
467
+ with st.expander('Preview Transcript'):
468
+ st.code(st.session_state.extract[0], language=None)
469
+
470
+ ################
471
+ if 'extract' not in st.session_state:
472
+ st.error('Please run extraction first.', icon="🚨")
473
+ else:
474
+
475
+ df_current_ts = get_meta_info(video_id, url)
476
+
477
+ # initial write.
478
+ #df_new_sheet = pd.concat([df_current_ts])
479
+ #mysheet.write_gspread(df_new_sheet)
480
+ #st.write(video_info)
481
+
482
+ if 'gsheed' not in st.session_state:
483
+ df = mysheet.read_gspread()
484
+ st.session_state.gsheed = df
485
+
486
+ df_sheet = st.session_state.gsheed
487
+ df_current_ts_id = list(df_current_ts.ID)[0]
488
+ if df_current_ts_id not in list(df_sheet.ID):
489
+ df_new_sheet = pd.concat([df_sheet,df_current_ts])
490
+ mysheet.write_gspread(df_new_sheet)
491
+ st.session_state.gsheed = df_new_sheet
492
+ st.write('video added to sheet')
493
+ #else:
494
+ # st.write('video already in sheet')
495
+ # st.write(df_sheet)
496
+
497
+
498
+ #######################
499
+ # read / write the gspread file
500
+ ########################
501
+
502
+ if st.button('Read Spreadsheet'):
503
+
504
+ if 'gsheed' not in st.session_state:
505
+ df = mysheet.read_gspread()
506
+ st.session_state.gsheed = df
507
+
508
+ st.write(st.session_state.gsheed)
509
+
510
+
511
+ #if st.button('Add to Spreadsheet'):
512
+
513
+
514
+
515
+
516
+ #######################
517
+ # API Call to summarymachine
518
+ ########################
519
+
520
+ # def get_summarized_text(raw_text):
521
+ # response = requests.post("https://wldmr-summarymachine.hf.space/run/predict", json={
522
+ # "data": [
523
+ # raw_text,
524
+ # ]})
525
+ # #response_id = response
526
+ # if response.status_code == 504:
527
+ # raise "Error: Request took too long (>60sec), please try a shorter text."
528
+ # return response.json()
529
+
530
+ # st.subheader("Summarize Extracted Sentences with Flan-T5-large")
531
+
532
+ # if st.button('Summarize Sentences'):
533
+ # command = 'Summarize the transcript in one sentence:\n\n'
534
+ # with st.spinner('Loading Punctuation (Step 1/3)...'):
535
+ # if 'punkt' not in st.session_state:
536
+ # # first figure out if transcript is already punctuated
537
+ # if transcript_item.is_generated:
538
+ # get_punctuated_text(transcript_text)
539
+ # else:
540
+ # get_punctuated_text_to_dict(transcript_text)
541
+ # with st.spinner('Loading Extraction (Step 2/3)...'):
542
+ # if 'extract' not in st.session_state:
543
+ # get_extracted_text(st.session_state.punkt['data'][0])
544
+ # with st.spinner('Loading Summary (Step 3/3)...'):
545
+ # summary_text = get_summarized_text(command+st.session_state.extract['data'][0])
546
+ # st.write('Load time: '+str(round(summary_text['duration'],1))+' sec')
547
+ # with st.expander('Preview Transcript'):
548
+ # st.write(summary_text['data'][0], language=None)
549
+
550
+ ########################
551
+ # Channel
552
+ ########################
553
+
554
+
555
+ st.subheader("Other Videos of the Channel")
556
+ #st.write(st.session_state["channel_id"])
557
+ if 'channel_id' not in st.session_state:
558
+ st.error('Channel ID not available.', icon="🚨")
559
+ else:
560
+ yt.get_channel_statistics(st.session_state["channel_id"])
561
+ stats_data = {'Channel ID': [st.session_state["channel_id"]],
562
+ 'Total Views':[format(int(yt.channel_statistics["viewCount"]), ",").replace(",", "'")],
563
+ 'Total Subscribers':[format(int(yt.channel_statistics["subscriberCount"]), ",").replace(",", "'")],
564
+ 'Total Videos':[format(int(yt.channel_statistics["videoCount"]), ",").replace(",", "'")],
565
+ }
566
+ df = pd.DataFrame(stats_data)
567
+ st.markdown(df.style.hide(axis="index").to_html(), unsafe_allow_html=True)
568
+ st.write("")
569
+
570
+
571
+ if st.button('Load Videos'):
572
+
573
+ if 'gsheed' not in st.session_state:
574
+ df = mysheet.read_gspread()
575
+ st.session_state.gsheed = df
576
+
577
+ progress_text = 'Loading...'
578
+ loading_bar = st.progress(0, text=progress_text)
579
+ item_limit=3
580
+ df = st.session_state.gsheed
581
+ yt.get_channel_video_data(st.session_state["channel_id"],df, loading_bar, progress_text, item_limit)
582
+
583
+ df_videos = get_videos_from_yt(yt)
584
+ dataset = pd.DataFrame(df_videos)
585
+ st.markdown(dataset.style.hide(axis="index").to_html(), unsafe_allow_html=True)
586
+
587
+
588
+ ########################
589
+ # Sequence Loader
590
+ ########################
591
+
592
+
593
+ st.subheader("Sequence Loader")
594
+ # input hash as secret
595
+
596
+ input_hash = st.text_input("Enter Hash:")
597
+ item_limit = st.number_input(label="Number of Videos",value=3)
598
+ if st.button('Load Sequence'):
599
+ HASH_KEY = st.secrets["hash_key"]
600
+ if input_hash == HASH_KEY:
601
+ st.write("Access granted")
602
+ # read in spreadsheet
603
+ if 'gsheed' not in st.session_state:
604
+ df = mysheet.read_gspread()
605
+ st.session_state.gsheed = df
606
+
607
+ progress_text = 'Loading...'
608
+ loading_bar = st.progress(0, text=progress_text)
609
+ df_sheet = st.session_state.gsheed
610
+ yt.get_channel_video_data(st.session_state["channel_id"], df_sheet,loading_bar, progress_text, item_limit)
611
+ df_videos = get_videos_from_yt(yt)
612
+ dataset = pd.DataFrame(df_videos)
613
+ st.markdown(dataset.style.hide(axis="index").to_html(), unsafe_allow_html=True)
614
+
615
+ for sng in dataset['Video ID']:
616
+ subsng = sng[sng.find('>')+1:sng.find('</')]
617
+ st.write(subsng)
618
+
619
+ transcript_text, transcript_item_is_generated = get_transcript(subsng)
620
+
621
+ if transcript_item_is_generated:
622
+ get_punctuated_text(transcript_text)
623
+ else:
624
+ get_punctuated_text_to_dict(transcript_text)
625
+
626
+ get_extracted_text(st.session_state.punkt[0])
627
+
628
+ video_data, yt_keywords, yt_channel_id = get_video_data(yt, subsng)
629
+ st.session_state["video_data"] = video_data
630
+ st.session_state["keywords"] = yt_keywords
631
+ st.session_state["channel_id"] = yt_channel_id
632
+ df_current_ts = get_meta_info(subsng, subsng)
633
+ st.write(df_current_ts)
634
+ df_sheet = st.session_state.gsheed
635
+ df_new_sheet = pd.concat([df_sheet,df_current_ts])
636
+ mysheet.write_gspread(df_new_sheet)
637
+ st.session_state.gsheed = df_new_sheet
638
+
639
+ st.write('done')
640
+
641
+ st.write(st.session_state.gsheed)
642
+
643
+ else:
644
+ st.write("Access denied")
645
+
646
+
647
+
648
+ ###############
649
+ # End of File #
650
+ ###############
651
+ # hide_streamlit_style = """
652
+ # <style>
653
+ # #MainMenu {visibility: hidden;}
654
+ # footer {visibility: hidden;}
655
+ # </style>
656
+ # """
657
+ # st.markdown(hide_streamlit_style, unsafe_allow_html=True)
658
+
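For reference, a minimal sketch (not part of this commit) of how the link/ID helpers in app.py behave; the URL below is an example only:

# Illustrative only: mirrors get_id_from_link as defined in app.py above.
def get_id_from_link(link):
    if "v=" in link:
        return link.split("v=")[1].split("&")[0]
    elif len(link) == 11:
        return link
    else:
        return "Error: Invalid Link."

assert get_id_from_link("https://www.youtube.com/watch?v=8uQDDUfGNPA&t=42") == "8uQDDUfGNPA"
assert get_id_from_link("8uQDDUfGNPA") == "8uQDDUfGNPA"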
deeppunkt.py ADDED
@@ -0,0 +1,73 @@
1
+ from deepmultilingualpunctuation import PunctuationModel
2
+ import re
3
+ import metrics
4
+
5
+ def remove_filler_words(transcript):
6
+
7
+ # preserve line breaks
8
+ transcript_hash = " # ".join(transcript.strip().splitlines())
9
+ # preprocess the text by removing filler words
10
+ # Define a list of filler words to remove
11
+ filler_words = ["um", "uh", "hmm", "ha", "er", "ah", "yeah"]
12
+ words = transcript_hash.split()
13
+ clean_words = [word for word in words if word.lower() not in filler_words]
14
+ input_text_clean = ' '.join(clean_words)
15
+ # restore the line breaks
16
+ input_text= input_text_clean.replace(' # ','\n')
17
+ return input_text
18
+ # Define a regular expression pattern that matches any filler word surrounded by whitespace or punctuation
19
+ #pattern = r"(?<=\s|\b)(" + "|".join(fillers) + r")(?=\s|\b)"
20
+ # Use re.sub to replace the filler words with empty strings
21
+ #clean_input_text = re.sub(pattern, "", input_text)
22
+
23
+ def predict(brakes, transcript):
24
+
25
+ input_text = remove_filler_words(transcript)
26
+ # Do the punctuation restoration
27
+ model = PunctuationModel()
28
+ output_text = model.restore_punctuation(input_text)
29
+
30
+ # if neither of the line break methods applies,
31
+ # return the text as a single line
32
+ pcnt_file_cr = output_text
33
+
34
+ if 'textlines' in brakes:
35
+
36
+ # preserve line breaks
37
+ srt_file_hash = '# '.join(input_text.strip().splitlines())
38
+ #srt_file_sub=re.sub('\s*\n\s*','# ',srt_file_strip)
39
+ srt_file_array=srt_file_hash.split()
40
+ pcnt_file_array=output_text.split()
41
+
42
+ # goal: restore the break points i.e. the same number of lines as the srt file
43
+ # this is necessary, because each line in the srt file corresponds to a frame from the video
44
+ if len(srt_file_array)!=len(pcnt_file_array):
45
+ return "AssertError: The length of the transcript and the punctuated file should be the same: ",len(srt_file_array),len(pcnt_file_array)
46
+
47
+ pcnt_file_array_hash = []
48
+ for idx, item in enumerate(srt_file_array):
49
+ if item.endswith('#'):
50
+ pcnt_file_array_hash.append(pcnt_file_array[idx]+'#')
51
+ else:
52
+ pcnt_file_array_hash.append(pcnt_file_array[idx])
53
+
54
+ # assemble the array back to a string
55
+ pcnt_file_cr=' '.join(pcnt_file_array_hash).replace('#','\n')
56
+
57
+ elif 'sentences' in brakes:
58
+ split_text = output_text.split('. ')
59
+ pcnt_file_cr = '.\n'.join(split_text)
60
+
61
+ regex1 = r"\bi\b"
62
+ regex2 = r"(?<=[.?!;])\s*\w"
63
+ regex3 = r"^\w"
64
+ pcnt_file_cr_cap = re.sub(regex3, lambda x: x.group().upper(), re.sub(regex2, lambda x: x.group().upper(), re.sub(regex1, "I", pcnt_file_cr)))
65
+
66
+ metrics.load_nltk()
67
+ n_tokens= metrics.num_tokens(pcnt_file_cr_cap)
68
+ n_sents = metrics.num_sentences(pcnt_file_cr_cap)
69
+ n_words = metrics.num_words(pcnt_file_cr_cap)
70
+ n_chars = metrics.num_chars(pcnt_file_cr_cap)
71
+
72
+ return pcnt_file_cr_cap, n_words, n_sents, n_chars, n_tokens
73
+
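A quick usage sketch for the module above (illustrative, not part of the commit; assumes deepmultilingualpunctuation and its model are available):

# Illustrative only: deeppunkt.predict() returns (text, n_words, n_sents, n_chars, n_tokens).
import deeppunkt

raw = "so um today we look at youtube transcripts yeah they usually come without any punctuation"
text, n_words, n_sents, n_chars, n_tokens = deeppunkt.predict('sentences', raw)
print(text)                                 # punctuated and capitalized, one sentence per line
print(n_words, n_sents, n_chars, n_tokens)  # counts computed by metrics.py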
lexrank.py ADDED
@@ -0,0 +1,93 @@
1
+
2
+
3
+ from sumy.parsers.html import HtmlParser
4
+ from sumy.parsers.plaintext import PlaintextParser
5
+ from sumy.nlp.tokenizers import Tokenizer
6
+ from sumy.summarizers.lex_rank import LexRankSummarizer
7
+ from sumy.nlp.stemmers import Stemmer
8
+ from sumy.utils import get_stop_words
9
+ import metrics
10
+ import os
11
+ import nltk
12
+
13
+ def summarize(in_text):
14
+
15
+ if len(in_text)==0:
16
+ return 'Error: No text provided', 0, 0, 0, 0
17
+
18
+ nltk_file = '/home/user/nltk_data/tokenizers/punkt.zip'
19
+ if os.path.exists(nltk_file):
20
+ print('nltk punkt file exists in ', nltk_file)
21
+ else:
22
+ print("downloading punkt file")
23
+ nltk.download('punkt')
24
+
25
+ in_longtext = []
26
+ # Discard all sentences that have fewer than 10 words in them
27
+ in_text_sentenses = in_text.split('.')
28
+
29
+ for sen in in_text_sentenses:
30
+ sen_split = sen.split()
31
+ len_sen_split = len(sen_split)
32
+ if len_sen_split > 10:
33
+ in_longtext.append(sen)
34
+ in_text = '.'.join(in_longtext)+'.'
35
+
36
+ # The size of the summary is limited to 1024 tokens
37
+ # The LexRank algorithm accepts only a sentence count as a limit
38
+ # We start with one sentence and check the token size,
39
+ # then increase the number of sentences until the token size
40
+ # of the next sentence exceeds the limit
41
+ target_tokens = 1024
42
+
43
+ in_sents = metrics.num_sentences(in_text)
44
+
45
+ out_text = get_Summary(in_text,1)
46
+ n_tokens= metrics.num_tokens(out_text)
47
+ prev_n_tokens=0
48
+ for sen in range(2, in_sents):
49
+ if n_tokens >= target_tokens:
50
+ n_tokens = prev_n_tokens
51
+ break
52
+ else:
53
+ out_text = get_Summary(in_text,sen)
54
+ prev_n_tokens = n_tokens
55
+ n_tokens= metrics.num_tokens(out_text)
56
+
57
+ n_sents = metrics.num_sentences(out_text)
58
+ n_words = metrics.num_words(out_text)
59
+ n_chars = metrics.num_chars(out_text)
60
+
61
+ return out_text, n_words, n_sents, n_chars, n_tokens
62
+
63
+ def get_Summary(in_text, nr_sentences):
64
+
65
+ #sentences = in_text.split('. ')
66
+ # summarize small part of the text
67
+ #nr_sentences = 1 #len(sentences)
68
+ #print('nr_sentences: '+str(nr_sentences))
69
+
70
+ if nr_sentences == 0:
71
+ return 'Error: No sentences available'
72
+ list_summary = get_Lexrank(in_text,nr_sentences)
73
+ # it can happen that for lexrank a sentence consists of multiple actual sentences
74
+ # that are separated by full stops. Then the corresponding timestamp cannot be found;
75
+ # all items from the lexrank summary must be concatenated and split up by full stops.
76
+ concat_list_summary = '. '.join([str(item).replace('.','') for item in list_summary])#.split('. ')
77
+ concat_list_summary = concat_list_summary.replace('\\n','')
78
+ concat_list_summary = concat_list_summary.replace('. ','.\n')+'.'
79
+
80
+ return concat_list_summary
81
+
82
+ def get_Lexrank(text, nr_sentences):
83
+ summary=[]
84
+ LANGUAGE = "english"
85
+ SENTENCES_COUNT = nr_sentences
86
+ parser = PlaintextParser.from_string(text, Tokenizer(LANGUAGE))
87
+ stemmer = Stemmer(LANGUAGE)
88
+ summarizer = LexRankSummarizer(stemmer)
89
+ summarizer.stop_words = get_stop_words(LANGUAGE)
90
+ for sentence in summarizer(parser.document, SENTENCES_COUNT):
91
+ summary.append(sentence)
92
+
93
+ return summary
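The loop in summarize() above is essentially a linear search against a token budget; a stripped-down sketch of the same idea (illustrative only, with the summarizer and token counter passed in as assumptions):

# Illustrative only: grow the extract one sentence at a time until a token budget is reached.
def grow_summary(text, max_sentences, token_budget, extract, count_tokens):
    out = extract(text, 1)                        # start with a one-sentence summary
    for n_sentences in range(2, max_sentences + 1):
        candidate = extract(text, n_sentences)
        if count_tokens(candidate) >= token_budget:
            break                                 # keep the last summary that fit the budget
        out = candidate
    return out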
metrics.py ADDED
@@ -0,0 +1,69 @@
1
+ # Import nltk library for natural language processing
2
+ import nltk
3
+ import os
4
+ from transformers import AutoTokenizer
5
+
6
+ def load_nltk():
7
+ nltk_file = '/home/user/nltk_data/tokenizers/punkt.zip'
8
+ if os.path.exists(nltk_file):
9
+ print('nltk punkt file exists in ', nltk_file)
10
+ else:
11
+ print("downloading punkt file")
12
+ nltk.download('punkt')
13
+
14
+
15
+ # Define a function that takes some text as input and returns the number of tokens
16
+ def token_count(text):
17
+ # Import the Encoder class from bpe
18
+ from bpe import Encoder
19
+ # Create an encoder object with a large vocabulary size
20
+ encoder = Encoder(vocab_size=14735746)
21
+
22
+ # Train the encoder on the text
23
+ encoder.fit(text.split())
24
+
25
+ # Encode the text into tokens
26
+ tokens = encoder.tokenize(text)
27
+
28
+ # Return the number of tokens
29
+ return len(tokens)
30
+
31
+ def num_tokens(text):
32
+
33
+ tokenizer = AutoTokenizer.from_pretrained("gpt2")
34
+
35
+ token_ids = tokenizer.encode(text)
36
+
37
+ token_size = len(token_ids)
38
+
39
+ return token_size
40
+
41
+ def num_words(text):
42
+ sentences = nltk.sent_tokenize(text)
43
+ # Tokenize each sentence into words using nltk.word_tokenize()
44
+ words = []
45
+ for sentence in sentences:
46
+ words.extend(nltk.word_tokenize(sentence))
47
+
48
+ num_words = len(words)
49
+
50
+ return num_words
51
+
52
+ def num_sentences(text):
53
+ # Tokenize the text into sentences using nltk.sent_tokenize()
54
+ sentences = nltk.sent_tokenize(text)
55
+ num_sentences = len(sentences)
56
+ return num_sentences
57
+
58
+
59
+ def num_chars(text):
60
+ num_characters = len(text)
61
+ return num_characters
62
+
63
+
64
+ # Print out the results
65
+ # print(f"Number of sentences: {num_sentences}")
66
+ # print(f"Number of words: {num_words}")
67
+ # print(f"Number of tokens: {num_tokens}")
68
+ # print(f"Number of trans_tokens: {trans_tokens}")
69
+ # print(f"Number of characters: {num_characters}")
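A minimal check of the helpers above (illustrative, not part of the commit; assumes nltk and transformers are installed):

# Illustrative only: exercise the counting helpers on a tiny text.
import metrics

metrics.load_nltk()                      # downloads the punkt tokenizer if it is missing
text = "This is a sentence. This is another one."
print(metrics.num_sentences(text))       # 2
print(metrics.num_words(text))           # 10 (nltk counts the full stops as tokens)
print(metrics.num_chars(text))           # 40
print(metrics.num_tokens(text))          # GPT-2 BPE token count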
mysheet.py ADDED
@@ -0,0 +1,41 @@
1
+ import streamlit as st
2
+ #from google.oauth2 import service_account
3
+ import pandas as pd
4
+ import gspread
5
+ import json
6
+
7
+ def get_gspread_connection():
8
+ # Create a connection object.
9
+ # credentials = service_account.Credentials.from_service_account_info(
10
+ # st.secrets["gcp_service_account"],
11
+ # scopes=[
12
+ # "https://www.googleapis.com/auth/spreadsheets",
13
+ # ],
14
+ # )
15
+ #client = gspread.authorize(credentials)
16
+
17
+ st_credentials = st.secrets["gcp_service_account"]
18
+ if type(st_credentials) is str:
19
+ print("INFO: transforming str to dict")
20
+ credentials_dict = json.loads(st_credentials, strict=False)
21
+ client = gspread.service_account_from_dict(credentials_dict)
22
+ else:
23
+ print("INFO: using credentials in dict")
24
+ client = gspread.service_account_from_dict(st_credentials)
25
+
26
+
27
+ st_sheet_url = st.secrets["private_gsheets_url"]
28
+ spreadsheet = client.open_by_url(st_sheet_url)
29
+ worksheet = spreadsheet.get_worksheet(0)
30
+ return worksheet
31
+
32
+ #@st.cache_data
33
+ def read_gspread():
34
+ worksheet = get_gspread_connection()
35
+ df = pd.DataFrame(worksheet.get_all_records())
36
+ return df
37
+
38
+ def write_gspread(df):
39
+ #df.loc[len(df)] = ['Mia','worst']
40
+ worksheet = get_gspread_connection()
41
+ worksheet.update([df.columns.values.tolist()] + df.values.tolist())
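A read-only usage sketch for the sheet helpers (illustrative; assumes the st.secrets entries gcp_service_account and private_gsheets_url are configured for the deployment):

# Illustrative only: read the first worksheet of the configured spreadsheet.
import mysheet

df = mysheet.read_gspread()      # all records as a pandas DataFrame
print(df.columns.tolist())       # app.py expects columns such as 'ID', 'Lextext', 'Channel'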
yt_stats.py ADDED
@@ -0,0 +1,160 @@
1
+ import json
2
+ import requests
3
+ from tqdm import tqdm
4
+ import isodate
5
+
6
+ class YTstats:
7
+
8
+ def __init__(self, api_key):
9
+ self.api_key = api_key
10
+ self.channel_statistics = None
11
+ self.video_data = None
12
+
13
+ def extract_all(self, channel_id):
14
+ self.get_channel_statistics(channel_id)
15
+ self.get_channel_video_data(channel_id)
16
+
17
+ def get_channel_statistics(self, channel_id):
18
+ """Extract the channel statistics"""
19
+ print('get channel statistics...')
20
+ url = f'https://www.googleapis.com/youtube/v3/channels?part=statistics&id={channel_id}&key={self.api_key}'
21
+ #pbar = tqdm(total=1)
22
+
23
+ json_url = requests.get(url)
24
+ data = json.loads(json_url.text)
25
+ try:
26
+ data = data['items'][0]['statistics']
27
+ except KeyError:
28
+ print('Could not get channel statistics')
29
+ data = {}
30
+
31
+ self.channel_statistics = data
32
+ #pbar.update()
33
+ #pbar.close()
34
+ return data
35
+
36
+ def get_channel_video_data(self, channel_id, df_sheet, loading_bar, progress_text, item_limit=3):
37
+ "Extract all video information of the channel"
38
+ print('get video data...')
39
+ channel_videos, channel_playlists = self._get_channel_content(channel_id, limit=50)
40
+
41
+ channel_videos_out = dict()
42
+
43
+ total_items = len(channel_videos)
44
+ item = 0
45
+ step_size=0
46
+ step=0
47
+ if total_items!=0:
48
+ step_size=round(1/total_items,4)
49
+ #step = step_size
50
+ parts=["snippet", "statistics","contentDetails", "topicDetails"]
51
+ for video_id in tqdm(channel_videos):
52
+ if item == item_limit:
53
+ break
54
+
55
+ loading_bar.progress(step, text=progress_text)
56
+
57
+ for part in parts:
58
+ data = self._get_single_video_data(video_id, part)
59
+ channel_videos[video_id].update(data)
60
+
61
+ duration = isodate.parse_duration(channel_videos[video_id]['duration'])
62
+ short_duration = isodate.parse_duration('PT4M')
63
+
64
+ if duration > short_duration and video_id not in list(df_sheet.ID):
65
+ item = item+1
66
+ step = step +step_size
67
+ channel_videos_out[video_id] = channel_videos[video_id]
68
+
69
+
70
+ step=1.0
71
+ loading_bar.progress(step, text=progress_text)
72
+ self.video_data = channel_videos_out
73
+
74
+
75
+ def _get_single_video_data(self, video_id, part):
76
+ """
77
+ Extract further information for a single video
78
+ parts can be: 'snippet', 'statistics', 'contentDetails', 'topicDetails'
79
+ """
80
+
81
+ url = f"https://www.googleapis.com/youtube/v3/videos?part={part}&id={video_id}&key={self.api_key}"
82
+ json_url = requests.get(url)
83
+ data = json.loads(json_url.text)
84
+ try:
85
+ data = data['items'][0][part]
86
+ except KeyError as e:
87
+ print(f'Error! Could not get {part} part of data: \n{data}')
88
+ data = dict()
89
+ return data
90
+
91
+ def _get_channel_content(self, channel_id, limit=None, check_all_pages=True):
92
+ """
93
+ Extract all videos and playlists, can check all available search pages
94
+ channel_videos = videoId: title, publishedAt
95
+ channel_playlists = playlistId: title, publishedAt
96
+ return channel_videos, channel_playlists
97
+ """
98
+ url = f"https://www.googleapis.com/youtube/v3/search?key={self.api_key}&channelId={channel_id}&part=snippet,id&order=date"
99
+ if limit is not None and isinstance(limit, int):
100
+ url += "&maxResults=" + str(limit)
101
+
102
+ vid, pl, npt = self._get_channel_content_per_page(url)
103
+ idx = 0
104
+ while(check_all_pages and npt is not None and idx < 10):
105
+ nexturl = url + "&pageToken=" + npt
106
+ next_vid, next_pl, npt = self._get_channel_content_per_page(nexturl)
107
+ vid.update(next_vid)
108
+ pl.update(next_pl)
109
+ idx += 1
110
+
111
+ return vid, pl
112
+
113
+ def _get_channel_content_per_page(self, url):
114
+ """
115
+ Extract all videos and playlists per page
116
+ return channel_videos, channel_playlists, nextPageToken
117
+ """
118
+ json_url = requests.get(url)
119
+ data = json.loads(json_url.text)
120
+ channel_videos = dict()
121
+ channel_playlists = dict()
122
+ if 'items' not in data:
123
+ print('Error! Could not get correct channel data!\n', data)
124
+ return channel_videos, channel_playlists, None
125
+
126
+ nextPageToken = data.get("nextPageToken", None)
127
+
128
+ item_data = data['items']
129
+ for item in item_data:
130
+ try:
131
+ kind = item['id']['kind']
132
+ published_at = item['snippet']['publishedAt']
133
+ title = item['snippet']['title']
134
+ if kind == 'youtube#video':
135
+ video_id = item['id']['videoId']
136
+ channel_videos[video_id] = {'publishedAt': published_at, 'title': title}
137
+ elif kind == 'youtube#playlist':
138
+ playlist_id = item['id']['playlistId']
139
+ channel_playlists[playlist_id] = {'publishedAt': published_at, 'title': title}
140
+ except KeyError as e:
141
+ print('Error! Could not extract data from item:\n', item)
142
+
143
+ return channel_videos, channel_playlists, nextPageToken
144
+
145
+ def dump(self, channel_id):
146
+ """Dumps channel statistics and video data in a single json file"""
147
+ if self.channel_statistics is None or self.video_data is None:
148
+ print('data is missing!\nCall get_channel_statistics() and get_channel_video_data() first!')
149
+ return
150
+
151
+ fused_data = {channel_id: {"channel_statistics": self.channel_statistics,
152
+ "video_data": self.video_data}}
153
+
154
+ channel_title = self.video_data.popitem()[1].get('channelTitle', channel_id)
155
+ channel_title = channel_title.replace(" ", "_").lower()
156
+ filename = channel_title + '.json'
157
+ with open(filename, 'w') as f:
158
+ json.dump(fused_data, f, indent=4)
159
+
160
+ print('file dumped to', filename)
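A standalone usage sketch for YTstats (illustrative; the API key and channel ID below are placeholders):

# Illustrative only: fetch channel statistics via the YouTube Data API v3.
from yt_stats import YTstats

yt = YTstats(api_key="YOUR_API_KEY")                            # placeholder key
stats = yt.get_channel_statistics("UCxxxxxxxxxxxxxxxxxxxxxx")   # placeholder channel id
print(stats.get("viewCount"), stats.get("subscriberCount"), stats.get("videoCount"))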