Compare commits

...

17 commits

Author SHA1 Message Date
b3331b36a7 Merge branch 'intl-tests'
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2023-05-07 15:06:27 +02:00
781064218d feat: add video duration parser 2023-05-07 14:09:30 +02:00
923e47e5cf chore: update serde_with to 3.0.0 2023-05-06 21:24:42 +02:00
2241223c9f refactor!: made timeago module private 2023-05-06 21:24:11 +02:00
800073df48 feat(codegen): collected video duration samples 2023-05-06 21:12:49 +02:00
19781eab36 fix: improve number parsing, add number_nd_tokens
add dictionary overrides
2023-05-06 17:36:36 +02:00
97492780c6 fix: parsing is_ytm for playlists 2023-05-06 03:17:43 +02:00
0677fd487e fix: parsing music playlist video count 2023-05-06 01:58:23 +02:00
e96d494505 refactor: remove by_char from dict 2023-05-06 01:37:07 +02:00
72d817edd7 fix: update large number samples 2023-05-06 01:22:13 +02:00
e94de9a0f6 fix: update playlist dates 2023-05-05 18:50:25 +02:00
d852746238 tests: reduce number of expected chart items 2023-05-05 18:01:17 +02:00
a45eba4705 refactor: replace VecLogError with standard Deserialize impl 2023-05-05 18:00:33 +02:00
963ff14dc1 fix: playlist deserialization error, add VecSkipErrorWrap 2023-05-05 17:13:03 +02:00
bb396968dc tests: completed for all languages
fix: parsing search videos without duration
2023-05-05 15:18:37 +02:00
25025ef701 refactor: remove bail macros 2023-05-04 22:18:38 +02:00
b88faa9d05 tests: run tests with different lang settings
fix: parsing subscriber count on channel search itms
fix: add warnings for all date and numstr parsing
fix: error parsing search suggestions
2023-05-04 21:44:10 +02:00
73 changed files with 55710 additions and 32328 deletions

View file

@ -40,7 +40,7 @@ reqwest = { version = "0.11.11", default-features = false, features = [
tokio = { version = "1.20.0", features = ["macros", "time"] } tokio = { version = "1.20.0", features = ["macros", "time"] }
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.82" serde_json = "1.0.82"
serde_with = { version = "2.0.0", features = ["json"] } serde_with = { version = "3.0.0", features = ["json"] }
rand = "0.8.5" rand = "0.8.5"
time = { version = "0.3.15", features = [ time = { version = "0.3.15", features = [
"macros", "macros",

View file

@ -15,8 +15,26 @@ testyt10:
cargo test --all-features --test youtube; \ cargo test --all-features --test youtube; \
done done
testintl:
#!/usr/bin/env bash
set -e
LANGUAGES=(
"af" "am" "ar" "as" "az" "be" "bg" "bn" "bs" "ca" "cs" "da" "de" "el" "en" "en-GB" "en-IN"
"es" "es-419" "es-US" "et" "eu" "fa" "fi" "fil" "fr" "fr-CA" "gl" "gu"
"hi" "hr" "hu" "hy" "id" "is" "it" "iw" "ja" "ka" "kk" "km" "kn" "ko" "ky"
"lo" "lt" "lv" "mk" "ml" "mn" "mr" "ms" "my" "ne" "nl" "no" "or" "pa" "pl"
"pt" "pt-PT" "ro" "ru" "si" "sk" "sl" "sq" "sr" "sr-Latn" "sv" "sw" "ta"
"te" "th" "tr" "uk" "ur" "uz" "vi" "zh-CN" "zh-HK" "zh-TW" "zu"
)
for YT_LANG in "${LANGUAGES[@]}"; do \
echo "---TESTS FOR $YT_LANG ---"; \
YT_LANG="$YT_LANG" cargo test --test youtube -- --skip get_video_details --skip startpage; \
echo "--- $YT_LANG COMPLETED ---"; \
sleep 10; \
done
testfiles: testfiles:
cargo run -p rustypipe-codegen -- -d . download-testfiles cargo run -p rustypipe-codegen download-testfiles
report2yaml: report2yaml:
mkdir -p rustypipe_reports/conv mkdir -p rustypipe_reports/conv

View file

@ -10,7 +10,7 @@ tokio = { version = "1.20.0", features = ["macros", "rt-multi-thread"] }
futures = "0.3.21" futures = "0.3.21"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.82" serde_json = "1.0.82"
serde_with = "2.0.0" serde_with = "3.0.0"
anyhow = "1.0" anyhow = "1.0"
log = "0.4.17" log = "0.4.17"
env_logger = "0.10.0" env_logger = "0.10.0"
@ -19,5 +19,7 @@ phf_codegen = "0.11.1"
once_cell = "1.12.0" once_cell = "1.12.0"
regex = "1.7.1" regex = "1.7.1"
indicatif = "0.17.0" indicatif = "0.17.0"
num_enum = "0.5.7" num_enum = "0.6.1"
path_macro = "1.0.0" path_macro = "1.0.0"
intl_pluralrules = "7.0.2"
unic-langid = "0.9.1"

View file

@ -1,18 +1,21 @@
use std::{collections::BTreeMap, fs::File, io::BufReader, path::Path}; use std::{collections::BTreeMap, fs::File, io::BufReader};
use futures::stream::{self, StreamExt}; use futures::stream::{self, StreamExt};
use path_macro::path; use path_macro::path;
use rustypipe::{ use rustypipe::{
client::{ClientType, RustyPipe, RustyPipeQuery, YTContext}, client::{ClientType, RustyPipe, RustyPipeQuery},
model::AlbumType, model::AlbumType,
param::{locale::LANGUAGES, Language}, param::{locale::LANGUAGES, Language},
}; };
use serde::{Deserialize, Serialize}; use serde::Deserialize;
use crate::util::{self, TextRuns}; use crate::{
model::{QBrowse, TextRuns},
util::{self, DICT_DIR},
};
pub async fn collect_album_types(project_root: &Path, concurrency: usize) { pub async fn collect_album_types(concurrency: usize) {
let json_path = path!(project_root / "testfiles" / "dict" / "album_type_samples.json"); let json_path = path!(*DICT_DIR / "album_type_samples.json");
let album_types = [ let album_types = [
(AlbumType::Album, "MPREb_nlBWQROfvjo"), (AlbumType::Album, "MPREb_nlBWQROfvjo"),
@ -48,13 +51,13 @@ pub async fn collect_album_types(project_root: &Path, concurrency: usize) {
serde_json::to_writer_pretty(file, &collected_album_types).unwrap(); serde_json::to_writer_pretty(file, &collected_album_types).unwrap();
} }
pub fn write_samples_to_dict(project_root: &Path) { pub fn write_samples_to_dict() {
let json_path = path!(project_root / "testfiles" / "dict" / "album_type_samples.json"); let json_path = path!(*DICT_DIR / "album_type_samples.json");
let json_file = File::open(json_path).unwrap(); let json_file = File::open(json_path).unwrap();
let collected: BTreeMap<Language, BTreeMap<AlbumType, String>> = let collected: BTreeMap<Language, BTreeMap<AlbumType, String>> =
serde_json::from_reader(BufReader::new(json_file)).unwrap(); serde_json::from_reader(BufReader::new(json_file)).unwrap();
let mut dict = util::read_dict(project_root); let mut dict = util::read_dict();
let langs = dict.keys().map(|k| k.to_owned()).collect::<Vec<_>>(); let langs = dict.keys().map(|k| k.to_owned()).collect::<Vec<_>>();
for lang in langs { for lang in langs {
@ -72,7 +75,7 @@ pub fn write_samples_to_dict(project_root: &Path) {
}); });
} }
util::write_dict(project_root, &dict); util::write_dict(dict);
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
@ -91,13 +94,6 @@ struct HeaderRenderer {
subtitle: TextRuns, subtitle: TextRuns,
} }
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct QBrowse<'a> {
context: YTContext<'a>,
browse_id: &'a str,
}
async fn get_album_type(query: &RustyPipeQuery, id: &str) -> String { async fn get_album_type(query: &RustyPipeQuery, id: &str) -> String {
let context = query let context = query
.get_context(ClientType::DesktopMusic, true, None) .get_context(ClientType::DesktopMusic, true, None)
@ -105,6 +101,7 @@ async fn get_album_type(query: &RustyPipeQuery, id: &str) -> String {
let body = QBrowse { let body = QBrowse {
context, context,
browse_id: id, browse_id: id,
params: None,
}; };
let response_txt = query let response_txt = query
.raw(ClientType::DesktopMusic, "browse", &body) .raw(ClientType::DesktopMusic, "browse", &body)

View file

@ -1,25 +1,32 @@
use std::collections::{HashMap, HashSet}; use std::sync::Arc;
use std::{collections::BTreeMap, fs::File, io::BufReader, path::Path}; use std::{
collections::{BTreeMap, HashMap, HashSet},
fs::File,
io::BufReader,
};
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use futures::{stream, StreamExt}; use futures::{stream, StreamExt};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use path_macro::path; use path_macro::path;
use regex::Regex; use regex::Regex;
use reqwest::{header, Client}; use rustypipe::client::{ClientType, RustyPipe, RustyPipeQuery};
use rustypipe::param::{locale::LANGUAGES, Language}; use rustypipe::param::{locale::LANGUAGES, Language};
use serde::Deserialize; use serde::Deserialize;
use serde_with::serde_as;
use serde_with::VecSkipError;
use crate::util::{self, Text}; use crate::model::{Channel, ContinuationResponse};
use crate::util::DICT_DIR;
use crate::{
model::{QBrowse, QCont, TextRuns},
util,
};
type CollectedNumbers = BTreeMap<Language, BTreeMap<u8, (String, u64)>>; type CollectedNumbers = BTreeMap<Language, BTreeMap<String, u64>>;
/// Collect video view count texts in every supported language /// Collect video view count texts in every supported language
/// and write them to `testfiles/dict/large_number_samples.json`. /// and write them to `testfiles/dict/large_number_samples.json`.
/// ///
/// YouTube's API outputs the subscriber count of a channel only in a /// YouTube's API outputs subscriber and view counts only in a
/// approximated format (e.g *880K subscribers*), which varies /// approximated format (e.g *880K subscribers*), which varies
/// by language. /// by language.
/// ///
@ -30,28 +37,75 @@ type CollectedNumbers = BTreeMap<Language, BTreeMap<u8, (String, u64)>>;
/// We extract these instead of subscriber counts because the YouTube API /// We extract these instead of subscriber counts because the YouTube API
/// outputs view counts both in approximated and exact format, so we can use /// outputs view counts both in approximated and exact format, so we can use
/// the exact counts to figure out the tokens. /// the exact counts to figure out the tokens.
pub async fn collect_large_numbers(project_root: &Path, concurrency: usize) { pub async fn collect_large_numbers(concurrency: usize) {
let json_path = path!(project_root / "testfiles" / "dict" / "large_number_samples.json"); let json_path = path!(*DICT_DIR / "large_number_samples_all.json");
let json_path_all = let rp = RustyPipe::new();
path!(project_root / "testfiles" / "dict" / "large_number_samples_all.json");
let channels = [ let channels = [
"UCq-Fj5jknLsUf-MWSy4_brA", // 10e8 (225M) "UCq-Fj5jknLsUf-MWSy4_brA", // 10e8 (241M)
"UCcdwLMPsaU2ezNSJU1nFoBQ", // 10e7 (60M) "UCcdwLMPsaU2ezNSJU1nFoBQ", // 10e7 (67M)
"UC6mIxFTvXkWQVEHPsEdflzQ", // 10e6 (1.7M) "UC6mIxFTvXkWQVEHPsEdflzQ", // 10e6 (1.8M)
"UCD0y51PJfvkZNe3y3FR5riw", // 10e5 (125K) "UCD0y51PJfvkZNe3y3FR5riw", // 10e5 (126K)
"UCNcN0dW43zE0Om3278fjY8A", // 10e4 (27K) "UCNcN0dW43zE0Om3278fjY8A", // 10e4 (33K)
"UC0QEucPrn0-Ddi3JBTcs5Kw", // 10e3 (5K) "UC0QEucPrn0-Ddi3JBTcs5Kw", // 10e3 (5K)
"UCXvtcj9xUQhaqPaitFf2DqA", // (170) "UCXvtcj9xUQhaqPaitFf2DqA", // (275)
"UCq-XMc01T641v-4P3hQYJWg", // (636) "UCq-XMc01T641v-4P3hQYJWg", // (695)
"UCaZL4eLD7a30Fa8QI-sRi_g", // (31K)
"UCO-dylEoJozPTxGYd8fTQxA", // (5)
"UCQXYK94vDqOEkPbTCyL0OjA", // (1)
]; ];
let collected_numbers_all: BTreeMap<Language, BTreeMap<String, u64>> = stream::iter(LANGUAGES) // YTM outputs the subscriber count in a shortened format in some languages
.map(|lang| async move { let music_channels = [
"UC_1N84buVNgR_-3gDZ9Jtxg", // 10e8 (158M)
"UCRw0x9_EfawqmgDI2IgQLLg", // 10e7 (29M)
"UChWu2clmvJ5wN_0Ic5dnqmw", // 10e6 (1.9M)
"UCOYiPDuimprrGHgFy4_Fw8Q", // 10e5 (149K)
"UC8nZf9WyVIxNMly_hy2PTyQ", // 10e4 (17K)
"UCaltNL5XvZ7dKvBsBPi-gqg", // 10e3 (8K)
];
// Build a lookup table for the channel's subscriber counts
let subscriber_counts: Arc<BTreeMap<String, u64>> = stream::iter(channels)
.map(|c| {
let rp = rp.query();
async move {
let channel = get_channel(&rp, c).await.unwrap();
let n = util::parse_largenum_en(&channel.subscriber_count).unwrap();
(c.to_owned(), n)
}
})
.buffer_unordered(concurrency)
.collect::<BTreeMap<_, _>>()
.await
.into();
let music_subscriber_counts: Arc<BTreeMap<String, u64>> = stream::iter(music_channels)
.map(|c| {
let rp = rp.query();
async move {
let subscriber_count = music_channel_subscribers(&rp, c).await.unwrap();
let n = util::parse_largenum_en(&subscriber_count).unwrap();
(c.to_owned(), n)
}
})
.buffer_unordered(concurrency)
.collect::<BTreeMap<_, _>>()
.await
.into();
let collected_numbers: CollectedNumbers = stream::iter(LANGUAGES)
.map(|lang| {
let rp = rp.query().lang(lang);
let subscriber_counts = subscriber_counts.clone();
let music_subscriber_counts = music_subscriber_counts.clone();
async move {
let mut entry = BTreeMap::new(); let mut entry = BTreeMap::new();
for (n, ch_id) in channels.iter().enumerate() { for (n, ch_id) in channels.iter().enumerate() {
let channel = get_channel(ch_id, lang) let channel = get_channel(&rp, ch_id)
.await .await
.context(format!("{lang}-{n}")) .context(format!("{lang}-{n}"))
.unwrap(); .unwrap();
@ -59,69 +113,40 @@ pub async fn collect_large_numbers(project_root: &Path, concurrency: usize) {
channel.view_counts.iter().for_each(|(num, txt)| { channel.view_counts.iter().for_each(|(num, txt)| {
entry.insert(txt.to_owned(), *num); entry.insert(txt.to_owned(), *num);
}); });
entry.insert(channel.subscriber_count, subscriber_counts[*ch_id]);
println!("collected {lang}-{n}"); println!("collected {lang}-{n}");
} }
for (n, ch_id) in music_channels.iter().enumerate() {
let subscriber_count = music_channel_subscribers(&rp, ch_id)
.await
.context(format!("{lang}-music-{n}"))
.unwrap();
entry.insert(subscriber_count, music_subscriber_counts[*ch_id]);
println!("collected {lang}-music-{n}");
}
(lang, entry) (lang, entry)
}
}) })
.buffer_unordered(concurrency) .buffer_unordered(concurrency)
.collect() .collect()
.await; .await;
let collected_numbers: CollectedNumbers = collected_numbers_all
.iter()
.map(|(lang, entry)| {
let mut e2 = BTreeMap::new();
entry.iter().for_each(|(txt, num)| {
e2.insert(get_mag(*num), (txt.to_owned(), *num));
});
(*lang, e2)
})
.collect();
let file = File::create(json_path).unwrap(); let file = File::create(json_path).unwrap();
serde_json::to_writer_pretty(file, &collected_numbers).unwrap(); serde_json::to_writer_pretty(file, &collected_numbers).unwrap();
let file = File::create(json_path_all).unwrap();
serde_json::to_writer_pretty(file, &collected_numbers_all).unwrap();
} }
/// Attempt to parse the numbers collected by `collect-large-numbers` /// Attempt to parse the numbers collected by `collect-large-numbers`
/// and write the results to `dictionary.json`. /// and write the results to `dictionary.json`.
pub fn write_samples_to_dict(project_root: &Path) { pub fn write_samples_to_dict() {
/* let json_path = path!(*DICT_DIR / "large_number_samples.json");
Manual corrections:
as
"কোঃটা": 9,
"নিঃটা": 6,
"নিযুতটা": 6,
"লাখটা": 5,
"হাজাৰটা": 3
ar
"ألف": 3,
"آلاف": 3,
"مليار": 9,
"مليون": 6
bn
"লাটি": 5,
"শত": 2,
"হাটি": 3,
"কোটি": 7
es/es-US
"mil": 3,
"M": 6
*/
let json_path = path!(project_root / "testfiles" / "dict" / "large_number_samples.json");
let json_file = File::open(json_path).unwrap(); let json_file = File::open(json_path).unwrap();
let collected_nums: CollectedNumbers = let collected_nums: CollectedNumbers =
serde_json::from_reader(BufReader::new(json_file)).unwrap(); serde_json::from_reader(BufReader::new(json_file)).unwrap();
let mut dict = util::read_dict(project_root); let mut dict = util::read_dict();
let langs = dict.keys().map(|k| k.to_owned()).collect::<Vec<_>>(); let langs = dict.keys().map(|k| k.to_owned()).collect::<Vec<_>>();
static POINT_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"\d(\.|,)\d{1,3}(?:\D|$)").unwrap()); static POINT_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"\d(\.|,)\d{1,3}(?:\D|$)").unwrap());
@ -132,11 +157,9 @@ pub fn write_samples_to_dict(project_root: &Path) {
let mut e_langs = dict_entry.equivalent.clone(); let mut e_langs = dict_entry.equivalent.clone();
e_langs.push(lang); e_langs.push(lang);
let comma_decimal = collected_nums let comma_decimal = collected_nums[&lang]
.get(&lang)
.unwrap()
.iter() .iter()
.find_map(|(mag, (txt, _))| { .find_map(|(txt, val)| {
let point = POINT_REGEX let point = POINT_REGEX
.captures(txt) .captures(txt)
.map(|c| c.get(1).unwrap().as_str()); .map(|c| c.get(1).unwrap().as_str());
@ -146,8 +169,9 @@ pub fn write_samples_to_dict(project_root: &Path) {
// If the number parsed from all digits has the same order of // If the number parsed from all digits has the same order of
// magnitude as the actual number, it must be a separator. // magnitude as the actual number, it must be a separator.
// Otherwise it is a decimal point // Otherwise it is a decimal point
return Some((get_mag(num_all) == *mag) ^ (point == ",")); return Some((get_mag(num_all) == get_mag(*val)) ^ (point == ","));
} }
None None
}) })
.unwrap(); .unwrap();
@ -165,6 +189,7 @@ pub fn write_samples_to_dict(project_root: &Path) {
// If the token is found again with a different derived order of magnitude, // If the token is found again with a different derived order of magnitude,
// its value in the map is set to None. // its value in the map is set to None.
let mut found_tokens: HashMap<String, Option<u8>> = HashMap::new(); let mut found_tokens: HashMap<String, Option<u8>> = HashMap::new();
let mut found_nd_tokens: HashMap<String, Option<u8>> = HashMap::new();
let mut insert_token = |token: String, mag: u8| { let mut insert_token = |token: String, mag: u8| {
let found_token = found_tokens.entry(token).or_insert(match mag { let found_token = found_tokens.entry(token).or_insert(match mag {
@ -179,19 +204,30 @@ pub fn write_samples_to_dict(project_root: &Path) {
} }
}; };
let mut insert_nd_token = |token: String, n: Option<u8>| {
let found_token = found_nd_tokens.entry(token).or_insert(n);
if let Some(f) = found_token {
if Some(*f) != n {
*found_token = None;
}
}
};
for lang in e_langs { for lang in e_langs {
let entry = collected_nums.get(&lang).unwrap(); let entry = collected_nums.get(&lang).unwrap();
entry.iter().for_each(|(mag, (txt, _))| { entry.iter().for_each(|(txt, val)| {
let filtered = util::filter_largenumstr(txt); let filtered = util::filter_largenumstr(txt);
let mag = get_mag(*val);
let tokens: Vec<String> = match dict_entry.by_char { let tokens: Vec<String> = match dict_entry.by_char || lang == Language::Ko {
true => filtered.chars().map(|c| c.to_string()).collect(), true => filtered.chars().map(|c| c.to_string()).collect(),
false => filtered.split_whitespace().map(|c| c.to_string()).collect(), false => filtered.split_whitespace().map(|c| c.to_string()).collect(),
}; };
let num_before_point = match util::parse_numeric::<u64>(txt.split(decimal_point).next().unwrap()) {
util::parse_numeric::<u64>(txt.split(decimal_point).next().unwrap()).unwrap(); Ok(num_before_point) => {
let mag_before_point = get_mag(num_before_point); let mag_before_point = get_mag(num_before_point);
let mut mag_remaining = mag - mag_before_point; let mut mag_remaining = mag - mag_before_point;
@ -217,7 +253,23 @@ pub fn write_samples_to_dict(project_root: &Path) {
} else { } else {
insert_token(t.to_owned(), mag_remaining); insert_token(t.to_owned(), mag_remaining);
} }
insert_nd_token(t.to_owned(), None);
}); });
}
Err(e) => {
if matches!(e.kind(), std::num::IntErrorKind::Empty) {
// Text does not contain any digits, search for nd_tokens
tokens.iter().for_each(|t| {
insert_nd_token(
t.to_owned(),
Some((*val).try_into().expect("nd_token value too large")),
);
});
} else {
panic!("{e}, txt: {txt}")
}
}
}
}); });
} }
@ -226,6 +278,10 @@ pub fn write_samples_to_dict(project_root: &Path) {
.into_iter() .into_iter()
.filter_map(|(k, v)| v.map(|v| (k, v))) .filter_map(|(k, v)| v.map(|v| (k, v)))
.collect(); .collect();
dict_entry.number_nd_tokens = found_nd_tokens
.into_iter()
.filter_map(|(k, v)| v.map(|v| (k, v)))
.collect();
dict_entry.comma_decimal = comma_decimal; dict_entry.comma_decimal = comma_decimal;
// Check for duplicates // Check for duplicates
@ -233,9 +289,13 @@ pub fn write_samples_to_dict(project_root: &Path) {
if !dict_entry.number_tokens.values().all(|x| uniq.insert(x)) { if !dict_entry.number_tokens.values().all(|x| uniq.insert(x)) {
println!("Warning: collected duplicate tokens for {lang}"); println!("Warning: collected duplicate tokens for {lang}");
} }
let mut uniq = HashSet::new();
if !dict_entry.number_nd_tokens.values().all(|x| uniq.insert(x)) {
println!("Warning: collected duplicate nd_tokens for {lang}");
}
} }
util::write_dict(project_root, &dict); util::write_dict(dict);
} }
fn get_mag(n: u64) -> u8 { fn get_mag(n: u64) -> u8 {
@ -243,145 +303,154 @@ fn get_mag(n: u64) -> u8 {
} }
/* /*
YouTube channel videos response YouTube Music channel data
*/ */
#[derive(Clone, Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct Channel { struct MusicChannel {
contents: Contents, header: MusicHeader,
} }
#[derive(Clone, Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct Contents { struct MusicHeader {
two_column_browse_results_renderer: TabsRenderer, #[serde(alias = "musicVisualHeaderRenderer")]
music_immersive_header_renderer: MusicHeaderRenderer,
} }
#[serde_as] #[derive(Debug, Deserialize)]
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct TabsRenderer { struct MusicHeaderRenderer {
#[serde_as(as = "VecSkipError<_>")] subscription_button: SubscriptionButton,
tabs: Vec<TabRendererWrap>,
} }
#[derive(Clone, Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct TabRendererWrap { struct SubscriptionButton {
tab_renderer: TabRenderer, subscribe_button_renderer: SubscriptionButtonRenderer,
} }
#[derive(Clone, Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
struct TabRenderer { struct SubscriptionButtonRenderer {
content: SectionListRendererWrap, subscriber_count_text: TextRuns,
} }
#[derive(Clone, Debug, Deserialize)] #[derive(Debug)]
#[serde(rename_all = "camelCase")]
struct SectionListRendererWrap {
section_list_renderer: SectionListRenderer,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct SectionListRenderer {
contents: Vec<ItemSectionRendererWrap>,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ItemSectionRendererWrap {
item_section_renderer: ItemSectionRenderer,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ItemSectionRenderer {
contents: Vec<GridRendererWrap>,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GridRendererWrap {
grid_renderer: GridRenderer,
}
#[serde_as]
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GridRenderer {
#[serde_as(as = "VecSkipError<_>")]
items: Vec<VideoListItem>,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct VideoListItem {
grid_video_renderer: GridVideoRenderer,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GridVideoRenderer {
/// `24,194 views`
view_count_text: Text,
/// `19K views`
short_view_count_text: Text,
}
#[derive(Clone, Debug)]
struct ChannelData { struct ChannelData {
view_counts: Vec<(u64, String)>, view_counts: BTreeMap<u64, String>,
subscriber_count: String,
} }
async fn get_channel(channel_id: &str, lang: Language) -> Result<ChannelData> { async fn get_channel(query: &RustyPipeQuery, channel_id: &str) -> Result<ChannelData> {
let client = Client::new(); let resp = query
.raw(
ClientType::Desktop,
"browse",
&QBrowse {
context: query.get_context(ClientType::Desktop, true, None).await,
browse_id: channel_id,
params: Some("EgZ2aWRlb3MYASAAMAE"),
},
)
.await?;
let body = format!( let channel = serde_json::from_str::<Channel>(&resp)?;
"{}{}{}{}{}",
r##"{"context":{"client":{"clientName":"WEB","clientVersion":"2.20220914.06.00","platform":"DESKTOP","originalUrl":"https://www.youtube.com/","hl":""##,
lang,
r##"","gl":"US"},"request":{"internalExperimentFlags":[],"useSsl":true},"user":{"lockedSafetyMode":false}},"params":"EgZ2aWRlb3MYASAAMAE%3D","browseId":""##,
channel_id,
"\"}"
);
let resp = client let tab = &channel.contents.two_column_browse_results_renderer.tabs[0]
.post("https://www.youtube.com/youtubei/v1/browse?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8&prettyPrint=false") .tab_renderer
.header(header::CONTENT_TYPE, "application/json") .content
.body(body) .rich_grid_renderer;
.send().await?
.error_for_status()?;
let channel = resp.json::<Channel>().await?; let popular_token = tab.header.as_ref().and_then(|h| {
h.feed_filter_chip_bar_renderer.contents.get(1).map(|c| {
c.chip_cloud_chip_renderer
.navigation_endpoint
.continuation_command
.token
.to_owned()
})
});
Ok(ChannelData { let mut view_counts: BTreeMap<u64, String> = tab
view_counts: channel
.contents .contents
.two_column_browse_results_renderer
.tabs
.get(0)
.map(|tab| {
tab.tab_renderer.content.section_list_renderer.contents[0]
.item_section_renderer
.contents[0]
.grid_renderer
.items
.iter() .iter()
.map(|itm| { .map(|itm| {
let v = &itm.rich_item_renderer.content.video_renderer;
( (
util::parse_numeric(&itm.grid_video_renderer.view_count_text.text) util::parse_numeric(&v.view_count_text.text).unwrap_or_default(),
.unwrap(), v.short_view_count_text.text.to_owned(),
itm.grid_video_renderer
.short_view_count_text
.text
.to_owned(),
) )
}) })
.collect() .collect();
if let Some(popular_token) = popular_token {
let resp = query
.raw(
ClientType::Desktop,
"browse",
&QCont {
context: query.get_context(ClientType::Desktop, true, None).await,
continuation: &popular_token,
},
)
.await?;
let continuation = serde_json::from_str::<ContinuationResponse>(&resp)?;
continuation
.on_response_received_actions
.iter()
.for_each(|a| {
a.reload_continuation_items_command
.continuation_items
.iter()
.for_each(|itm| {
let v = &itm.rich_item_renderer.content.video_renderer;
view_counts.insert(
util::parse_numeric(&v.view_count_text.text).unwrap(),
v.short_view_count_text.text.to_owned(),
);
}) })
.unwrap_or_default(), });
}
Ok(ChannelData {
view_counts,
subscriber_count: channel
.header
.c4_tabbed_header_renderer
.subscriber_count_text
.text,
}) })
} }
async fn music_channel_subscribers(query: &RustyPipeQuery, channel_id: &str) -> Result<String> {
let resp = query
.raw(
ClientType::DesktopMusic,
"browse",
&QBrowse {
context: query
.get_context(ClientType::DesktopMusic, true, None)
.await,
browse_id: channel_id,
params: None,
},
)
.await?;
let channel = serde_json::from_str::<MusicChannel>(&resp)?;
channel
.header
.music_immersive_header_renderer
.subscription_button
.subscribe_button_renderer
.subscriber_count_text
.runs
.into_iter()
.next()
.map(|t| t.text)
.ok_or_else(|| anyhow::anyhow!("no text"))
}

View file

@ -3,7 +3,6 @@ use std::{
fs::File, fs::File,
hash::Hash, hash::Hash,
io::BufReader, io::BufReader,
path::Path,
}; };
use futures::{stream, StreamExt}; use futures::{stream, StreamExt};
@ -11,11 +10,10 @@ use path_macro::path;
use rustypipe::{ use rustypipe::{
client::RustyPipe, client::RustyPipe,
param::{locale::LANGUAGES, Language}, param::{locale::LANGUAGES, Language},
timeago::{self, TimeAgo},
}; };
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::util; use crate::util::{self, DICT_DIR};
type CollectedDates = BTreeMap<Language, BTreeMap<DateCase, String>>; type CollectedDates = BTreeMap<Language, BTreeMap<DateCase, String>>;
@ -62,16 +60,13 @@ enum DateCase {
/// ///
/// Because the relative dates change with time, the first three playlists /// Because the relative dates change with time, the first three playlists
/// have to checked and eventually changed before running the program. /// have to checked and eventually changed before running the program.
pub async fn collect_dates(project_root: &Path, concurrency: usize) { pub async fn collect_dates(concurrency: usize) {
let json_path = path!(project_root / "testfiles" / "dict" / "playlist_samples.json"); let json_path = path!(*DICT_DIR / "playlist_samples.json");
// These are the sample playlists // These are the sample playlists
let cases = [ let cases = [
( (DateCase::Today, "PLMC9KNkIncKtPzgY-5rmhvj7fax8fdxoj"),
DateCase::Today, (DateCase::Yesterday, "PLcirGkCPmbmFeQ1sm4wFciF03D_EroIfr"),
"RDCLAK5uy_kj3rhiar1LINmyDcuFnXihEO0K1NQa2jI",
),
(DateCase::Yesterday, "PL7zsB-C3aNu2yRY2869T0zj1FhtRIu5am"),
(DateCase::Ago, "PLmB6td997u3kUOrfFwkULZ910ho44oQSy"), (DateCase::Ago, "PLmB6td997u3kUOrfFwkULZ910ho44oQSy"),
(DateCase::Jan, "PL1J-6JOckZtFjcni6Xj1pLYglJp6JCpKD"), (DateCase::Jan, "PL1J-6JOckZtFjcni6Xj1pLYglJp6JCpKD"),
(DateCase::Feb, "PL1J-6JOckZtETrbzwZE7mRIIK6BzWNLAs"), (DateCase::Feb, "PL1J-6JOckZtETrbzwZE7mRIIK6BzWNLAs"),
@ -90,6 +85,7 @@ pub async fn collect_dates(project_root: &Path, concurrency: usize) {
let rp = RustyPipe::new(); let rp = RustyPipe::new();
let collected_dates = stream::iter(LANGUAGES) let collected_dates = stream::iter(LANGUAGES)
.map(|lang| { .map(|lang| {
println!("{lang}");
let rp = rp.clone(); let rp = rp.clone();
async move { async move {
let mut map: BTreeMap<DateCase, String> = BTreeMap::new(); let mut map: BTreeMap<DateCase, String> = BTreeMap::new();
@ -115,13 +111,13 @@ pub async fn collect_dates(project_root: &Path, concurrency: usize) {
/// ///
/// The ND (no digit) tokens (today, tomorrow) of some languages cannot be /// The ND (no digit) tokens (today, tomorrow) of some languages cannot be
/// parsed automatically and require manual work. /// parsed automatically and require manual work.
pub fn write_samples_to_dict(project_root: &Path) { pub fn write_samples_to_dict() {
let json_path = path!(project_root / "testfiles" / "dict" / "playlist_samples.json"); let json_path = path!(*DICT_DIR / "playlist_samples.json");
let json_file = File::open(json_path).unwrap(); let json_file = File::open(json_path).unwrap();
let collected_dates: CollectedDates = let collected_dates: CollectedDates =
serde_json::from_reader(BufReader::new(json_file)).unwrap(); serde_json::from_reader(BufReader::new(json_file)).unwrap();
let mut dict = util::read_dict(project_root); let mut dict = util::read_dict();
let langs = dict.keys().map(|k| k.to_owned()).collect::<Vec<_>>(); let langs = dict.keys().map(|k| k.to_owned()).collect::<Vec<_>>();
let months = [ let months = [
@ -168,19 +164,7 @@ pub fn write_samples_to_dict(project_root: &Path) {
let collect_nd_tokens = !matches!( let collect_nd_tokens = !matches!(
lang, lang,
// ND tokens of these languages must be edited manually // ND tokens of these languages must be edited manually
Language::Ja Language::Ja | Language::ZhCn | Language::ZhHk | Language::ZhTw
| Language::ZhCn
| Language::ZhHk
| Language::ZhTw
| Language::Ko
| Language::Gu
| Language::Pa
| Language::Ur
| Language::Uz
| Language::Te
| Language::PtPt
// Singhalese YT translation has an error (today == tomorrow)
| Language::Si
); );
dict_entry.months = BTreeMap::new(); dict_entry.months = BTreeMap::new();
@ -212,20 +196,6 @@ pub fn write_samples_to_dict(project_root: &Path) {
parse(datestr_table.get(&DateCase::Jan).unwrap(), 0); parse(datestr_table.get(&DateCase::Jan).unwrap(), 0);
} }
// n days ago
{
let datestr = datestr_table.get(&DateCase::Ago).unwrap();
let tago = timeago::parse_timeago(lang, datestr);
assert_eq!(
tago,
Some(TimeAgo {
n: 3,
unit: timeago::TimeUnit::Day
}),
"lang: {lang}, txt: {datestr}"
);
}
// Absolute dates (Jan 3, 2020) // Absolute dates (Jan 3, 2020)
months.iter().enumerate().for_each(|(n, m)| { months.iter().enumerate().for_each(|(n, m)| {
let datestr = datestr_table.get(m).unwrap(); let datestr = datestr_table.get(m).unwrap();
@ -291,13 +261,11 @@ pub fn write_samples_to_dict(project_root: &Path) {
}; };
}); });
if datestr_tables.len() == 1 { if datestr_tables.len() == 1 && dict_entry.timeago_nd_tokens.len() > 2 {
assert_eq!( println!(
dict_entry.timeago_nd_tokens.len(), "INFO: {} has {} nd_tokens. Check manually.",
2,
"lang: {}, nd_tokens: {:?}",
lang, lang,
&dict_entry.timeago_nd_tokens dict_entry.timeago_nd_tokens.len()
); );
} }
} }
@ -305,5 +273,5 @@ pub fn write_samples_to_dict(project_root: &Path) {
dict_entry.date_order = num_order; dict_entry.date_order = num_order;
} }
util::write_dict(project_root, &dict); util::write_dict(dict);
} }

View file

@ -0,0 +1,382 @@
use std::{
collections::{BTreeMap, HashMap},
fs::File,
io::BufReader,
};
use anyhow::Result;
use futures::{stream, StreamExt};
use path_macro::path;
use rustypipe::{
client::{ClientType, RustyPipe, RustyPipeQuery},
param::{locale::LANGUAGES, Language},
};
use crate::{
model::{Channel, QBrowse, TimeAgo, TimeUnit},
util::{self, DICT_DIR},
};
/// Collected duration samples per language:
/// textual duration label (accessibility text) -> video length in seconds.
type CollectedDurations = BTreeMap<Language, BTreeMap<String, u32>>;
/// Collect the textual video duration labels in every supported language
/// and write them to `testfiles/dict/video_duration_samples.json`.
///
/// YouTube only exposes the length of short videos in textual form. To parse
/// that text correctly we need samples of it in every language; they are
/// gathered from regular channel videos, which carry both the textual label
/// and the easy to parse "mm:ss" duration.
pub async fn collect_video_durations(concurrency: usize) {
    let json_path = path!(*DICT_DIR / "video_duration_samples.json");
    let client = RustyPipe::new();
    // Channels whose video tabs are used as the sample source
    let channels = [
        "UCq-Fj5jknLsUf-MWSy4_brA",
        "UCMcS5ITpSohfr8Ppzlo4vKw",
        "UCXuqSBlHAE6Xw-yeJA0Tunw",
    ];

    // Fetch all languages concurrently, at most `concurrency` at a time
    let durations: CollectedDurations = stream::iter(LANGUAGES)
        .map(|lang| {
            let query = client.query().lang(lang);
            async move {
                let mut samples = BTreeMap::new();
                for (n, ch_id) in channels.iter().enumerate() {
                    get_channel_vlengths(&query, ch_id, &mut samples)
                        .await
                        .unwrap();
                    println!("collected {lang}-{n}");
                }
                // Since we are only parsing shorts durations, we do not need
                // durations >= 1h
                let samples = samples
                    .into_iter()
                    .filter(|&(_, secs)| secs < 3600)
                    .collect();
                (lang, samples)
            }
        })
        .buffer_unordered(concurrency)
        .collect()
        .await;

    let file = File::create(json_path).unwrap();
    serde_json::to_writer_pretty(file, &durations).unwrap();
}
/// Parse the collected video duration samples
/// (`testfiles/dict/video_duration_samples.json`) and insert the extracted
/// unit words into the `timeago_tokens` of the dictionary, which is then
/// written back with `util::write_dict`.
pub fn parse_video_durations() {
    let json_path = path!(*DICT_DIR / "video_duration_samples.json");
    let json_file = File::open(json_path).unwrap();
    let durations: CollectedDurations = serde_json::from_reader(BufReader::new(json_file)).unwrap();
    let mut dict = util::read_dict();
    let langs = dict.keys().map(|k| k.to_owned()).collect::<Vec<_>>();
    for lang in langs {
        let dict_entry = dict.entry(lang).or_default();
        // Also process the languages that share this dictionary entry;
        // their samples all feed the same `timeago_tokens` map.
        let mut e_langs = dict_entry.equivalent.clone();
        e_langs.push(lang);
        for lang in e_langs {
            // Candidate unit words. Value semantics:
            // Some(TimeAgo) => word consistently mapped to this unit so far,
            // None          => word was seen with conflicting units (ambiguous, dropped).
            let mut words = HashMap::new();
            /// Check that the parsed number `val` matches the expected
            /// minute/second value `expect` and, if it does, record the
            /// segment's word(s) as candidates for `unit`.
            ///
            /// `val * 2 == expect` is also accepted: a segment without any
            /// digits parses as `val == 1`, and the word then stands for
            /// `n == 2` of the unit (presumably a dual form in some
            /// languages — the stored `n` is `expect / val`).
            ///
            /// Returns whether the value matched at all.
            fn check_add_word(
                words: &mut HashMap<String, Option<TimeAgo>>,
                by_char: bool,
                val: u32,
                expect: u32,
                w: String,
                unit: TimeUnit,
            ) -> bool {
                let ok = val == expect || val * 2 == expect;
                if ok {
                    // Insert a single token; conflicting units mark it ambiguous (None)
                    let mut ins = |w: &str, val: &mut TimeAgo| {
                        // Filter stop words (conjunctions like "and" in the
                        // various languages — they carry no unit information)
                        if matches!(
                            w,
                            "na" | "y"
                                | "و"
                                | "ja"
                                | "et"
                                | "e"
                                | "i"
                                | "և"
                                | "og"
                                | "en"
                                | "и"
                                | "a"
                                | "és"
                                | "ir"
                                | "un"
                                | "și"
                                | "in"
                                | "และ"
                                | "\u{0456}"
                                | ""
                                | "eta"
                                | "અને"
                                | "और"
                                | "കൂടാതെ"
                                | "සහ"
                        ) {
                            return;
                        }
                        let entry = words.entry(w.to_owned()).or_insert(Some(*val));
                        if let Some(e) = entry {
                            if e != val {
                                *entry = None;
                            }
                        }
                    };
                    // n is 1 (digits present) or 2 (`val * 2 == expect` case)
                    let mut val = TimeAgo {
                        n: (expect / val).try_into().unwrap(),
                        unit,
                    };
                    if by_char {
                        // Languages parsed character-by-character: each
                        // non-whitespace character is its own token
                        w.chars().for_each(|c| {
                            if !c.is_whitespace() {
                                ins(&c.to_string(), &mut val);
                            }
                        });
                    } else {
                        w.split_whitespace().for_each(|w| ins(w, &mut val));
                    }
                }
                ok
            }
            /// Parse one duration sample (`txt` with known length `d` in
            /// seconds) and feed its unit words into `words`.
            ///
            /// Two segments  => first is minutes, second is seconds.
            /// One segment   => minutes if the duration has no seconds part,
            ///                  seconds if it has no minutes part; otherwise
            ///                  the text is split at ',' or the Arabic
            ///                  conjunction 'و' and both halves are parsed
            ///                  recursively.
            /// More than two segments is a hard error (panic).
            fn parse(
                words: &mut HashMap<String, Option<TimeAgo>>,
                lang: Language,
                by_char: bool,
                txt: &str,
                d: u32,
            ) {
                let (m, s) = split_duration(d);
                // Si/Sw use the alternate segmentation mode of
                // split_duration_txt (unit word position differs —
                // NOTE(review): presumably the word precedes the number;
                // confirm against actual samples)
                let mut parts =
                    split_duration_txt(txt, matches!(lang, Language::Si | Language::Sw))
                        .into_iter();
                let p1 = parts.next().unwrap();
                // A segment without digits counts as 1 (e.g. "one minute")
                let p1_n = p1.digits.parse::<u32>().unwrap_or(1);
                let p2: Option<DurationTxtSegment> = parts.next();
                match p2 {
                    Some(p2) => {
                        let p2_n = p2.digits.parse::<u32>().unwrap_or(1);
                        assert!(
                            check_add_word(words, by_char, p1_n, m, p1.word, TimeUnit::Minute),
                            "{txt}: min parse error"
                        );
                        assert!(
                            check_add_word(words, by_char, p2_n, s, p2.word, TimeUnit::Second),
                            "{txt}: sec parse error"
                        );
                    }
                    None => {
                        if s == 0 {
                            assert!(
                                check_add_word(words, by_char, p1_n, m, p1.word, TimeUnit::Minute),
                                "{txt}: min parse error"
                            );
                        } else if m == 0 {
                            assert!(
                                check_add_word(words, by_char, p1_n, s, p1.word, TimeUnit::Second),
                                "{txt}: sec parse error"
                            );
                        } else {
                            // Both minutes and seconds present but only one
                            // segment found: split manually at the separator
                            let p = txt
                                .find([',', 'و'])
                                .unwrap_or_else(|| panic!("`{txt}`: only 1 part"));
                            parse(words, lang, by_char, &txt[0..p], m);
                            parse(words, lang, by_char, &txt[p..], s);
                        }
                    }
                }
                assert!(parts.next().is_none(), "`{txt}`: more than 2 parts");
            }
            // Panics if the sample file lacks this language (indexing `durations`)
            for (txt, d) in &durations[&lang] {
                parse(&mut words, lang, dict_entry.by_char, txt, *d);
            }
            // dbg!(&words);
            // Keep only unambiguous words and store them as timeago tokens
            words.into_iter().for_each(|(k, v)| {
                if let Some(v) = v {
                    dict_entry.timeago_tokens.insert(k, v.to_string());
                }
            });
        }
    }
    util::write_dict(dict);
}
/// Split a duration given in seconds into full minutes and leftover seconds.
fn split_duration(d: u32) -> (u32, u32) {
    let minutes = d / 60;
    let seconds = d - minutes * 60;
    (minutes, seconds)
}
#[derive(Debug, Default)]
struct DurationTxtSegment {
digits: String,
word: String,
}
fn split_duration_txt(txt: &str, start_c: bool) -> Vec<DurationTxtSegment> {
let mut segments = Vec::new();
// 1: parse digits, 2: parse word
let mut state: u8 = 0;
let mut seg = DurationTxtSegment::default();
for c in txt.chars() {
if c.is_ascii_digit() {
if state == 2 && (!seg.digits.is_empty() || (!start_c && segments.is_empty())) {
segments.push(seg);
seg = DurationTxtSegment::default();
}
seg.digits.push(c);
state = 1;
} else {
if (state == 1) && (!seg.word.is_empty() || (start_c && segments.is_empty())) {
segments.push(seg);
seg = DurationTxtSegment::default();
}
if c != ',' {
c.to_lowercase().for_each(|c| seg.word.push(c));
}
state = 2;
}
}
if !seg.word.is_empty() || !seg.digits.is_empty() {
segments.push(seg);
}
segments
}
/// Fetch the videos tab of the given channel and insert every video's
/// textual duration label (taken from the duration badge's accessibility
/// text) mapped to its length in seconds into `map`.
///
/// # Errors
/// Fails if the browse request or the JSON deserialization fails.
async fn get_channel_vlengths(
    query: &RustyPipeQuery,
    channel_id: &str,
    map: &mut BTreeMap<String, u32>,
) -> Result<()> {
    let body = QBrowse {
        context: query.get_context(ClientType::Desktop, true, None).await,
        browse_id: channel_id,
        // Static params selecting the channel's videos tab
        params: Some("EgZ2aWRlb3MYASAAMAE"),
    };
    let response = query.raw(ClientType::Desktop, "browse", &body).await?;
    let channel = serde_json::from_str::<Channel>(&response)?;

    // The first tab holds the video grid
    let grid = channel
        .contents
        .two_column_browse_results_renderer
        .tabs
        .into_iter()
        .next()
        .unwrap()
        .tab_renderer
        .content
        .rich_grid_renderer;

    for item in grid.contents {
        let length_text = item.rich_item_renderer.content.video_renderer.length_text;
        let secs = util::parse_video_length(&length_text.simple_text).unwrap();
        map.insert(length_text.accessibility.accessibility_data.label, secs);
    }
    Ok(())
}
/// Local mirror of [`intl_pluralrules::PluralCategory`] with `Hash` and
/// ordering derives, so categories can be collected into a `HashSet`
/// (see the `check_video_duration_samples` test).
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)]
enum PluralCategory {
    Zero,
    One,
    Two,
    Few,
    Many,
    Other,
}
impl From<intl_pluralrules::PluralCategory> for PluralCategory {
    /// Map each upstream plural category 1:1 onto the local variant.
    fn from(value: intl_pluralrules::PluralCategory) -> Self {
        match value {
            intl_pluralrules::PluralCategory::ZERO => Self::Zero,
            intl_pluralrules::PluralCategory::ONE => Self::One,
            intl_pluralrules::PluralCategory::TWO => Self::Two,
            intl_pluralrules::PluralCategory::FEW => Self::Few,
            intl_pluralrules::PluralCategory::MANY => Self::Many,
            intl_pluralrules::PluralCategory::OTHER => Self::Other,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    use std::collections::HashSet;
    use std::io::BufReader;

    use intl_pluralrules::{PluralRuleType, PluralRules};
    use unic_langid::LanguageIdentifier;

    /// Verify that the duration sample set covers all pluralization variants of the languages
    #[test]
    fn check_video_duration_samples() {
        let json_path = path!(*DICT_DIR / "video_duration_samples.json");
        let json_file = File::open(json_path).unwrap();
        let durations: CollectedDurations =
            serde_json::from_reader(BufReader::new(json_file)).unwrap();

        let mut failed = false;
        for (lang, durations) in durations {
            // Plural rules are keyed by the base language (e.g. "pt" for "pt-PT")
            let ul: LanguageIdentifier =
                lang.to_string().split('-').next().unwrap().parse().unwrap();
            let pr = PluralRules::create(ul, PluralRuleType::CARDINAL).expect(&lang.to_string());

            // Every plural category that can occur for 1..59 minutes/seconds
            let mut plurals_m: HashSet<PluralCategory> = HashSet::new();
            for n in 1..60 {
                plurals_m.insert(pr.select(n).unwrap().into());
            }
            let mut plurals_s = plurals_m.clone();

            // Remove each category covered by a sample; leftovers are uncovered
            durations.values().for_each(|v| {
                let (m, s) = split_duration(*v);
                plurals_m.remove(&pr.select(m).unwrap().into());
                plurals_s.remove(&pr.select(s).unwrap().into());
            });

            if !plurals_m.is_empty() {
                println!("{lang}: missing minutes {plurals_m:?}");
                failed = true;
            }
            if !plurals_s.is_empty() {
                // BUGFIX: previously printed the minutes set (plurals_m) here
                println!("{lang}: missing seconds {plurals_s:?}");
                failed = true;
            }
        }
        assert!(!failed);
    }

    #[test]
    fn t_split_duration_text() {
        // video duration: Arabic text without any digits
        let res = split_duration_txt("دقيقة وثانيتان", true);
        dbg!(&res);
        // No digits and no comma separators => a single word-only segment
        assert_eq!(res.len(), 1);
        assert!(res[0].digits.is_empty());
    }
}

View file

@ -5,6 +5,7 @@ use std::{
sync::Mutex, sync::Mutex,
}; };
use path_macro::path;
use rustypipe::{ use rustypipe::{
client::{ClientType, RustyPipe}, client::{ClientType, RustyPipe},
param::{ param::{
@ -14,55 +15,54 @@ use rustypipe::{
report::{Report, Reporter}, report::{Report, Reporter},
}; };
pub async fn download_testfiles(project_root: &Path) { use crate::util::TESTFILES_DIR;
let mut testfiles = project_root.to_path_buf();
testfiles.push("testfiles");
player(&testfiles).await; pub async fn download_testfiles() {
player_model(&testfiles).await; player().await;
playlist(&testfiles).await; player_model().await;
playlist_cont(&testfiles).await; playlist().await;
video_details(&testfiles).await; playlist_cont().await;
comments_top(&testfiles).await; video_details().await;
comments_latest(&testfiles).await; comments_top().await;
recommendations(&testfiles).await; comments_latest().await;
channel_videos(&testfiles).await; recommendations().await;
channel_shorts(&testfiles).await; channel_videos().await;
channel_livestreams(&testfiles).await; channel_shorts().await;
channel_playlists(&testfiles).await; channel_livestreams().await;
channel_info(&testfiles).await; channel_playlists().await;
channel_videos_cont(&testfiles).await; channel_info().await;
channel_playlists_cont(&testfiles).await; channel_videos_cont().await;
search(&testfiles).await; channel_playlists_cont().await;
search_cont(&testfiles).await; search().await;
search_playlists(&testfiles).await; search_cont().await;
search_empty(&testfiles).await; search_playlists().await;
startpage(&testfiles).await; search_empty().await;
startpage_cont(&testfiles).await; startpage().await;
trending(&testfiles).await; startpage_cont().await;
trending().await;
music_playlist(&testfiles).await; music_playlist().await;
music_playlist_cont(&testfiles).await; music_playlist_cont().await;
music_playlist_related(&testfiles).await; music_playlist_related().await;
music_album(&testfiles).await; music_album().await;
music_search(&testfiles).await; music_search().await;
music_search_tracks(&testfiles).await; music_search_tracks().await;
music_search_albums(&testfiles).await; music_search_albums().await;
music_search_artists(&testfiles).await; music_search_artists().await;
music_search_playlists(&testfiles).await; music_search_playlists().await;
music_search_cont(&testfiles).await; music_search_cont().await;
music_search_suggestion(&testfiles).await; music_search_suggestion().await;
music_artist(&testfiles).await; music_artist().await;
music_details(&testfiles).await; music_details().await;
music_lyrics(&testfiles).await; music_lyrics().await;
music_related(&testfiles).await; music_related().await;
music_radio(&testfiles).await; music_radio().await;
music_radio_cont(&testfiles).await; music_radio_cont().await;
music_new_albums(&testfiles).await; music_new_albums().await;
music_new_videos(&testfiles).await; music_new_videos().await;
music_charts(&testfiles).await; music_charts().await;
music_genres(&testfiles).await; music_genres().await;
music_genre(&testfiles).await; music_genre().await;
} }
const CLIENT_TYPES: [ClientType; 5] = [ const CLIENT_TYPES: [ClientType; 5] = [
@ -136,14 +136,12 @@ fn rp_testfile(json_path: &Path) -> RustyPipe {
.build() .build()
} }
async fn player(testfiles: &Path) { async fn player() {
let video_id = "pPvd8UxmSbQ"; let video_id = "pPvd8UxmSbQ";
for client_type in CLIENT_TYPES { for client_type in CLIENT_TYPES {
let mut json_path = testfiles.to_path_buf(); let json_path =
json_path.push("player"); path!(*TESTFILES_DIR / "player" / format!("{client_type:?}_video.json").to_lowercase());
json_path.push(format!("{client_type:?}_video.json").to_lowercase());
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -156,14 +154,12 @@ async fn player(testfiles: &Path) {
} }
} }
async fn player_model(testfiles: &Path) { async fn player_model() {
let rp = RustyPipe::builder().strict().build(); let rp = RustyPipe::builder().strict().build();
for (name, id) in [("multilanguage", "tVWWp1PqDus"), ("hdr", "LXb3EKWsInQ")] { for (name, id) in [("multilanguage", "tVWWp1PqDus"), ("hdr", "LXb3EKWsInQ")] {
let mut json_path = testfiles.to_path_buf(); let json_path =
json_path.push("player_model"); path!(*TESTFILES_DIR / "player_model" / format!("{name}.json").to_lowercase());
json_path.push(format!("{name}.json").to_lowercase());
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -180,15 +176,13 @@ async fn player_model(testfiles: &Path) {
} }
} }
async fn playlist(testfiles: &Path) { async fn playlist() {
for (name, id) in [ for (name, id) in [
("short", "RDCLAK5uy_kFQXdnqMaQCVx2wpUM4ZfbsGCDibZtkJk"), ("short", "RDCLAK5uy_kFQXdnqMaQCVx2wpUM4ZfbsGCDibZtkJk"),
("long", "PL5dDx681T4bR7ZF1IuWzOv1omlRbE7PiJ"), ("long", "PL5dDx681T4bR7ZF1IuWzOv1omlRbE7PiJ"),
("nomusic", "PL1J-6JOckZtE_P9Xx8D3b2O6w0idhuKBe"), ("nomusic", "PL1J-6JOckZtE_P9Xx8D3b2O6w0idhuKBe"),
] { ] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "playlist" / format!("playlist_{name}.json"));
json_path.push("playlist");
json_path.push(format!("playlist_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -198,10 +192,8 @@ async fn playlist(testfiles: &Path) {
} }
} }
async fn playlist_cont(testfiles: &Path) { async fn playlist_cont() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "playlist" / "playlist_cont.json");
json_path.push("playlist");
json_path.push("playlist_cont.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -217,7 +209,7 @@ async fn playlist_cont(testfiles: &Path) {
playlist.videos.next(rp.query()).await.unwrap().unwrap(); playlist.videos.next(rp.query()).await.unwrap().unwrap();
} }
async fn video_details(testfiles: &Path) { async fn video_details() {
for (name, id) in [ for (name, id) in [
("music", "XuM2onMGvTI"), ("music", "XuM2onMGvTI"),
("mv", "ZeerrnuLi5E"), ("mv", "ZeerrnuLi5E"),
@ -226,9 +218,8 @@ async fn video_details(testfiles: &Path) {
("live", "86YLFOog4GM"), ("live", "86YLFOog4GM"),
("agegate", "HRKu0cvrr_o"), ("agegate", "HRKu0cvrr_o"),
] { ] {
let mut json_path = testfiles.to_path_buf(); let json_path =
json_path.push("video_details"); path!(*TESTFILES_DIR / "video_details" / format!("video_details_{name}.json"));
json_path.push(format!("video_details_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -238,10 +229,8 @@ async fn video_details(testfiles: &Path) {
} }
} }
async fn comments_top(testfiles: &Path) { async fn comments_top() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "video_details" / "comments_top.json");
json_path.push("video_details");
json_path.push("comments_top.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -258,10 +247,8 @@ async fn comments_top(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn comments_latest(testfiles: &Path) { async fn comments_latest() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "video_details" / "comments_latest.json");
json_path.push("video_details");
json_path.push("comments_latest.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -278,10 +265,8 @@ async fn comments_latest(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn recommendations(testfiles: &Path) { async fn recommendations() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "video_details" / "recommendations.json");
json_path.push("video_details");
json_path.push("recommendations.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -293,7 +278,7 @@ async fn recommendations(testfiles: &Path) {
details.recommended.next(rp.query()).await.unwrap(); details.recommended.next(rp.query()).await.unwrap();
} }
async fn channel_videos(testfiles: &Path) { async fn channel_videos() {
for (name, id) in [ for (name, id) in [
("base", "UC2DjFE7Xf11URZqWBigcVOQ"), ("base", "UC2DjFE7Xf11URZqWBigcVOQ"),
("music", "UC_vmjW5e1xEHhYjY2a0kK1A"), // YouTube Music channels have no videos ("music", "UC_vmjW5e1xEHhYjY2a0kK1A"), // YouTube Music channels have no videos
@ -302,9 +287,7 @@ async fn channel_videos(testfiles: &Path) {
("empty", "UCxBa895m48H5idw5li7h-0g"), ("empty", "UCxBa895m48H5idw5li7h-0g"),
("upcoming", "UCcvfHa-GHSOHFAjU0-Ie57A"), ("upcoming", "UCcvfHa-GHSOHFAjU0-Ie57A"),
] { ] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "channel" / format!("channel_videos_{name}.json"));
json_path.push("channel");
json_path.push(format!("channel_videos_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -314,10 +297,8 @@ async fn channel_videos(testfiles: &Path) {
} }
} }
async fn channel_shorts(testfiles: &Path) { async fn channel_shorts() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "channel" / "channel_shorts.json");
json_path.push("channel");
json_path.push("channel_shorts.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -329,10 +310,8 @@ async fn channel_shorts(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn channel_livestreams(testfiles: &Path) { async fn channel_livestreams() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "channel" / "channel_livestreams.json");
json_path.push("channel");
json_path.push("channel_livestreams.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -344,10 +323,8 @@ async fn channel_livestreams(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn channel_playlists(testfiles: &Path) { async fn channel_playlists() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "channel" / "channel_playlists.json");
json_path.push("channel");
json_path.push("channel_playlists.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -359,10 +336,8 @@ async fn channel_playlists(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn channel_info(testfiles: &Path) { async fn channel_info() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "channel" / "channel_info.json");
json_path.push("channel");
json_path.push("channel_info.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -374,10 +349,8 @@ async fn channel_info(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn channel_videos_cont(testfiles: &Path) { async fn channel_videos_cont() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "channel" / "channel_videos_cont.json");
json_path.push("channel");
json_path.push("channel_videos_cont.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -393,10 +366,8 @@ async fn channel_videos_cont(testfiles: &Path) {
videos.content.next(rp.query()).await.unwrap().unwrap(); videos.content.next(rp.query()).await.unwrap().unwrap();
} }
async fn channel_playlists_cont(testfiles: &Path) { async fn channel_playlists_cont() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "channel" / "channel_playlists_cont.json");
json_path.push("channel");
json_path.push("channel_playlists_cont.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -412,10 +383,8 @@ async fn channel_playlists_cont(testfiles: &Path) {
playlists.content.next(rp.query()).await.unwrap().unwrap(); playlists.content.next(rp.query()).await.unwrap().unwrap();
} }
async fn search(testfiles: &Path) { async fn search() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "search" / "default.json");
json_path.push("search");
json_path.push("default.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -424,10 +393,8 @@ async fn search(testfiles: &Path) {
rp.query().search("doobydoobap").await.unwrap(); rp.query().search("doobydoobap").await.unwrap();
} }
async fn search_cont(testfiles: &Path) { async fn search_cont() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "search" / "cont.json");
json_path.push("search");
json_path.push("cont.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -439,10 +406,8 @@ async fn search_cont(testfiles: &Path) {
search.items.next(rp.query()).await.unwrap().unwrap(); search.items.next(rp.query()).await.unwrap().unwrap();
} }
async fn search_playlists(testfiles: &Path) { async fn search_playlists() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "search" / "playlists.json");
json_path.push("search");
json_path.push("playlists.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -454,10 +419,8 @@ async fn search_playlists(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn search_empty(testfiles: &Path) { async fn search_empty() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "search" / "empty.json");
json_path.push("search");
json_path.push("empty.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -474,10 +437,8 @@ async fn search_empty(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn startpage(testfiles: &Path) { async fn startpage() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "trends" / "startpage.json");
json_path.push("trends");
json_path.push("startpage.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -486,10 +447,8 @@ async fn startpage(testfiles: &Path) {
rp.query().startpage().await.unwrap(); rp.query().startpage().await.unwrap();
} }
async fn startpage_cont(testfiles: &Path) { async fn startpage_cont() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "trends" / "startpage_cont.json");
json_path.push("trends");
json_path.push("startpage_cont.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -501,10 +460,8 @@ async fn startpage_cont(testfiles: &Path) {
startpage.next(rp.query()).await.unwrap(); startpage.next(rp.query()).await.unwrap();
} }
async fn trending(testfiles: &Path) { async fn trending() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "trends" / "trending_videos.json");
json_path.push("trends");
json_path.push("trending.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -513,15 +470,13 @@ async fn trending(testfiles: &Path) {
rp.query().trending().await.unwrap(); rp.query().trending().await.unwrap();
} }
async fn music_playlist(testfiles: &Path) { async fn music_playlist() {
for (name, id) in [ for (name, id) in [
("short", "RDCLAK5uy_kFQXdnqMaQCVx2wpUM4ZfbsGCDibZtkJk"), ("short", "RDCLAK5uy_kFQXdnqMaQCVx2wpUM4ZfbsGCDibZtkJk"),
("long", "PL5dDx681T4bR7ZF1IuWzOv1omlRbE7PiJ"), ("long", "PL5dDx681T4bR7ZF1IuWzOv1omlRbE7PiJ"),
("nomusic", "PL1J-6JOckZtE_P9Xx8D3b2O6w0idhuKBe"), ("nomusic", "PL1J-6JOckZtE_P9Xx8D3b2O6w0idhuKBe"),
] { ] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_playlist" / format!("playlist_{name}.json"));
json_path.push("music_playlist");
json_path.push(format!("playlist_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -531,10 +486,8 @@ async fn music_playlist(testfiles: &Path) {
} }
} }
async fn music_playlist_cont(testfiles: &Path) { async fn music_playlist_cont() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_playlist" / "playlist_cont.json");
json_path.push("music_playlist");
json_path.push("playlist_cont.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -550,10 +503,8 @@ async fn music_playlist_cont(testfiles: &Path) {
playlist.tracks.next(rp.query()).await.unwrap().unwrap(); playlist.tracks.next(rp.query()).await.unwrap().unwrap();
} }
async fn music_playlist_related(testfiles: &Path) { async fn music_playlist_related() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_playlist" / "playlist_related.json");
json_path.push("music_playlist");
json_path.push("playlist_related.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -574,7 +525,7 @@ async fn music_playlist_related(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn music_album(testfiles: &Path) { async fn music_album() {
for (name, id) in [ for (name, id) in [
("one_artist", "MPREb_nlBWQROfvjo"), ("one_artist", "MPREb_nlBWQROfvjo"),
("various_artists", "MPREb_8QkDeEIawvX"), ("various_artists", "MPREb_8QkDeEIawvX"),
@ -582,9 +533,7 @@ async fn music_album(testfiles: &Path) {
("description", "MPREb_PiyfuVl6aYd"), ("description", "MPREb_PiyfuVl6aYd"),
("unavailable", "MPREb_AzuWg8qAVVl"), ("unavailable", "MPREb_AzuWg8qAVVl"),
] { ] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_playlist" / format!("album_{name}.json"));
json_path.push("music_playlist");
json_path.push(format!("album_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -594,16 +543,14 @@ async fn music_album(testfiles: &Path) {
} }
} }
async fn music_search(testfiles: &Path) { async fn music_search() {
for (name, query) in [ for (name, query) in [
("default", "black mamba"), ("default", "black mamba"),
("typo", "liblingsmensch"), ("typo", "liblingsmensch"),
("radio", "pop radio"), ("radio", "pop radio"),
("artist", "taylor swift"), ("artist", "taylor swift"),
] { ] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_search" / format!("main_{name}.json"));
json_path.push("music_search");
json_path.push(format!("main_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -613,7 +560,7 @@ async fn music_search(testfiles: &Path) {
} }
} }
async fn music_search_tracks(testfiles: &Path) { async fn music_search_tracks() {
for (name, query, videos) in [ for (name, query, videos) in [
("default", "black mamba", false), ("default", "black mamba", false),
("videos", "black mamba", true), ("videos", "black mamba", true),
@ -624,9 +571,7 @@ async fn music_search_tracks(testfiles: &Path) {
false, false,
), ),
] { ] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_search" / format!("tracks_{name}.json"));
json_path.push("music_search");
json_path.push(format!("tracks_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -640,10 +585,8 @@ async fn music_search_tracks(testfiles: &Path) {
} }
} }
async fn music_search_albums(testfiles: &Path) { async fn music_search_albums() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_search" / "albums.json");
json_path.push("music_search");
json_path.push("albums.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -652,10 +595,8 @@ async fn music_search_albums(testfiles: &Path) {
rp.query().music_search_albums("black mamba").await.unwrap(); rp.query().music_search_albums("black mamba").await.unwrap();
} }
async fn music_search_artists(testfiles: &Path) { async fn music_search_artists() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_search" / "artists.json");
json_path.push("music_search");
json_path.push("artists.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -667,11 +608,9 @@ async fn music_search_artists(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn music_search_playlists(testfiles: &Path) { async fn music_search_playlists() {
for (name, community) in [("ytm", false), ("community", true)] { for (name, community) in [("ytm", false), ("community", true)] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_search" / format!("playlists_{name}.json"));
json_path.push("music_search");
json_path.push(format!("playlists_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -684,10 +623,8 @@ async fn music_search_playlists(testfiles: &Path) {
} }
} }
async fn music_search_cont(testfiles: &Path) { async fn music_search_cont() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_search" / "tracks_cont.json");
json_path.push("music_search");
json_path.push("tracks_cont.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -699,11 +636,9 @@ async fn music_search_cont(testfiles: &Path) {
res.items.next(rp.query()).await.unwrap().unwrap(); res.items.next(rp.query()).await.unwrap().unwrap();
} }
async fn music_search_suggestion(testfiles: &Path) { async fn music_search_suggestion() {
for (name, query) in [("default", "t"), ("empty", "reujbhevmfndxnjrze")] { for (name, query) in [("default", "t"), ("empty", "reujbhevmfndxnjrze")] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_search" / format!("suggestion_{name}.json"));
json_path.push("music_search");
json_path.push(format!("suggestion_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -713,7 +648,7 @@ async fn music_search_suggestion(testfiles: &Path) {
} }
} }
async fn music_artist(testfiles: &Path) { async fn music_artist() {
for (name, id, all_albums) in [ for (name, id, all_albums) in [
("default", "UClmXPfaYhXOYsNn_QUyheWQ", true), ("default", "UClmXPfaYhXOYsNn_QUyheWQ", true),
("no_more_albums", "UC_vmjW5e1xEHhYjY2a0kK1A", true), ("no_more_albums", "UC_vmjW5e1xEHhYjY2a0kK1A", true),
@ -722,9 +657,7 @@ async fn music_artist(testfiles: &Path) {
("only_more_singles", "UC0aXrjVxG5pZr99v77wZdPQ", true), ("only_more_singles", "UC0aXrjVxG5pZr99v77wZdPQ", true),
("secondary_channel", "UCC9192yGQD25eBZgFZ84MPw", false), ("secondary_channel", "UCC9192yGQD25eBZgFZ84MPw", false),
] { ] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_artist" / format!("artist_{name}.json"));
json_path.push("music_artist");
json_path.push(format!("artist_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -734,11 +667,9 @@ async fn music_artist(testfiles: &Path) {
} }
} }
async fn music_details(testfiles: &Path) { async fn music_details() {
for (name, id) in [("mv", "ZeerrnuLi5E"), ("track", "7nigXQS1Xb0")] { for (name, id) in [("mv", "ZeerrnuLi5E"), ("track", "7nigXQS1Xb0")] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_details" / format!("details_{name}.json"));
json_path.push("music_details");
json_path.push(format!("details_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -748,10 +679,8 @@ async fn music_details(testfiles: &Path) {
} }
} }
async fn music_lyrics(testfiles: &Path) { async fn music_lyrics() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_details" / "lyrics.json");
json_path.push("music_details");
json_path.push("lyrics.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -766,10 +695,8 @@ async fn music_lyrics(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn music_related(testfiles: &Path) { async fn music_related() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_details" / "related.json");
json_path.push("music_details");
json_path.push("related.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -784,11 +711,9 @@ async fn music_related(testfiles: &Path) {
.unwrap(); .unwrap();
} }
async fn music_radio(testfiles: &Path) { async fn music_radio() {
for (name, id) in [("mv", "RDAMVMZeerrnuLi5E"), ("track", "RDAMVM7nigXQS1Xb0")] { for (name, id) in [("mv", "RDAMVMZeerrnuLi5E"), ("track", "RDAMVM7nigXQS1Xb0")] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_details" / format!("radio_{name}.json"));
json_path.push("music_details");
json_path.push(format!("radio_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -798,10 +723,8 @@ async fn music_radio(testfiles: &Path) {
} }
} }
async fn music_radio_cont(testfiles: &Path) { async fn music_radio_cont() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_details" / "radio_cont.json");
json_path.push("music_details");
json_path.push("radio_cont.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -813,10 +736,8 @@ async fn music_radio_cont(testfiles: &Path) {
res.next(rp.query()).await.unwrap().unwrap(); res.next(rp.query()).await.unwrap().unwrap();
} }
async fn music_new_albums(testfiles: &Path) { async fn music_new_albums() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_new" / "albums_default.json");
json_path.push("music_new");
json_path.push("albums_default.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -825,10 +746,8 @@ async fn music_new_albums(testfiles: &Path) {
rp.query().music_new_albums().await.unwrap(); rp.query().music_new_albums().await.unwrap();
} }
async fn music_new_videos(testfiles: &Path) { async fn music_new_videos() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_new" / "videos_default.json");
json_path.push("music_new");
json_path.push("videos_default.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -837,11 +756,9 @@ async fn music_new_videos(testfiles: &Path) {
rp.query().music_new_videos().await.unwrap(); rp.query().music_new_videos().await.unwrap();
} }
async fn music_charts(testfiles: &Path) { async fn music_charts() {
for (name, country) in [("global", Some(Country::Zz)), ("US", Some(Country::Us))] { for (name, country) in [("global", Some(Country::Zz)), ("US", Some(Country::Us))] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_charts" / format!("charts_{name}.json"));
json_path.push("music_charts");
json_path.push(&format!("charts_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }
@ -851,10 +768,8 @@ async fn music_charts(testfiles: &Path) {
} }
} }
async fn music_genres(testfiles: &Path) { async fn music_genres() {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_genres" / "genres.json");
json_path.push("music_genres");
json_path.push("genres.json");
if json_path.exists() { if json_path.exists() {
return; return;
} }
@ -863,14 +778,12 @@ async fn music_genres(testfiles: &Path) {
rp.query().music_genres().await.unwrap(); rp.query().music_genres().await.unwrap();
} }
async fn music_genre(testfiles: &Path) { async fn music_genre() {
for (name, id) in [ for (name, id) in [
("default", "ggMPOg1uX1lMbVZmbzl6NlJ3"), ("default", "ggMPOg1uX1lMbVZmbzl6NlJ3"),
("mood", "ggMPOg1uX1JOQWZFeDByc2Jm"), ("mood", "ggMPOg1uX1JOQWZFeDByc2Jm"),
] { ] {
let mut json_path = testfiles.to_path_buf(); let json_path = path!(*TESTFILES_DIR / "music_genres" / format!("genre_{name}.json"));
json_path.push("music_genres");
json_path.push(&format!("genre_{name}.json"));
if json_path.exists() { if json_path.exists() {
continue; continue;
} }

View file

@ -1,13 +1,13 @@
use std::fmt::Write; use std::fmt::Write;
use std::path::Path;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use path_macro::path;
use regex::Regex; use regex::Regex;
use rustypipe::timeago::TimeUnit;
use crate::util; use crate::{
model::TimeUnit,
const TARGET_PATH: &str = "src/util/dictionary.rs"; util::{self, SRC_DIR},
};
fn parse_tu(tu: &str) -> (u8, Option<TimeUnit>) { fn parse_tu(tu: &str) -> (u8, Option<TimeUnit>) {
static TU_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r"^(\d*)(\w?)$").unwrap()); static TU_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r"^(\d*)(\w?)$").unwrap());
@ -30,23 +30,20 @@ fn parse_tu(tu: &str) -> (u8, Option<TimeUnit>) {
} }
} }
pub fn generate_dictionary(project_root: &Path) { pub fn generate_dictionary() {
let dict = util::read_dict(project_root); let dict = util::read_dict();
let code_head = r#"// This file is automatically generated. DO NOT EDIT. let code_head = r#"// This file is automatically generated. DO NOT EDIT.
// See codegen/gen_dictionary.rs for the generation code. // See codegen/gen_dictionary.rs for the generation code.
use crate::{ use crate::{
model::AlbumType, model::AlbumType,
param::Language, param::Language,
timeago::{DateCmp, TaToken, TimeUnit}, util::timeago::{DateCmp, TaToken, TimeUnit},
}; };
/// The dictionary contains the information required to parse dates and numbers /// The dictionary contains the information required to parse dates and numbers
/// in all supported languages. /// in all supported languages.
pub(crate) struct Entry { pub(crate) struct Entry {
/// Should the language be parsed by character instead of by word?
/// (e.g. Chinese/Japanese)
pub by_char: bool,
/// Tokens for parsing timeago strings. /// Tokens for parsing timeago strings.
/// ///
/// Format: Parsed token -> \[Quantity\] Identifier /// Format: Parsed token -> \[Quantity\] Identifier
@ -76,6 +73,10 @@ pub(crate) struct Entry {
/// ///
/// Format: Parsed token -> decimal power /// Format: Parsed token -> decimal power
pub number_tokens: phf::Map<&'static str, u8>, pub number_tokens: phf::Map<&'static str, u8>,
/// Tokens for parsing number strings with no digits (e.g. "No videos")
///
/// Format: Parsed token -> value
pub number_nd_tokens: phf::Map<&'static str, u8>,
/// Names of album types (Album, Single, ...) /// Names of album types (Album, Single, ...)
/// ///
/// Format: Parsed text -> Album type /// Format: Parsed text -> Album type
@ -141,6 +142,12 @@ pub(crate) fn entry(lang: Language) -> Entry {
number_tokens.entry(txt, &mag.to_string()); number_tokens.entry(txt, &mag.to_string());
}); });
// Number nd tokens
let mut number_nd_tokens = phf_codegen::Map::<&str>::new();
entry.number_nd_tokens.iter().for_each(|(txt, mag)| {
number_nd_tokens.entry(txt, &mag.to_string());
});
// Album types // Album types
let mut album_types = phf_codegen::Map::<&str>::new(); let mut album_types = phf_codegen::Map::<&str>::new();
entry.album_types.iter().for_each(|(txt, album_type)| { entry.album_types.iter().for_each(|(txt, album_type)| {
@ -151,17 +158,17 @@ pub(crate) fn entry(lang: Language) -> Entry {
let code_ta_nd_tokens = &ta_nd_tokens.build().to_string().replace('\n', "\n "); let code_ta_nd_tokens = &ta_nd_tokens.build().to_string().replace('\n', "\n ");
let code_months = &months.build().to_string().replace('\n', "\n "); let code_months = &months.build().to_string().replace('\n', "\n ");
let code_number_tokens = &number_tokens.build().to_string().replace('\n', "\n "); let code_number_tokens = &number_tokens.build().to_string().replace('\n', "\n ");
let code_number_nd_tokens = &number_nd_tokens.build().to_string().replace('\n', "\n ");
let code_album_types = &album_types.build().to_string().replace('\n', "\n "); let code_album_types = &album_types.build().to_string().replace('\n', "\n ");
let _ = write!(code_timeago_tokens, "{} => Entry {{\n by_char: {:?},\n timeago_tokens: {},\n date_order: {},\n months: {},\n timeago_nd_tokens: {},\n comma_decimal: {:?},\n number_tokens: {},\n album_types: {},\n }},\n ", write!(code_timeago_tokens, "{} => Entry {{\n timeago_tokens: {},\n date_order: {},\n months: {},\n timeago_nd_tokens: {},\n comma_decimal: {:?},\n number_tokens: {},\n number_nd_tokens: {},\n album_types: {},\n }},\n ",
selector, entry.by_char, code_ta_tokens, date_order, code_months, code_ta_nd_tokens, entry.comma_decimal, code_number_tokens, code_album_types); selector, code_ta_tokens, date_order, code_months, code_ta_nd_tokens, entry.comma_decimal, code_number_tokens, code_number_nd_tokens, code_album_types).unwrap();
}); });
code_timeago_tokens = code_timeago_tokens.trim_end().to_owned() + "\n }\n}\n"; code_timeago_tokens = code_timeago_tokens.trim_end().to_owned() + "\n }\n}\n";
let code = format!("{code_head}\n{code_timeago_tokens}"); let code = format!("{code_head}\n{code_timeago_tokens}");
let mut target_path = project_root.to_path_buf(); let target_path = path!(*SRC_DIR / "util" / "dictionary.rs");
target_path.push(TARGET_PATH);
std::fs::write(target_path, code).unwrap(); std::fs::write(target_path, code).unwrap();
} }

View file

@ -1,14 +1,15 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::fmt::Write; use std::fmt::Write;
use std::path::Path;
use path_macro::path;
use reqwest::header; use reqwest::header;
use reqwest::Client; use reqwest::Client;
use serde::Deserialize; use serde::Deserialize;
use serde_with::serde_as; use serde_with::serde_as;
use serde_with::VecSkipError; use serde_with::VecSkipError;
use crate::util::Text; use crate::model::Text;
use crate::util::SRC_DIR;
#[serde_as] #[serde_as]
#[derive(Clone, Debug, Deserialize)] #[derive(Clone, Debug, Deserialize)]
@ -137,7 +138,7 @@ struct LanguageCountryCommand {
hl: String, hl: String,
} }
pub async fn generate_locales(project_root: &Path) { pub async fn generate_locales() {
let (languages, countries) = get_locales().await; let (languages, countries) = get_locales().await;
let code_head = r#"// This file is automatically generated. DO NOT EDIT. let code_head = r#"// This file is automatically generated. DO NOT EDIT.
@ -288,8 +289,7 @@ pub enum Country {
"{code_head}\n{code_langs}\n{code_countries}\n{code_lang_array}\n{code_country_array}\n{code_lang_names}\n{code_country_names}\n{code_foot}" "{code_head}\n{code_langs}\n{code_countries}\n{code_lang_array}\n{code_country_array}\n{code_lang_names}\n{code_country_names}\n{code_foot}"
); );
let mut target_path = project_root.to_path_buf(); let target_path = path!(*SRC_DIR / "param" / "locale.rs");
target_path.push("src/param/locale.rs");
std::fs::write(target_path, code).unwrap(); std::fs::write(target_path, code).unwrap();
} }

View file

@ -2,21 +2,19 @@ mod abtest;
mod collect_album_types; mod collect_album_types;
mod collect_large_numbers; mod collect_large_numbers;
mod collect_playlist_dates; mod collect_playlist_dates;
mod collect_video_durations;
mod download_testfiles; mod download_testfiles;
mod gen_dictionary; mod gen_dictionary;
mod gen_locales; mod gen_locales;
mod model;
mod util; mod util;
use std::path::PathBuf;
use clap::{Parser, Subcommand}; use clap::{Parser, Subcommand};
#[derive(Parser)] #[derive(Parser)]
struct Cli { struct Cli {
#[clap(subcommand)] #[clap(subcommand)]
command: Commands, command: Commands,
#[clap(short = 'd', default_value = "..")]
project_root: PathBuf,
#[clap(short, default_value = "8")] #[clap(short, default_value = "8")]
concurrency: usize, concurrency: usize,
} }
@ -26,9 +24,11 @@ enum Commands {
CollectPlaylistDates, CollectPlaylistDates,
CollectLargeNumbers, CollectLargeNumbers,
CollectAlbumTypes, CollectAlbumTypes,
CollectVideoDurations,
ParsePlaylistDates, ParsePlaylistDates,
ParseLargeNumbers, ParseLargeNumbers,
ParseAlbumTypes, ParseAlbumTypes,
ParseVideoDurations,
GenLocales, GenLocales,
GenDict, GenDict,
DownloadTestfiles, DownloadTestfiles,
@ -47,28 +47,26 @@ async fn main() {
match cli.command { match cli.command {
Commands::CollectPlaylistDates => { Commands::CollectPlaylistDates => {
collect_playlist_dates::collect_dates(&cli.project_root, cli.concurrency).await; collect_playlist_dates::collect_dates(cli.concurrency).await;
} }
Commands::CollectLargeNumbers => { Commands::CollectLargeNumbers => {
collect_large_numbers::collect_large_numbers(&cli.project_root, cli.concurrency).await; collect_large_numbers::collect_large_numbers(cli.concurrency).await;
} }
Commands::CollectAlbumTypes => { Commands::CollectAlbumTypes => {
collect_album_types::collect_album_types(&cli.project_root, cli.concurrency).await; collect_album_types::collect_album_types(cli.concurrency).await;
} }
Commands::ParsePlaylistDates => { Commands::CollectVideoDurations => {
collect_playlist_dates::write_samples_to_dict(&cli.project_root) collect_video_durations::collect_video_durations(cli.concurrency).await;
} }
Commands::ParseLargeNumbers => { Commands::ParsePlaylistDates => collect_playlist_dates::write_samples_to_dict(),
collect_large_numbers::write_samples_to_dict(&cli.project_root) Commands::ParseLargeNumbers => collect_large_numbers::write_samples_to_dict(),
} Commands::ParseAlbumTypes => collect_album_types::write_samples_to_dict(),
Commands::ParseAlbumTypes => collect_album_types::write_samples_to_dict(&cli.project_root), Commands::ParseVideoDurations => collect_video_durations::parse_video_durations(),
Commands::GenLocales => { Commands::GenLocales => {
gen_locales::generate_locales(&cli.project_root).await; gen_locales::generate_locales().await;
}
Commands::GenDict => gen_dictionary::generate_dictionary(&cli.project_root),
Commands::DownloadTestfiles => {
download_testfiles::download_testfiles(&cli.project_root).await
} }
Commands::GenDict => gen_dictionary::generate_dictionary(),
Commands::DownloadTestfiles => download_testfiles::download_testfiles().await,
Commands::AbTest { id, n } => { Commands::AbTest { id, n } => {
match id { match id {
Some(id) => { Some(id) => {

295
codegen/src/model.rs Normal file
View file

@ -0,0 +1,295 @@
use std::collections::BTreeMap;
use rustypipe::{client::YTContext, model::AlbumType, param::Language};
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DefaultOnError, VecSkipError};
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
pub struct DictEntry {
/// List of languages that should be treated equally (e.g. EnUs/EnGb/EnIn)
pub equivalent: Vec<Language>,
/// Should the language be parsed by character instead of by word?
/// (e.g. Chinese/Japanese)
pub by_char: bool,
/// Tokens for parsing timeago strings.
///
/// Format: Parsed token -> \[Quantity\] Identifier
///
/// Identifiers: `Y`(ear), `M`(month), `W`(eek), `D`(ay),
/// `h`(our), `m`(inute), `s`(econd)
pub timeago_tokens: BTreeMap<String, String>,
/// Order in which to parse numeric date components. Formatted as
/// a string of date identifiers (Y, M, D).
///
/// Examples:
///
/// - 03.01.2020 => `"DMY"`
/// - Jan 3, 2020 => `"DY"`
pub date_order: String,
/// Tokens for parsing month names.
///
/// Format: Parsed token -> Month number (starting from 1)
pub months: BTreeMap<String, u8>,
/// Tokens for parsing date strings with no digits (e.g. Today, Tomorrow)
///
/// Format: Parsed token -> \[Quantity\] Identifier
pub timeago_nd_tokens: BTreeMap<String, String>,
/// Are commas (instead of points) used as decimal separators?
pub comma_decimal: bool,
/// Tokens for parsing decimal prefixes (K, M, B, ...)
///
/// Format: Parsed token -> decimal power
pub number_tokens: BTreeMap<String, u8>,
/// Tokens for parsing number strings with no digits (e.g. "No videos")
///
/// Format: Parsed token -> value
pub number_nd_tokens: BTreeMap<String, u8>,
/// Names of album types (Album, Single, ...)
///
/// Format: Parsed text -> Album type
pub album_types: BTreeMap<String, AlbumType>,
}
/// Parsed TimeAgo string, contains amount and time unit.
///
/// Example: "14 hours ago" => `TimeAgo {n: 14, unit: TimeUnit::Hour}`
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct TimeAgo {
/// Number of time units
pub n: u8,
/// Time unit
pub unit: TimeUnit,
}
impl ToString for TimeAgo {
fn to_string(&self) -> String {
if self.n > 1 {
format!("{}{}", self.n, self.unit.as_str())
} else {
self.unit.as_str().to_owned()
}
}
}
/// Parsed time unit
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[serde(rename_all = "lowercase")]
pub enum TimeUnit {
Second,
Minute,
Hour,
Day,
Week,
Month,
Year,
}
impl TimeUnit {
pub fn as_str(&self) -> &str {
match self {
TimeUnit::Second => "s",
TimeUnit::Minute => "m",
TimeUnit::Hour => "h",
TimeUnit::Day => "D",
TimeUnit::Week => "W",
TimeUnit::Month => "M",
TimeUnit::Year => "Y",
}
}
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct QBrowse<'a> {
pub context: YTContext<'a>,
pub browse_id: &'a str,
#[serde(skip_serializing_if = "Option::is_none")]
pub params: Option<&'a str>,
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct QCont<'a> {
pub context: YTContext<'a>,
pub continuation: &'a str,
}
#[derive(Clone, Debug, Deserialize)]
pub struct TextRuns {
pub runs: Vec<Text>,
}
#[derive(Clone, Debug, Deserialize)]
pub struct Text {
#[serde(alias = "simpleText")]
pub text: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Channel {
pub contents: Contents,
pub header: ChannelHeader,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ChannelHeader {
pub c4_tabbed_header_renderer: HeaderRenderer,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct HeaderRenderer {
pub subscriber_count_text: Text,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Contents {
pub two_column_browse_results_renderer: TabsRenderer,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TabsRenderer {
#[serde_as(as = "VecSkipError<_>")]
pub tabs: Vec<TabRendererWrap>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TabRendererWrap {
pub tab_renderer: TabRenderer,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TabRenderer {
pub content: RichGridRendererWrap,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RichGridRendererWrap {
pub rich_grid_renderer: RichGridRenderer,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RichGridRenderer {
#[serde_as(as = "VecSkipError<_>")]
pub contents: Vec<RichItemRendererWrap>,
#[serde(default)]
#[serde_as(as = "DefaultOnError")]
pub header: Option<RichGridHeader>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RichItemRendererWrap {
pub rich_item_renderer: RichItemRenderer,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RichItemRenderer {
pub content: VideoRendererWrap,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct VideoRendererWrap {
pub video_renderer: VideoRenderer,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct VideoRenderer {
/// `24,194 views`
pub view_count_text: Text,
/// `19K views`
pub short_view_count_text: Text,
pub length_text: LengthText,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LengthText {
/// `18 minutes, 26 seconds`
pub accessibility: Accessibility,
/// `18:26`
pub simple_text: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Accessibility {
pub accessibility_data: AccessibilityData,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AccessibilityData {
pub label: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RichGridHeader {
pub feed_filter_chip_bar_renderer: ChipBar,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ChipBar {
pub contents: Vec<Chip>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Chip {
pub chip_cloud_chip_renderer: ChipRenderer,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ChipRenderer {
pub navigation_endpoint: NavigationEndpoint,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct NavigationEndpoint {
pub continuation_command: ContinuationCommand,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ContinuationCommand {
pub token: String,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ContinuationResponse {
pub on_response_received_actions: Vec<ContinuationAction>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ContinuationAction {
pub reload_continuation_items_command: ContinuationItemsWrap,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ContinuationItemsWrap {
#[serde_as(as = "VecSkipError<_>")]
pub continuation_items: Vec<RichItemRendererWrap>,
}

View file

@ -1,84 +1,75 @@
use std::{ use std::{collections::BTreeMap, fs::File, io::BufReader, path::PathBuf, str::FromStr};
collections::BTreeMap,
fs::File,
io::BufReader,
path::{Path, PathBuf},
str::FromStr,
};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use path_macro::path; use path_macro::path;
use rustypipe::{model::AlbumType, param::Language}; use regex::Regex;
use rustypipe::param::Language;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
static DICT_PATH: Lazy<PathBuf> = Lazy::new(|| path!("testfiles" / "dict" / "dictionary.json")); use crate::model::DictEntry;
/// Get the path of the `testfiles` directory
pub static TESTFILES_DIR: Lazy<PathBuf> = Lazy::new(|| {
path!(env!("CARGO_MANIFEST_DIR") / ".." / "testfiles")
.canonicalize()
.unwrap()
});
/// Get the path of the `dict` directory
pub static DICT_DIR: Lazy<PathBuf> = Lazy::new(|| path!(*TESTFILES_DIR / "dict"));
/// Get the path of the `src` directory
pub static SRC_DIR: Lazy<PathBuf> = Lazy::new(|| path!(env!("CARGO_MANIFEST_DIR") / ".." / "src"));
type Dictionary = BTreeMap<Language, DictEntry>; type Dictionary = BTreeMap<Language, DictEntry>;
type DictionaryOverride = BTreeMap<Language, DictOverrideEntry>;
#[derive(Debug, Default, Serialize, Deserialize)] #[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)] #[serde(default)]
pub struct DictEntry { struct DictOverrideEntry {
/// List of languages that should be treated equally (e.g. EnUs/EnGb/EnIn) number_tokens: BTreeMap<String, Option<u8>>,
pub equivalent: Vec<Language>, number_nd_tokens: BTreeMap<String, Option<u8>>,
/// Should the language be parsed by character instead of by word?
/// (e.g. Chinese/Japanese)
pub by_char: bool,
/// Tokens for parsing timeago strings.
///
/// Format: Parsed token -> \[Quantity\] Identifier
///
/// Identifiers: `Y`(ear), `M`(month), `W`(eek), `D`(ay),
/// `h`(our), `m`(inute), `s`(econd)
pub timeago_tokens: BTreeMap<String, String>,
/// Order in which to parse numeric date components. Formatted as
/// a string of date identifiers (Y, M, D).
///
/// Examples:
///
/// - 03.01.2020 => `"DMY"`
/// - Jan 3, 2020 => `"DY"`
pub date_order: String,
/// Tokens for parsing month names.
///
/// Format: Parsed token -> Month number (starting from 1)
pub months: BTreeMap<String, u8>,
/// Tokens for parsing date strings with no digits (e.g. Today, Tomorrow)
///
/// Format: Parsed token -> \[Quantity\] Identifier
pub timeago_nd_tokens: BTreeMap<String, String>,
/// Are commas (instead of points) used as decimal separators?
pub comma_decimal: bool,
/// Tokens for parsing decimal prefixes (K, M, B, ...)
///
/// Format: Parsed token -> decimal power
pub number_tokens: BTreeMap<String, u8>,
/// Names of album types (Album, Single, ...)
///
/// Format: Parsed text -> Album type
pub album_types: BTreeMap<String, AlbumType>,
} }
#[derive(Clone, Debug, Deserialize)] pub fn read_dict() -> Dictionary {
pub struct TextRuns { let json_path = path!(*DICT_DIR / "dictionary.json");
pub runs: Vec<Text>,
}
#[derive(Clone, Debug, Deserialize)]
pub struct Text {
#[serde(alias = "simpleText")]
pub text: String,
}
pub fn read_dict(project_root: &Path) -> Dictionary {
let json_path = path!(project_root / *DICT_PATH);
let json_file = File::open(json_path).unwrap(); let json_file = File::open(json_path).unwrap();
serde_json::from_reader(BufReader::new(json_file)).unwrap() serde_json::from_reader(BufReader::new(json_file)).unwrap()
} }
pub fn write_dict(project_root: &Path, dict: &Dictionary) { fn read_dict_override() -> DictionaryOverride {
let json_path = path!(project_root / *DICT_PATH); let json_path = path!(*DICT_DIR / "dictionary_override.json");
let json_file = File::open(json_path).unwrap();
serde_json::from_reader(BufReader::new(json_file)).unwrap()
}
pub fn write_dict(dict: Dictionary) {
let dict_override = read_dict_override();
let json_path = path!(*DICT_DIR / "dictionary.json");
let json_file = File::create(json_path).unwrap(); let json_file = File::create(json_path).unwrap();
serde_json::to_writer_pretty(json_file, dict).unwrap();
fn apply_map<K: Clone + Ord, V: Clone>(map: &mut BTreeMap<K, V>, or: &BTreeMap<K, Option<V>>) {
or.iter().for_each(|(key, val)| match val {
Some(val) => {
map.insert(key.clone(), val.clone());
}
None => {
map.remove(key);
}
});
}
let dict: Dictionary = dict
.into_iter()
.map(|(lang, mut entry)| {
if let Some(or) = dict_override.get(&lang) {
apply_map(&mut entry.number_tokens, &or.number_tokens);
apply_map(&mut entry.number_nd_tokens, &or.number_nd_tokens);
}
(lang, entry)
})
.collect();
serde_json::to_writer_pretty(json_file, &dict).unwrap();
} }
pub fn filter_datestr(string: &str) -> String { pub fn filter_datestr(string: &str) -> String {
@ -100,7 +91,20 @@ pub fn filter_datestr(string: &str) -> String {
pub fn filter_largenumstr(string: &str) -> String { pub fn filter_largenumstr(string: &str) -> String {
string string
.chars() .chars()
.filter(|c| !matches!(c, '\u{200b}' | '.' | ',') && !c.is_ascii_digit()) .filter(|c| {
!matches!(
c,
'\u{200b}'
| '\u{202b}'
| '\u{202c}'
| '\u{202e}'
| '\u{200e}'
| '\u{200f}'
| '.'
| ','
) && !c.is_ascii_digit()
})
.flat_map(char::to_lowercase)
.collect() .collect()
} }
@ -140,3 +144,63 @@ where
numbers numbers
} }
pub fn parse_largenum_en(string: &str) -> Option<u64> {
let (num, mut exp, filtered) = {
let mut buf = String::new();
let mut filtered = String::new();
let mut exp = 0;
let mut after_point = false;
for c in string.chars() {
if c.is_ascii_digit() {
buf.push(c);
if after_point {
exp -= 1;
}
} else if c == '.' {
after_point = true;
} else if !matches!(c, '\u{200b}' | '.' | ',') {
filtered.push(c);
}
}
(buf.parse::<u64>().ok()?, exp, filtered)
};
let lookup_token = |token: &str| match token {
"K" => Some(3),
"M" => Some(6),
"B" => Some(9),
_ => None,
};
exp += filtered
.split_whitespace()
.filter_map(lookup_token)
.sum::<i32>();
num.checked_mul((10_u64).checked_pow(exp.try_into().ok()?)?)
}
/// Parse textual video length (e.g. `0:49`, `2:02` or `1:48:18`)
/// and return the duration in seconds.
pub fn parse_video_length(text: &str) -> Option<u32> {
static VIDEO_LENGTH_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"(?:(\d+)[:.])?(\d{1,2})[:.](\d{2})"#).unwrap());
VIDEO_LENGTH_REGEX.captures(text).map(|cap| {
let hrs = cap
.get(1)
.and_then(|x| x.as_str().parse::<u32>().ok())
.unwrap_or_default();
let min = cap
.get(2)
.and_then(|x| x.as_str().parse::<u32>().ok())
.unwrap_or_default();
let sec = cap
.get(3)
.and_then(|x| x.as_str().parse::<u32>().ok())
.unwrap_or_default();
hrs * 3600 + min * 60 + sec
})
}

34
notes/dictionary.md Normal file
View file

@ -0,0 +1,34 @@
# Parsing localized data from YouTube
Since YouTube's API is outputting the website as it should be rendered by the client,
the data received from the API is already localized. This affects dates, times and
number formats.
To be able to successfully parse them, we need to collect samples in every language and
build a dictionary.
### Timeago
- Relative date format used for video upload dates and comments.
- Examples: "1 hour ago", "3 months ago"
### Playlist dates
- Playlist update dates are always day-accurate, either as textual dates or in the form
of "n days ago"
- Examples: "Last updated on Jan 3, 2020", "Updated today", "Updated yesterday",
"Updated 3 days ago"
### Video duration
- In Danisch ("da") video durations are formatted using dots instead of colons. Example:
"12.31", "3.03.52"
### Numbers
- Large numbers (subscriber/view counts) are rounded and shown using a decimal prefix
- Examples: "1.4M views"
- There is an exception for the value 0 ("no views") and in some languages for the value
1 (pt: "Um vídeo")
- Special case: Language "gu", "જોવાયાની સંખ્યા" = "no views", contains no unique tokens
to parse

View file

@ -179,8 +179,11 @@ impl MapResponse<Channel<Paginator<VideoItem>>> for response::Channel {
lang, lang,
)?; )?;
let mut mapper = let mut mapper = response::YouTubeListMapper::<VideoItem>::with_channel(
response::YouTubeListMapper::<VideoItem>::with_channel(lang, &channel_data); lang,
&channel_data.c,
channel_data.warnings,
);
mapper.map_response(content.content); mapper.map_response(content.content);
let p = Paginator::new_ext( let p = Paginator::new_ext(
None, None,
@ -191,7 +194,7 @@ impl MapResponse<Channel<Paginator<VideoItem>>> for response::Channel {
); );
Ok(MapResult { Ok(MapResult {
c: combine_channel_data(channel_data, p), c: combine_channel_data(channel_data.c, p),
warnings: mapper.warnings, warnings: mapper.warnings,
}) })
} }
@ -219,13 +222,16 @@ impl MapResponse<Channel<Paginator<PlaylistItem>>> for response::Channel {
lang, lang,
)?; )?;
let mut mapper = let mut mapper = response::YouTubeListMapper::<PlaylistItem>::with_channel(
response::YouTubeListMapper::<PlaylistItem>::with_channel(lang, &channel_data); lang,
&channel_data.c,
channel_data.warnings,
);
mapper.map_response(content.content); mapper.map_response(content.content);
let p = Paginator::new(None, mapper.items, mapper.ctoken); let p = Paginator::new(None, mapper.items, mapper.ctoken);
Ok(MapResult { Ok(MapResult {
c: combine_channel_data(channel_data, p), c: combine_channel_data(channel_data.c, p),
warnings: mapper.warnings, warnings: mapper.warnings,
}) })
} }
@ -266,7 +272,7 @@ impl MapResponse<Channel<ChannelInfo>> for response::Channel {
}); });
Ok(MapResult { Ok(MapResult {
c: combine_channel_data(channel_data, cinfo), c: combine_channel_data(channel_data.c, cinfo),
warnings, warnings,
}) })
} }
@ -297,7 +303,7 @@ fn map_channel(
d: MapChannelData, d: MapChannelData,
id: &str, id: &str,
lang: Language, lang: Language,
) -> Result<Channel<()>, ExtractionError> { ) -> Result<MapResult<Channel<()>>, ExtractionError> {
let header = d let header = d
.header .header
.ok_or(ExtractionError::ContentUnavailable(Cow::Borrowed( .ok_or(ExtractionError::ContentUnavailable(Cow::Borrowed(
@ -326,14 +332,16 @@ fn map_channel(
.vanity_channel_url .vanity_channel_url
.as_ref() .as_ref()
.and_then(|url| map_vanity_url(url, id)); .and_then(|url| map_vanity_url(url, id));
let mut warnings = Vec::new();
Ok(match header { Ok(MapResult {
c: match header {
response::channel::Header::C4TabbedHeaderRenderer(header) => Channel { response::channel::Header::C4TabbedHeaderRenderer(header) => Channel {
id: metadata.external_id, id: metadata.external_id,
name: metadata.title, name: metadata.title,
subscriber_count: header subscriber_count: header
.subscriber_count_text .subscriber_count_text
.and_then(|txt| util::parse_large_numstr(&txt, lang)), .and_then(|txt| util::parse_large_numstr_or_warn(&txt, lang, &mut warnings)),
avatar: header.avatar.into(), avatar: header.avatar.into(),
verification: header.badges.into(), verification: header.badges.into(),
description: metadata.description, description: metadata.description,
@ -367,10 +375,9 @@ fn map_channel(
id: metadata.external_id, id: metadata.external_id,
name: metadata.title, name: metadata.title,
subscriber_count: hdata.as_ref().and_then(|hdata| { subscriber_count: hdata.as_ref().and_then(|hdata| {
hdata hdata.0.as_ref().and_then(|txt| {
.0 util::parse_large_numstr_or_warn(txt, lang, &mut warnings)
.as_ref() })
.and_then(|txt| util::parse_large_numstr(txt, lang))
}), }),
avatar: hdata.map(|hdata| hdata.1.into()).unwrap_or_default(), avatar: hdata.map(|hdata| hdata.1.into()).unwrap_or_default(),
verification: crate::model::Verification::Verified, verification: crate::model::Verification::Verified,
@ -386,6 +393,8 @@ fn map_channel(
content: (), content: (),
} }
} }
},
warnings,
}) })
} }
@ -401,7 +410,7 @@ fn map_channel_content(
) -> Result<MappedChannelContent, ExtractionError> { ) -> Result<MappedChannelContent, ExtractionError> {
match contents { match contents {
Some(contents) => { Some(contents) => {
let tabs = contents.two_column_browse_results_renderer.tabs; let tabs = contents.two_column_browse_results_renderer.contents;
if tabs.is_empty() { if tabs.is_empty() {
return Err(ExtractionError::ContentUnavailable( return Err(ExtractionError::ContentUnavailable(
"channel not found".into(), "channel not found".into(),

View file

@ -269,7 +269,7 @@ fn map_artist_page(
} }
} }
let mapped = mapper.group_items(); let mut mapped = mapper.group_items();
static WIKIPEDIA_REGEX: Lazy<Regex> = static WIKIPEDIA_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new(r"\(?https://[a-z\d-]+\.wikipedia.org/wiki/[^\s]+").unwrap()); Lazy::new(|| Regex::new(r"\(?https://[a-z\d-]+\.wikipedia.org/wiki/[^\s]+").unwrap());
@ -302,9 +302,10 @@ fn map_artist_page(
description: header.description, description: header.description,
wikipedia_url, wikipedia_url,
subscriber_count: header.subscription_button.and_then(|btn| { subscriber_count: header.subscription_button.and_then(|btn| {
util::parse_large_numstr( util::parse_large_numstr_or_warn(
&btn.subscribe_button_renderer.subscriber_count_text, &btn.subscribe_button_renderer.subscriber_count_text,
lang, lang,
&mut mapped.warnings,
) )
}), }),
tracks: mapped.c.tracks, tracks: mapped.c.tracks,

View file

@ -207,22 +207,25 @@ impl MapResponse<TrackDetails> for response::MusicDetails {
response::music_item::PlaylistPanelVideo::None => None, response::music_item::PlaylistPanelVideo::None => None,
}) })
.ok_or(ExtractionError::InvalidData(Cow::Borrowed("no video item")))?; .ok_or(ExtractionError::InvalidData(Cow::Borrowed("no video item")))?;
let track = map_queue_item(track_item, lang); let mut track = map_queue_item(track_item, lang);
if track.id != id { if track.c.id != id {
return Err(ExtractionError::WrongResult(format!( return Err(ExtractionError::WrongResult(format!(
"got wrong video id {}, expected {}", "got wrong video id {}, expected {}",
track.id, id track.c.id, id
))); )));
} }
let mut warnings = content.contents.warnings;
warnings.append(&mut track.warnings);
Ok(MapResult { Ok(MapResult {
c: TrackDetails { c: TrackDetails {
track, track: track.c,
lyrics_id, lyrics_id,
related_id, related_id,
}, },
warnings: content.contents.warnings, warnings,
}) })
} }
} }
@ -251,13 +254,17 @@ impl MapResponse<Paginator<TrackItem>> for response::MusicDetails {
.content .content
.playlist_panel_renderer; .playlist_panel_renderer;
let mut warnings = content.contents.warnings;
let tracks = content let tracks = content
.contents .contents
.c .c
.into_iter() .into_iter()
.filter_map(|item| match item { .filter_map(|item| match item {
response::music_item::PlaylistPanelVideo::PlaylistPanelVideoRenderer(item) => { response::music_item::PlaylistPanelVideo::PlaylistPanelVideoRenderer(item) => {
Some(map_queue_item(item, lang)) let mut track = map_queue_item(item, lang);
warnings.append(&mut track.warnings);
Some(track.c)
} }
response::music_item::PlaylistPanelVideo::None => None, response::music_item::PlaylistPanelVideo::None => None,
}) })
@ -277,7 +284,7 @@ impl MapResponse<Paginator<TrackItem>> for response::MusicDetails {
None, None,
crate::model::paginator::ContinuationEndpoint::MusicNext, crate::model::paginator::ContinuationEndpoint::MusicNext,
), ),
warnings: content.contents.warnings, warnings,
}) })
} }
} }

View file

@ -81,7 +81,7 @@ impl MapResponse<Vec<MusicGenreItem>> for response::MusicGenres {
let genres = content_iter let genres = content_iter
.enumerate() .enumerate()
.flat_map(|(i, grid)| { .flat_map(|(i, grid)| {
let mut grid = grid.grid_renderer.items; let mut grid = grid.grid_renderer.contents;
warnings.append(&mut grid.warnings); warnings.append(&mut grid.warnings);
grid.c.into_iter().filter_map(move |section| match section { grid.c.into_iter().filter_map(move |section| match section {
response::music_genres::NavigationButton::MusicNavigationButtonRenderer( response::music_genres::NavigationButton::MusicNavigationButtonRenderer(

View file

@ -4,7 +4,7 @@ use crate::{
error::{Error, ExtractionError}, error::{Error, ExtractionError},
model::{paginator::Paginator, AlbumId, ChannelId, MusicAlbum, MusicPlaylist, TrackItem}, model::{paginator::Paginator, AlbumId, ChannelId, MusicAlbum, MusicPlaylist, TrackItem},
serializer::MapResult, serializer::MapResult,
util::{self, TryRemove}, util::{self, TryRemove, DOT_SEPARATOR},
}; };
use super::{ use super::{
@ -160,14 +160,19 @@ impl MapResponse<MusicPlaylist> for response::MusicPlaylist {
.try_swap_remove(0) .try_swap_remove(0)
.map(|cont| cont.next_continuation_data.continuation); .map(|cont| cont.next_continuation_data.continuation);
let track_count = match ctoken { let track_count = if ctoken.is_some() {
Some(_) => self.header.as_ref().and_then(|h| { self.header.as_ref().and_then(|h| {
h.music_detail_header_renderer let parts = h
.music_detail_header_renderer
.second_subtitle .second_subtitle
.first() .split(|p| p == DOT_SEPARATOR)
.and_then(|txt| util::parse_numeric::<u64>(txt).ok()) .collect::<Vec<_>>();
}), parts
None => Some(map_res.c.len() as u64), .get(if parts.len() > 2 { 1 } else { 0 })
.and_then(|txt| util::parse_numeric::<u64>(&txt[0]).ok())
})
} else {
Some(map_res.c.len() as u64)
}; };
let related_ctoken = music_contents let related_ctoken = music_contents
@ -179,11 +184,7 @@ impl MapResponse<MusicPlaylist> for response::MusicPlaylist {
Some(header) => { Some(header) => {
let h = header.music_detail_header_renderer; let h = header.music_detail_header_renderer;
let from_ytm = h let from_ytm = h.subtitle.0.iter().any(util::is_ytm);
.subtitle
.0
.iter()
.any(|c| c.as_str() == util::YT_MUSIC_NAME);
let channel = h let channel = h
.subtitle .subtitle
.0 .0

View file

@ -157,7 +157,9 @@ impl MapResponse<Paginator<MusicItem>> for response::MusicContinuation {
mapper.add_warnings(&mut panel.contents.warnings); mapper.add_warnings(&mut panel.contents.warnings);
panel.contents.c.into_iter().for_each(|item| { panel.contents.c.into_iter().for_each(|item| {
if let PlaylistPanelVideo::PlaylistPanelVideoRenderer(item) = item { if let PlaylistPanelVideo::PlaylistPanelVideoRenderer(item) = item {
mapper.add_item(MusicItem::Track(map_queue_item(item, lang))) let mut track = map_queue_item(item, lang);
mapper.add_item(MusicItem::Track(track.c));
mapper.add_warnings(&mut track.warnings);
} }
}); });
} }

View file

@ -376,33 +376,24 @@ fn map_url(
deobf: &Deobfuscator, deobf: &Deobfuscator,
last_nsig: &mut [String; 2], last_nsig: &mut [String; 2],
) -> MapResult<Option<(String, bool)>> { ) -> MapResult<Option<(String, bool)>> {
let (url_base, mut url_params) = match url { let x = match url {
Some(url) => ok_or_bail!( Some(url) => util::url_to_params(url).map_err(|_| format!("Could not parse url `{url}`")),
util::url_to_params(url),
MapResult {
c: None,
warnings: vec![format!("Could not parse url `{url}`")]
}
),
None => match signature_cipher { None => match signature_cipher {
Some(signature_cipher) => match cipher_to_url_params(signature_cipher, deobf) { Some(signature_cipher) => cipher_to_url_params(signature_cipher, deobf).map_err(|e| {
Ok(res) => res, format!("Could not deobfuscate signatureCipher `{signature_cipher}`: {e}")
}),
None => Err("stream contained neither url or cipher".to_owned()),
},
};
let (url_base, mut url_params) = match x {
Ok(x) => x,
Err(e) => { Err(e) => {
return MapResult { return MapResult {
c: None, c: None,
warnings: vec![format!( warnings: vec![e],
"Could not deobfuscate signatureCipher `{signature_cipher}`: {e}"
)],
};
}
},
None => {
return MapResult {
c: None,
warnings: vec!["stream contained neither url nor cipher".to_owned()],
} }
} }
},
}; };
let mut warnings = vec![]; let mut warnings = vec![];
@ -414,21 +405,17 @@ fn map_url(
throttled = true; throttled = true;
}); });
MapResult { match Url::parse_with_params(url_base.as_str(), url_params.iter()) {
c: Some(( Ok(url) => MapResult {
ok_or_bail!( c: Some((url.to_string(), throttled)),
Url::parse_with_params(url_base.as_str(), url_params.iter()), warnings,
MapResult { },
Err(_) => MapResult {
c: None, c: None,
warnings: vec![format!( warnings: vec![format!(
"url could not be joined. url: `{url_base}` params: {url_params:?}" "url could not be joined. url: `{url_base}` params: {url_params:?}"
)], )],
} },
)
.to_string(),
throttled,
)),
warnings,
} }
} }
@ -437,16 +424,27 @@ fn map_video_stream(
deobf: &Deobfuscator, deobf: &Deobfuscator,
last_nsig: &mut [String; 2], last_nsig: &mut [String; 2],
) -> MapResult<Option<VideoStream>> { ) -> MapResult<Option<VideoStream>> {
let (mtype, codecs) = some_or_bail!( let (mtype, codecs) = match parse_mime(&f.mime_type) {
parse_mime(&f.mime_type), Some(x) => x,
MapResult { None => {
return MapResult {
c: None, c: None,
warnings: vec![format!( warnings: vec![format!(
"Invalid mime type `{}` in video format {:?}", "Invalid mime type `{}` in video format {:?}",
&f.mime_type, &f &f.mime_type, &f
)] )],
} }
); }
};
let format = match get_video_format(mtype) {
Some(f) => f,
None => {
return MapResult {
c: None,
warnings: vec![format!("invalid video format. itag: {}", f.itag)],
}
}
};
let map_res = map_url(&f.url, &f.signature_cipher, deobf, last_nsig); let map_res = map_url(&f.url, &f.signature_cipher, deobf, last_nsig);
match map_res.c { match map_res.c {
@ -469,13 +467,7 @@ fn map_video_stream(
hdr: f.color_info.unwrap_or_default().primaries hdr: f.color_info.unwrap_or_default().primaries
== player::Primaries::ColorPrimariesBt2020, == player::Primaries::ColorPrimariesBt2020,
mime: f.mime_type.to_owned(), mime: f.mime_type.to_owned(),
format: some_or_bail!( format,
get_video_format(mtype),
MapResult {
c: None,
warnings: vec![format!("invalid video format. itag: {}", f.itag)]
}
),
codec: get_video_codec(codecs), codec: get_video_codec(codecs),
throttled, throttled,
}), }),
@ -495,16 +487,27 @@ fn map_audio_stream(
) -> MapResult<Option<AudioStream>> { ) -> MapResult<Option<AudioStream>> {
static LANG_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r#"^([a-z]{2,3})\."#).unwrap()); static LANG_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r#"^([a-z]{2,3})\."#).unwrap());
let (mtype, codecs) = some_or_bail!( let (mtype, codecs) = match parse_mime(&f.mime_type) {
parse_mime(&f.mime_type), Some(x) => x,
MapResult { None => {
return MapResult {
c: None, c: None,
warnings: vec![format!( warnings: vec![format!(
"Invalid mime type `{}` in video format {:?}", "Invalid mime type `{}` in video format {:?}",
&f.mime_type, &f &f.mime_type, &f
)] )],
} }
); }
};
let format = match get_audio_format(mtype) {
Some(f) => f,
None => {
return MapResult {
c: None,
warnings: vec![format!("invalid audio format. itag: {}", f.itag)],
}
}
};
let map_res = map_url(&f.url, &f.signature_cipher, deobf, last_nsig); let map_res = map_url(&f.url, &f.signature_cipher, deobf, last_nsig);
match map_res.c { match map_res.c {
@ -519,13 +522,7 @@ fn map_audio_stream(
init_range: f.init_range, init_range: f.init_range,
duration_ms: f.approx_duration_ms, duration_ms: f.approx_duration_ms,
mime: f.mime_type.to_owned(), mime: f.mime_type.to_owned(),
format: some_or_bail!( format,
get_audio_format(mtype),
MapResult {
c: None,
warnings: vec![format!("invalid audio format. itag: {}", f.itag)]
}
),
codec: get_audio_codec(codecs), codec: get_audio_codec(codecs),
channels: f.audio_channels, channels: f.audio_channels,
loudness_db: f.loudness_db, loudness_db: f.loudness_db,
@ -559,7 +556,7 @@ fn parse_mime(mime: &str) -> Option<(&str, Vec<&str>)> {
static PATTERN: Lazy<Regex> = static PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"(\w+/\w+);\scodecs="([a-zA-Z-0-9.,\s]*)""#).unwrap()); Lazy::new(|| Regex::new(r#"(\w+/\w+);\scodecs="([a-zA-Z-0-9.,\s]*)""#).unwrap());
let captures = some_or_bail!(PATTERN.captures(mime), None); let captures = PATTERN.captures(mime)?;
Some(( Some((
captures.get(1).unwrap().as_str(), captures.get(1).unwrap().as_str(),
captures captures

View file

@ -5,8 +5,7 @@ use time::OffsetDateTime;
use crate::{ use crate::{
error::{Error, ExtractionError}, error::{Error, ExtractionError},
model::{paginator::Paginator, ChannelId, Playlist, PlaylistVideo}, model::{paginator::Paginator, ChannelId, Playlist, PlaylistVideo},
timeago, util::{self, timeago, TryRemove},
util::{self, TryRemove},
}; };
use super::{response, ClientType, MapResponse, MapResult, QBrowse, QContinuation, RustyPipeQuery}; use super::{response, ClientType, MapResponse, MapResult, QBrowse, QContinuation, RustyPipeQuery};
@ -94,7 +93,7 @@ impl MapResponse<Playlist> for response::Playlist {
let (thumbnails, last_update_txt) = match self.sidebar { let (thumbnails, last_update_txt) = match self.sidebar {
Some(sidebar) => { Some(sidebar) => {
let mut sidebar_items = sidebar.playlist_sidebar_renderer.items; let mut sidebar_items = sidebar.playlist_sidebar_renderer.contents;
let mut primary = let mut primary =
sidebar_items sidebar_items
.try_swap_remove(0) .try_swap_remove(0)

View file

@ -3,7 +3,7 @@ use serde_with::{rust::deserialize_ignore_any, serde_as, DefaultOnError, VecSkip
use super::{ use super::{
video_item::YouTubeListRenderer, Alert, ChannelBadge, ContentsRenderer, ResponseContext, video_item::YouTubeListRenderer, Alert, ChannelBadge, ContentsRenderer, ResponseContext,
Thumbnails, Thumbnails, TwoColumnBrowseResults,
}; };
use crate::serializer::text::Text; use crate::serializer::text::Text;
@ -22,21 +22,7 @@ pub(crate) struct Channel {
pub response_context: ResponseContext, pub response_context: ResponseContext,
} }
#[derive(Debug, Deserialize)] pub(crate) type Contents = TwoColumnBrowseResults<TabRendererWrap>;
#[serde(rename_all = "camelCase")]
pub(crate) struct Contents {
pub two_column_browse_results_renderer: TabsRenderer,
}
/// YouTube channel tab view. Contains multiple tabs
/// (Home, Videos, Playlists, About...). We can ignore unknown tabs.
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct TabsRenderer {
#[serde_as(as = "VecSkipError<_>")]
pub tabs: Vec<TabRendererWrap>,
}
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]

View file

@ -47,12 +47,17 @@ pub(crate) mod channel_rss;
#[cfg(feature = "rss")] #[cfg(feature = "rss")]
pub(crate) use channel_rss::ChannelRss; pub(crate) use channel_rss::ChannelRss;
use serde::Deserialize; use std::borrow::Cow;
use std::marker::PhantomData;
use serde::{
de::{IgnoredAny, Visitor},
Deserialize,
};
use serde_with::{json::JsonString, serde_as, VecSkipError}; use serde_with::{json::JsonString, serde_as, VecSkipError};
use crate::error::ExtractionError; use crate::error::ExtractionError;
use crate::serializer::MapResult; use crate::serializer::{text::Text, MapResult, VecSkipErrorWrap};
use crate::serializer::{text::Text, VecLogError};
use self::video_item::YouTubeListRenderer; use self::video_item::YouTubeListRenderer;
@ -62,13 +67,17 @@ pub(crate) struct ContentRenderer<T> {
pub content: T, pub content: T,
} }
#[derive(Debug, Deserialize)] #[derive(Debug)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ContentsRenderer<T> { pub(crate) struct ContentsRenderer<T> {
#[serde(alias = "tabs")]
pub contents: Vec<T>, pub contents: Vec<T>,
} }
#[derive(Debug, Deserialize)]
pub(crate) struct ContentsRendererLogged<T> {
#[serde(alias = "items")]
pub contents: MapResult<Vec<T>>,
}
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct Tab<T> { pub(crate) struct Tab<T> {
@ -81,6 +90,12 @@ pub(crate) struct SectionList<T> {
pub section_list_renderer: ContentsRenderer<T>, pub section_list_renderer: ContentsRenderer<T>,
} }
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct TwoColumnBrowseResults<T> {
pub two_column_browse_results_renderer: ContentsRenderer<T>,
}
#[derive(Default, Debug, Deserialize)] #[derive(Default, Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct ThumbnailsWrap { pub(crate) struct ThumbnailsWrap {
@ -207,11 +222,9 @@ pub(crate) struct ContinuationActionWrap {
pub append_continuation_items_action: ContinuationAction, pub append_continuation_items_action: ContinuationAction,
} }
#[serde_as]
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct ContinuationAction { pub(crate) struct ContinuationAction {
#[serde_as(as = "VecLogError<_>")]
pub continuation_items: MapResult<Vec<YouTubeListItem>>, pub continuation_items: MapResult<Vec<YouTubeListItem>>,
} }
@ -248,9 +261,53 @@ pub(crate) struct ErrorResponseContent {
pub message: String, pub message: String,
} }
/* // DESERIALIZER
#MAPPING
*/ impl<'de, T> Deserialize<'de> for ContentsRenderer<T>
where
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
struct ItemVisitor<T>(PhantomData<T>);
impl<'de, T> Visitor<'de> for ItemVisitor<T>
where
T: Deserialize<'de>,
{
type Value = ContentsRenderer<T>;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("map")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: serde::de::MapAccess<'de>,
{
let mut contents = None;
while let Some(k) = map.next_key::<Cow<'de, str>>()? {
if k == "contents" || k == "tabs" || k == "items" {
contents = Some(ContentsRenderer {
contents: map.next_value::<VecSkipErrorWrap<T>>()?.0,
});
} else {
map.next_value::<IgnoredAny>()?;
}
}
contents.ok_or(serde::de::Error::missing_field("contents"))
}
}
deserializer.deserialize_map(ItemVisitor(PhantomData::<T>))
}
}
// MAPPING
impl From<Thumbnail> for crate::model::Thumbnail { impl From<Thumbnail> for crate::model::Thumbnail {
fn from(tn: Thumbnail) -> Self { fn from(tn: Thumbnail) -> Self {

View file

@ -1,12 +1,12 @@
use serde::Deserialize; use serde::Deserialize;
use serde_with::{rust::deserialize_ignore_any, serde_as}; use serde_with::{rust::deserialize_ignore_any, serde_as};
use crate::serializer::{text::Text, MapResult, VecLogError}; use crate::serializer::text::Text;
use super::{ use super::{
music_item::{ItemSection, SimpleHeader, SingleColumnBrowseResult}, music_item::{ItemSection, SimpleHeader, SingleColumnBrowseResult},
url_endpoint::BrowseEndpointWrap, url_endpoint::BrowseEndpointWrap,
SectionList, Tab, ContentsRendererLogged, SectionList, Tab,
}; };
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
@ -18,15 +18,7 @@ pub(crate) struct MusicGenres {
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct Grid { pub(crate) struct Grid {
pub grid_renderer: GridRenderer, pub grid_renderer: ContentsRendererLogged<NavigationButton>,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct GridRenderer {
#[serde_as(as = "VecLogError<_>")]
pub items: MapResult<Vec<NavigationButton>>,
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]

View file

@ -9,7 +9,7 @@ use crate::{
param::Language, param::Language,
serializer::{ serializer::{
text::{Text, TextComponents}, text::{Text, TextComponents},
MapResult, VecLogError, MapResult,
}, },
util::{self, dictionary, TryRemove}, util::{self, dictionary, TryRemove},
}; };
@ -39,7 +39,6 @@ pub(crate) enum ItemSection {
pub(crate) struct MusicShelf { pub(crate) struct MusicShelf {
/// Playlist ID (only for playlists) /// Playlist ID (only for playlists)
pub playlist_id: Option<String>, pub playlist_id: Option<String>,
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<MusicResponseItem>>, pub contents: MapResult<Vec<MusicResponseItem>>,
/// Continuation token for fetching more (>100) playlist items /// Continuation token for fetching more (>100) playlist items
#[serde(default)] #[serde(default)]
@ -53,12 +52,10 @@ pub(crate) struct MusicShelf {
/// MusicCarouselShelf represents a horizontal list of music items displayed with /// MusicCarouselShelf represents a horizontal list of music items displayed with
/// large covers. /// large covers.
#[serde_as]
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct MusicCarouselShelf { pub(crate) struct MusicCarouselShelf {
pub header: Option<MusicCarouselShelfHeader>, pub header: Option<MusicCarouselShelfHeader>,
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<MusicResponseItem>>, pub contents: MapResult<Vec<MusicResponseItem>>,
} }
@ -76,7 +73,6 @@ pub(crate) struct MusicCardShelf {
#[serde(default)] #[serde(default)]
pub thumbnail: MusicThumbnailRenderer, pub thumbnail: MusicThumbnailRenderer,
#[serde(default)] #[serde(default)]
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<MusicResponseItem>>, pub contents: MapResult<Vec<MusicResponseItem>>,
} }
@ -227,7 +223,6 @@ pub(crate) struct CoverMusicItem {
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct PlaylistPanelRenderer { pub(crate) struct PlaylistPanelRenderer {
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<PlaylistPanelVideo>>, pub contents: MapResult<Vec<PlaylistPanelVideo>>,
/// Continuation token for fetching more radio items /// Continuation token for fetching more radio items
#[serde(default)] #[serde(default)]
@ -362,15 +357,7 @@ pub(crate) struct ButtonRenderer {
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct MusicItemMenu { pub(crate) struct MusicItemMenu {
pub menu_renderer: MusicItemMenuRenderer, pub menu_renderer: ContentsRenderer<MusicItemMenuEntry>,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct MusicItemMenuRenderer {
#[serde_as(as = "VecSkipError<_>")]
pub items: Vec<MusicItemMenuEntry>,
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
@ -385,11 +372,9 @@ pub(crate) struct Grid {
pub grid_renderer: GridRenderer, pub grid_renderer: GridRenderer,
} }
#[serde_as]
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct GridRenderer { pub(crate) struct GridRenderer {
#[serde_as(as = "VecLogError<_>")]
pub items: MapResult<Vec<MusicResponseItem>>, pub items: MapResult<Vec<MusicResponseItem>>,
pub header: Option<GridHeader>, pub header: Option<GridHeader>,
} }
@ -587,7 +572,9 @@ impl MusicListMapper {
(subtitle_parts.rev().next(), None, None) (subtitle_parts.rev().next(), None, None)
} else { } else {
// Skip first part (track type) // Skip first part (track type)
if subtitle_parts.len() > 3 { if subtitle_parts.len() > 3
|| (is_video && subtitle_parts.len() == 2)
{
subtitle_parts.next(); subtitle_parts.next();
} }
@ -618,7 +605,11 @@ impl MusicListMapper {
(FlexColumnDisplayStyle::TwoLines, true) => ( (FlexColumnDisplayStyle::TwoLines, true) => (
None, None,
album_p.and_then(|p| { album_p.and_then(|p| {
util::parse_large_numstr(p.first_str(), self.lang) util::parse_large_numstr_or_warn(
p.first_str(),
self.lang,
&mut self.warnings,
)
}), }),
), ),
(_, false) => ( (_, false) => (
@ -692,7 +683,11 @@ impl MusicListMapper {
match page_type { match page_type {
MusicPageType::Artist => { MusicPageType::Artist => {
let subscriber_count = subtitle_p2.and_then(|p| { let subscriber_count = subtitle_p2.and_then(|p| {
util::parse_large_numstr(p.first_str(), self.lang) util::parse_large_numstr_or_warn(
p.first_str(),
self.lang,
&mut self.warnings,
)
}); });
self.items.push(MusicItem::Artist(ArtistItem { self.items.push(MusicItem::Artist(ArtistItem {
@ -736,7 +731,8 @@ impl MusicListMapper {
let from_ytm = channel_p let from_ytm = channel_p
.as_ref() .as_ref()
.map(|p| p.first_str() == util::YT_MUSIC_NAME) .and_then(|p| p.0.first())
.map(util::is_ytm)
.unwrap_or_default(); .unwrap_or_default();
let channel = channel_p.and_then(|p| { let channel = channel_p.and_then(|p| {
p.0.into_iter().find_map(|c| ChannelId::try_from(c).ok()) p.0.into_iter().find_map(|c| ChannelId::try_from(c).ok())
@ -792,7 +788,11 @@ impl MusicListMapper {
artists, artists,
album: None, album: None,
view_count: subtitle_p2.and_then(|c| { view_count: subtitle_p2.and_then(|c| {
util::parse_large_numstr(c.first_str(), self.lang) util::parse_large_numstr_or_warn(
c.first_str(),
self.lang,
&mut self.warnings,
)
}), }),
is_video, is_video,
track_nr: None, track_nr: None,
@ -801,8 +801,13 @@ impl MusicListMapper {
Ok(Some(MusicItemType::Track)) Ok(Some(MusicItemType::Track))
} }
MusicPageType::Artist => { MusicPageType::Artist => {
let subscriber_count = subtitle_p1 let subscriber_count = subtitle_p1.and_then(|p| {
.and_then(|p| util::parse_large_numstr(p.first_str(), self.lang)); util::parse_large_numstr_or_warn(
p.first_str(),
self.lang,
&mut self.warnings,
)
});
self.items.push(MusicItem::Artist(ArtistItem { self.items.push(MusicItem::Artist(ArtistItem {
id, id,
@ -868,7 +873,8 @@ impl MusicListMapper {
// (featured on the startpage or in genres) // (featured on the startpage or in genres)
let from_ytm = subtitle_p2 let from_ytm = subtitle_p2
.as_ref() .as_ref()
.map(|p| p.first_str() == util::YT_MUSIC_NAME) .and_then(|p| p.0.first())
.map(util::is_ytm)
.unwrap_or(true); .unwrap_or(true);
let channel = subtitle_p2.and_then(|p| { let channel = subtitle_p2.and_then(|p| {
p.0.into_iter().find_map(|c| ChannelId::try_from(c).ok()) p.0.into_iter().find_map(|c| ChannelId::try_from(c).ok())
@ -927,8 +933,13 @@ impl MusicListMapper {
let item_type = match card.on_tap.music_page() { let item_type = match card.on_tap.music_page() {
Some((page_type, id)) => match page_type { Some((page_type, id)) => match page_type {
MusicPageType::Artist => { MusicPageType::Artist => {
let subscriber_count = subtitle_p2 let subscriber_count = subtitle_p2.and_then(|p| {
.and_then(|p| util::parse_large_numstr(p.first_str(), self.lang)); util::parse_large_numstr_or_warn(
p.first_str(),
self.lang,
&mut self.warnings,
)
});
self.items.push(MusicItem::Artist(ArtistItem { self.items.push(MusicItem::Artist(ArtistItem {
id, id,
@ -963,8 +974,13 @@ impl MusicListMapper {
let (album, view_count) = if is_video { let (album, view_count) = if is_video {
( (
None, None,
subtitle_p3 subtitle_p3.and_then(|p| {
.and_then(|p| util::parse_large_numstr(p.first_str(), self.lang)), util::parse_large_numstr_or_warn(
p.first_str(),
self.lang,
&mut self.warnings,
)
}),
) )
} else { } else {
( (
@ -993,7 +1009,8 @@ impl MusicListMapper {
MusicPageType::Playlist => { MusicPageType::Playlist => {
let from_ytm = subtitle_p2 let from_ytm = subtitle_p2
.as_ref() .as_ref()
.map(|p| p.first_str() == util::YT_MUSIC_NAME) .and_then(|p| p.0.first())
.map(util::is_ytm)
.unwrap_or(true); .unwrap_or(true);
let channel = subtitle_p2 let channel = subtitle_p2
.and_then(|p| p.0.into_iter().find_map(|c| ChannelId::try_from(c).ok())); .and_then(|p| p.0.into_iter().find_map(|c| ChannelId::try_from(c).ok()));
@ -1118,7 +1135,7 @@ fn map_artist_id_fallback(
menu: Option<MusicItemMenu>, menu: Option<MusicItemMenu>,
fallback_artist: Option<&ArtistId>, fallback_artist: Option<&ArtistId>,
) -> Option<String> { ) -> Option<String> {
menu.and_then(|m| map_artist_id(m.menu_renderer.items)) menu.and_then(|m| map_artist_id(m.menu_renderer.contents))
.or_else(|| fallback_artist.and_then(|a| a.id.to_owned())) .or_else(|| fallback_artist.and_then(|a| a.id.to_owned()))
} }
@ -1149,7 +1166,8 @@ pub(crate) fn map_album_type(txt: &str, lang: Language) -> AlbumType {
.unwrap_or_default() .unwrap_or_default()
} }
pub(crate) fn map_queue_item(item: QueueMusicItem, lang: Language) -> TrackItem { pub(crate) fn map_queue_item(item: QueueMusicItem, lang: Language) -> MapResult<TrackItem> {
let mut warnings = Vec::new();
let mut subtitle_parts = item.long_byline_text.split(util::DOT_SEPARATOR).into_iter(); let mut subtitle_parts = item.long_byline_text.split(util::DOT_SEPARATOR).into_iter();
let is_video = !item let is_video = !item
@ -1167,7 +1185,8 @@ pub(crate) fn map_queue_item(item: QueueMusicItem, lang: Language) -> TrackItem
let (album, view_count) = if is_video { let (album, view_count) = if is_video {
( (
None, None,
subtitle_p2.and_then(|p| util::parse_large_numstr(p.first_str(), lang)), subtitle_p2
.and_then(|p| util::parse_large_numstr_or_warn(p.first_str(), lang, &mut warnings)),
) )
} else { } else {
( (
@ -1176,7 +1195,8 @@ pub(crate) fn map_queue_item(item: QueueMusicItem, lang: Language) -> TrackItem
) )
}; };
TrackItem { MapResult {
c: TrackItem {
id: item.video_id, id: item.video_id,
name: item.title, name: item.title,
duration: item duration: item
@ -1190,6 +1210,8 @@ pub(crate) fn map_queue_item(item: QueueMusicItem, lang: Language) -> TrackItem
is_video, is_video,
track_nr: None, track_nr: None,
by_va, by_va,
},
warnings,
} }
} }

View file

@ -58,6 +58,8 @@ pub(crate) struct HeaderRenderer {
/// Missing on artist_tracks view. /// Missing on artist_tracks view.
/// ///
/// `"64 songs", " • ", "3 hours, 40 minutes"` /// `"64 songs", " • ", "3 hours, 40 minutes"`
///
/// `"1B views", " • ", "200 songs", " • ", "6+ hours"`
#[serde(default)] #[serde(default)]
#[serde_as(as = "Text")] #[serde_as(as = "Text")]
pub second_subtitle: Vec<String>, pub second_subtitle: Vec<String>,

View file

@ -5,7 +5,7 @@ use serde_with::serde_as;
use serde_with::{json::JsonString, DefaultOnError}; use serde_with::{json::JsonString, DefaultOnError};
use super::{ResponseContext, Thumbnails}; use super::{ResponseContext, Thumbnails};
use crate::serializer::{text::Text, MapResult, VecLogError}; use crate::serializer::{text::Text, MapResult};
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
@ -75,10 +75,8 @@ pub(crate) struct StreamingData {
#[serde_as(as = "JsonString")] #[serde_as(as = "JsonString")]
pub expires_in_seconds: u32, pub expires_in_seconds: u32,
#[serde(default)] #[serde(default)]
#[serde_as(as = "VecLogError<_>")]
pub formats: MapResult<Vec<Format>>, pub formats: MapResult<Vec<Format>>,
#[serde(default)] #[serde(default)]
#[serde_as(as = "VecLogError<_>")]
pub adaptive_formats: MapResult<Vec<Format>>, pub adaptive_formats: MapResult<Vec<Format>>,
/// Only on livestreams /// Only on livestreams
pub dash_manifest_url: Option<String>, pub dash_manifest_url: Option<String>,

View file

@ -3,20 +3,22 @@ use serde_with::{
json::JsonString, rust::deserialize_ignore_any, serde_as, DefaultOnError, VecSkipError, json::JsonString, rust::deserialize_ignore_any, serde_as, DefaultOnError, VecSkipError,
}; };
use crate::serializer::text::{Text, TextComponent}; use crate::serializer::{
use crate::serializer::{MapResult, VecLogError}; text::{Text, TextComponent},
MapResult,
};
use crate::util::MappingError; use crate::util::MappingError;
use super::{ use super::{
Alert, ContentsRenderer, ContinuationEndpoint, ResponseContext, SectionList, Tab, Thumbnails, Alert, ContentsRenderer, ContinuationEndpoint, ResponseContext, SectionList, Tab, Thumbnails,
ThumbnailsWrap, ThumbnailsWrap, TwoColumnBrowseResults,
}; };
#[serde_as] #[serde_as]
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct Playlist { pub(crate) struct Playlist {
pub contents: Option<Contents>, pub contents: Option<TwoColumnBrowseResults<Tab<SectionList<ItemSection>>>>,
pub header: Option<Header>, pub header: Option<Header>,
pub sidebar: Option<Sidebar>, pub sidebar: Option<Sidebar>,
#[serde_as(as = "Option<DefaultOnError>")] #[serde_as(as = "Option<DefaultOnError>")]
@ -33,12 +35,6 @@ pub(crate) struct PlaylistCont {
pub on_response_received_actions: Vec<OnResponseReceivedAction>, pub on_response_received_actions: Vec<OnResponseReceivedAction>,
} }
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct Contents {
pub two_column_browse_results_renderer: ContentsRenderer<Tab<SectionList<ItemSection>>>,
}
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct ItemSection { pub(crate) struct ItemSection {
@ -51,11 +47,9 @@ pub(crate) struct PlaylistVideoListRenderer {
pub playlist_video_list_renderer: PlaylistVideoList, pub playlist_video_list_renderer: PlaylistVideoList,
} }
#[serde_as]
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct PlaylistVideoList { pub(crate) struct PlaylistVideoList {
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<PlaylistItem>>, pub contents: MapResult<Vec<PlaylistItem>>,
} }
@ -108,15 +102,7 @@ pub(crate) struct BylineRenderer {
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct Sidebar { pub(crate) struct Sidebar {
pub playlist_sidebar_renderer: SidebarRenderer, pub playlist_sidebar_renderer: ContentsRenderer<SidebarItemPrimary>,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct SidebarRenderer {
#[serde_as(as = "VecSkipError<_>")]
pub items: Vec<SidebarItemPrimary>,
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
@ -199,10 +185,8 @@ pub(crate) struct OnResponseReceivedAction {
pub append_continuation_items_action: AppendAction, pub append_continuation_items_action: AppendAction,
} }
#[serde_as]
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct AppendAction { pub(crate) struct AppendAction {
#[serde_as(as = "VecLogError<_>")]
pub continuation_items: MapResult<Vec<PlaylistItem>>, pub continuation_items: MapResult<Vec<PlaylistItem>>,
} }

View file

@ -1,4 +1,7 @@
use serde::{de::IgnoredAny, Deserialize}; use serde::{
de::{IgnoredAny, Visitor},
Deserialize,
};
use serde_with::{json::JsonString, serde_as}; use serde_with::{json::JsonString, serde_as};
use super::{video_item::YouTubeListRendererWrap, ResponseContext}; use super::{video_item::YouTubeListRendererWrap, ResponseContext};
@ -26,8 +29,40 @@ pub(crate) struct TwoColumnSearchResultsRenderer {
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
pub(crate) struct SearchSuggestion( pub(crate) struct SearchSuggestion(IgnoredAny, pub Vec<SearchSuggestionItem>, IgnoredAny);
IgnoredAny,
pub Vec<(String, IgnoredAny, IgnoredAny)>, #[derive(Debug)]
IgnoredAny, pub(crate) struct SearchSuggestionItem(pub String);
);
impl<'de> Deserialize<'de> for SearchSuggestionItem {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
struct ItemVisitor;
impl<'de> Visitor<'de> for ItemVisitor {
type Value = SearchSuggestionItem;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("search suggestion item")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: serde::de::SeqAccess<'de>,
{
match seq.next_element::<String>()? {
Some(s) => {
// Ignore the rest of the list
while seq.next_element::<IgnoredAny>()?.is_some() {}
Ok(SearchSuggestionItem(s))
}
None => Err(serde::de::Error::invalid_length(0, &"1")),
}
}
}
deserializer.deserialize_seq(ItemVisitor)
}
}

View file

@ -1,7 +1,6 @@
use serde::Deserialize; use serde::Deserialize;
use serde_with::{serde_as, VecSkipError};
use super::{video_item::YouTubeListRendererWrap, ResponseContext, Tab}; use super::{video_item::YouTubeListRendererWrap, ResponseContext, Tab, TwoColumnBrowseResults};
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
@ -16,16 +15,4 @@ pub(crate) struct Trending {
pub contents: Contents, pub contents: Contents,
} }
#[derive(Debug, Deserialize)] type Contents = TwoColumnBrowseResults<Tab<YouTubeListRendererWrap>>;
#[serde(rename_all = "camelCase")]
pub(crate) struct Contents {
pub two_column_browse_results_renderer: BrowseResults,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct BrowseResults {
#[serde_as(as = "VecSkipError<_>")]
pub tabs: Vec<Tab<YouTubeListRendererWrap>>,
}

View file

@ -6,21 +6,20 @@ use serde_with::{rust::deserialize_ignore_any, serde_as, DefaultOnError, VecSkip
use crate::serializer::text::TextComponent; use crate::serializer::text::TextComponent;
use crate::serializer::{ use crate::serializer::{
text::{AccessibilityText, AttributedText, Text, TextComponents}, text::{AccessibilityText, AttributedText, Text, TextComponents},
MapResult, VecLogError, MapResult,
}; };
use super::{ use super::{
url_endpoint::BrowseEndpointWrap, ContinuationEndpoint, ContinuationItemRenderer, Icon, url_endpoint::BrowseEndpointWrap, ContinuationEndpoint, ContinuationItemRenderer, Icon,
MusicContinuationData, Thumbnails, MusicContinuationData, Thumbnails,
}; };
use super::{ChannelBadge, ResponseContext, YouTubeListItem}; use super::{ChannelBadge, ContentsRendererLogged, ResponseContext, YouTubeListItem};
/* /*
#VIDEO DETAILS #VIDEO DETAILS
*/ */
/// Video details response /// Video details response
#[serde_as]
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct VideoDetails { pub(crate) struct VideoDetails {
@ -29,7 +28,6 @@ pub(crate) struct VideoDetails {
/// Video ID /// Video ID
pub current_video_endpoint: Option<CurrentVideoEndpoint>, pub current_video_endpoint: Option<CurrentVideoEndpoint>,
/// Video chapters + comment section /// Video chapters + comment section
#[serde_as(as = "VecLogError<_>")]
pub engagement_panels: MapResult<Vec<EngagementPanel>>, pub engagement_panels: MapResult<Vec<EngagementPanel>>,
pub response_context: ResponseContext, pub response_context: ResponseContext,
} }
@ -60,11 +58,9 @@ pub(crate) struct VideoResultsWrap {
} }
/// Video metadata items /// Video metadata items
#[serde_as]
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct VideoResults { pub(crate) struct VideoResults {
#[serde_as(as = "Option<VecLogError<_>>")]
pub contents: Option<MapResult<Vec<VideoResultsItem>>>, pub contents: Option<MapResult<Vec<VideoResultsItem>>>,
} }
@ -303,7 +299,6 @@ pub(crate) struct RecommendationResultsWrap {
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct RecommendationResults { pub(crate) struct RecommendationResults {
/// Can be `None` for age-restricted videos /// Can be `None` for age-restricted videos
#[serde_as(as = "Option<VecLogError<_>>")]
pub results: Option<MapResult<Vec<YouTubeListItem>>>, pub results: Option<MapResult<Vec<YouTubeListItem>>>,
#[serde_as(as = "Option<VecSkipError<_>>")] #[serde_as(as = "Option<VecSkipError<_>>")]
pub continuations: Option<Vec<MusicContinuationData>>, pub continuations: Option<Vec<MusicContinuationData>>,
@ -341,16 +336,7 @@ pub(crate) enum EngagementPanelRenderer {
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct ChapterMarkersContent { pub(crate) struct ChapterMarkersContent {
pub macro_markers_list_renderer: MacroMarkersListRenderer, pub macro_markers_list_renderer: ContentsRendererLogged<MacroMarkersListItem>,
}
/// Chapter markers
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct MacroMarkersListRenderer {
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<MacroMarkersListItem>>,
} }
/// Chapter marker /// Chapter marker
@ -436,7 +422,6 @@ pub(crate) struct CommentItemSectionHeaderMenuItem {
*/ */
/// Video comments continuation response /// Video comments continuation response
#[serde_as]
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct VideoComments { pub(crate) struct VideoComments {
@ -450,7 +435,6 @@ pub(crate) struct VideoComments {
/// - Comment replies: appendContinuationItemsAction /// - Comment replies: appendContinuationItemsAction
/// - n*commentRenderer, continuationItemRenderer: /// - n*commentRenderer, continuationItemRenderer:
/// replies + continuation /// replies + continuation
#[serde_as(as = "VecLogError<_>")]
pub on_response_received_endpoints: MapResult<Vec<CommentsContItem>>, pub on_response_received_endpoints: MapResult<Vec<CommentsContItem>>,
} }
@ -463,11 +447,9 @@ pub(crate) struct CommentsContItem {
} }
/// Video comments continuation action /// Video comments continuation action
#[serde_as]
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct AppendComments { pub(crate) struct AppendComments {
#[serde_as(as = "VecLogError<_>")]
pub continuation_items: MapResult<Vec<CommentListItem>>, pub continuation_items: MapResult<Vec<CommentListItem>>,
} }
@ -536,6 +518,8 @@ pub(crate) struct CommentRenderer {
pub author_comment_badge: Option<AuthorCommentBadge>, pub author_comment_badge: Option<AuthorCommentBadge>,
#[serde(default)] #[serde(default)]
pub reply_count: u64, pub reply_count: u64,
#[serde_as(as = "Option<Text>")]
pub vote_count: Option<String>,
/// Buttons for comment interaction (Like/Dislike/Reply) /// Buttons for comment interaction (Like/Dislike/Reply)
pub action_buttons: CommentActionButtons, pub action_buttons: CommentActionButtons,
} }
@ -581,7 +565,6 @@ pub(crate) struct CommentActionButtons {
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct CommentActionButtonsRenderer { pub(crate) struct CommentActionButtonsRenderer {
pub like_button: ToggleButtonWrap,
pub creator_heart: Option<CreatorHeart>, pub creator_heart: Option<CreatorHeart>,
} }

View file

@ -4,7 +4,7 @@ use serde::Deserialize;
use serde_with::{ use serde_with::{
json::JsonString, rust::deserialize_ignore_any, serde_as, DefaultOnError, VecSkipError, json::JsonString, rust::deserialize_ignore_any, serde_as, DefaultOnError, VecSkipError,
}; };
use time::{Duration, OffsetDateTime}; use time::OffsetDateTime;
use super::{url_endpoint::NavigationEndpoint, ChannelBadge, ContinuationEndpoint, Thumbnails}; use super::{url_endpoint::NavigationEndpoint, ChannelBadge, ContinuationEndpoint, Thumbnails};
use crate::{ use crate::{
@ -15,10 +15,9 @@ use crate::{
param::Language, param::Language,
serializer::{ serializer::{
text::{AccessibilityText, Text, TextComponent}, text::{AccessibilityText, Text, TextComponent},
MapResult, VecLogError, MapResult,
}, },
timeago, util::{self, timeago, TryRemove},
util::{self, TryRemove},
}; };
#[serde_as] #[serde_as]
@ -69,7 +68,6 @@ pub(crate) enum YouTubeListItem {
#[serde(alias = "expandedShelfContentsRenderer", alias = "gridRenderer")] #[serde(alias = "expandedShelfContentsRenderer", alias = "gridRenderer")]
ItemSectionRenderer { ItemSectionRenderer {
#[serde(alias = "items")] #[serde(alias = "items")]
#[serde_as(as = "VecLogError<_>")]
contents: MapResult<Vec<YouTubeListItem>>, contents: MapResult<Vec<YouTubeListItem>>,
}, },
@ -206,11 +204,9 @@ pub(crate) struct YouTubeListRendererWrap {
pub section_list_renderer: YouTubeListRenderer, pub section_list_renderer: YouTubeListRenderer,
} }
#[serde_as]
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub(crate) struct YouTubeListRenderer { pub(crate) struct YouTubeListRenderer {
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<YouTubeListItem>>, pub contents: MapResult<Vec<YouTubeListItem>>,
} }
@ -415,7 +411,7 @@ impl<T> YouTubeListMapper<T> {
} }
} }
pub fn with_channel<C>(lang: Language, channel: &Channel<C>) -> Self { pub fn with_channel<C>(lang: Language, channel: &Channel<C>, warnings: Vec<String>) -> Self {
Self { Self {
lang, lang,
channel: Some(ChannelTag { channel: Some(ChannelTag {
@ -426,7 +422,7 @@ impl<T> YouTubeListMapper<T> {
subscriber_count: channel.subscriber_count, subscriber_count: channel.subscriber_count,
}), }),
items: Vec::new(), items: Vec::new(),
warnings: Vec::new(), warnings,
ctoken: None, ctoken: None,
corrected_query: None, corrected_query: None,
channel_info: None, channel_info: None,
@ -505,8 +501,11 @@ impl<T> YouTubeListMapper<T> {
length: video.accessibility.and_then(|acc| { length: video.accessibility.and_then(|acc| {
ACCESSIBILITY_SEP_REGEX.captures(&acc).and_then(|cap| { ACCESSIBILITY_SEP_REGEX.captures(&acc).and_then(|cap| {
cap.get(1).and_then(|c| { cap.get(1).and_then(|c| {
timeago::parse_timeago_or_warn(self.lang, c.as_str(), &mut self.warnings) timeago::parse_video_duration_or_warn(
.map(|ta| Duration::from(ta).whole_seconds() as u32) self.lang,
c.as_str(),
&mut self.warnings,
)
}) })
}) })
}), }),
@ -518,7 +517,7 @@ impl<T> YouTubeListMapper<T> {
publish_date_txt: pub_date_txt, publish_date_txt: pub_date_txt,
view_count: video view_count: video
.view_count_text .view_count_text
.map(|txt| util::parse_large_numstr(&txt, lang).unwrap_or_default()), .and_then(|txt| util::parse_large_numstr_or_warn(&txt, lang, &mut self.warnings)),
is_live: false, is_live: false,
is_short: true, is_short: true,
is_upcoming: false, is_upcoming: false,
@ -572,10 +571,12 @@ impl<T> YouTubeListMapper<T> {
name: channel.title, name: channel.title,
avatar: channel.thumbnail.into(), avatar: channel.thumbnail.into(),
verification: channel.owner_badges.into(), verification: channel.owner_badges.into(),
subscriber_count: sc_txt subscriber_count: sc_txt.and_then(|txt| {
.and_then(|txt| util::parse_numeric_or_warn(&txt, &mut self.warnings)), util::parse_large_numstr_or_warn(&txt, self.lang, &mut self.warnings)
video_count: vc_text }),
.and_then(|txt| util::parse_numeric_or_warn(&txt, &mut self.warnings)), video_count: vc_text.and_then(|txt| {
util::parse_large_numstr_or_warn(&txt, self.lang, &mut self.warnings)
}),
short_description: channel.description_snippet, short_description: channel.description_snippet,
} }
} }

View file

@ -22,7 +22,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(582), subscriber_count: Some(582000),
video_count: None, video_count: None,
short_description: "Music Submissions: https://monstafluff.edmdistrict.com/", short_description: "Music Submissions: https://monstafluff.edmdistrict.com/",
)), )),
@ -42,7 +42,7 @@ SearchResult(
), ),
], ],
verification: Artist, verification: Artist,
subscriber_count: Some(403), subscriber_count: Some(4030000),
video_count: None, video_count: None,
short_description: "Welcome to the official Music Travel Love YouTube channel! We travel the world making music, friends, videos and memories!", short_description: "Welcome to the official Music Travel Love YouTube channel! We travel the world making music, friends, videos and memories!",
)), )),
@ -62,7 +62,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(167), subscriber_count: Some(167000),
video_count: None, video_count: None,
short_description: "MUSIC IN HARMONY WITH YOUR LIFE!!! If any producer, label, artist or photographer has an issue with any of the music or\u{a0}...", short_description: "MUSIC IN HARMONY WITH YOUR LIFE!!! If any producer, label, artist or photographer has an issue with any of the music or\u{a0}...",
)), )),
@ -82,7 +82,7 @@ SearchResult(
), ),
], ],
verification: Artist, verification: Artist,
subscriber_count: Some(411), subscriber_count: Some(411000),
video_count: None, video_count: None,
short_description: "The official YouTube channel of HAEVN Music. Receiving a piano from his grandfather had a great impact on Jorrit\'s life.", short_description: "The official YouTube channel of HAEVN Music. Receiving a piano from his grandfather had a great impact on Jorrit\'s life.",
)), )),
@ -102,7 +102,7 @@ SearchResult(
), ),
], ],
verification: None, verification: None,
subscriber_count: Some(312), subscriber_count: Some(31200),
video_count: None, video_count: None,
short_description: "Hello and welcome to \"Artemis Music\"! Music can play an effective role in helping us lead a better and more productive life.", short_description: "Hello and welcome to \"Artemis Music\"! Music can play an effective role in helping us lead a better and more productive life.",
)), )),
@ -122,7 +122,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(372), subscriber_count: Some(372000),
video_count: None, video_count: None,
short_description: "Music is the only language in which you cannot say a mean or sarcastic thing. Have fun listening to music.", short_description: "Music is the only language in which you cannot say a mean or sarcastic thing. Have fun listening to music.",
)), )),
@ -142,7 +142,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(178), subscriber_count: Some(178000),
video_count: None, video_count: None,
short_description: "S!X - Music is an independent Hip-Hop label. Soundcloud : https://soundcloud.com/s1xmusic Facebook\u{a0}...", short_description: "S!X - Music is an independent Hip-Hop label. Soundcloud : https://soundcloud.com/s1xmusic Facebook\u{a0}...",
)), )),
@ -162,7 +162,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(104), subscriber_count: Some(1040000),
video_count: None, video_count: None,
short_description: "Welcome to Shake Music, a Trap & Bass Channel / Record Label dedicated to bringing you the best tracks. All tracks on Shake\u{a0}...", short_description: "Welcome to Shake Music, a Trap & Bass Channel / Record Label dedicated to bringing you the best tracks. All tracks on Shake\u{a0}...",
)), )),
@ -182,7 +182,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(822), subscriber_count: Some(822000),
video_count: None, video_count: None,
short_description: "Welcome to Miracle Music! On this channel you will find a wide variety of different Deep House, Tropical House, Chill Out, EDM,.", short_description: "Welcome to Miracle Music! On this channel you will find a wide variety of different Deep House, Tropical House, Chill Out, EDM,.",
)), )),
@ -202,7 +202,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(462), subscriber_count: Some(4620000),
video_count: None, video_count: None,
short_description: "", short_description: "",
)), )),
@ -222,7 +222,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(105), subscriber_count: Some(1050000),
video_count: None, video_count: None,
short_description: "BRINGING YOU ONLY THE BEST EDM - TRAP Submit your own track for promotion here:\u{a0}...", short_description: "BRINGING YOU ONLY THE BEST EDM - TRAP Submit your own track for promotion here:\u{a0}...",
)), )),
@ -242,7 +242,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(709), subscriber_count: Some(709000),
video_count: None, video_count: None,
short_description: "Hey there! I am Mr MoMo My channel focus on Japan music, lofi, trap & bass type beat and Japanese instrumental. I mindfully\u{a0}...", short_description: "Hey there! I am Mr MoMo My channel focus on Japan music, lofi, trap & bass type beat and Japanese instrumental. I mindfully\u{a0}...",
)), )),
@ -262,7 +262,7 @@ SearchResult(
), ),
], ],
verification: None, verification: None,
subscriber_count: Some(544), subscriber_count: Some(54400),
video_count: None, video_count: None,
short_description: "", short_description: "",
)), )),
@ -282,7 +282,7 @@ SearchResult(
), ),
], ],
verification: None, verification: None,
subscriber_count: Some(359), subscriber_count: Some(3590),
video_count: None, video_count: None,
short_description: "Welcome to our Energy Transformation Relaxing Music . This chakra music channel will focus on developing the best chakra\u{a0}...", short_description: "Welcome to our Energy Transformation Relaxing Music . This chakra music channel will focus on developing the best chakra\u{a0}...",
)), )),
@ -302,7 +302,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(416), subscriber_count: Some(416000),
video_count: None, video_count: None,
short_description: "Nonstop Music - Home of 1h videos of your favourite songs and mixes. Nonstop Genres: Pop • Chillout • Tropical House • Deep\u{a0}...", short_description: "Nonstop Music - Home of 1h videos of your favourite songs and mixes. Nonstop Genres: Pop • Chillout • Tropical House • Deep\u{a0}...",
)), )),
@ -322,7 +322,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(3), subscriber_count: Some(3000000),
video_count: None, video_count: None,
short_description: "Vibe Music strives to bring the best lyric videos of popular Rap & Hip Hop songs. Be sure to Subscribe to see new videos we\u{a0}...", short_description: "Vibe Music strives to bring the best lyric videos of popular Rap & Hip Hop songs. Be sure to Subscribe to see new videos we\u{a0}...",
)), )),
@ -342,7 +342,7 @@ SearchResult(
), ),
], ],
verification: None, verification: None,
subscriber_count: Some(120), subscriber_count: Some(120000),
video_count: None, video_count: None,
short_description: "", short_description: "",
)), )),
@ -362,7 +362,7 @@ SearchResult(
), ),
], ],
verification: None, verification: None,
subscriber_count: Some(817), subscriber_count: Some(81700),
video_count: None, video_count: None,
short_description: "", short_description: "",
)), )),
@ -382,7 +382,7 @@ SearchResult(
), ),
], ],
verification: None, verification: None,
subscriber_count: Some(53), subscriber_count: Some(53000),
video_count: None, video_count: None,
short_description: "Welcome to my channel - Helios Music. I created this channel to help people have the most relaxing, refreshing and comfortable\u{a0}...", short_description: "Welcome to my channel - Helios Music. I created this channel to help people have the most relaxing, refreshing and comfortable\u{a0}...",
)), )),
@ -402,7 +402,7 @@ SearchResult(
), ),
], ],
verification: None, verification: None,
subscriber_count: Some(129), subscriber_count: Some(129000),
video_count: None, video_count: None,
short_description: "Music On (UNOFFICIAL CHANNEL)", short_description: "Music On (UNOFFICIAL CHANNEL)",
)), )),

View file

@ -22,7 +22,7 @@ SearchResult(
), ),
], ],
verification: Verified, verification: Verified,
subscriber_count: Some(292), subscriber_count: Some(2920000),
video_count: Some(219), video_count: Some(219),
short_description: "Hi, I\'m Tina, aka Doobydobap! Food is the medium I use to tell stories and connect with people who share the same passion as I\u{a0}...", short_description: "Hi, I\'m Tina, aka Doobydobap! Food is the medium I use to tell stories and connect with people who share the same passion as I\u{a0}...",
)), )),

View file

@ -56,7 +56,7 @@ impl MapResponse<Paginator<VideoItem>> for response::Startpage {
lang: crate::param::Language, lang: crate::param::Language,
_deobf: Option<&crate::deobfuscate::DeobfData>, _deobf: Option<&crate::deobfuscate::DeobfData>,
) -> Result<MapResult<Paginator<VideoItem>>, ExtractionError> { ) -> Result<MapResult<Paginator<VideoItem>>, ExtractionError> {
let mut contents = self.contents.two_column_browse_results_renderer.tabs; let mut contents = self.contents.two_column_browse_results_renderer.contents;
let grid = contents let grid = contents
.try_swap_remove(0) .try_swap_remove(0)
.ok_or(ExtractionError::InvalidData(Cow::Borrowed("no contents")))? .ok_or(ExtractionError::InvalidData(Cow::Borrowed("no contents")))?
@ -80,7 +80,7 @@ impl MapResponse<Vec<VideoItem>> for response::Trending {
lang: crate::param::Language, lang: crate::param::Language,
_deobf: Option<&crate::deobfuscate::DeobfData>, _deobf: Option<&crate::deobfuscate::DeobfData>,
) -> Result<MapResult<Vec<VideoItem>>, ExtractionError> { ) -> Result<MapResult<Vec<VideoItem>>, ExtractionError> {
let mut contents = self.contents.two_column_browse_results_renderer.tabs; let mut contents = self.contents.two_column_browse_results_renderer.contents;
let items = contents let items = contents
.try_swap_remove(0) .try_swap_remove(0)
.ok_or(ExtractionError::InvalidData(Cow::Borrowed("no contents")))? .ok_or(ExtractionError::InvalidData(Cow::Borrowed("no contents")))?

View file

@ -7,8 +7,7 @@ use crate::{
model::{paginator::Paginator, ChannelTag, Chapter, Comment, VideoDetails, VideoItem}, model::{paginator::Paginator, ChannelTag, Chapter, Comment, VideoDetails, VideoItem},
param::Language, param::Language,
serializer::MapResult, serializer::MapResult,
timeago, util::{self, timeago, TryRemove},
util::{self, TryRemove},
}; };
use super::{ use super::{
@ -191,9 +190,10 @@ impl MapResponse<VideoDetails> for response::VideoDetails {
}; };
let comment_count = comment_count_section.and_then(|s| { let comment_count = comment_count_section.and_then(|s| {
util::parse_large_numstr::<u64>( util::parse_large_numstr_or_warn::<u64>(
&s.comments_entry_point_header_renderer.comment_count, &s.comments_entry_point_header_renderer.comment_count,
lang, lang,
&mut warnings,
) )
}); });
@ -331,9 +331,9 @@ impl MapResponse<VideoDetails> for response::VideoDetails {
name: channel_name, name: channel_name,
avatar: owner.thumbnail.into(), avatar: owner.thumbnail.into(),
verification: owner.badges.into(), verification: owner.badges.into(),
subscriber_count: owner subscriber_count: owner.subscriber_count_text.and_then(|txt| {
.subscriber_count_text util::parse_large_numstr_or_warn(&txt, lang, &mut warnings)
.and_then(|txt| util::parse_large_numstr(&txt, lang)), }),
}, },
view_count, view_count,
like_count, like_count,
@ -505,16 +505,16 @@ fn map_comment(
}), }),
_ => None, _ => None,
}, },
publish_date: timeago::parse_timeago_to_dt(lang, &c.published_time_text), publish_date: timeago::parse_timeago_dt_or_warn(
publish_date_txt: c.published_time_text, lang,
like_count: util::parse_numeric_or_warn( &c.published_time_text,
&c.action_buttons
.comment_action_buttons_renderer
.like_button
.toggle_button_renderer
.accessibility_data,
&mut warnings, &mut warnings,
), ),
publish_date_txt: c.published_time_text,
like_count: match c.vote_count {
Some(txt) => util::parse_numeric_or_warn(&txt, &mut warnings),
None => Some(0),
},
reply_count: c.reply_count as u32, reply_count: c.reply_count as u32,
replies: replies replies: replies
.map(|items| Paginator::new(Some(c.reply_count), items, reply_ctoken)) .map(|items| Paginator::new(Some(c.reply_count), items, reply_ctoken))

View file

@ -1,9 +1,6 @@
#![doc = include_str!("../README.md")] #![doc = include_str!("../README.md")]
#![warn(missing_docs, clippy::todo, clippy::dbg_macro)] #![warn(missing_docs, clippy::todo, clippy::dbg_macro)]
#[macro_use]
mod macros;
mod deobfuscate; mod deobfuscate;
mod serializer; mod serializer;
mod util; mod util;
@ -14,5 +11,4 @@ pub mod error;
pub mod model; pub mod model;
pub mod param; pub mod param;
pub mod report; pub mod report;
pub mod timeago;
pub mod validate; pub mod validate;

View file

@ -1,23 +0,0 @@
/// Returns an unwrapped Option if Some() otherwise returns the passed expression
macro_rules! some_or_bail {
($opt:expr, $ret:expr $(,)?) => {{
match $opt {
Some(stuff) => stuff,
None => {
return $ret;
}
}
}};
}
/// Returns an unwrapped Result if Ok() otherwise returns the passed expression
macro_rules! ok_or_bail {
($result:expr, $ret:expr $(,)?) => {{
match $result {
Ok(stuff) => stuff,
Err(_) => {
return $ret;
}
}
}};
}

View file

@ -6,7 +6,7 @@ mod vec_log_err;
pub use date::DateYmd; pub use date::DateYmd;
pub use range::Range; pub use range::Range;
pub use vec_log_err::VecLogError; pub use vec_log_err::VecSkipErrorWrap;
use std::fmt::Debug; use std::fmt::Debug;

View file

@ -1,10 +1,9 @@
use std::{fmt, marker::PhantomData}; use std::{fmt, marker::PhantomData};
use serde::{ use serde::{
de::{SeqAccess, Visitor}, de::{IgnoredAny, SeqAccess, Visitor},
Deserialize, Deserialize,
}; };
use serde_with::{de::DeserializeAsWrap, DeserializeAs};
use super::MapResult; use super::MapResult;
@ -13,39 +12,26 @@ use super::MapResult;
/// ///
/// This is similar to `VecSkipError`, but it does not silently ignore /// This is similar to `VecSkipError`, but it does not silently ignore
/// faulty items. /// faulty items.
pub struct VecLogError<T>(PhantomData<T>); impl<'de, T> Deserialize<'de> for MapResult<Vec<T>>
impl<'de, T, U> DeserializeAs<'de, MapResult<Vec<T>>> for VecLogError<U>
where where
U: DeserializeAs<'de, T>, T: Deserialize<'de>,
{ {
fn deserialize_as<D>(deserializer: D) -> Result<MapResult<Vec<T>>, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where where
D: serde::Deserializer<'de>, D: serde::Deserializer<'de>,
{ {
#[derive(serde::Deserialize)] #[derive(serde::Deserialize)]
#[serde( #[serde(untagged)]
untagged, enum GoodOrError<T> {
bound(deserialize = "DeserializeAsWrap<T, TAs>: Deserialize<'de>") Good(T),
)] Error(serde_json::Value),
enum GoodOrError<'a, T, TAs>
where
TAs: DeserializeAs<'a, T>,
{
Good(DeserializeAsWrap<T, TAs>),
Error(serde_json::value::Value),
#[serde(skip)]
_JustAMarkerForTheLifetime(PhantomData<&'a u32>),
} }
struct SeqVisitor<T, U> { struct SeqVisitor<T>(PhantomData<T>);
marker: PhantomData<T>,
marker2: PhantomData<U>,
}
impl<'de, T, U> Visitor<'de> for SeqVisitor<T, U> impl<'de, T> Visitor<'de> for SeqVisitor<T>
where where
U: DeserializeAs<'de, T>, T: Deserialize<'de>,
{ {
type Value = MapResult<Vec<T>>; type Value = MapResult<Vec<T>>;
@ -62,16 +48,15 @@ where
while let Some(value) = seq.next_element()? { while let Some(value) = seq.next_element()? {
match value { match value {
GoodOrError::<T, U>::Good(value) => { GoodOrError::<T>::Good(value) => {
values.push(value.into_inner()); values.push(value);
} }
GoodOrError::<T, U>::Error(value) => { GoodOrError::<T>::Error(value) => {
warnings.push(format!( warnings.push(format!(
"error deserializing item: {}", "error deserializing item: {}",
serde_json::to_string(&value).unwrap_or_default() serde_json::to_string(&value).unwrap_or_default()
)); ));
} }
_ => {}
} }
} }
Ok(MapResult { Ok(MapResult {
@ -81,43 +66,113 @@ where
} }
} }
let visitor = SeqVisitor::<T, U> { deserializer.deserialize_seq(SeqVisitor(PhantomData::<T>))
marker: PhantomData, }
marker2: PhantomData, }
};
deserializer.deserialize_seq(visitor) /// Reimplementation of VecSkipError using a wrapper type
/// to allow use with generics
pub struct VecSkipErrorWrap<T>(pub Vec<T>);
impl<'de, T> Deserialize<'de> for VecSkipErrorWrap<T>
where
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(serde::Deserialize)]
#[serde(untagged)]
enum GoodOrError<T> {
Good(T),
Error(IgnoredAny),
}
struct SeqVisitor<T>(PhantomData<T>);
impl<'de, T> Visitor<'de> for SeqVisitor<T>
where
T: Deserialize<'de>,
{
type Value = VecSkipErrorWrap<T>;
fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
formatter.write_str("a sequence")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = Vec::with_capacity(seq.size_hint().unwrap_or_default());
while let Some(value) = seq.next_element()? {
match value {
GoodOrError::<T>::Good(value) => {
values.push(value);
}
GoodOrError::<T>::Error(_) => {}
}
}
Ok(VecSkipErrorWrap(values))
}
}
deserializer.deserialize_seq(SeqVisitor(PhantomData::<T>))
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use serde::Deserialize; use serde::Deserialize;
use serde_with::serde_as;
use crate::serializer::MapResult; use crate::serializer::MapResult;
#[serde_as] use super::VecSkipErrorWrap;
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[allow(dead_code)] #[allow(dead_code)]
struct S { struct SLog {
#[serde_as(as = "crate::serializer::VecLogError<_>")]
items: MapResult<Vec<Item>>, items: MapResult<Vec<Item>>,
} }
#[derive(Deserialize)]
#[allow(dead_code)]
struct SSkip {
items: VecSkipErrorWrap<Item>,
}
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[allow(dead_code)] #[allow(dead_code)]
struct Item { struct Item {
name: String, name: String,
} }
#[test] const JSON: &str =
fn test() { r#"{"items": [{"name": "i1"}, {"xyz": "i2"}, {"name": "i3"}, {"namra": "i4"}]}"#;
let json = r#"{"items": [{"name": "i1"}, {"xyz": "i2"}, {"name": "i3"}, {"namra": "i4"}]}"#;
let res = serde_json::from_str::<S>(json).unwrap(); #[test]
fn skip_error() {
let res = serde_json::from_str::<SSkip>(JSON).unwrap();
insta::assert_debug_snapshot!(res.items.0, @r###"
[
Item {
name: "i1",
},
Item {
name: "i3",
},
]
"###);
}
#[test]
fn log_error() {
let res = serde_json::from_str::<SLog>(JSON).unwrap();
insta::assert_debug_snapshot!(res, @r###" insta::assert_debug_snapshot!(res, @r###"
S { SLog {
items: [ items: [
Item { Item {
name: "i1", name: "i1",

File diff suppressed because it is too large Load diff

View file

@ -2,6 +2,7 @@ mod date;
mod protobuf; mod protobuf;
pub mod dictionary; pub mod dictionary;
pub mod timeago;
pub use date::{now_sec, shift_months, shift_years}; pub use date::{now_sec, shift_months, shift_years};
pub use protobuf::{string_from_pb, ProtoBuilder}; pub use protobuf::{string_from_pb, ProtoBuilder};
@ -19,7 +20,7 @@ use rand::Rng;
use regex::Regex; use regex::Regex;
use url::Url; use url::Url;
use crate::{error::Error, param::Language}; use crate::{error::Error, param::Language, serializer::text::TextComponent};
pub static VIDEO_ID_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"^[A-Za-z0-9_-]{11}$").unwrap()); pub static VIDEO_ID_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"^[A-Za-z0-9_-]{11}$").unwrap());
pub static CHANNEL_ID_REGEX: Lazy<Regex> = pub static CHANNEL_ID_REGEX: Lazy<Regex> =
@ -34,8 +35,6 @@ pub static VANITY_PATH_REGEX: Lazy<Regex> = Lazy::new(|| {
/// Separator string for YouTube Music subtitles /// Separator string for YouTube Music subtitles
pub const DOT_SEPARATOR: &str = ""; pub const DOT_SEPARATOR: &str = "";
/// YouTube Music name (author of official playlists)
pub const YT_MUSIC_NAME: &str = "YouTube Music";
pub const VARIOUS_ARTISTS: &str = "Various Artists"; pub const VARIOUS_ARTISTS: &str = "Various Artists";
pub const PLAYLIST_ID_ALBUM_PREFIX: &str = "OLAK"; pub const PLAYLIST_ID_ALBUM_PREFIX: &str = "OLAK";
@ -143,7 +142,7 @@ where
/// and return the duration in seconds. /// and return the duration in seconds.
pub fn parse_video_length(text: &str) -> Option<u32> { pub fn parse_video_length(text: &str) -> Option<u32> {
static VIDEO_LENGTH_REGEX: Lazy<Regex> = static VIDEO_LENGTH_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"(?:(\d+):)?(\d{1,2}):(\d{2})"#).unwrap()); Lazy::new(|| Regex::new(r#"(?:(\d+)[:.])?(\d{1,2})[:.](\d{2})"#).unwrap());
VIDEO_LENGTH_REGEX.captures(text).map(|cap| { VIDEO_LENGTH_REGEX.captures(text).map(|cap| {
let hrs = cap let hrs = cap
.get(1) .get(1)
@ -193,14 +192,15 @@ pub fn retry_delay(
/// Also strips google analytics tracking parameters /// Also strips google analytics tracking parameters
/// (`utm_source`, `utm_medium`, `utm_campaign`, `utm_content`) because google analytics is bad. /// (`utm_source`, `utm_medium`, `utm_campaign`, `utm_content`) because google analytics is bad.
pub fn sanitize_yt_url(url: &str) -> String { pub fn sanitize_yt_url(url: &str) -> String {
let mut parsed_url = ok_or_bail!(Url::parse(url), url.to_owned()); fn sanitize_yt_url_inner(url: &str) -> Option<String> {
let mut parsed_url = Url::parse(url).ok()?;
// Convert redirect url // Convert redirect url
if parsed_url.host_str().unwrap_or_default() == "www.youtube.com" if parsed_url.host_str().unwrap_or_default() == "www.youtube.com"
&& parsed_url.path() == "/redirect" && parsed_url.path() == "/redirect"
{ {
if let Some((_, url)) = parsed_url.query_pairs().find(|(k, _)| k == "q") { if let Some((_, url)) = parsed_url.query_pairs().find(|(k, _)| k == "q") {
parsed_url = ok_or_bail!(Url::parse(url.as_ref()), url.to_string()); parsed_url = Url::parse(url.as_ref()).ok()?;
} }
} }
@ -225,8 +225,10 @@ pub fn sanitize_yt_url(url: &str) -> String {
.finish(); .finish();
} }
} }
Some(parsed_url.to_string())
}
parsed_url.to_string() sanitize_yt_url_inner(url).unwrap_or_else(|| url.to_string())
} }
pub trait TryRemove<T> { pub trait TryRemove<T> {
@ -269,44 +271,86 @@ impl<T> TryRemove<T> for Vec<T> {
} }
} }
/// Check if a channel name equals "YouTube Music"
/// (the author of original YouTube music playlists)
pub(crate) fn is_ytm(text: &TextComponent) -> bool {
if let TextComponent::Text { text } = text {
text.starts_with("YouTube")
} else {
false
}
}
/// Check if a language should be parsed by character
pub fn lang_by_char(lang: Language) -> bool {
matches!(
lang,
Language::Ja | Language::ZhCn | Language::ZhHk | Language::ZhTw
)
}
/// Parse a large, textual number (e.g. `1.4M subscribers`, `22K views`) /// Parse a large, textual number (e.g. `1.4M subscribers`, `22K views`)
pub fn parse_large_numstr<F>(string: &str, lang: Language) -> Option<F> pub fn parse_large_numstr<F>(string: &str, lang: Language) -> Option<F>
where where
F: TryFrom<u64>, F: TryFrom<u64>,
{ {
// Special case for Gujarati: the "no views" text does not contain
// any parseable tokens: the 2 words occur in any view count text.
// This may be a translation error.
if lang == Language::Gu && string == "જોવાયાની સંખ્યા" {
return 0.try_into().ok();
}
let dict_entry = dictionary::entry(lang); let dict_entry = dictionary::entry(lang);
let by_char = lang_by_char(lang) || lang == Language::Ko;
let decimal_point = match dict_entry.comma_decimal { let decimal_point = match dict_entry.comma_decimal {
true => ',', true => ',',
false => '.', false => '.',
}; };
let (num, mut exp, filtered) = { let mut digits = String::new();
let mut buf = String::new();
let mut filtered = String::new(); let mut filtered = String::new();
let mut exp = 0; let mut exp = 0;
let mut after_point = false; let mut after_point = false;
for c in string.chars() { for c in string.chars() {
if c.is_ascii_digit() { if c.is_ascii_digit() {
buf.push(c); digits.push(c);
if after_point { if after_point {
exp -= 1; exp -= 1;
} }
} else if c == decimal_point { } else if c == decimal_point {
after_point = true; after_point = true;
} else if !matches!(c, '\u{200b}' | '.' | ',') { } else if !matches!(
filtered.push(c); c,
'\u{200b}' | '\u{202b}' | '\u{202c}' | '\u{202e}' | '\u{200e}' | '\u{200f}' | '.' | ','
) {
c.to_lowercase().for_each(|c| filtered.push(c));
} }
} }
(ok_or_bail!(buf.parse::<u64>(), None), exp, filtered)
}; if digits.is_empty() {
if by_char {
filtered
.chars()
.find_map(|c| dict_entry.number_nd_tokens.get(&c.to_string()))
.and_then(|n| (*n as u64).try_into().ok())
} else {
filtered
.split_whitespace()
.find_map(|token| dict_entry.number_nd_tokens.get(token))
.and_then(|n| (*n as u64).try_into().ok())
}
} else {
let num = digits.parse::<u64>().ok()?;
let lookup_token = |token: &str| match token { let lookup_token = |token: &str| match token {
"K" | "k" => Some(3), "k" => Some(3),
_ => dict_entry.number_tokens.get(token).map(|t| *t as i32), _ => dict_entry.number_tokens.get(token).map(|t| *t as i32),
}; };
if dict_entry.by_char { if by_char {
exp += filtered exp += filtered
.chars() .chars()
.filter_map(|token| lookup_token(&token.to_string())) .filter_map(|token| lookup_token(&token.to_string()))
@ -318,14 +362,23 @@ where
.sum::<i32>(); .sum::<i32>();
} }
F::try_from(some_or_bail!( F::try_from(num.checked_mul((10_u64).checked_pow(exp.try_into().ok()?)?)?).ok()
num.checked_mul(some_or_bail!( }
(10_u64).checked_pow(ok_or_bail!(exp.try_into(), None)), }
None
)), pub fn parse_large_numstr_or_warn<F>(
None string: &str,
)) lang: Language,
.ok() warnings: &mut Vec<String>,
) -> Option<F>
where
F: TryFrom<u64>,
{
let res = parse_large_numstr::<F>(string, lang);
if res.is_none() {
warnings.push(format!("could not parse numstr `{string}`"));
}
res
} }
/// Replace all html control characters to make a string safe for inserting into HTML. /// Replace all html control characters to make a string safe for inserting into HTML.
@ -452,23 +505,21 @@ pub(crate) mod tests {
assert_eq!(res, expect); assert_eq!(res, expect);
} }
#[test] #[rstest]
fn t_parse_large_numstr_samples() { #[case(
let json_path = path!(*TESTFILES / "dict" / "large_number_samples.json"); Language::Iw,
let json_file = File::open(json_path).unwrap(); "\u{200f}\u{202b}3.36M\u{200f}\u{202c}\u{200f} \u{200f}מנויים\u{200f}",
let number_samples: BTreeMap<Language, BTreeMap<u8, (String, u64)>> = 3_360_000
serde_json::from_reader(BufReader::new(json_file)).unwrap(); )]
#[case(Language::As, "১ জন গ্ৰাহক", 1)]
number_samples.iter().for_each(|(lang, entry)| { fn t_parse_large_numstr(#[case] lang: Language, #[case] string: &str, #[case] expect: u64) {
entry.iter().for_each(|(_, (txt, expect))| { let res = parse_large_numstr::<u64>(string, lang).unwrap();
testcase_parse_large_numstr(txt, *lang, *expect); assert_eq!(res, expect);
});
});
} }
#[test] #[test]
fn t_parse_large_numstr_samples2() { fn t_parse_large_numstr_samples() {
let json_path = path!(*TESTFILES / "dict" / "large_number_samples_all.json"); let json_path = path!(*TESTFILES / "dict" / "large_number_samples.json");
let json_file = File::open(json_path).unwrap(); let json_file = File::open(json_path).unwrap();
let number_samples: BTreeMap<Language, BTreeMap<String, u64>> = let number_samples: BTreeMap<Language, BTreeMap<String, u64>> =
serde_json::from_reader(BufReader::new(json_file)).unwrap(); serde_json::from_reader(BufReader::new(json_file)).unwrap();
@ -485,12 +536,18 @@ pub(crate) mod tests {
// in the string. // in the string.
let rounded = { let rounded = {
let n_significant_d = string.chars().filter(char::is_ascii_digit).count(); let n_significant_d = string.chars().filter(char::is_ascii_digit).count();
if n_significant_d == 0 {
expect
} else {
let mag = (expect as f64).log10().floor(); let mag = (expect as f64).log10().floor();
let factor = 10_u64.pow(1 + mag as u32 - n_significant_d as u32); let factor = 10_u64.pow(1 + mag as u32 - n_significant_d as u32);
(((expect as f64) / factor as f64).floor() as u64) * factor (((expect as f64) / factor as f64).floor() as u64) * factor
}
}; };
let res = parse_large_numstr::<u64>(string, lang).expect(string); let emsg = format!("{string} (lang: {lang}, exact: {expect})");
assert_eq!(res, rounded, "{string} (lang: {lang}, exact: {expect})");
let res = parse_large_numstr::<u64>(string, lang).expect(&emsg);
assert_eq!(res, rounded, "{emsg}");
} }
} }

View file

@ -98,11 +98,11 @@ pub fn string_from_pb<P: IntoIterator<Item = u8>>(pb: P, field: u32) -> Option<S
5 => 4, 5 => 4,
// string // string
2 => { 2 => {
let len = some_or_bail!(parse_varint(&mut pb), None); let len = parse_varint(&mut pb)?;
if this_field == field { if this_field == field {
let mut buf = Vec::new(); let mut buf = Vec::new();
for _ in 0..len { for _ in 0..len {
buf.push(some_or_bail!(pb.next(), None)); buf.push(pb.next()?);
} }
return String::from_utf8(buf).ok(); return String::from_utf8(buf).ok();
} else { } else {

View file

@ -9,11 +9,6 @@
//! //!
//! This module can parse these dates using an embedded dictionary which //! This module can parse these dates using an embedded dictionary which
//! contains date/time unit tokens for all supported languages. //! contains date/time unit tokens for all supported languages.
//!
//! Note that this module is public so it can be tested from outside
//! the crate, which is important for including new languages, too.
//!
//! It is not intended to be used to parse textual dates that are not from YouTube.
use std::ops::Mul; use std::ops::Mul;
@ -70,17 +65,37 @@ pub enum TimeUnit {
/// Value of a parsed TimeAgo token, used in the dictionary /// Value of a parsed TimeAgo token, used in the dictionary
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) struct TaToken { pub struct TaToken {
pub n: u8, pub n: u8,
pub unit: Option<TimeUnit>, pub unit: Option<TimeUnit>,
} }
pub(crate) enum DateCmp { pub enum DateCmp {
Y, Y,
M, M,
D, D,
} }
impl TimeUnit {
pub fn secs(&self) -> i64 {
match self {
TimeUnit::Second => 1,
TimeUnit::Minute => 60,
TimeUnit::Hour => 3600,
TimeUnit::Day => 24 * 3600,
TimeUnit::Week => 7 * 24 * 3600,
TimeUnit::Month => 30 * 24 * 3600,
TimeUnit::Year => 365 * 24 * 3600,
}
}
}
impl TimeAgo {
fn secs(&self) -> i64 {
i64::from(self.n) * self.unit.secs()
}
}
impl Mul<u8> for TimeAgo { impl Mul<u8> for TimeAgo {
type Output = Self; type Output = Self;
@ -94,15 +109,7 @@ impl Mul<u8> for TimeAgo {
impl From<TimeAgo> for Duration { impl From<TimeAgo> for Duration {
fn from(ta: TimeAgo) -> Self { fn from(ta: TimeAgo) -> Self {
match ta.unit { Duration::seconds(ta.secs())
TimeUnit::Second => Duration::seconds(ta.n as i64),
TimeUnit::Minute => Duration::minutes(ta.n as i64),
TimeUnit::Hour => Duration::hours(ta.n as i64),
TimeUnit::Day => Duration::days(ta.n as i64),
TimeUnit::Week => Duration::weeks(ta.n as i64),
TimeUnit::Month => Duration::days(ta.n as i64 * 30),
TimeUnit::Year => Duration::days(ta.n as i64 * 365),
}
} }
} }
@ -142,14 +149,19 @@ fn filter_str(string: &str) -> String {
.collect() .collect()
} }
fn parse_ta_token(entry: &dictionary::Entry, nd: bool, filtered_str: &str) -> Option<TimeAgo> { fn parse_ta_token(
entry: &dictionary::Entry,
by_char: bool,
nd: bool,
filtered_str: &str,
) -> Option<TimeAgo> {
let tokens = match nd { let tokens = match nd {
true => &entry.timeago_nd_tokens, true => &entry.timeago_nd_tokens,
false => &entry.timeago_tokens, false => &entry.timeago_tokens,
}; };
let mut qu = 1; let mut qu = 1;
if entry.by_char { if by_char {
filtered_str.chars().find_map(|word| { filtered_str.chars().find_map(|word| {
tokens.get(&word.to_string()).and_then(|t| match t.unit { tokens.get(&word.to_string()).and_then(|t| match t.unit {
Some(unit) => Some(TimeAgo { n: t.n * qu, unit }), Some(unit) => Some(TimeAgo { n: t.n * qu, unit }),
@ -172,54 +184,78 @@ fn parse_ta_token(entry: &dictionary::Entry, nd: bool, filtered_str: &str) -> Op
} }
} }
fn parse_textual_month(entry: &dictionary::Entry, filtered_str: &str) -> Option<u8> { fn parse_ta_tokens(
if entry.by_char { entry: &dictionary::Entry,
// Chinese/Japanese dont use textual months by_char: bool,
nd: bool,
filtered_str: &str,
) -> Vec<TimeAgo> {
let tokens = match nd {
true => &entry.timeago_nd_tokens,
false => &entry.timeago_tokens,
};
let mut qu = 1;
if by_char {
filtered_str
.chars()
.filter_map(|word| {
tokens.get(&word.to_string()).and_then(|t| match t.unit {
Some(unit) => Some(TimeAgo { n: t.n * qu, unit }),
None => {
qu = t.n;
None None
}
})
})
.collect()
} else { } else {
filtered_str
.split_whitespace()
.filter_map(|word| {
tokens.get(word).and_then(|t| match t.unit {
Some(unit) => Some(TimeAgo { n: t.n * qu, unit }),
None => {
qu = t.n;
None
}
})
})
.collect()
}
}
fn parse_textual_month(entry: &dictionary::Entry, filtered_str: &str) -> Option<u8> {
filtered_str filtered_str
.split_whitespace() .split_whitespace()
.find_map(|word| entry.months.get(word).copied()) .find_map(|word| entry.months.get(word).copied())
} }
}
/// Parse a TimeAgo string (e.g. "29 minutes ago") into a TimeAgo object. /// Parse a TimeAgo string (e.g. "29 minutes ago") into a TimeAgo object.
/// ///
/// Returns None if the date could not be parsed. /// Returns [`None`] if the date could not be parsed.
pub fn parse_timeago(lang: Language, textual_date: &str) -> Option<TimeAgo> { pub fn parse_timeago(lang: Language, textual_date: &str) -> Option<TimeAgo> {
let entry = dictionary::entry(lang); let entry = dictionary::entry(lang);
let filtered_str = filter_str(textual_date); let filtered_str = filter_str(textual_date);
let qu: u8 = util::parse_numeric(textual_date).unwrap_or(1); let qu: u8 = util::parse_numeric(textual_date).unwrap_or(1);
parse_ta_token(&entry, false, &filtered_str).map(|ta| ta * qu) parse_ta_token(&entry, util::lang_by_char(lang), false, &filtered_str).map(|ta| ta * qu)
} }
/// Parse a TimeAgo string (e.g. "29 minutes ago") into a Chrono DateTime object. /// Parse a TimeAgo string (e.g. "29 minutes ago") into a Chrono DateTime object.
/// ///
/// Returns None if the date could not be parsed. /// Returns [`None`] if the date could not be parsed.
pub fn parse_timeago_to_dt(lang: Language, textual_date: &str) -> Option<OffsetDateTime> { pub fn parse_timeago_dt(lang: Language, textual_date: &str) -> Option<OffsetDateTime> {
parse_timeago(lang, textual_date).map(|ta| ta.into()) parse_timeago(lang, textual_date).map(|ta| ta.into())
} }
pub(crate) fn parse_timeago_or_warn( pub fn parse_timeago_dt_or_warn(
lang: Language,
textual_date: &str,
warnings: &mut Vec<String>,
) -> Option<TimeAgo> {
let res = parse_timeago(lang, textual_date);
if res.is_none() {
warnings.push(format!("could not parse timeago `{textual_date}`"));
}
res
}
pub(crate) fn parse_timeago_dt_or_warn(
lang: Language, lang: Language,
textual_date: &str, textual_date: &str,
warnings: &mut Vec<String>, warnings: &mut Vec<String>,
) -> Option<OffsetDateTime> { ) -> Option<OffsetDateTime> {
let res = parse_timeago_to_dt(lang, textual_date); let res = parse_timeago_dt(lang, textual_date);
if res.is_none() { if res.is_none() {
warnings.push(format!("could not parse timeago `{textual_date}`")); warnings.push(format!("could not parse timeago `{textual_date}`"));
} }
@ -228,19 +264,20 @@ pub(crate) fn parse_timeago_dt_or_warn(
/// Parse a textual date (e.g. "29 minutes ago" or "Jul 2, 2014") into a ParsedDate object. /// Parse a textual date (e.g. "29 minutes ago" or "Jul 2, 2014") into a ParsedDate object.
/// ///
/// Returns None if the date could not be parsed. /// Returns [`None`] if the date could not be parsed.
pub fn parse_textual_date(lang: Language, textual_date: &str) -> Option<ParsedDate> { pub fn parse_textual_date(lang: Language, textual_date: &str) -> Option<ParsedDate> {
let entry = dictionary::entry(lang); let entry = dictionary::entry(lang);
let by_char = util::lang_by_char(lang);
let filtered_str = filter_str(textual_date); let filtered_str = filter_str(textual_date);
let nums = util::parse_numeric_vec::<u16>(textual_date); let nums = util::parse_numeric_vec::<u16>(textual_date);
match nums.len() { match nums.len() {
0 => match parse_ta_token(&entry, true, &filtered_str) { 0 => match parse_ta_token(&entry, by_char, true, &filtered_str) {
Some(timeago) => Some(ParsedDate::Relative(timeago)), Some(timeago) => Some(ParsedDate::Relative(timeago)),
None => parse_ta_token(&entry, false, &filtered_str).map(ParsedDate::Relative), None => parse_ta_token(&entry, by_char, false, &filtered_str).map(ParsedDate::Relative),
}, },
1 => parse_ta_token(&entry, false, &filtered_str) 1 => parse_ta_token(&entry, by_char, false, &filtered_str)
.map(|timeago| ParsedDate::Relative(timeago * nums[0] as u8)), .map(|timeago| ParsedDate::Relative(timeago * nums[0] as u8)),
2..=3 => { 2..=3 => {
if nums.len() == entry.date_order.len() { if nums.len() == entry.date_order.len() {
@ -256,7 +293,8 @@ pub fn parse_textual_date(lang: Language, textual_date: &str) -> Option<ParsedDa
DateCmp::D => d = Some(*n), DateCmp::D => d = Some(*n),
}); });
if m.is_none() { // Chinese/Japanese dont use textual months
if m.is_none() && !by_char {
m = parse_textual_month(&entry, &filtered_str).map(|n| n as u16); m = parse_textual_month(&entry, &filtered_str).map(|n| n as u16);
} }
@ -282,7 +320,7 @@ pub fn parse_textual_date_to_dt(lang: Language, textual_date: &str) -> Option<Of
parse_textual_date(lang, textual_date).map(|ta| ta.into()) parse_textual_date(lang, textual_date).map(|ta| ta.into())
} }
pub(crate) fn parse_textual_date_or_warn( pub fn parse_textual_date_or_warn(
lang: Language, lang: Language,
textual_date: &str, textual_date: &str,
warnings: &mut Vec<String>, warnings: &mut Vec<String>,
@ -294,6 +332,87 @@ pub(crate) fn parse_textual_date_or_warn(
res res
} }
/// Parse a textual video duration (e.g. "11 minutes, 20 seconds")
///
/// Returns None if the duration could not be parsed
pub fn parse_video_duration(lang: Language, video_duration: &str) -> Option<u32> {
let entry = dictionary::entry(lang);
let by_char = util::lang_by_char(lang);
let parts = split_duration_txt(video_duration, matches!(lang, Language::Si | Language::Sw));
let mut secs = 0;
for part in parts {
let mut n = if part.digits.is_empty() {
1
} else {
part.digits.parse::<u32>().ok()?
};
let tokens = parse_ta_tokens(&entry, by_char, false, &part.word);
if tokens.is_empty() {
return None;
}
tokens.iter().for_each(|ta| {
secs += n * ta.secs() as u32;
n = 1;
});
}
Some(secs)
}
pub fn parse_video_duration_or_warn(
lang: Language,
video_duration: &str,
warnings: &mut Vec<String>,
) -> Option<u32> {
let res = parse_video_duration(lang, video_duration);
if res.is_none() {
warnings.push(format!("could not parse video duration `{video_duration}`"));
}
res
}
#[derive(Default)]
struct DurationTxtSegment {
digits: String,
word: String,
}
fn split_duration_txt(txt: &str, start_c: bool) -> Vec<DurationTxtSegment> {
let mut segments = Vec::new();
// 1: parse digits, 2: parse word
let mut state: u8 = 0;
let mut seg = DurationTxtSegment::default();
for c in txt.chars() {
if c.is_ascii_digit() {
if state == 2 && (!seg.digits.is_empty() || (!start_c && segments.is_empty())) {
segments.push(seg);
seg = DurationTxtSegment::default();
}
seg.digits.push(c);
state = 1;
} else {
if (state == 1) && (!seg.word.is_empty() || (start_c && segments.is_empty())) {
segments.push(seg);
seg = DurationTxtSegment::default();
}
if c != ',' {
c.to_lowercase().for_each(|c| seg.word.push(c));
}
state = 2;
}
}
if !seg.word.is_empty() || !seg.digits.is_empty() {
segments.push(seg);
}
segments
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::{collections::BTreeMap, fs::File, io::BufReader}; use std::{collections::BTreeMap, fs::File, io::BufReader};
@ -536,6 +655,11 @@ mod tests {
"Last updated on Jun 04, 2003", "Last updated on Jun 04, 2003",
Some(ParsedDate::Absolute(date!(2003-6-4))) Some(ParsedDate::Absolute(date!(2003-6-4)))
)] )]
#[case(
Language::Bn,
"যোগ দিয়েছেন 24 সেপ, 2013",
Some(ParsedDate::Absolute(date!(2013-9-24)))
)]
fn t_parse_date( fn t_parse_date(
#[case] lang: Language, #[case] lang: Language,
#[case] textual_date: &str, #[case] textual_date: &str,
@ -564,11 +688,7 @@ mod tests {
assert_eq!( assert_eq!(
parse_textual_date(*lang, samples.get("Yesterday").unwrap()), parse_textual_date(*lang, samples.get("Yesterday").unwrap()),
Some(ParsedDate::Relative(TimeAgo { Some(ParsedDate::Relative(TimeAgo {
// YT's Singhalese translation has an error (yesterday == today) n: 1,
n: match lang {
Language::Si => 0,
_ => 1,
},
unit: TimeUnit::Day unit: TimeUnit::Day
})), })),
"lang: {lang}" "lang: {lang}"
@ -576,7 +696,7 @@ mod tests {
assert_eq!( assert_eq!(
parse_textual_date(*lang, samples.get("Ago").unwrap()), parse_textual_date(*lang, samples.get("Ago").unwrap()),
Some(ParsedDate::Relative(TimeAgo { Some(ParsedDate::Relative(TimeAgo {
n: 3, n: 5,
unit: TimeUnit::Day unit: TimeUnit::Day
})), })),
"lang: {lang}" "lang: {lang}"
@ -644,6 +764,36 @@ mod tests {
}) })
} }
#[test]
fn t_parse_video_duration() {
let json_path = path!(*TESTFILES / "dict" / "video_duration_samples.json");
let json_file = File::open(json_path).unwrap();
let date_samples: BTreeMap<Language, BTreeMap<String, u32>> =
serde_json::from_reader(BufReader::new(json_file)).unwrap();
date_samples.iter().for_each(|(lang, samples)| {
samples.iter().for_each(|(txt, duration)| {
assert_eq!(
parse_video_duration(*lang, txt),
Some(*duration),
"lang: {lang}; txt: `{txt}`"
);
})
});
}
#[rstest]
#[case(Language::Ar, "19 دقيقة وثانيتان", 1142)]
#[case(Language::Ar, "دقيقة و13 ثانية", 73)]
#[case(Language::Sw, "dakika 1 na sekunde 13", 73)]
fn t_parse_video_duration2(
#[case] lang: Language,
#[case] video_duration: &str,
#[case] expect: u32,
) {
assert_eq!(parse_video_duration(lang, video_duration), Some(expect));
}
#[test] #[test]
fn t_to_datetime() { fn t_to_datetime() {
// Absolute date // Absolute date

File diff suppressed because it is too large Load diff

View file

@ -1,5 +1,5 @@
// This code is used to test the deobfuscation javascript extraction. // This code is used to test the deobfuscation javascript extraction.
// Since YouTube's player code is copyrighted, I can just copy-paste it // Since YouTube's player code is copyrighted, I can't just copy-paste it
// into my project. // into my project.
/* /*

View file

@ -1,902 +0,0 @@
{
"supplemental": {
"version": {
"_unicodeVersion": "13.0.0",
"_cldrVersion": "37"
},
"plurals-type-cardinal": {
"af": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ak": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"am": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"an": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ar": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n % 100 = 3..10 @integer 3~10, 103~110, 1003, … @decimal 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 103.0, 1003.0, …",
"pluralRule-count-many": "n % 100 = 11..99 @integer 11~26, 111, 1011, … @decimal 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 111.0, 1011.0, …",
"pluralRule-count-other": " @integer 100~102, 200~202, 300~302, 400~402, 500~502, 600, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ars": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n % 100 = 3..10 @integer 3~10, 103~110, 1003, … @decimal 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 103.0, 1003.0, …",
"pluralRule-count-many": "n % 100 = 11..99 @integer 11~26, 111, 1011, … @decimal 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 111.0, 1011.0, …",
"pluralRule-count-other": " @integer 100~102, 200~202, 300~302, 400~402, 500~502, 600, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"as": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"asa": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ast": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"az": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"be": {
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 1.0, 21.0, 31.0, 41.0, 51.0, 61.0, 71.0, 81.0, 101.0, 1001.0, …",
"pluralRule-count-few": "n % 10 = 2..4 and n % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 2.0, 3.0, 4.0, 22.0, 23.0, 24.0, 32.0, 33.0, 102.0, 1002.0, …",
"pluralRule-count-many": "n % 10 = 0 or n % 10 = 5..9 or n % 100 = 11..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.1, 1000.1, …"
},
"bem": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bez": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bho": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bm": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bn": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"br": {
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11,71,91 @integer 1, 21, 31, 41, 51, 61, 81, 101, 1001, … @decimal 1.0, 21.0, 31.0, 41.0, 51.0, 61.0, 81.0, 101.0, 1001.0, …",
"pluralRule-count-two": "n % 10 = 2 and n % 100 != 12,72,92 @integer 2, 22, 32, 42, 52, 62, 82, 102, 1002, … @decimal 2.0, 22.0, 32.0, 42.0, 52.0, 62.0, 82.0, 102.0, 1002.0, …",
"pluralRule-count-few": "n % 10 = 3..4,9 and n % 100 != 10..19,70..79,90..99 @integer 3, 4, 9, 23, 24, 29, 33, 34, 39, 43, 44, 49, 103, 1003, … @decimal 3.0, 4.0, 9.0, 23.0, 24.0, 29.0, 33.0, 34.0, 103.0, 1003.0, …",
"pluralRule-count-many": "n != 0 and n % 1000000 = 0 @integer 1000000, … @decimal 1000000.0, 1000000.00, 1000000.000, …",
"pluralRule-count-other": " @integer 0, 5~8, 10~20, 100, 1000, 10000, 100000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, …"
},
"brx": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bs": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ca": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ce": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ceb": {
"pluralRule-count-one": "v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 @integer 0~3, 5, 7, 8, 10~13, 15, 17, 18, 20, 21, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.3, 0.5, 0.7, 0.8, 1.0~1.3, 1.5, 1.7, 1.8, 2.0, 2.1, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 4, 6, 9, 14, 16, 19, 24, 26, 104, 1004, … @decimal 0.4, 0.6, 0.9, 1.4, 1.6, 1.9, 2.4, 2.6, 10.4, 100.4, 1000.4, …"
},
"cgg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"chr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ckb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"cs": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "i = 2..4 and v = 0 @integer 2~4",
"pluralRule-count-many": "v != 0 @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …"
},
"cy": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n = 3 @integer 3 @decimal 3.0, 3.00, 3.000, 3.0000",
"pluralRule-count-many": "n = 6 @integer 6 @decimal 6.0, 6.00, 6.000, 6.0000",
"pluralRule-count-other": " @integer 4, 5, 7~20, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"da": {
"pluralRule-count-one": "n = 1 or t != 0 and i = 0,1 @integer 1 @decimal 0.1~1.6",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 2.0~3.4, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"de": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"dsb": {
"pluralRule-count-one": "v = 0 and i % 100 = 1 or f % 100 = 1 @integer 1, 101, 201, 301, 401, 501, 601, 701, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-two": "v = 0 and i % 100 = 2 or f % 100 = 2 @integer 2, 102, 202, 302, 402, 502, 602, 702, 1002, … @decimal 0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-few": "v = 0 and i % 100 = 3..4 or f % 100 = 3..4 @integer 3, 4, 103, 104, 203, 204, 303, 304, 403, 404, 503, 504, 603, 604, 703, 704, 1003, … @decimal 0.3, 0.4, 1.3, 1.4, 2.3, 2.4, 3.3, 3.4, 4.3, 4.4, 5.3, 5.4, 6.3, 6.4, 7.3, 7.4, 10.3, 100.3, 1000.3, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"dv": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"dz": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ee": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"el": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"en": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"eo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"es": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"et": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"eu": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fa": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ff": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fi": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fil": {
"pluralRule-count-one": "v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 @integer 0~3, 5, 7, 8, 10~13, 15, 17, 18, 20, 21, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.3, 0.5, 0.7, 0.8, 1.0~1.3, 1.5, 1.7, 1.8, 2.0, 2.1, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 4, 6, 9, 14, 16, 19, 24, 26, 104, 1004, … @decimal 0.4, 0.6, 0.9, 1.4, 1.6, 1.9, 2.4, 2.6, 10.4, 100.4, 1000.4, …"
},
"fo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fr": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fur": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fy": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ga": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n = 3..6 @integer 3~6 @decimal 3.0, 4.0, 5.0, 6.0, 3.00, 4.00, 5.00, 6.00, 3.000, 4.000, 5.000, 6.000, 3.0000, 4.0000, 5.0000, 6.0000",
"pluralRule-count-many": "n = 7..10 @integer 7~10 @decimal 7.0, 8.0, 9.0, 10.0, 7.00, 8.00, 9.00, 10.00, 7.000, 8.000, 9.000, 10.000, 7.0000, 8.0000, 9.0000, 10.0000",
"pluralRule-count-other": " @integer 0, 11~25, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gd": {
"pluralRule-count-one": "n = 1,11 @integer 1, 11 @decimal 1.0, 11.0, 1.00, 11.00, 1.000, 11.000, 1.0000",
"pluralRule-count-two": "n = 2,12 @integer 2, 12 @decimal 2.0, 12.0, 2.00, 12.00, 2.000, 12.000, 2.0000",
"pluralRule-count-few": "n = 3..10,13..19 @integer 3~10, 13~19 @decimal 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 3.00",
"pluralRule-count-other": " @integer 0, 20~34, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gl": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gsw": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gu": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"guw": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gv": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 @integer 1, 11, 21, 31, 41, 51, 61, 71, 101, 1001, …",
"pluralRule-count-two": "v = 0 and i % 10 = 2 @integer 2, 12, 22, 32, 42, 52, 62, 72, 102, 1002, …",
"pluralRule-count-few": "v = 0 and i % 100 = 0,20,40,60,80 @integer 0, 20, 40, 60, 80, 100, 120, 140, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-many": "v != 0 @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 3~10, 13~19, 23, 103, 1003, …"
},
"ha": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"haw": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"he": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-two": "i = 2 and v = 0 @integer 2",
"pluralRule-count-many": "v = 0 and n != 0..10 and n % 10 = 0 @integer 20, 30, 40, 50, 60, 70, 80, 90, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @integer 0, 3~17, 101, 1001, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hi": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hr": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hsb": {
"pluralRule-count-one": "v = 0 and i % 100 = 1 or f % 100 = 1 @integer 1, 101, 201, 301, 401, 501, 601, 701, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-two": "v = 0 and i % 100 = 2 or f % 100 = 2 @integer 2, 102, 202, 302, 402, 502, 602, 702, 1002, … @decimal 0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-few": "v = 0 and i % 100 = 3..4 or f % 100 = 3..4 @integer 3, 4, 103, 104, 203, 204, 303, 304, 403, 404, 503, 504, 603, 604, 703, 704, 1003, … @decimal 0.3, 0.4, 1.3, 1.4, 2.3, 2.4, 3.3, 3.4, 4.3, 4.4, 5.3, 5.4, 6.3, 6.4, 7.3, 7.4, 10.3, 100.3, 1000.3, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hu": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hy": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ia": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"id": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ig": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ii": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"in": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"io": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"is": {
"pluralRule-count-one": "t = 0 and i % 10 = 1 and i % 100 != 11 or t != 0 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1~1.6, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"it": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"iu": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"iw": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-two": "i = 2 and v = 0 @integer 2",
"pluralRule-count-many": "v = 0 and n != 0..10 and n % 10 = 0 @integer 20, 30, 40, 50, 60, 70, 80, 90, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @integer 0, 3~17, 101, 1001, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ja": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jbo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jgo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ji": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jmc": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jv": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jw": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ka": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kab": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kaj": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kcg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kde": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kea": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kk": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kkj": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kl": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"km": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kn": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ko": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ks": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ksb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ksh": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ku": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kw": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n % 100 = 2,22,42,62,82 or n % 1000 = 0 and n % 100000 = 1000..20000,40000,60000,80000 or n != 0 and n % 1000000 = 100000 @integer 2, 22, 42, 62, 82, 102, 122, 142, 1000, 10000, 100000, … @decimal 2.0, 22.0, 42.0, 62.0, 82.0, 102.0, 122.0, 142.0, 1000.0, 10000.0, 100000.0, …",
"pluralRule-count-few": "n % 100 = 3,23,43,63,83 @integer 3, 23, 43, 63, 83, 103, 123, 143, 1003, … @decimal 3.0, 23.0, 43.0, 63.0, 83.0, 103.0, 123.0, 143.0, 1003.0, …",
"pluralRule-count-many": "n != 1 and n % 100 = 1,21,41,61,81 @integer 21, 41, 61, 81, 101, 121, 141, 161, 1001, … @decimal 21.0, 41.0, 61.0, 81.0, 101.0, 121.0, 141.0, 161.0, 1001.0, …",
"pluralRule-count-other": " @integer 4~19, 100, 1004, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.1, 1000000.0, …"
},
"ky": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lag": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "i = 0,1 and n != 0 @integer 1 @decimal 0.1~1.6",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lkt": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ln": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lt": {
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11..19 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 1.0, 21.0, 31.0, 41.0, 51.0, 61.0, 71.0, 81.0, 101.0, 1001.0, …",
"pluralRule-count-few": "n % 10 = 2..9 and n % 100 != 11..19 @integer 2~9, 22~29, 102, 1002, … @decimal 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 22.0, 102.0, 1002.0, …",
"pluralRule-count-many": "f != 0 @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 0, 10~20, 30, 40, 50, 60, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lv": {
"pluralRule-count-zero": "n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19 @integer 0, 10~20, 30, 40, 50, 60, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11 or v = 2 and f % 10 = 1 and f % 100 != 11 or v != 2 and f % 10 = 1 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.0, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 2~9, 22~29, 102, 1002, … @decimal 0.2~0.9, 1.2~1.9, 10.2, 100.2, 1000.2, …"
},
"mas": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mg": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mgo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mk": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.2~1.0, 1.2~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ml": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mo": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "v != 0 or n = 0 or n % 100 = 2..19 @integer 0, 2~16, 102, 1002, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 20~35, 100, 1000, 10000, 100000, 1000000, …"
},
"mr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ms": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mt": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-few": "n = 0 or n % 100 = 2..10 @integer 0, 2~10, 102~107, 1002, … @decimal 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 10.0, 102.0, 1002.0, …",
"pluralRule-count-many": "n % 100 = 11..19 @integer 11~19, 111~117, 1011, … @decimal 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 111.0, 1011.0, …",
"pluralRule-count-other": " @integer 20~35, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"my": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nah": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"naq": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nd": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ne": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nl": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nnh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"no": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nqo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nso": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ny": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nyn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"om": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"or": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"os": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"osa": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pa": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pap": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pcm": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pl": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, …",
"pluralRule-count-many": "v = 0 and i != 1 and i % 10 = 0..1 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 12..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"prg": {
"pluralRule-count-zero": "n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19 @integer 0, 10~20, 30, 40, 50, 60, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11 or v = 2 and f % 10 = 1 and f % 100 != 11 or v != 2 and f % 10 = 1 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.0, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 2~9, 22~29, 102, 1002, … @decimal 0.2~0.9, 1.2~1.9, 10.2, 100.2, 1000.2, …"
},
"ps": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pt": {
"pluralRule-count-one": "i = 0..1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pt-PT": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"rm": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ro": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "v != 0 or n = 0 or n % 100 = 2..19 @integer 0, 2~16, 102, 1002, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 20~35, 100, 1000, 10000, 100000, 1000000, …"
},
"rof": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"root": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ru": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, …",
"pluralRule-count-many": "v = 0 and i % 10 = 0 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 11..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"rwk": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sah": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"saq": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sat": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sc": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"scn": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sd": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sdh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"se": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"seh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ses": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sg": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sh": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"shi": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-few": "n = 2..10 @integer 2~10 @decimal 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00",
"pluralRule-count-other": " @integer 11~26, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~1.9, 2.1~2.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"si": {
"pluralRule-count-one": "n = 0,1 or i = 0 and f = 1 @integer 0, 1 @decimal 0.0, 0.1, 1.0, 0.00, 0.01, 1.00, 0.000, 0.001, 1.000, 0.0000, 0.0001, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.2~0.9, 1.1~1.8, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sk": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "i = 2..4 and v = 0 @integer 2~4",
"pluralRule-count-many": "v != 0 @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …"
},
"sl": {
"pluralRule-count-one": "v = 0 and i % 100 = 1 @integer 1, 101, 201, 301, 401, 501, 601, 701, 1001, …",
"pluralRule-count-two": "v = 0 and i % 100 = 2 @integer 2, 102, 202, 302, 402, 502, 602, 702, 1002, …",
"pluralRule-count-few": "v = 0 and i % 100 = 3..4 or v != 0 @integer 3, 4, 103, 104, 203, 204, 303, 304, 403, 404, 503, 504, 603, 604, 703, 704, 1003, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …"
},
"sma": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"smi": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"smj": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"smn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sms": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"so": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sq": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sr": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ss": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ssy": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"st": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"su": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sv": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sw": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"syr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ta": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"te": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"teo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"th": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ti": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tig": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tk": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tl": {
"pluralRule-count-one": "v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 @integer 0~3, 5, 7, 8, 10~13, 15, 17, 18, 20, 21, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.3, 0.5, 0.7, 0.8, 1.0~1.3, 1.5, 1.7, 1.8, 2.0, 2.1, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 4, 6, 9, 14, 16, 19, 24, 26, 104, 1004, … @decimal 0.4, 0.6, 0.9, 1.4, 1.6, 1.9, 2.4, 2.6, 10.4, 100.4, 1000.4, …"
},
"tn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"to": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ts": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tzm": {
"pluralRule-count-one": "n = 0..1 or n = 11..99 @integer 0, 1, 11~24 @decimal 0.0, 1.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0",
"pluralRule-count-other": " @integer 2~10, 100~106, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ug": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"uk": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, …",
"pluralRule-count-many": "v = 0 and i % 10 = 0 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 11..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ur": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"uz": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ve": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"vi": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"vo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"vun": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"wa": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"wae": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"wo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"xh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"xog": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"yi": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"yo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"yue": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"zh": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"zu": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
}
}
}
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,163 @@
{
"af": {
"number_nd_tokens": {
"geen": null
}
},
"am": {
"number_nd_tokens": {
"ምንም": null
}
},
"as": {
"number_tokens": {
"লা": 5,
"হা": 3,
"শঃ": null
},
"number_nd_tokens": {
"কোনো": null
}
},
"bn": {
"number_tokens": {
"কোটি": 7,
"শত": 2
}
},
"es": {
"number_tokens": {
"m": 6,
"mil": 3
}
},
"es-US": {
"number_tokens": {
"m": 6,
"mil": 3
}
},
"et": {
"number_nd_tokens": {
"vaatamisi": null
}
},
"eu": {
"number_nd_tokens": {
"dago": null,
"ikustaldirik": null
}
},
"fr": {
"number_tokens": {
"dabonnés": null
}
},
"hy": {
"number_nd_tokens": {
"Դիտումներ": null
}
},
"is": {
"number_nd_tokens": {
"áskrifandi": null,
"enn": null
}
},
"iw": {
"number_nd_tokens": {
"מנוי": null
}
},
"ka": {
"number_nd_tokens": {
"არის": null,
"ნახვები": null
}
},
"kk": {
"number_nd_tokens": {
"көрмеген": null
}
},
"kn": {
"number_nd_tokens": {
"ಯಾವುದೇ": null
}
},
"ko": {
"number_nd_tokens": {
"음": null
}
},
"ky": {
"number_nd_tokens": {
"ким": null,
"көрө": null,
"элек": null
}
},
"my": {
"number_tokens": {
"ကုဋေ": 7,
"သောင်း": 4,
"ထ": 3
}
},
"ne": {
"number_nd_tokens": {
"कुनै": null
}
},
"no": {
"number_nd_tokens": {
"avspillinger": null
}
},
"or": {
"number_tokens": {
"ବିଜଣ": 9,
"ବି": 9
},
"number_nd_tokens": {
"କୌଣସି": null
}
},
"pa": {
"number_nd_tokens": {
"ਕਿਸੇ": null,
"ਨੇ": null
}
},
"ro": {
"number_nd_tokens": {
"abonat": null,
"vizionare": null
}
},
"sq": {
"number_nd_tokens": {
"ka": null
}
},
"uk": {
"number_nd_tokens": {
"перегляду": null
}
},
"ur": {
"number_nd_tokens": {
"کوئی": null
}
},
"zh-CN": {
"number_nd_tokens": {
"人": null
}
},
"zu": {
"number_nd_tokens": {
"kubukwa": null
}
}
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,129 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_u1I69lSAe5v",
playlist_id: Some("OLAK5uy_lGP_zv0vJDUlecQDzugUJmjcF7pvyVNyY"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
description: "[description]",
album_type: Ep,
year: Some(2016),
by_va: false,
tracks: [
TrackItem(
id: "aGd3VKSOTxY",
name: "[name]",
duration: Some(221),
cover: [],
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
album: Some(AlbumId(
id: "MPREb_u1I69lSAe5v",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "Jz-26iiDuYs",
name: "[name]",
duration: Some(208),
cover: [],
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
album: Some(AlbumId(
id: "MPREb_u1I69lSAe5v",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "Bu26uFtpt58",
name: "[name]",
duration: Some(223),
cover: [],
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
album: Some(AlbumId(
id: "MPREb_u1I69lSAe5v",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "RgwNqqiVqdY",
name: "[name]",
duration: Some(221),
cover: [],
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
album: Some(AlbumId(
id: "MPREb_u1I69lSAe5v",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
TrackItem(
id: "2TuOh30XbCI",
name: "[name]",
duration: Some(197),
cover: [],
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
album: Some(AlbumId(
id: "MPREb_u1I69lSAe5v",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(5),
by_va: false,
),
],
variants: [],
)

View file

@ -0,0 +1,134 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_bqWA6mAZFWS",
playlist_id: Some("OLAK5uy_mUiRbMqeQXFUH6h9KB87RcEmNtm45Qvs0"),
name: "[name]",
cover: "[cover]",
artists: [],
artist_id: None,
description: "[description]",
album_type: Ep,
year: Some(1968),
by_va: false,
tracks: [
TrackItem(
id: "EX7-pOQHPyE",
name: "[name]",
duration: Some(267),
cover: [],
artists: [
ArtistId(
id: Some("UC1C05NyYICFB2mVGn9_ttEw"),
name: "[name]",
),
],
artist_id: Some("UC1C05NyYICFB2mVGn9_ttEw"),
album: Some(AlbumId(
id: "MPREb_bqWA6mAZFWS",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "0AyWB-Quj4A",
name: "[name]",
duration: Some(179),
cover: [],
artists: [
ArtistId(
id: Some("UCDqpyYkgWy2h03HamIfODjw"),
name: "[name]",
),
],
artist_id: Some("UCDqpyYkgWy2h03HamIfODjw"),
album: Some(AlbumId(
id: "MPREb_bqWA6mAZFWS",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "s0Sb-GZLXSM",
name: "[name]",
duration: Some(155),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_bqWA6mAZFWS",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "P4XAaXjlCDA",
name: "[name]",
duration: Some(229),
cover: [],
artists: [
ArtistId(
id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
name: "[name]",
),
],
artist_id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
album: Some(AlbumId(
id: "MPREb_bqWA6mAZFWS",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
],
variants: [
AlbumItem(
id: "MPREb_h8ltx5oKvyY",
name: "Pedha Rasi Peddamma Katha",
cover: [
Thumbnail(
url: "https://lh3.googleusercontent.com/iZtBdPWBGNB-GAWvOp9seuYj5QqKrUYGSe-B5J026yxHqFSWv4zsxHy-LxX5LbFlnepOPRWNLrajO-_-=w226-h226-l90-rj",
width: 226,
height: 226,
),
Thumbnail(
url: "https://lh3.googleusercontent.com/iZtBdPWBGNB-GAWvOp9seuYj5QqKrUYGSe-B5J026yxHqFSWv4zsxHy-LxX5LbFlnepOPRWNLrajO-_-=w544-h544-l90-rj",
width: 544,
height: 544,
),
],
artists: [
ArtistId(
id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
name: "[name]",
),
ArtistId(
id: Some("UCWgAqlYG7mXTUxrFiLyDSsg"),
name: "[name]",
),
],
artist_id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
album_type: Ep,
year: None,
by_va: false,
),
],
)

View file

@ -0,0 +1,61 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_F3Af9UZZVxX",
playlist_id: Some("OLAK5uy_nim4i4eycEtlBtS3Ci6j4SvvTmdfBcRX4"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCAJwa_1l4rHzBJyWbeBtGZw"),
name: "[name]",
),
ArtistId(
id: Some("UCbBaYg2UToDaoOwo-R6xi4g"),
name: "[name]",
),
ArtistId(
id: Some("UCiY3z8HAGD6BlSNKVn2kSvQ"),
name: "[name]",
),
],
artist_id: Some("UCAJwa_1l4rHzBJyWbeBtGZw"),
description: "[description]",
album_type: Single,
year: None,
by_va: false,
tracks: [
TrackItem(
id: "1Sz3lUVGBSM",
name: "[name]",
duration: Some(229),
cover: [],
artists: [
ArtistId(
id: Some("UCAJwa_1l4rHzBJyWbeBtGZw"),
name: "[name]",
),
ArtistId(
id: Some("UCbBaYg2UToDaoOwo-R6xi4g"),
name: "[name]",
),
ArtistId(
id: Some("UCiY3z8HAGD6BlSNKVn2kSvQ"),
name: "[name]",
),
],
artist_id: Some("UCAJwa_1l4rHzBJyWbeBtGZw"),
album: Some(AlbumId(
id: "MPREb_F3Af9UZZVxX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
],
variants: [],
)

View file

@ -0,0 +1,429 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_nlBWQROfvjo",
playlist_id: Some("OLAK5uy_myZkBX2d2TzcrlQhIwLy3hCj2MkAMaPR4"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
description: "[description]",
album_type: Album,
year: Some(2016),
by_va: false,
tracks: [
TrackItem(
id: "g0iRiJ_ck48",
name: "[name]",
duration: Some(216),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "rREEBXp0y9s",
name: "[name]",
duration: Some(224),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "zvU5Y8Q19hU",
name: "[name]",
duration: Some(176),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "ARKLrzzTQA0",
name: "[name]",
duration: Some(215),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
TrackItem(
id: "tstLgN8A_Ng",
name: "[name]",
duration: Some(268),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(5),
by_va: false,
),
TrackItem(
id: "k2DjgQOY3Ts",
name: "[name]",
duration: Some(202),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(6),
by_va: false,
),
TrackItem(
id: "azHwhecxEsI",
name: "[name]",
duration: Some(185),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(7),
by_va: false,
),
TrackItem(
id: "_FcsdYIQ2co",
name: "[name]",
duration: Some(226),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(8),
by_va: false,
),
TrackItem(
id: "27bOWEbshyE",
name: "[name]",
duration: Some(207),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(9),
by_va: false,
),
TrackItem(
id: "riD_3oZwt8w",
name: "[name]",
duration: Some(211),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(10),
by_va: false,
),
TrackItem(
id: "8GNvjF3no9s",
name: "[name]",
duration: Some(179),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(11),
by_va: false,
),
TrackItem(
id: "YHMFzf1uN2U",
name: "[name]",
duration: Some(218),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(12),
by_va: false,
),
TrackItem(
id: "jvV-z5F3oAo",
name: "[name]",
duration: Some(277),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(13),
by_va: false,
),
TrackItem(
id: "u8_9cxlrh8k",
name: "[name]",
duration: Some(204),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(14),
by_va: false,
),
TrackItem(
id: "gSvKcvM1Wk0",
name: "[name]",
duration: Some(202),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(15),
by_va: false,
),
TrackItem(
id: "wQHgKRJ0pDQ",
name: "[name]",
duration: Some(222),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(16),
by_va: false,
),
TrackItem(
id: "Ckz5i6-hzf0",
name: "[name]",
duration: Some(177),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(17),
by_va: false,
),
TrackItem(
id: "y5zuUgyFqrc",
name: "[name]",
duration: Some(220),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(18),
by_va: false,
),
],
variants: [
AlbumItem(
id: "MPREb_jk6Msw8izou",
name: "Märchen enden gut (Nyáre Ranta (Märchenedition))",
cover: [
Thumbnail(
url: "https://lh3.googleusercontent.com/BKgnW_-hapCHk599AtRfTYZGdXVIo0C4bJp1Bh7qUpGK7fNAXGW8Bhv2x-ukeFM8cuxKbGqqGaTo8fZASA=w226-h226-l90-rj",
width: 226,
height: 226,
),
Thumbnail(
url: "https://lh3.googleusercontent.com/BKgnW_-hapCHk599AtRfTYZGdXVIo0C4bJp1Bh7qUpGK7fNAXGW8Bhv2x-ukeFM8cuxKbGqqGaTo8fZASA=w544-h544-l90-rj",
width: 544,
height: 544,
),
],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album_type: Album,
year: None,
by_va: false,
),
],
)

View file

@ -0,0 +1,318 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_cwzk8EUwypZ",
playlist_id: Some("OLAK5uy_kODvYZ5CEpYdtd4VPsmg0eRTlpazG0dvA"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
description: "[description]",
album_type: Show,
year: Some(2022),
by_va: false,
tracks: [
TrackItem(
id: "lSbKz5LWvKE",
name: "[name]",
duration: Some(229),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "fdO6gu4qjRw",
name: "[name]",
duration: Some(235),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "muCxstXirvY",
name: "[name]",
duration: Some(197),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "aG1N0vo__Ng",
name: "[name]",
duration: Some(186),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
TrackItem(
id: "roHhLNYS9yo",
name: "[name]",
duration: Some(188),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(5),
by_va: false,
),
TrackItem(
id: "nJ49NuLvcAw",
name: "[name]",
duration: Some(205),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(6),
by_va: false,
),
TrackItem(
id: "Me119D570h0",
name: "[name]",
duration: Some(219),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(7),
by_va: false,
),
TrackItem(
id: "YXnRLK-qKG8",
name: "[name]",
duration: Some(240),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(8),
by_va: false,
),
TrackItem(
id: "A61wz1jz9X0",
name: "[name]",
duration: Some(239),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(9),
by_va: false,
),
TrackItem(
id: "u_S08EJOTUg",
name: "[name]",
duration: Some(197),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(10),
by_va: false,
),
TrackItem(
id: "0qwYJihV1EU",
name: "[name]",
duration: Some(201),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(11),
by_va: false,
),
TrackItem(
id: "zjhoyTnEzuQ",
name: "[name]",
duration: Some(187),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(12),
by_va: false,
),
TrackItem(
id: "oDjDd0UBzAY",
name: "[name]",
duration: Some(183),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(13),
by_va: false,
),
TrackItem(
id: "_3-WVmqgi-Q",
name: "[name]",
duration: Some(193),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(14),
by_va: false,
),
],
variants: [],
)

View file

@ -0,0 +1,53 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_bHfHGoy7vuv",
playlist_id: Some("OLAK5uy_kdSWBZ-9AZDkYkuy0QCc3p0KO9DEHVNH0"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCXGYZ-OhdOpPBamHX3K9YRg"),
name: "[name]",
),
ArtistId(
id: Some("UCFTcSVPYRWlDoHisR-ZKwgw"),
name: "[name]",
),
],
artist_id: Some("UCXGYZ-OhdOpPBamHX3K9YRg"),
description: "[description]",
album_type: Single,
year: Some(2020),
by_va: false,
tracks: [
TrackItem(
id: "VU6lEv0PKAo",
name: "[name]",
duration: Some(183),
cover: [],
artists: [
ArtistId(
id: Some("UCXGYZ-OhdOpPBamHX3K9YRg"),
name: "[name]",
),
ArtistId(
id: Some("UCFTcSVPYRWlDoHisR-ZKwgw"),
name: "[name]",
),
],
artist_id: Some("UCXGYZ-OhdOpPBamHX3K9YRg"),
album: Some(AlbumId(
id: "MPREb_bHfHGoy7vuv",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
],
variants: [],
)

View file

@ -0,0 +1,271 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_AzuWg8qAVVl",
playlist_id: Some("OLAK5uy_mux5ygfN9sbiR1ma3yh1GHTmqNekZNoAI"),
name: "[name]",
cover: "[cover]",
artists: [],
artist_id: None,
description: "[description]",
album_type: Album,
year: Some(2019),
by_va: true,
tracks: [
TrackItem(
id: "R3VIKRtzAdE",
name: "[name]",
duration: Some(205),
cover: [],
artists: [
ArtistId(
id: Some("UCCj0RlDqqahEB5BXVtDcPqg"),
name: "[name]",
),
],
artist_id: Some("UCCj0RlDqqahEB5BXVtDcPqg"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "t0v0UOgOt18",
name: "[name]",
duration: Some(174),
cover: [],
artists: [
ArtistId(
id: Some("UCMrCoizKiBxqeg5pTpBXn1A"),
name: "[name]",
),
],
artist_id: Some("UCMrCoizKiBxqeg5pTpBXn1A"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "HjJYAkUXrxI",
name: "[name]",
duration: Some(199),
cover: [],
artists: [
ArtistId(
id: Some("UCWjoDY2SXJ5dvcdunWI6mjQ"),
name: "[name]",
),
],
artist_id: Some("UCWjoDY2SXJ5dvcdunWI6mjQ"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "Hg0KUOTL06I",
name: "[name]",
duration: Some(187),
cover: [],
artists: [
ArtistId(
id: Some("UChzK2t3sjnQkWzGnyKXOSSg"),
name: "[name]",
),
],
artist_id: Some("UChzK2t3sjnQkWzGnyKXOSSg"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(5),
by_va: false,
),
TrackItem(
id: "c8AfY6yhdkM",
name: "[name]",
duration: Some(159),
cover: [],
artists: [
ArtistId(
id: Some("UCvsgN5NKOzXnAURfaf3TOig"),
name: "[name]",
),
],
artist_id: Some("UCvsgN5NKOzXnAURfaf3TOig"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(6),
by_va: false,
),
TrackItem(
id: "_ZmdHjVvwhc",
name: "[name]",
duration: Some(186),
cover: [],
artists: [
ArtistId(
id: Some("UCI4YNnmHjXFaaKvfdmpWvJQ"),
name: "[name]",
),
],
artist_id: Some("UCI4YNnmHjXFaaKvfdmpWvJQ"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(7),
by_va: false,
),
TrackItem(
id: "wBe1Zi3q1n8",
name: "[name]",
duration: Some(209),
cover: [],
artists: [
ArtistId(
id: Some("UCDaFVUr2n8T7_X1f5yJ1xlw"),
name: "[name]",
),
],
artist_id: Some("UCDaFVUr2n8T7_X1f5yJ1xlw"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(8),
by_va: false,
),
TrackItem(
id: "l8Pj8s9uPGc",
name: "[name]",
duration: Some(209),
cover: [],
artists: [
ArtistId(
id: Some("UCZcc-WkffIMBVGUr6j9e6aQ"),
name: "[name]",
),
],
artist_id: Some("UCZcc-WkffIMBVGUr6j9e6aQ"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(9),
by_va: false,
),
TrackItem(
id: "Kn3cruxYj0c",
name: "[name]",
duration: Some(174),
cover: [],
artists: [
ArtistId(
id: Some("UCQPPz_A65SWYi2wXX8z76AQ"),
name: "[name]",
),
],
artist_id: Some("UCQPPz_A65SWYi2wXX8z76AQ"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(11),
by_va: false,
),
TrackItem(
id: "Sy1lIOl1YN0",
name: "[name]",
duration: Some(185),
cover: [],
artists: [
ArtistId(
id: Some("UChTOXkDhGJ0JftnfMWjpCCg"),
name: "[name]",
),
],
artist_id: Some("UChTOXkDhGJ0JftnfMWjpCCg"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(12),
by_va: false,
),
TrackItem(
id: "njdlNT1RRo4",
name: "[name]",
duration: Some(237),
cover: [],
artists: [
ArtistId(
id: Some("UCMUB52aO4CqrUXmLwbfRWYA"),
name: "[name]",
),
],
artist_id: Some("UCMUB52aO4CqrUXmLwbfRWYA"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(13),
by_va: false,
),
TrackItem(
id: "Si-CXM8CHqQ",
name: "[name]",
duration: Some(246),
cover: [],
artists: [
ArtistId(
id: Some("UC4YvDAbE1EYwZpj6gQ-lpLw"),
name: "[name]",
),
],
artist_id: Some("UC4YvDAbE1EYwZpj6gQ-lpLw"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(18),
by_va: false,
),
],
variants: [],
)

View file

@ -0,0 +1,145 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_8QkDeEIawvX",
playlist_id: Some("OLAK5uy_mEX9ljZeeEWgTM1xLL1isyiGaWXoPyoOk"),
name: "[name]",
cover: "[cover]",
artists: [],
artist_id: None,
description: "[description]",
album_type: Single,
year: Some(2022),
by_va: true,
tracks: [
TrackItem(
id: "Tzai7JXo45w",
name: "[name]",
duration: Some(274),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "9WYpLYAEub0",
name: "[name]",
duration: Some(216),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "R48tE237bW4",
name: "[name]",
duration: Some(239),
cover: [],
artists: [
ArtistId(
id: Some("UCAKvDuIX3m1AUdPpDSqV_3w"),
name: "[name]",
),
],
artist_id: Some("UCAKvDuIX3m1AUdPpDSqV_3w"),
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "-UzsoR6z-vg",
name: "[name]",
duration: Some(254),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
TrackItem(
id: "kbNVyn8Ex28",
name: "[name]",
duration: Some(187),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(5),
by_va: false,
),
TrackItem(
id: "NJrQZUzWP5Y",
name: "[name]",
duration: Some(224),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(6),
by_va: false,
),
],
variants: [],
)

View file

@ -0,0 +1,138 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_h8ltx5oKvyY",
playlist_id: Some("OLAK5uy_lIDfTi_k8V1RJ54MeJJGK_BduAeYbm-0s"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
name: "[name]",
),
ArtistId(
id: Some("UCWgAqlYG7mXTUxrFiLyDSsg"),
name: "[name]",
),
],
artist_id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
description: "[description]",
album_type: Ep,
year: Some(1968),
by_va: false,
tracks: [
TrackItem(
id: "AKJ3IJZKPWc",
name: "[name]",
duration: Some(228),
cover: [],
artists: [
ArtistId(
id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
name: "[name]",
),
ArtistId(
id: Some("UCWgAqlYG7mXTUxrFiLyDSsg"),
name: "[name]",
),
],
artist_id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
album: Some(AlbumId(
id: "MPREb_h8ltx5oKvyY",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "WnpZuHNB33E",
name: "[name]",
duration: Some(266),
cover: [],
artists: [
ArtistId(
id: Some("UC1C05NyYICFB2mVGn9_ttEw"),
name: "[name]",
),
],
artist_id: Some("UC1C05NyYICFB2mVGn9_ttEw"),
album: Some(AlbumId(
id: "MPREb_h8ltx5oKvyY",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "pRqoDGXg1-I",
name: "[name]",
duration: Some(154),
cover: [],
artists: [
ArtistId(
id: Some("UC_KQPMiRQl3CFAIKTVfCHwA"),
name: "[name]",
),
],
artist_id: Some("UC_KQPMiRQl3CFAIKTVfCHwA"),
album: Some(AlbumId(
id: "MPREb_h8ltx5oKvyY",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "20vIKLJxjBY",
name: "[name]",
duration: Some(178),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: Some("UCDqpyYkgWy2h03HamIfODjw"),
album: Some(AlbumId(
id: "MPREb_h8ltx5oKvyY",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
],
variants: [
AlbumItem(
id: "MPREb_bqWA6mAZFWS",
name: "Pedha Rasi Peddamma Katha",
cover: [
Thumbnail(
url: "https://lh3.googleusercontent.com/cyKTDdyucqYv8xfv0t3Vs9CkhmvssXRKsGzlWN_DU6A9uapXvovV0Ys2fXc9-r7Jv7V4UB1OD48iYH5z=w226-h226-l90-rj",
width: 226,
height: 226,
),
Thumbnail(
url: "https://lh3.googleusercontent.com/cyKTDdyucqYv8xfv0t3Vs9CkhmvssXRKsGzlWN_DU6A9uapXvovV0Ys2fXc9-r7Jv7V4UB1OD48iYH5z=w544-h544-l90-rj",
width: 544,
height: 544,
),
],
artists: [],
artist_id: None,
album_type: Ep,
year: None,
by_va: true,
),
],
)

View file

@ -0,0 +1,665 @@
---
source: tests/youtube.rs
expression: artist
---
MusicArtist(
id: "UC7cl4MmM6ZZ2TcFyMk_b4pg",
name: "[name]",
header_image: "[header_image]",
description: "[description]",
wikipedia_url: "[wikipedia_url]",
subscriber_count: "[subscriber_count]",
tracks: "[tracks]",
albums: [
AlbumItem(
id: "MPREb_43NWLzXChnh",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_585fV7eqUP8",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_6PEkIQE7sWY",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_7nIPO6oeETY",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_88p7e6nBtgz",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_8rukEzdytkN",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_BJKvCuKo7nJ",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_EAiIEvINDHB",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_HrCgErOdgCv",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2004),
by_va: false,
),
AlbumItem(
id: "MPREb_Md2aZrjaqHX",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2021),
by_va: false,
),
AlbumItem(
id: "MPREb_OW1GOBZ64ap",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2018),
by_va: false,
),
AlbumItem(
id: "MPREb_Oq0WKqNwSVY",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2003),
by_va: false,
),
AlbumItem(
id: "MPREb_QEClJsuO9xM",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_QyGCcLWExXj",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_R3p5kDRIGKL",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2006),
by_va: false,
),
AlbumItem(
id: "MPREb_T4fJMmrfxXk",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2000),
by_va: false,
),
AlbumItem(
id: "MPREb_TiIBQqCFttT",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2016),
by_va: false,
),
AlbumItem(
id: "MPREb_U9HLD8nF7H5",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_U9dMPQUeR9q",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_V0FEmw2pj2u",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_WYx2c0e95TA",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_Wc8Ehka0R0T",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2021),
by_va: false,
),
AlbumItem(
id: "MPREb_Yj49s4xy7fM",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2021),
by_va: false,
),
AlbumItem(
id: "MPREb_baIxpKBcYbF",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2003),
by_va: false,
),
AlbumItem(
id: "MPREb_eiYjUXT1Mn3",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_f4MhYbccbPi",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2006),
by_va: false,
),
AlbumItem(
id: "MPREb_gHlGAdNjEZI",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_kW2NAMSZElX",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_m5U1xZasDSy",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2002),
by_va: false,
),
AlbumItem(
id: "MPREb_n1H3JiFyGkv",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_ohcGTZrqKPZ",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2004),
by_va: false,
),
AlbumItem(
id: "MPREb_pWpeXxATZYb",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_ptO8gh250LP",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2003),
by_va: false,
),
AlbumItem(
id: "MPREb_qbJv3f0ijrk",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2007),
by_va: false,
),
AlbumItem(
id: "MPREb_rHhaDLqalbT",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_rdrfznTDhSX",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_saXgTKNPaSu",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_t6zStv8YrVG",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_vM0cMpn8pHh",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_wgm3k1qxpbF",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_wmSecJVDwPB",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_xCehp2mGhCk",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_y5fUQ2toJwT",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2017),
by_va: false,
),
],
playlists: "[playlists]",
similar_artists: "[artists]",
tracks_playlist_id: Some("OLAK5uy_n6aX-F_lCQxgyTIv4FJhp78bXV93b9NUM"),
videos_playlist_id: Some("OLAK5uy_nrePwvOEzmO7SydszEFfCDu8gAJxKfFtw"),
radio_id: Some("RDEMdgjzN3Qrk_GD7BooQbkJ4A"),
)

View file

@ -0,0 +1,320 @@
---
source: tests/youtube.rs
expression: artist
---
MusicArtist(
id: "UC7cl4MmM6ZZ2TcFyMk_b4pg",
name: "[name]",
header_image: "[header_image]",
description: "[description]",
wikipedia_url: "[wikipedia_url]",
subscriber_count: "[subscriber_count]",
tracks: "[tracks]",
albums: [
AlbumItem(
id: "MPREb_43NWLzXChnh",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_585fV7eqUP8",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_6PEkIQE7sWY",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_88p7e6nBtgz",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_Md2aZrjaqHX",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2021),
by_va: false,
),
AlbumItem(
id: "MPREb_OW1GOBZ64ap",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2018),
by_va: false,
),
AlbumItem(
id: "MPREb_QyGCcLWExXj",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_R3p5kDRIGKL",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2006),
by_va: false,
),
AlbumItem(
id: "MPREb_TiIBQqCFttT",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2016),
by_va: false,
),
AlbumItem(
id: "MPREb_U9HLD8nF7H5",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_V0FEmw2pj2u",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_WYx2c0e95TA",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_Yj49s4xy7fM",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2021),
by_va: false,
),
AlbumItem(
id: "MPREb_f4MhYbccbPi",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2006),
by_va: false,
),
AlbumItem(
id: "MPREb_kW2NAMSZElX",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_n1H3JiFyGkv",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_pWpeXxATZYb",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_rHhaDLqalbT",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_saXgTKNPaSu",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_wmSecJVDwPB",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2008),
by_va: false,
),
],
playlists: "[playlists]",
similar_artists: "[artists]",
tracks_playlist_id: Some("OLAK5uy_n6aX-F_lCQxgyTIv4FJhp78bXV93b9NUM"),
videos_playlist_id: Some("OLAK5uy_nrePwvOEzmO7SydszEFfCDu8gAJxKfFtw"),
radio_id: Some("RDEMdgjzN3Qrk_GD7BooQbkJ4A"),
)

View file

@ -0,0 +1,19 @@
---
source: tests/youtube.rs
expression: artist
---
MusicArtist(
id: "UCh8gHdtzO2tXd593_bjErWg",
name: "[name]",
header_image: "[header_image]",
description: "[description]",
wikipedia_url: "[wikipedia_url]",
subscriber_count: "[subscriber_count]",
tracks: "[tracks]",
albums: [],
playlists: "[playlists]",
similar_artists: "[artists]",
tracks_playlist_id: None,
videos_playlist_id: None,
radio_id: None,
)

View file

@ -0,0 +1,155 @@
---
source: tests/youtube.rs
expression: artist
---
MusicArtist(
id: "UCOR4_bSVIXPsGa4BbCSt60Q",
name: "[name]",
header_image: "[header_image]",
description: "[description]",
wikipedia_url: "[wikipedia_url]",
subscriber_count: "[subscriber_count]",
tracks: "[tracks]",
albums: [
AlbumItem(
id: "MPREb_8PsIyll0LFV",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_HPXN9BBzFpV",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_POeT6m0bw9q",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Ep,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_R6EV2L1q0oc",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_UYdRV1nnK2J",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Album,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_bi34SGT1xlc",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Album,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_hcK0fXETEf9",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_kLvmX2AzYBL",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_oHieBHkXn3A",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2014),
by_va: false,
),
],
playlists: "[playlists]",
similar_artists: "[artists]",
tracks_playlist_id: Some("OLAK5uy_miHesZCUQY5S9EwqfoNP2tZR9nZ0NBAeU"),
videos_playlist_id: Some("OLAK5uy_mqbgE6T9uvusUWrAxJGiImf4_P4dM7IvQ"),
radio_id: Some("RDEM7AbogW0cCnElSU0WYm1GqA"),
)

View file

@ -0,0 +1,35 @@
---
source: tests/youtube.rs
expression: artist
---
MusicArtist(
id: "UCfwCE5VhPMGxNPFxtVv7lRw",
name: "[name]",
header_image: "[header_image]",
description: "[description]",
wikipedia_url: "[wikipedia_url]",
subscriber_count: "[subscriber_count]",
tracks: "[tracks]",
albums: [
AlbumItem(
id: "MPREb_vq8dZfFBEdx",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCfwCE5VhPMGxNPFxtVv7lRw"),
name: "[name]",
),
],
artist_id: Some("UCfwCE5VhPMGxNPFxtVv7lRw"),
album_type: Single,
year: Some(2019),
by_va: false,
),
],
playlists: "[playlists]",
similar_artists: "[artists]",
tracks_playlist_id: None,
videos_playlist_id: Some("OLAK5uy_lmH3iVq6lqjsnLkBWzpvRTh0DidLzbU-I"),
radio_id: Some("RDEMYsk_DTFHAng1G7n5toi_oA"),
)

View file

@ -4,26 +4,30 @@ expression: track
--- ---
TrackDetails( TrackDetails(
track: TrackItem( track: TrackItem(
id: "7nigXQS1Xb0", id: "qIZ-vvg-wiU",
name: "INVU", name: "Scheiße baut sich nicht von alleine",
duration: Some(205), duration: Some(232),
cover: "[cover]", cover: "[cover]",
artists: [ artists: [
ArtistId( ArtistId(
id: Some("UCwzCuKxyMY_sT7hr1E8G1XA"), id: Some("UCAbxL0lZcmlaQrzoUbrvS3A"),
name: "TAEYEON", name: "SDP",
),
ArtistId(
id: Some("UCVRREKn7V1Cb8qvf43dwZ6w"),
name: "257ers",
), ),
], ],
artist_id: Some("UCwzCuKxyMY_sT7hr1E8G1XA"), artist_id: Some("UCAbxL0lZcmlaQrzoUbrvS3A"),
album: Some(AlbumId( album: Some(AlbumId(
id: "MPREb_4xbv14CiQJm", id: "MPREb_cjEzeaBgZAq",
name: "INVU - The 3rd Album", name: "Ein Gutes Schlechtes Vorbild",
)), )),
view_count: "[view_count]", view_count: "[view_count]",
is_video: false, is_video: false,
track_nr: None, track_nr: None,
by_va: false, by_va: false,
), ),
lyrics_id: Some("MPLYt_4xbv14CiQJm-1"), lyrics_id: Some("MPLYt_cjEzeaBgZAq-2"),
related_id: Some("MPTRt_4xbv14CiQJm-1"), related_id: Some("MPTRt_cjEzeaBgZAq-2"),
) )

View file

@ -1,8 +1,5 @@
--- ---
source: tests/youtube.rs source: tests/youtube.rs
expression: lyrics expression: lyrics.body
--- ---
Lyrics( "Eyes, in the sky, gazing far into the night\nI raise my hand to the fire, but it\'s no use\n\'Cause you can\'t stop it from shining through\nIt\'s true\nBaby let the light shine through\nIf you believe it\'s true\nBaby won\'t you let the light shine through\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nWon\'t you let the light shine through\n\nEyes, in the sky, gazing far into the night\nI raise my hand to the fire, but it\'s no use\n\'Cause you can\'t stop it from shining through\nIt\'s true\nBaby let the light shine through\nIf you believe it\'s true\nBaby won\'t you let the light shine through\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you"
body: "Eyes, in the sky, gazing far into the night\nI raise my hand to the fire, but it\'s no use\n\'Cause you can\'t stop it from shining through\nIt\'s true\nBaby let the light shine through\nIf you believe it\'s true\nBaby won\'t you let the light shine through\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nWon\'t you let the light shine through\n\nEyes, in the sky, gazing far into the night\nI raise my hand to the fire, but it\'s no use\n\'Cause you can\'t stop it from shining through\nIt\'s true\nBaby let the light shine through\nIf you believe it\'s true\nBaby won\'t you let the light shine through\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you",
footer: "Source: Musixmatch",
)

View file

@ -1,8 +1,10 @@
use std::collections::HashSet; use std::collections::HashSet;
use std::fmt::Display; use std::fmt::Display;
use std::str::FromStr;
use rstest::{fixture, rstest}; use rstest::{fixture, rstest};
use rustypipe::model::paginator::ContinuationEndpoint; use rustypipe::model::paginator::ContinuationEndpoint;
use rustypipe::param::Language;
use rustypipe::validate; use rustypipe::validate;
use time::macros::date; use time::macros::date;
use time::OffsetDateTime; use time::OffsetDateTime;
@ -346,11 +348,14 @@ fn get_playlist(
#[case] description: Option<String>, #[case] description: Option<String>,
#[case] channel: Option<(&str, &str)>, #[case] channel: Option<(&str, &str)>,
rp: RustyPipe, rp: RustyPipe,
unlocalized: bool,
) { ) {
let playlist = tokio_test::block_on(rp.query().playlist(id)).unwrap(); let playlist = tokio_test::block_on(rp.query().playlist(id)).unwrap();
assert_eq!(playlist.id, id); assert_eq!(playlist.id, id);
if unlocalized {
assert_eq!(playlist.name, name); assert_eq!(playlist.name, name);
}
assert!(!playlist.videos.is_empty()); assert!(!playlist.videos.is_empty());
assert_eq!(!playlist.videos.is_exhausted(), is_long); assert_eq!(!playlist.videos.is_exhausted(), is_long);
assert_gte( assert_gte(
@ -921,35 +926,46 @@ fn assert_channel_eevblog<T>(channel: &Channel<T>) {
} }
#[rstest] #[rstest]
#[case::artist("UC_vmjW5e1xEHhYjY2a0kK1A", "Oonagh - Topic", false, false)] #[case::artist("UC_vmjW5e1xEHhYjY2a0kK1A", "Oonagh - Topic", false, false, false)]
#[case::shorts("UCh8gHdtzO2tXd593_bjErWg", "Doobydobap", true, true)] #[case::shorts("UCh8gHdtzO2tXd593_bjErWg", "Doobydobap", true, true, true)]
#[case::livestream( #[case::livestream(
"UChs0pSaEoNLV4mevBFGaoKA", "UChs0pSaEoNLV4mevBFGaoKA",
"The Good Life Radio x Sensual Musique", "The Good Life Radio x Sensual Musique",
true, true,
true,
true true
)] )]
#[case::music("UC-9-kyTW8ZkZNDHQJ6FgpwQ", "Music", false, false)] #[case::music("UC-9-kyTW8ZkZNDHQJ6FgpwQ", "Music", false, false, false)]
fn channel_more( fn channel_more(
#[case] id: &str, #[case] id: &str,
#[case] name: &str, #[case] name: &str,
#[case] has_videos: bool, #[case] has_videos: bool,
#[case] has_playlists: bool, #[case] has_playlists: bool,
#[case] name_unlocalized: bool,
rp: RustyPipe, rp: RustyPipe,
unlocalized: bool,
) { ) {
fn assert_channel<T>(channel: &Channel<T>, id: &str, name: &str) { fn assert_channel<T>(channel: &Channel<T>, id: &str, name: &str, unlocalized: bool) {
assert_eq!(channel.id, id); assert_eq!(channel.id, id);
if unlocalized {
assert_eq!(channel.name, name); assert_eq!(channel.name, name);
} }
}
let channel_videos = tokio_test::block_on(rp.query().channel_videos(&id)).unwrap(); let channel_videos = tokio_test::block_on(rp.query().channel_videos(&id)).unwrap();
assert_channel(&channel_videos, id, name); assert_channel(&channel_videos, id, name, unlocalized || name_unlocalized);
if has_videos { if has_videos {
assert!(!channel_videos.content.items.is_empty(), "got no videos"); assert!(!channel_videos.content.items.is_empty(), "got no videos");
} }
let channel_playlists = tokio_test::block_on(rp.query().channel_playlists(&id)).unwrap(); let channel_playlists = tokio_test::block_on(rp.query().channel_playlists(&id)).unwrap();
assert_channel(&channel_playlists, id, name); assert_channel(
&channel_playlists,
id,
name,
unlocalized || name_unlocalized,
);
if has_playlists { if has_playlists {
assert!( assert!(
!channel_playlists.content.items.is_empty(), !channel_playlists.content.items.is_empty(),
@ -958,7 +974,7 @@ fn channel_more(
} }
let channel_info = tokio_test::block_on(rp.query().channel_info(&id)).unwrap(); let channel_info = tokio_test::block_on(rp.query().channel_info(&id)).unwrap();
assert_channel(&channel_info, id, name); assert_channel(&channel_info, id, name, unlocalized || name_unlocalized);
} }
#[rstest] #[rstest]
@ -968,7 +984,7 @@ fn channel_more(
#[case::sports("UCEgdi0XIXXZ-qJOFPf4JSKw")] #[case::sports("UCEgdi0XIXXZ-qJOFPf4JSKw")]
#[case::learning("UCtFRv9O2AHqOZjjynzrv-xg")] #[case::learning("UCtFRv9O2AHqOZjjynzrv-xg")]
#[case::live("UC4R8DWoMoI7CAwX8_LjQHig")] #[case::live("UC4R8DWoMoI7CAwX8_LjQHig")]
#[case::news("UCYfdidRxbB8Qhf0Nx7ioOYw")] // #[case::news("UCYfdidRxbB8Qhf0Nx7ioOYw")]
fn channel_not_found(#[case] id: &str, rp: RustyPipe) { fn channel_not_found(#[case] id: &str, rp: RustyPipe) {
let err = tokio_test::block_on(rp.query().channel_videos(&id)).unwrap_err(); let err = tokio_test::block_on(rp.query().channel_videos(&id)).unwrap_err();
@ -1030,15 +1046,18 @@ mod channel_rss {
//#SEARCH //#SEARCH
#[rstest] #[rstest]
fn search(rp: RustyPipe) { fn search(rp: RustyPipe, unlocalized: bool) {
let result = tokio_test::block_on(rp.query().search("doobydoobap")).unwrap(); let result = tokio_test::block_on(rp.query().search("doobydoobap")).unwrap();
assert!( assert_gte(
result.items.count.unwrap() > 7000, result.items.count.unwrap(),
"expected > 7000 total results, got {}", if unlocalized { 7000 } else { 150 },
result.items.count.unwrap() "results",
); );
if unlocalized {
assert_eq!(result.corrected_query.unwrap(), "doobydobap"); assert_eq!(result.corrected_query.unwrap(), "doobydobap");
}
assert_next(result.items, rp.query(), 10, 2); assert_next(result.items, rp.query(), 10, 2);
} }
@ -1094,8 +1113,12 @@ fn search_suggestion(rp: RustyPipe) {
#[rstest] #[rstest]
fn search_suggestion_empty(rp: RustyPipe) { fn search_suggestion_empty(rp: RustyPipe) {
let result = let result = tokio_test::block_on(
tokio_test::block_on(rp.query().search_suggestion("fjew327%4ifjelwfvnewg49")).unwrap(); rp.query()
.lang(Language::Th)
.search_suggestion("fjew327p4ifjelwfvnewg49"),
)
.unwrap();
assert!(result.is_empty()); assert!(result.is_empty());
} }
@ -1214,11 +1237,11 @@ fn music_playlist(
#[case] channel: Option<(&str, &str)>, #[case] channel: Option<(&str, &str)>,
#[case] from_ytm: bool, #[case] from_ytm: bool,
rp: RustyPipe, rp: RustyPipe,
unlocalized: bool,
) { ) {
let playlist = tokio_test::block_on(rp.query().music_playlist(id)).unwrap(); let playlist = tokio_test::block_on(rp.query().music_playlist(id)).unwrap();
assert_eq!(playlist.id, id); assert_eq!(playlist.id, id);
assert_eq!(playlist.name, name);
assert!(!playlist.tracks.is_empty()); assert!(!playlist.tracks.is_empty());
assert_eq!(!playlist.tracks.is_exhausted(), is_long); assert_eq!(!playlist.tracks.is_exhausted(), is_long);
assert_gte( assert_gte(
@ -1226,7 +1249,10 @@ fn music_playlist(
if is_long { 100 } else { 10 }, if is_long { 100 } else { 10 },
"track count", "track count",
); );
if unlocalized {
assert_eq!(playlist.name, name);
assert_eq!(playlist.description, description); assert_eq!(playlist.description, description);
}
if let Some(expect) = channel { if let Some(expect) = channel {
let c = playlist.channel.unwrap(); let c = playlist.channel.unwrap();
@ -1296,14 +1322,29 @@ fn music_playlist_not_found(rp: RustyPipe) {
#[case::no_year("no_year", "MPREb_F3Af9UZZVxX")] #[case::no_year("no_year", "MPREb_F3Af9UZZVxX")]
#[case::version_no_artist("version_no_artist", "MPREb_h8ltx5oKvyY")] #[case::version_no_artist("version_no_artist", "MPREb_h8ltx5oKvyY")]
#[case::no_artist("no_artist", "MPREb_bqWA6mAZFWS")] #[case::no_artist("no_artist", "MPREb_bqWA6mAZFWS")]
fn music_album(#[case] name: &str, #[case] id: &str, rp: RustyPipe) { fn music_album(#[case] name: &str, #[case] id: &str, rp: RustyPipe, unlocalized: bool) {
let album = tokio_test::block_on(rp.query().music_album(id)).unwrap(); let album = tokio_test::block_on(rp.query().music_album(id)).unwrap();
assert!(!album.cover.is_empty(), "got no cover"); assert!(!album.cover.is_empty(), "got no cover");
if unlocalized {
insta::assert_ron_snapshot!(format!("music_album_{name}"), album, insta::assert_ron_snapshot!(format!("music_album_{name}"), album,
{".cover" => "[cover]"} {".cover" => "[cover]"}
); );
} else {
insta::assert_ron_snapshot!(format!("music_album_{name}_intl"), album,
{
".name" => "[name]",
".cover" => "[cover]",
".description" => "[description]",
".artists[].name" => "[name]",
".tracks[].name" => "[name]",
".tracks[].album.name" => "[name]",
".tracks[].artists[].name" => "[name]",
".variants[].artists[].name" => "[name]",
}
);
}
} }
#[rstest] #[rstest]
@ -1320,8 +1361,9 @@ fn music_album_not_found(rp: RustyPipe) {
} }
#[rstest] #[rstest]
#[case::basic_all("basic_all", "UC7cl4MmM6ZZ2TcFyMk_b4pg", true, 15, 2)] // TODO: fix this/swap artist
#[case::basic("basic", "UC7cl4MmM6ZZ2TcFyMk_b4pg", false, 15, 2)] // #[case::basic_all("basic_all", "UC7cl4MmM6ZZ2TcFyMk_b4pg", true, 15, 2)]
// #[case::basic("basic", "UC7cl4MmM6ZZ2TcFyMk_b4pg", false, 15, 2)]
#[case::no_more_albums("no_more_albums", "UCOR4_bSVIXPsGa4BbCSt60Q", true, 15, 0)] #[case::no_more_albums("no_more_albums", "UCOR4_bSVIXPsGa4BbCSt60Q", true, 15, 0)]
#[case::only_singles("only_singles", "UCfwCE5VhPMGxNPFxtVv7lRw", false, 13, 0)] #[case::only_singles("only_singles", "UCfwCE5VhPMGxNPFxtVv7lRw", false, 13, 0)]
#[case::no_artist("no_artist", "UCh8gHdtzO2tXd593_bjErWg", false, 0, 2)] #[case::no_artist("no_artist", "UCh8gHdtzO2tXd593_bjErWg", false, 0, 2)]
@ -1335,6 +1377,7 @@ fn music_artist(
#[case] min_tracks: usize, #[case] min_tracks: usize,
#[case] min_playlists: usize, #[case] min_playlists: usize,
rp: RustyPipe, rp: RustyPipe,
unlocalized: bool,
) { ) {
let mut artist = tokio_test::block_on(rp.query().music_artist(id, all_albums)).unwrap(); let mut artist = tokio_test::block_on(rp.query().music_artist(id, all_albums)).unwrap();
@ -1370,6 +1413,7 @@ fn music_artist(
// Sort albums to ensure consistent order // Sort albums to ensure consistent order
artist.albums.sort_by_key(|a| a.id.to_owned()); artist.albums.sort_by_key(|a| a.id.to_owned());
if unlocalized {
insta::assert_ron_snapshot!(format!("music_artist_{name}"), artist, { insta::assert_ron_snapshot!(format!("music_artist_{name}"), artist, {
".header_image" => "[header_image]", ".header_image" => "[header_image]",
".subscriber_count" => "[subscriber_count]", ".subscriber_count" => "[subscriber_count]",
@ -1378,6 +1422,21 @@ fn music_artist(
".playlists" => "[playlists]", ".playlists" => "[playlists]",
".similar_artists" => "[artists]", ".similar_artists" => "[artists]",
}); });
} else {
insta::assert_ron_snapshot!(format!("music_artist_{name}_intl"), artist, {
".name" => "[name]",
".header_image" => "[header_image]",
".description" => "[description]",
".wikipedia_url" => "[wikipedia_url]",
".subscriber_count" => "[subscriber_count]",
".albums[].name" => "[name]",
".albums[].cover" => "[cover]",
".albums[].artists[].name" => "[name]",
".tracks" => "[tracks]",
".playlists" => "[playlists]",
".similar_artists" => "[artists]",
});
}
} }
#[rstest] #[rstest]
@ -1397,7 +1456,7 @@ fn music_artist_not_found(rp: RustyPipe) {
#[rstest] #[rstest]
#[case::default(false)] #[case::default(false)]
#[case::typo(true)] #[case::typo(true)]
fn music_search(#[case] typo: bool, rp: RustyPipe) { fn music_search(#[case] typo: bool, rp: RustyPipe, unlocalized: bool) {
let res = tokio_test::block_on(rp.query().music_search(match typo { let res = tokio_test::block_on(rp.query().music_search(match typo {
false => "lieblingsmensch namika", false => "lieblingsmensch namika",
true => "lieblingsmesch namika", true => "lieblingsmesch namika",
@ -1411,7 +1470,9 @@ fn music_search(#[case] typo: bool, rp: RustyPipe) {
assert_eq!(res.order[0], MusicItemType::Track); assert_eq!(res.order[0], MusicItemType::Track);
if typo { if typo {
if unlocalized {
assert_eq!(res.corrected_query.unwrap(), "lieblingsmensch namika"); assert_eq!(res.corrected_query.unwrap(), "lieblingsmensch namika");
}
} else { } else {
assert_eq!(res.corrected_query, None); assert_eq!(res.corrected_query, None);
} }
@ -1434,7 +1495,9 @@ fn music_search(#[case] typo: bool, rp: RustyPipe) {
track_artist.id.as_ref().unwrap(), track_artist.id.as_ref().unwrap(),
"UCIh4j8fXWf2U0ro0qnGU8Mg" "UCIh4j8fXWf2U0ro0qnGU8Mg"
); );
if unlocalized {
assert_eq!(track_artist.name, "Namika"); assert_eq!(track_artist.name, "Namika");
}
let track_album = track.album.as_ref().unwrap(); let track_album = track.album.as_ref().unwrap();
assert_eq!(track_album.id, "MPREb_RXHxrUFfrvQ"); assert_eq!(track_album.id, "MPREb_RXHxrUFfrvQ");
@ -1446,7 +1509,7 @@ fn music_search(#[case] typo: bool, rp: RustyPipe) {
} }
#[rstest] #[rstest]
fn music_search2(rp: RustyPipe) { fn music_search2(rp: RustyPipe, unlocalized: bool) {
let res = tokio_test::block_on(rp.query().music_search("taylor swift")).unwrap(); let res = tokio_test::block_on(rp.query().music_search("taylor swift")).unwrap();
assert!(!res.tracks.is_empty(), "no tracks"); assert!(!res.tracks.is_empty(), "no tracks");
@ -1463,12 +1526,14 @@ fn music_search2(rp: RustyPipe) {
panic!("could not find artist, got {:#?}", &res.artists); panic!("could not find artist, got {:#?}", &res.artists);
}); });
if unlocalized {
assert_eq!(artist.name, "Taylor Swift"); assert_eq!(artist.name, "Taylor Swift");
}
assert!(!artist.avatar.is_empty(), "got no avatar"); assert!(!artist.avatar.is_empty(), "got no avatar");
} }
#[rstest] #[rstest]
fn music_search_tracks(rp: RustyPipe) { fn music_search_tracks(rp: RustyPipe, unlocalized: bool) {
let res = tokio_test::block_on(rp.query().music_search_tracks("black mamba")).unwrap(); let res = tokio_test::block_on(rp.query().music_search_tracks("black mamba")).unwrap();
let track = &res let track = &res
@ -1489,7 +1554,9 @@ fn music_search_tracks(rp: RustyPipe) {
track_artist.id.as_ref().unwrap(), track_artist.id.as_ref().unwrap(),
"UCEdZAdnnKqbaHOlv8nM6OtA" "UCEdZAdnnKqbaHOlv8nM6OtA"
); );
if unlocalized {
assert_eq!(track_artist.name, "aespa"); assert_eq!(track_artist.name, "aespa");
}
assert_eq!(track.duration.unwrap(), 175); assert_eq!(track.duration.unwrap(), 175);
@ -1501,7 +1568,7 @@ fn music_search_tracks(rp: RustyPipe) {
} }
#[rstest] #[rstest]
fn music_search_videos(rp: RustyPipe) { fn music_search_videos(rp: RustyPipe, unlocalized: bool) {
let res = tokio_test::block_on(rp.query().music_search_videos("black mamba")).unwrap(); let res = tokio_test::block_on(rp.query().music_search_videos("black mamba")).unwrap();
let track = &res let track = &res
@ -1522,7 +1589,9 @@ fn music_search_videos(rp: RustyPipe) {
track_artist.id.as_ref().unwrap(), track_artist.id.as_ref().unwrap(),
"UCEdZAdnnKqbaHOlv8nM6OtA" "UCEdZAdnnKqbaHOlv8nM6OtA"
); );
if unlocalized {
assert_eq!(track_artist.name, "aespa"); assert_eq!(track_artist.name, "aespa");
}
assert_eq!(track.duration.unwrap(), 230); assert_eq!(track.duration.unwrap(), 230);
assert_eq!(track.album, None); assert_eq!(track.album, None);
@ -1531,8 +1600,6 @@ fn music_search_videos(rp: RustyPipe) {
assert_next(res.items, rp.query(), 15, 2); assert_next(res.items, rp.query(), 15, 2);
} }
// This podcast was removed from YouTube Music and I could not find another one
/*
#[tokio::test] #[tokio::test]
async fn music_search_episode() { async fn music_search_episode() {
let rp = RustyPipe::builder().strict().build(); let rp = RustyPipe::builder().strict().build();
@ -1554,7 +1621,7 @@ async fn music_search_episode() {
"Blond - Da muss man dabei gewesen sein: Das Hörspiel - Fall #1" "Blond - Da muss man dabei gewesen sein: Das Hörspiel - Fall #1"
); );
assert!(!track.cover.is_empty(), "got no cover"); assert!(!track.cover.is_empty(), "got no cover");
}*/ }
#[rstest] #[rstest]
#[case::single( #[case::single(
@ -1597,6 +1664,7 @@ fn music_search_albums(
#[case] album_type: AlbumType, #[case] album_type: AlbumType,
#[case] more: bool, #[case] more: bool,
rp: RustyPipe, rp: RustyPipe,
unlocalized: bool,
) { ) {
let res = tokio_test::block_on(rp.query().music_search_albums(query)).unwrap(); let res = tokio_test::block_on(rp.query().music_search_albums(query)).unwrap();
@ -1606,7 +1674,9 @@ fn music_search_albums(
assert_eq!(album.artists.len(), 1); assert_eq!(album.artists.len(), 1);
let album_artist = &album.artists[0]; let album_artist = &album.artists[0];
assert_eq!(album_artist.id.as_ref().unwrap(), artist_id); assert_eq!(album_artist.id.as_ref().unwrap(), artist_id);
if unlocalized {
assert_eq!(album_artist.name, artist); assert_eq!(album_artist.name, artist);
}
assert_eq!(album.artist_id.as_ref().unwrap(), artist_id); assert_eq!(album.artist_id.as_ref().unwrap(), artist_id);
assert!(!album.cover.is_empty(), "got no cover"); assert!(!album.cover.is_empty(), "got no cover");
@ -1615,13 +1685,13 @@ fn music_search_albums(
assert_eq!(res.corrected_query, None); assert_eq!(res.corrected_query, None);
if more { if more && unlocalized {
assert_next(res.items, rp.query(), 15, 1); assert_next(res.items, rp.query(), 15, 1);
} }
} }
#[rstest] #[rstest]
fn music_search_artists(rp: RustyPipe) { fn music_search_artists(rp: RustyPipe, unlocalized: bool) {
let res = tokio_test::block_on(rp.query().music_search_artists("namika")).unwrap(); let res = tokio_test::block_on(rp.query().music_search_artists("namika")).unwrap();
let artist = res let artist = res
@ -1630,7 +1700,9 @@ fn music_search_artists(rp: RustyPipe) {
.iter() .iter()
.find(|a| a.id == "UCIh4j8fXWf2U0ro0qnGU8Mg") .find(|a| a.id == "UCIh4j8fXWf2U0ro0qnGU8Mg")
.unwrap(); .unwrap();
if unlocalized {
assert_eq!(artist.name, "Namika"); assert_eq!(artist.name, "Namika");
}
assert!(!artist.avatar.is_empty(), "got no avatar"); assert!(!artist.avatar.is_empty(), "got no avatar");
assert!( assert!(
artist.subscriber_count.unwrap() > 735_000, artist.subscriber_count.unwrap() > 735_000,
@ -1651,7 +1723,7 @@ fn music_search_artists_cont(rp: RustyPipe) {
#[rstest] #[rstest]
#[case::ytm(false)] #[case::ytm(false)]
#[case::default(true)] #[case::default(true)]
fn music_search_playlists(#[case] with_community: bool, rp: RustyPipe) { fn music_search_playlists(#[case] with_community: bool, rp: RustyPipe, unlocalized: bool) {
let res = if with_community { let res = if with_community {
tokio_test::block_on(rp.query().music_search_playlists("pop biggest hits")).unwrap() tokio_test::block_on(rp.query().music_search_playlists("pop biggest hits")).unwrap()
} else { } else {
@ -1670,7 +1742,9 @@ fn music_search_playlists(#[case] with_community: bool, rp: RustyPipe) {
.find(|p| p.id == "RDCLAK5uy_nmS3YoxSwVVQk9lEQJ0UX4ZCjXsW_psU8") .find(|p| p.id == "RDCLAK5uy_nmS3YoxSwVVQk9lEQJ0UX4ZCjXsW_psU8")
.unwrap(); .unwrap();
if unlocalized {
assert_eq!(playlist.name, "Pop's Biggest Hits"); assert_eq!(playlist.name, "Pop's Biggest Hits");
}
assert!(!playlist.thumbnail.is_empty(), "got no thumbnail"); assert!(!playlist.thumbnail.is_empty(), "got no thumbnail");
assert_gte(playlist.track_count.unwrap(), 100, "tracks"); assert_gte(playlist.track_count.unwrap(), 100, "tracks");
assert_eq!(playlist.channel, None); assert_eq!(playlist.channel, None);
@ -1761,7 +1835,7 @@ fn music_search_suggestion(
#[rstest] #[rstest]
#[case::mv("mv", "ZeerrnuLi5E")] #[case::mv("mv", "ZeerrnuLi5E")]
#[case::track("track", "7nigXQS1Xb0")] #[case::track("track", "qIZ-vvg-wiU")]
fn music_details(#[case] name: &str, #[case] id: &str, rp: RustyPipe) { fn music_details(#[case] name: &str, #[case] id: &str, rp: RustyPipe) {
let track = tokio_test::block_on(rp.query().music_details(id)).unwrap(); let track = tokio_test::block_on(rp.query().music_details(id)).unwrap();
@ -1784,7 +1858,12 @@ fn music_details(#[case] name: &str, #[case] id: &str, rp: RustyPipe) {
fn music_lyrics(rp: RustyPipe) { fn music_lyrics(rp: RustyPipe) {
let track = tokio_test::block_on(rp.query().music_details("60ImQ8DS3Vs")).unwrap(); let track = tokio_test::block_on(rp.query().music_details("60ImQ8DS3Vs")).unwrap();
let lyrics = tokio_test::block_on(rp.query().music_lyrics(&track.lyrics_id.unwrap())).unwrap(); let lyrics = tokio_test::block_on(rp.query().music_lyrics(&track.lyrics_id.unwrap())).unwrap();
insta::assert_ron_snapshot!(lyrics); insta::assert_ron_snapshot!(lyrics.body);
assert!(
lyrics.footer.contains("Musixmatch"),
"footer text: {}",
lyrics.footer
)
} }
#[rstest] #[rstest]
@ -2004,8 +2083,8 @@ fn music_charts(
assert_eq!(charts.top_playlist_id.unwrap(), plid_top); assert_eq!(charts.top_playlist_id.unwrap(), plid_top);
assert_eq!(charts.trending_playlist_id.unwrap(), plid_trend); assert_eq!(charts.trending_playlist_id.unwrap(), plid_trend);
assert_gte(charts.top_tracks.len(), 40, "top tracks"); assert_gte(charts.top_tracks.len(), 30, "top tracks");
assert_gte(charts.artists.len(), 40, "top artists"); assert_gte(charts.artists.len(), 30, "top artists");
assert_gte(charts.trending_tracks.len(), 15, "trending tracks"); assert_gte(charts.trending_tracks.len(), 15, "trending tracks");
// Chart playlists only available in USA // Chart playlists only available in USA
@ -2041,14 +2120,16 @@ fn music_new_videos(rp: RustyPipe) {
} }
#[rstest] #[rstest]
fn music_genres(rp: RustyPipe) { fn music_genres(rp: RustyPipe, unlocalized: bool) {
let genres = tokio_test::block_on(rp.query().music_genres()).unwrap(); let genres = tokio_test::block_on(rp.query().music_genres()).unwrap();
let chill = genres let chill = genres
.iter() .iter()
.find(|g| g.id == "ggMPOg1uX1JOQWZFeDByc2Jm") .find(|g| g.id == "ggMPOg1uX1JOQWZFeDByc2Jm")
.unwrap(); .unwrap();
if unlocalized {
assert_eq!(chill.name, "Chill"); assert_eq!(chill.name, "Chill");
}
assert!(chill.is_mood); assert!(chill.is_mood);
let pop = genres let pop = genres
@ -2067,12 +2148,19 @@ fn music_genres(rp: RustyPipe) {
#[rstest] #[rstest]
#[case::chill("ggMPOg1uX1JOQWZFeDByc2Jm", "Chill")] #[case::chill("ggMPOg1uX1JOQWZFeDByc2Jm", "Chill")]
#[case::pop("ggMPOg1uX1lMbVZmbzl6NlJ3", "Pop")] #[case::pop("ggMPOg1uX1lMbVZmbzl6NlJ3", "Pop")]
fn music_genre(#[case] id: &str, #[case] name: &str, rp: RustyPipe) { fn music_genre(#[case] id: &str, #[case] name: &str, rp: RustyPipe, unlocalized: bool) {
let genre = tokio_test::block_on(rp.query().music_genre(id)).unwrap(); let genre = tokio_test::block_on(rp.query().music_genre(id)).unwrap();
fn check_music_genre(genre: MusicGenre, id: &str, name: &str) -> Vec<(String, String)> { fn check_music_genre(
genre: MusicGenre,
id: &str,
name: &str,
unlocalized: bool,
) -> Vec<(String, String)> {
assert_eq!(genre.id, id); assert_eq!(genre.id, id);
if unlocalized {
assert_eq!(genre.name, name); assert_eq!(genre.name, name);
}
assert_gte(genre.sections.len(), 2, "genre sections"); assert_gte(genre.sections.len(), 2, "genre sections");
let mut subgenres = Vec::new(); let mut subgenres = Vec::new();
@ -2105,7 +2193,7 @@ fn music_genre(#[case] id: &str, #[case] name: &str, rp: RustyPipe) {
subgenres subgenres
} }
let subgenres = check_music_genre(genre, id, name); let subgenres = check_music_genre(genre, id, name, unlocalized);
if name == "Chill" { if name == "Chill" {
assert_gte(subgenres.len(), 2, "subgenres"); assert_gte(subgenres.len(), 2, "subgenres");
@ -2113,7 +2201,7 @@ fn music_genre(#[case] id: &str, #[case] name: &str, rp: RustyPipe) {
for (id, name) in subgenres { for (id, name) in subgenres {
let genre = tokio_test::block_on(rp.query().music_genre(&id)).unwrap(); let genre = tokio_test::block_on(rp.query().music_genre(&id)).unwrap();
check_music_genre(genre, &id, &name); check_music_genre(genre, &id, &name, unlocalized);
} }
} }
@ -2167,10 +2255,25 @@ fn invalid_ctoken(#[case] ep: ContinuationEndpoint, rp: RustyPipe) {
//#TESTUTIL //#TESTUTIL
/// Get the language setting from the environment variable
#[fixture]
fn lang() -> Language {
std::env::var("YT_LANG")
.ok()
.map(|l| Language::from_str(&l).unwrap())
.unwrap_or(Language::En)
}
/// Get a new RustyPipe instance /// Get a new RustyPipe instance
#[fixture] #[fixture]
fn rp() -> RustyPipe { fn rp(lang: Language) -> RustyPipe {
RustyPipe::builder().strict().build() RustyPipe::builder().strict().lang(lang).build()
}
/// Get a flag signaling if the language is set to English
#[fixture]
fn unlocalized(lang: Language) -> bool {
lang == Language::En
} }
/// Get a new RustyPipe instance with pre-set visitor data /// Get a new RustyPipe instance with pre-set visitor data