Compare commits

..

No commits in common. "b3331b36a7a1331f8a7834424cebce006e9bffff" and "6a99540ef518e61fcb5497546d5307161c83b8b8" have entirely different histories.

73 changed files with 32277 additions and 55659 deletions

View file

@ -40,7 +40,7 @@ reqwest = { version = "0.11.11", default-features = false, features = [
tokio = { version = "1.20.0", features = ["macros", "time"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.82"
serde_with = { version = "3.0.0", features = ["json"] }
serde_with = { version = "2.0.0", features = ["json"] }
rand = "0.8.5"
time = { version = "0.3.15", features = [
"macros",

View file

@ -15,26 +15,8 @@ testyt10:
cargo test --all-features --test youtube; \
done
testintl:
#!/usr/bin/env bash
set -e
LANGUAGES=(
"af" "am" "ar" "as" "az" "be" "bg" "bn" "bs" "ca" "cs" "da" "de" "el" "en" "en-GB" "en-IN"
"es" "es-419" "es-US" "et" "eu" "fa" "fi" "fil" "fr" "fr-CA" "gl" "gu"
"hi" "hr" "hu" "hy" "id" "is" "it" "iw" "ja" "ka" "kk" "km" "kn" "ko" "ky"
"lo" "lt" "lv" "mk" "ml" "mn" "mr" "ms" "my" "ne" "nl" "no" "or" "pa" "pl"
"pt" "pt-PT" "ro" "ru" "si" "sk" "sl" "sq" "sr" "sr-Latn" "sv" "sw" "ta"
"te" "th" "tr" "uk" "ur" "uz" "vi" "zh-CN" "zh-HK" "zh-TW" "zu"
)
for YT_LANG in "${LANGUAGES[@]}"; do \
echo "---TESTS FOR $YT_LANG ---"; \
YT_LANG="$YT_LANG" cargo test --test youtube -- --skip get_video_details --skip startpage; \
echo "--- $YT_LANG COMPLETED ---"; \
sleep 10; \
done
testfiles:
cargo run -p rustypipe-codegen download-testfiles
cargo run -p rustypipe-codegen -- -d . download-testfiles
report2yaml:
mkdir -p rustypipe_reports/conv

View file

@ -10,7 +10,7 @@ tokio = { version = "1.20.0", features = ["macros", "rt-multi-thread"] }
futures = "0.3.21"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0.82"
serde_with = "3.0.0"
serde_with = "2.0.0"
anyhow = "1.0"
log = "0.4.17"
env_logger = "0.10.0"
@ -19,7 +19,5 @@ phf_codegen = "0.11.1"
once_cell = "1.12.0"
regex = "1.7.1"
indicatif = "0.17.0"
num_enum = "0.6.1"
num_enum = "0.5.7"
path_macro = "1.0.0"
intl_pluralrules = "7.0.2"
unic-langid = "0.9.1"

View file

@ -1,21 +1,18 @@
use std::{collections::BTreeMap, fs::File, io::BufReader};
use std::{collections::BTreeMap, fs::File, io::BufReader, path::Path};
use futures::stream::{self, StreamExt};
use path_macro::path;
use rustypipe::{
client::{ClientType, RustyPipe, RustyPipeQuery},
client::{ClientType, RustyPipe, RustyPipeQuery, YTContext},
model::AlbumType,
param::{locale::LANGUAGES, Language},
};
use serde::Deserialize;
use serde::{Deserialize, Serialize};
use crate::{
model::{QBrowse, TextRuns},
util::{self, DICT_DIR},
};
use crate::util::{self, TextRuns};
pub async fn collect_album_types(concurrency: usize) {
let json_path = path!(*DICT_DIR / "album_type_samples.json");
pub async fn collect_album_types(project_root: &Path, concurrency: usize) {
let json_path = path!(project_root / "testfiles" / "dict" / "album_type_samples.json");
let album_types = [
(AlbumType::Album, "MPREb_nlBWQROfvjo"),
@ -51,13 +48,13 @@ pub async fn collect_album_types(concurrency: usize) {
serde_json::to_writer_pretty(file, &collected_album_types).unwrap();
}
pub fn write_samples_to_dict() {
let json_path = path!(*DICT_DIR / "album_type_samples.json");
pub fn write_samples_to_dict(project_root: &Path) {
let json_path = path!(project_root / "testfiles" / "dict" / "album_type_samples.json");
let json_file = File::open(json_path).unwrap();
let collected: BTreeMap<Language, BTreeMap<AlbumType, String>> =
serde_json::from_reader(BufReader::new(json_file)).unwrap();
let mut dict = util::read_dict();
let mut dict = util::read_dict(project_root);
let langs = dict.keys().map(|k| k.to_owned()).collect::<Vec<_>>();
for lang in langs {
@ -75,7 +72,7 @@ pub fn write_samples_to_dict() {
});
}
util::write_dict(dict);
util::write_dict(project_root, &dict);
}
#[derive(Debug, Deserialize)]
@ -94,6 +91,13 @@ struct HeaderRenderer {
subtitle: TextRuns,
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct QBrowse<'a> {
context: YTContext<'a>,
browse_id: &'a str,
}
async fn get_album_type(query: &RustyPipeQuery, id: &str) -> String {
let context = query
.get_context(ClientType::DesktopMusic, true, None)
@ -101,7 +105,6 @@ async fn get_album_type(query: &RustyPipeQuery, id: &str) -> String {
let body = QBrowse {
context,
browse_id: id,
params: None,
};
let response_txt = query
.raw(ClientType::DesktopMusic, "browse", &body)

View file

@ -1,32 +1,25 @@
use std::sync::Arc;
use std::{
collections::{BTreeMap, HashMap, HashSet},
fs::File,
io::BufReader,
};
use std::collections::{HashMap, HashSet};
use std::{collections::BTreeMap, fs::File, io::BufReader, path::Path};
use anyhow::{Context, Result};
use futures::{stream, StreamExt};
use once_cell::sync::Lazy;
use path_macro::path;
use regex::Regex;
use rustypipe::client::{ClientType, RustyPipe, RustyPipeQuery};
use reqwest::{header, Client};
use rustypipe::param::{locale::LANGUAGES, Language};
use serde::Deserialize;
use serde_with::serde_as;
use serde_with::VecSkipError;
use crate::model::{Channel, ContinuationResponse};
use crate::util::DICT_DIR;
use crate::{
model::{QBrowse, QCont, TextRuns},
util,
};
use crate::util::{self, Text};
type CollectedNumbers = BTreeMap<Language, BTreeMap<String, u64>>;
type CollectedNumbers = BTreeMap<Language, BTreeMap<u8, (String, u64)>>;
/// Collect video view count texts in every supported language
/// and write them to `testfiles/dict/large_number_samples.json`.
///
/// YouTube's API outputs subscriber and view counts only in an
/// YouTube's API outputs the subscriber count of a channel only in an
/// approximated format (e.g *880K subscribers*), which varies
/// by language.
///
@ -37,116 +30,98 @@ type CollectedNumbers = BTreeMap<Language, BTreeMap<String, u64>>;
/// We extract these instead of subscriber counts because the YouTube API
/// outputs view counts both in approximated and exact format, so we can use
/// the exact counts to figure out the tokens.
pub async fn collect_large_numbers(concurrency: usize) {
let json_path = path!(*DICT_DIR / "large_number_samples_all.json");
let rp = RustyPipe::new();
pub async fn collect_large_numbers(project_root: &Path, concurrency: usize) {
let json_path = path!(project_root / "testfiles" / "dict" / "large_number_samples.json");
let json_path_all =
path!(project_root / "testfiles" / "dict" / "large_number_samples_all.json");
let channels = [
"UCq-Fj5jknLsUf-MWSy4_brA", // 10e8 (241M)
"UCcdwLMPsaU2ezNSJU1nFoBQ", // 10e7 (67M)
"UC6mIxFTvXkWQVEHPsEdflzQ", // 10e6 (1.8M)
"UCD0y51PJfvkZNe3y3FR5riw", // 10e5 (126K)
"UCNcN0dW43zE0Om3278fjY8A", // 10e4 (33K)
"UCq-Fj5jknLsUf-MWSy4_brA", // 10e8 (225M)
"UCcdwLMPsaU2ezNSJU1nFoBQ", // 10e7 (60M)
"UC6mIxFTvXkWQVEHPsEdflzQ", // 10e6 (1.7M)
"UCD0y51PJfvkZNe3y3FR5riw", // 10e5 (125K)
"UCNcN0dW43zE0Om3278fjY8A", // 10e4 (27K)
"UC0QEucPrn0-Ddi3JBTcs5Kw", // 10e3 (5K)
"UCXvtcj9xUQhaqPaitFf2DqA", // (275)
"UCq-XMc01T641v-4P3hQYJWg", // (695)
"UCaZL4eLD7a30Fa8QI-sRi_g", // (31K)
"UCO-dylEoJozPTxGYd8fTQxA", // (5)
"UCQXYK94vDqOEkPbTCyL0OjA", // (1)
"UCXvtcj9xUQhaqPaitFf2DqA", // (170)
"UCq-XMc01T641v-4P3hQYJWg", // (636)
];
// YTM outputs the subscriber count in a shortened format in some languages
let music_channels = [
"UC_1N84buVNgR_-3gDZ9Jtxg", // 10e8 (158M)
"UCRw0x9_EfawqmgDI2IgQLLg", // 10e7 (29M)
"UChWu2clmvJ5wN_0Ic5dnqmw", // 10e6 (1.9M)
"UCOYiPDuimprrGHgFy4_Fw8Q", // 10e5 (149K)
"UC8nZf9WyVIxNMly_hy2PTyQ", // 10e4 (17K)
"UCaltNL5XvZ7dKvBsBPi-gqg", // 10e3 (8K)
];
let collected_numbers_all: BTreeMap<Language, BTreeMap<String, u64>> = stream::iter(LANGUAGES)
.map(|lang| async move {
let mut entry = BTreeMap::new();
// Build a lookup table for the channel's subscriber counts
let subscriber_counts: Arc<BTreeMap<String, u64>> = stream::iter(channels)
.map(|c| {
let rp = rp.query();
async move {
let channel = get_channel(&rp, c).await.unwrap();
for (n, ch_id) in channels.iter().enumerate() {
let channel = get_channel(ch_id, lang)
.await
.context(format!("{lang}-{n}"))
.unwrap();
let n = util::parse_largenum_en(&channel.subscriber_count).unwrap();
(c.to_owned(), n)
channel.view_counts.iter().for_each(|(num, txt)| {
entry.insert(txt.to_owned(), *num);
});
println!("collected {lang}-{n}");
}
})
.buffer_unordered(concurrency)
.collect::<BTreeMap<_, _>>()
.await
.into();
let music_subscriber_counts: Arc<BTreeMap<String, u64>> = stream::iter(music_channels)
.map(|c| {
let rp = rp.query();
async move {
let subscriber_count = music_channel_subscribers(&rp, c).await.unwrap();
let n = util::parse_largenum_en(&subscriber_count).unwrap();
(c.to_owned(), n)
}
})
.buffer_unordered(concurrency)
.collect::<BTreeMap<_, _>>()
.await
.into();
let collected_numbers: CollectedNumbers = stream::iter(LANGUAGES)
.map(|lang| {
let rp = rp.query().lang(lang);
let subscriber_counts = subscriber_counts.clone();
let music_subscriber_counts = music_subscriber_counts.clone();
async move {
let mut entry = BTreeMap::new();
for (n, ch_id) in channels.iter().enumerate() {
let channel = get_channel(&rp, ch_id)
.await
.context(format!("{lang}-{n}"))
.unwrap();
channel.view_counts.iter().for_each(|(num, txt)| {
entry.insert(txt.to_owned(), *num);
});
entry.insert(channel.subscriber_count, subscriber_counts[*ch_id]);
println!("collected {lang}-{n}");
}
for (n, ch_id) in music_channels.iter().enumerate() {
let subscriber_count = music_channel_subscribers(&rp, ch_id)
.await
.context(format!("{lang}-music-{n}"))
.unwrap();
entry.insert(subscriber_count, music_subscriber_counts[*ch_id]);
println!("collected {lang}-music-{n}");
}
(lang, entry)
}
(lang, entry)
})
.buffer_unordered(concurrency)
.collect()
.await;
let collected_numbers: CollectedNumbers = collected_numbers_all
.iter()
.map(|(lang, entry)| {
let mut e2 = BTreeMap::new();
entry.iter().for_each(|(txt, num)| {
e2.insert(get_mag(*num), (txt.to_owned(), *num));
});
(*lang, e2)
})
.collect();
let file = File::create(json_path).unwrap();
serde_json::to_writer_pretty(file, &collected_numbers).unwrap();
let file = File::create(json_path_all).unwrap();
serde_json::to_writer_pretty(file, &collected_numbers_all).unwrap();
}
/// Attempt to parse the numbers collected by `collect-large-numbers`
/// and write the results to `dictionary.json`.
pub fn write_samples_to_dict() {
let json_path = path!(*DICT_DIR / "large_number_samples.json");
pub fn write_samples_to_dict(project_root: &Path) {
/*
Manual corrections:
as
"কোঃটা": 9,
"নিঃটা": 6,
"নিযুতটা": 6,
"লাখটা": 5,
"হাজাৰটা": 3
ar
"ألف": 3,
"آلاف": 3,
"مليار": 9,
"مليون": 6
bn
"লাটি": 5,
"শত": 2,
"হাটি": 3,
"কোটি": 7
es/es-US
"mil": 3,
"M": 6
*/
let json_path = path!(project_root / "testfiles" / "dict" / "large_number_samples.json");
let json_file = File::open(json_path).unwrap();
let collected_nums: CollectedNumbers =
serde_json::from_reader(BufReader::new(json_file)).unwrap();
let mut dict = util::read_dict();
let mut dict = util::read_dict(project_root);
let langs = dict.keys().map(|k| k.to_owned()).collect::<Vec<_>>();
static POINT_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"\d(\.|,)\d{1,3}(?:\D|$)").unwrap());
@ -157,9 +132,11 @@ pub fn write_samples_to_dict() {
let mut e_langs = dict_entry.equivalent.clone();
e_langs.push(lang);
let comma_decimal = collected_nums[&lang]
let comma_decimal = collected_nums
.get(&lang)
.unwrap()
.iter()
.find_map(|(txt, val)| {
.find_map(|(mag, (txt, _))| {
let point = POINT_REGEX
.captures(txt)
.map(|c| c.get(1).unwrap().as_str());
@ -169,9 +146,8 @@ pub fn write_samples_to_dict() {
// If the number parsed from all digits has the same order of
// magnitude as the actual number, it must be a separator.
// Otherwise it is a decimal point
return Some((get_mag(num_all) == get_mag(*val)) ^ (point == ","));
return Some((get_mag(num_all) == *mag) ^ (point == ","));
}
None
})
.unwrap();
@ -189,7 +165,6 @@ pub fn write_samples_to_dict() {
// If the token is found again with a different derived order of magnitude,
// its value in the map is set to None.
let mut found_tokens: HashMap<String, Option<u8>> = HashMap::new();
let mut found_nd_tokens: HashMap<String, Option<u8>> = HashMap::new();
let mut insert_token = |token: String, mag: u8| {
let found_token = found_tokens.entry(token).or_insert(match mag {
@ -204,72 +179,45 @@ pub fn write_samples_to_dict() {
}
};
let mut insert_nd_token = |token: String, n: Option<u8>| {
let found_token = found_nd_tokens.entry(token).or_insert(n);
if let Some(f) = found_token {
if Some(*f) != n {
*found_token = None;
}
}
};
for lang in e_langs {
let entry = collected_nums.get(&lang).unwrap();
entry.iter().for_each(|(txt, val)| {
entry.iter().for_each(|(mag, (txt, _))| {
let filtered = util::filter_largenumstr(txt);
let mag = get_mag(*val);
let tokens: Vec<String> = match dict_entry.by_char || lang == Language::Ko {
let tokens: Vec<String> = match dict_entry.by_char {
true => filtered.chars().map(|c| c.to_string()).collect(),
false => filtered.split_whitespace().map(|c| c.to_string()).collect(),
};
match util::parse_numeric::<u64>(txt.split(decimal_point).next().unwrap()) {
Ok(num_before_point) => {
let mag_before_point = get_mag(num_before_point);
let mut mag_remaining = mag - mag_before_point;
let num_before_point =
util::parse_numeric::<u64>(txt.split(decimal_point).next().unwrap()).unwrap();
let mag_before_point = get_mag(num_before_point);
let mut mag_remaining = mag - mag_before_point;
tokens.iter().for_each(|t| {
// These tokens are correct in all languages
// and are used to parse combined prefixes like `1.1K crore` (en-IN)
let known_tmag: u8 = if t.len() == 1 {
match t.as_str() {
"K" | "k" => 3,
// 'm' means 10^3 in Catalan, 'B' means 10^3 in Turkish
// 'M' means 10^9 in Indonesian
_ => 0,
}
} else {
0
};
// K/M/B
if known_tmag > 0 {
mag_remaining = mag_remaining
.checked_sub(known_tmag)
.expect("known magnitude incorrect");
} else {
insert_token(t.to_owned(), mag_remaining);
}
insert_nd_token(t.to_owned(), None);
});
}
Err(e) => {
if matches!(e.kind(), std::num::IntErrorKind::Empty) {
// Text does not contain any digits, search for nd_tokens
tokens.iter().for_each(|t| {
insert_nd_token(
t.to_owned(),
Some((*val).try_into().expect("nd_token value too large")),
);
});
} else {
panic!("{e}, txt: {txt}")
tokens.iter().for_each(|t| {
// These tokens are correct in all languages
// and are used to parse combined prefixes like `1.1K crore` (en-IN)
let known_tmag: u8 = if t.len() == 1 {
match t.as_str() {
"K" | "k" => 3,
// 'm' means 10^3 in Catalan, 'B' means 10^3 in Turkish
// 'M' means 10^9 in Indonesian
_ => 0,
}
} else {
0
};
// K/M/B
if known_tmag > 0 {
mag_remaining = mag_remaining
.checked_sub(known_tmag)
.expect("known magnitude incorrect");
} else {
insert_token(t.to_owned(), mag_remaining);
}
}
});
});
}
@ -278,10 +226,6 @@ pub fn write_samples_to_dict() {
.into_iter()
.filter_map(|(k, v)| v.map(|v| (k, v)))
.collect();
dict_entry.number_nd_tokens = found_nd_tokens
.into_iter()
.filter_map(|(k, v)| v.map(|v| (k, v)))
.collect();
dict_entry.comma_decimal = comma_decimal;
// Check for duplicates
@ -289,13 +233,9 @@ pub fn write_samples_to_dict() {
if !dict_entry.number_tokens.values().all(|x| uniq.insert(x)) {
println!("Warning: collected duplicate tokens for {lang}");
}
let mut uniq = HashSet::new();
if !dict_entry.number_nd_tokens.values().all(|x| uniq.insert(x)) {
println!("Warning: collected duplicate nd_tokens for {lang}");
}
}
util::write_dict(dict);
util::write_dict(project_root, &dict);
}
fn get_mag(n: u64) -> u8 {
@ -303,154 +243,145 @@ fn get_mag(n: u64) -> u8 {
}
/*
YouTube Music channel data
YouTube channel videos response
*/
#[derive(Debug, Deserialize)]
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct MusicChannel {
header: MusicHeader,
struct Channel {
contents: Contents,
}
#[derive(Debug, Deserialize)]
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct MusicHeader {
#[serde(alias = "musicVisualHeaderRenderer")]
music_immersive_header_renderer: MusicHeaderRenderer,
struct Contents {
two_column_browse_results_renderer: TabsRenderer,
}
#[derive(Debug, Deserialize)]
#[serde_as]
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct MusicHeaderRenderer {
subscription_button: SubscriptionButton,
struct TabsRenderer {
#[serde_as(as = "VecSkipError<_>")]
tabs: Vec<TabRendererWrap>,
}
#[derive(Debug, Deserialize)]
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct SubscriptionButton {
subscribe_button_renderer: SubscriptionButtonRenderer,
struct TabRendererWrap {
tab_renderer: TabRenderer,
}
#[derive(Debug, Deserialize)]
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct SubscriptionButtonRenderer {
subscriber_count_text: TextRuns,
struct TabRenderer {
content: SectionListRendererWrap,
}
#[derive(Debug)]
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct SectionListRendererWrap {
section_list_renderer: SectionListRenderer,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct SectionListRenderer {
contents: Vec<ItemSectionRendererWrap>,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ItemSectionRendererWrap {
item_section_renderer: ItemSectionRenderer,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ItemSectionRenderer {
contents: Vec<GridRendererWrap>,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GridRendererWrap {
grid_renderer: GridRenderer,
}
#[serde_as]
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GridRenderer {
#[serde_as(as = "VecSkipError<_>")]
items: Vec<VideoListItem>,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct VideoListItem {
grid_video_renderer: GridVideoRenderer,
}
#[derive(Clone, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct GridVideoRenderer {
/// `24,194 views`
view_count_text: Text,
/// `19K views`
short_view_count_text: Text,
}
#[derive(Clone, Debug)]
struct ChannelData {
view_counts: BTreeMap<u64, String>,
subscriber_count: String,
view_counts: Vec<(u64, String)>,
}
async fn get_channel(query: &RustyPipeQuery, channel_id: &str) -> Result<ChannelData> {
let resp = query
.raw(
ClientType::Desktop,
"browse",
&QBrowse {
context: query.get_context(ClientType::Desktop, true, None).await,
browse_id: channel_id,
params: Some("EgZ2aWRlb3MYASAAMAE"),
},
)
.await?;
async fn get_channel(channel_id: &str, lang: Language) -> Result<ChannelData> {
let client = Client::new();
let channel = serde_json::from_str::<Channel>(&resp)?;
let body = format!(
"{}{}{}{}{}",
r##"{"context":{"client":{"clientName":"WEB","clientVersion":"2.20220914.06.00","platform":"DESKTOP","originalUrl":"https://www.youtube.com/","hl":""##,
lang,
r##"","gl":"US"},"request":{"internalExperimentFlags":[],"useSsl":true},"user":{"lockedSafetyMode":false}},"params":"EgZ2aWRlb3MYASAAMAE%3D","browseId":""##,
channel_id,
"\"}"
);
let tab = &channel.contents.two_column_browse_results_renderer.tabs[0]
.tab_renderer
.content
.rich_grid_renderer;
let resp = client
.post("https://www.youtube.com/youtubei/v1/browse?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8&prettyPrint=false")
.header(header::CONTENT_TYPE, "application/json")
.body(body)
.send().await?
.error_for_status()?;
let popular_token = tab.header.as_ref().and_then(|h| {
h.feed_filter_chip_bar_renderer.contents.get(1).map(|c| {
c.chip_cloud_chip_renderer
.navigation_endpoint
.continuation_command
.token
.to_owned()
})
});
let mut view_counts: BTreeMap<u64, String> = tab
.contents
.iter()
.map(|itm| {
let v = &itm.rich_item_renderer.content.video_renderer;
(
util::parse_numeric(&v.view_count_text.text).unwrap_or_default(),
v.short_view_count_text.text.to_owned(),
)
})
.collect();
if let Some(popular_token) = popular_token {
let resp = query
.raw(
ClientType::Desktop,
"browse",
&QCont {
context: query.get_context(ClientType::Desktop, true, None).await,
continuation: &popular_token,
},
)
.await?;
let continuation = serde_json::from_str::<ContinuationResponse>(&resp)?;
continuation
.on_response_received_actions
.iter()
.for_each(|a| {
a.reload_continuation_items_command
.continuation_items
.iter()
.for_each(|itm| {
let v = &itm.rich_item_renderer.content.video_renderer;
view_counts.insert(
util::parse_numeric(&v.view_count_text.text).unwrap(),
v.short_view_count_text.text.to_owned(),
);
})
});
}
let channel = resp.json::<Channel>().await?;
Ok(ChannelData {
view_counts,
subscriber_count: channel
.header
.c4_tabbed_header_renderer
.subscriber_count_text
.text,
view_counts: channel
.contents
.two_column_browse_results_renderer
.tabs
.get(0)
.map(|tab| {
tab.tab_renderer.content.section_list_renderer.contents[0]
.item_section_renderer
.contents[0]
.grid_renderer
.items
.iter()
.map(|itm| {
(
util::parse_numeric(&itm.grid_video_renderer.view_count_text.text)
.unwrap(),
itm.grid_video_renderer
.short_view_count_text
.text
.to_owned(),
)
})
.collect()
})
.unwrap_or_default(),
})
}
async fn music_channel_subscribers(query: &RustyPipeQuery, channel_id: &str) -> Result<String> {
let resp = query
.raw(
ClientType::DesktopMusic,
"browse",
&QBrowse {
context: query
.get_context(ClientType::DesktopMusic, true, None)
.await,
browse_id: channel_id,
params: None,
},
)
.await?;
let channel = serde_json::from_str::<MusicChannel>(&resp)?;
channel
.header
.music_immersive_header_renderer
.subscription_button
.subscribe_button_renderer
.subscriber_count_text
.runs
.into_iter()
.next()
.map(|t| t.text)
.ok_or_else(|| anyhow::anyhow!("no text"))
}

View file

@ -3,6 +3,7 @@ use std::{
fs::File,
hash::Hash,
io::BufReader,
path::Path,
};
use futures::{stream, StreamExt};
@ -10,10 +11,11 @@ use path_macro::path;
use rustypipe::{
client::RustyPipe,
param::{locale::LANGUAGES, Language},
timeago::{self, TimeAgo},
};
use serde::{Deserialize, Serialize};
use crate::util::{self, DICT_DIR};
use crate::util;
type CollectedDates = BTreeMap<Language, BTreeMap<DateCase, String>>;
@ -60,13 +62,16 @@ enum DateCase {
///
/// Because the relative dates change with time, the first three playlists
/// have to be checked and possibly changed before running the program.
pub async fn collect_dates(concurrency: usize) {
let json_path = path!(*DICT_DIR / "playlist_samples.json");
pub async fn collect_dates(project_root: &Path, concurrency: usize) {
let json_path = path!(project_root / "testfiles" / "dict" / "playlist_samples.json");
// These are the sample playlists
let cases = [
(DateCase::Today, "PLMC9KNkIncKtPzgY-5rmhvj7fax8fdxoj"),
(DateCase::Yesterday, "PLcirGkCPmbmFeQ1sm4wFciF03D_EroIfr"),
(
DateCase::Today,
"RDCLAK5uy_kj3rhiar1LINmyDcuFnXihEO0K1NQa2jI",
),
(DateCase::Yesterday, "PL7zsB-C3aNu2yRY2869T0zj1FhtRIu5am"),
(DateCase::Ago, "PLmB6td997u3kUOrfFwkULZ910ho44oQSy"),
(DateCase::Jan, "PL1J-6JOckZtFjcni6Xj1pLYglJp6JCpKD"),
(DateCase::Feb, "PL1J-6JOckZtETrbzwZE7mRIIK6BzWNLAs"),
@ -85,7 +90,6 @@ pub async fn collect_dates(concurrency: usize) {
let rp = RustyPipe::new();
let collected_dates = stream::iter(LANGUAGES)
.map(|lang| {
println!("{lang}");
let rp = rp.clone();
async move {
let mut map: BTreeMap<DateCase, String> = BTreeMap::new();
@ -111,13 +115,13 @@ pub async fn collect_dates(concurrency: usize) {
///
/// The ND (no digit) tokens (today, tomorrow) of some languages cannot be
/// parsed automatically and require manual work.
pub fn write_samples_to_dict() {
let json_path = path!(*DICT_DIR / "playlist_samples.json");
pub fn write_samples_to_dict(project_root: &Path) {
let json_path = path!(project_root / "testfiles" / "dict" / "playlist_samples.json");
let json_file = File::open(json_path).unwrap();
let collected_dates: CollectedDates =
serde_json::from_reader(BufReader::new(json_file)).unwrap();
let mut dict = util::read_dict();
let mut dict = util::read_dict(project_root);
let langs = dict.keys().map(|k| k.to_owned()).collect::<Vec<_>>();
let months = [
@ -164,7 +168,19 @@ pub fn write_samples_to_dict() {
let collect_nd_tokens = !matches!(
lang,
// ND tokens of these languages must be edited manually
Language::Ja | Language::ZhCn | Language::ZhHk | Language::ZhTw
Language::Ja
| Language::ZhCn
| Language::ZhHk
| Language::ZhTw
| Language::Ko
| Language::Gu
| Language::Pa
| Language::Ur
| Language::Uz
| Language::Te
| Language::PtPt
// Singhalese YT translation has an error (today == tomorrow)
| Language::Si
);
dict_entry.months = BTreeMap::new();
@ -196,6 +212,20 @@ pub fn write_samples_to_dict() {
parse(datestr_table.get(&DateCase::Jan).unwrap(), 0);
}
// n days ago
{
let datestr = datestr_table.get(&DateCase::Ago).unwrap();
let tago = timeago::parse_timeago(lang, datestr);
assert_eq!(
tago,
Some(TimeAgo {
n: 3,
unit: timeago::TimeUnit::Day
}),
"lang: {lang}, txt: {datestr}"
);
}
// Absolute dates (Jan 3, 2020)
months.iter().enumerate().for_each(|(n, m)| {
let datestr = datestr_table.get(m).unwrap();
@ -261,11 +291,13 @@ pub fn write_samples_to_dict() {
};
});
if datestr_tables.len() == 1 && dict_entry.timeago_nd_tokens.len() > 2 {
println!(
"INFO: {} has {} nd_tokens. Check manually.",
if datestr_tables.len() == 1 {
assert_eq!(
dict_entry.timeago_nd_tokens.len(),
2,
"lang: {}, nd_tokens: {:?}",
lang,
dict_entry.timeago_nd_tokens.len()
&dict_entry.timeago_nd_tokens
);
}
}
@ -273,5 +305,5 @@ pub fn write_samples_to_dict() {
dict_entry.date_order = num_order;
}
util::write_dict(dict);
util::write_dict(project_root, &dict);
}

View file

@ -1,382 +0,0 @@
use std::{
collections::{BTreeMap, HashMap},
fs::File,
io::BufReader,
};
use anyhow::Result;
use futures::{stream, StreamExt};
use path_macro::path;
use rustypipe::{
client::{ClientType, RustyPipe, RustyPipeQuery},
param::{locale::LANGUAGES, Language},
};
use crate::{
model::{Channel, QBrowse, TimeAgo, TimeUnit},
util::{self, DICT_DIR},
};
type CollectedDurations = BTreeMap<Language, BTreeMap<String, u32>>;
/// Collect the video duration texts in every supported language
/// and write them to `testfiles/dict/video_duration_samples.json`.
///
/// The length of YouTube short videos is only available in textual form.
/// To parse it correctly, we need to collect samples of this text in every
/// language. We collect these samples from regular channel videos because these
/// include a textual duration in addition to the easy to parse "mm:ss"
/// duration format.
pub async fn collect_video_durations(concurrency: usize) {
    let json_path = path!(*DICT_DIR / "video_duration_samples.json");
    let rp = RustyPipe::new();
    // Sample channels to pull video durations from
    // (presumably chosen for large, varied catalogs — TODO confirm)
    let channels = [
        "UCq-Fj5jknLsUf-MWSy4_brA",
        "UCMcS5ITpSohfr8Ppzlo4vKw",
        "UCXuqSBlHAE6Xw-yeJA0Tunw",
    ];
    // Fetch all languages concurrently, at most `concurrency` at a time
    let durations: CollectedDurations = stream::iter(LANGUAGES)
        .map(|lang| {
            let rp = rp.query().lang(lang);
            async move {
                let mut map = BTreeMap::new();
                for (n, ch_id) in channels.iter().enumerate() {
                    get_channel_vlengths(&rp, ch_id, &mut map).await.unwrap();
                    println!("collected {lang}-{n}");
                }
                // Since we are only parsing shorts durations, we do not need durations >= 1h
                let map = map.into_iter().filter(|(_, v)| v < &3600).collect();
                (lang, map)
            }
        })
        .buffer_unordered(concurrency)
        .collect()
        .await;
    // Persist the raw samples for later offline parsing
    let file = File::create(json_path).unwrap();
    serde_json::to_writer_pretty(file, &durations).unwrap();
}
/// Parse the duration samples collected by `collect_video_durations`
/// and insert the derived per-language unit tokens into the dictionary
/// (`timeago_tokens`), then write the dictionary back to disk.
pub fn parse_video_durations() {
    let json_path = path!(*DICT_DIR / "video_duration_samples.json");
    let json_file = File::open(json_path).unwrap();
    let durations: CollectedDurations = serde_json::from_reader(BufReader::new(json_file)).unwrap();
    let mut dict = util::read_dict();
    let langs = dict.keys().map(|k| k.to_owned()).collect::<Vec<_>>();
    for lang in langs {
        let dict_entry = dict.entry(lang).or_default();
        // Process the language together with its declared equivalents
        let mut e_langs = dict_entry.equivalent.clone();
        e_langs.push(lang);
        for lang in e_langs {
            // token -> Some(TimeAgo) while consistent; None once the same
            // token was seen with conflicting values (then it is discarded)
            let mut words = HashMap::new();
            /// Check that `val` matches the expected number (directly or as
            /// half of it, tolerating dual forms) and, if so, record every
            /// word/char token of `w` with the derived `TimeAgo` value.
            /// Returns whether the value matched.
            fn check_add_word(
                words: &mut HashMap<String, Option<TimeAgo>>,
                by_char: bool,
                val: u32,
                expect: u32,
                w: String,
                unit: TimeUnit,
            ) -> bool {
                let ok = val == expect || val * 2 == expect;
                if ok {
                    let mut ins = |w: &str, val: &mut TimeAgo| {
                        // Filter stop words
                        if matches!(
                            w,
                            "na" | "y"
                                | "و"
                                | "ja"
                                | "et"
                                | "e"
                                | "i"
                                | "և"
                                | "og"
                                | "en"
                                | "и"
                                | "a"
                                | "és"
                                | "ir"
                                | "un"
                                | "și"
                                | "in"
                                | "และ"
                                | "\u{0456}"
                                | ""
                                | "eta"
                                | "અને"
                                | "और"
                                | "കൂടാതെ"
                                | "සහ"
                        ) {
                            return;
                        }
                        // Invalidate the token if it reappears with a different value
                        let entry = words.entry(w.to_owned()).or_insert(Some(*val));
                        if let Some(e) = entry {
                            if e != val {
                                *entry = None;
                            }
                        }
                    };
                    // n = expect/val captures dual forms (val*2 == expect -> n = 2)
                    let mut val = TimeAgo {
                        n: (expect / val).try_into().unwrap(),
                        unit,
                    };
                    if by_char {
                        w.chars().for_each(|c| {
                            if !c.is_whitespace() {
                                ins(&c.to_string(), &mut val);
                            }
                        });
                    } else {
                        w.split_whitespace().for_each(|w| ins(w, &mut val));
                    }
                }
                ok
            }
            /// Parse one duration text `txt` known to equal `d` seconds,
            /// attributing its segments to minute/second unit tokens.
            fn parse(
                words: &mut HashMap<String, Option<TimeAgo>>,
                lang: Language,
                by_char: bool,
                txt: &str,
                d: u32,
            ) {
                let (m, s) = split_duration(d);
                // si/sw durations start with the unit word instead of digits
                let mut parts =
                    split_duration_txt(txt, matches!(lang, Language::Si | Language::Sw))
                        .into_iter();
                let p1 = parts.next().unwrap();
                // Missing digits imply an implicit count of 1 ("a minute")
                let p1_n = p1.digits.parse::<u32>().unwrap_or(1);
                let p2: Option<DurationTxtSegment> = parts.next();
                match p2 {
                    Some(p2) => {
                        // Two segments: minutes followed by seconds
                        let p2_n = p2.digits.parse::<u32>().unwrap_or(1);
                        assert!(
                            check_add_word(words, by_char, p1_n, m, p1.word, TimeUnit::Minute),
                            "{txt}: min parse error"
                        );
                        assert!(
                            check_add_word(words, by_char, p2_n, s, p2.word, TimeUnit::Second),
                            "{txt}: sec parse error"
                        );
                    }
                    None => {
                        if s == 0 {
                            // Single segment, exact minutes
                            assert!(
                                check_add_word(words, by_char, p1_n, m, p1.word, TimeUnit::Minute),
                                "{txt}: min parse error"
                            );
                        } else if m == 0 {
                            // Single segment, seconds only
                            assert!(
                                check_add_word(words, by_char, p1_n, s, p1.word, TimeUnit::Second),
                                "{txt}: sec parse error"
                            );
                        } else {
                            // Both parts present but not split by the tokenizer:
                            // split manually at a comma or Arabic "and" and recurse
                            let p = txt
                                .find([',', 'و'])
                                .unwrap_or_else(|| panic!("`{txt}`: only 1 part"));
                            parse(words, lang, by_char, &txt[0..p], m);
                            parse(words, lang, by_char, &txt[p..], s);
                        }
                    }
                }
                assert!(parts.next().is_none(), "`{txt}`: more than 2 parts");
            }
            for (txt, d) in &durations[&lang] {
                parse(&mut words, lang, dict_entry.by_char, txt, *d);
            }
            // dbg!(&words);
            // Keep only tokens that had a single consistent value
            words.into_iter().for_each(|(k, v)| {
                if let Some(v) = v {
                    dict_entry.timeago_tokens.insert(k, v.to_string());
                }
            });
        }
    }
    util::write_dict(dict);
}
/// Split a duration given in seconds into whole minutes and leftover seconds.
fn split_duration(d: u32) -> (u32, u32) {
    let minutes = d / 60;
    let seconds = d - minutes * 60;
    (minutes, seconds)
}
/// One chunk of a textual duration: a run of digits plus the
/// (lowercased) unit word attached to it.
#[derive(Debug, Default)]
struct DurationTxtSegment {
    digits: String,
    word: String,
}

/// Tokenize a textual duration into digit/word segments.
///
/// `start_c` marks languages whose duration text begins with the unit
/// word rather than the number; it flips which side of a segment
/// triggers the flush of the previous one. Commas are treated as
/// separators and never become part of a word.
fn split_duration_txt(txt: &str, start_c: bool) -> Vec<DurationTxtSegment> {
    // state: 0 = nothing seen yet, 1 = last char was a digit, 2 = last char was a word char
    let mut state: u8 = 0;
    let mut out: Vec<DurationTxtSegment> = Vec::new();
    let mut cur = DurationTxtSegment::default();

    for c in txt.chars() {
        if c.is_ascii_digit() {
            // A digit following word chars closes the current segment,
            // except while still reading the leading word of segment 1
            // in word-first (`start_c`) mode.
            let flush =
                state == 2 && (!cur.digits.is_empty() || (!start_c && out.is_empty()));
            if flush {
                out.push(std::mem::take(&mut cur));
            }
            cur.digits.push(c);
            state = 1;
        } else {
            // Mirror rule for a word char following digits.
            let flush =
                state == 1 && (!cur.word.is_empty() || (start_c && out.is_empty()));
            if flush {
                out.push(std::mem::take(&mut cur));
            }
            if c != ',' {
                for lc in c.to_lowercase() {
                    cur.word.push(lc);
                }
            }
            state = 2;
        }
    }
    // Emit the trailing segment if it carries any content
    if !cur.word.is_empty() || !cur.digits.is_empty() {
        out.push(cur);
    }
    out
}
/// Fetch a channel's video grid and insert
/// (textual duration label -> duration in seconds) pairs into `map`.
///
/// The textual label comes from the accessibility data of the length text,
/// the numeric value from parsing its "mm:ss" form.
async fn get_channel_vlengths(
    query: &RustyPipeQuery,
    channel_id: &str,
    map: &mut BTreeMap<String, u32>,
) -> Result<()> {
    // NOTE(review): the base64 params presumably select the channel's
    // "Videos" tab — confirm against the other collectors in this repo
    let resp = query
        .raw(
            ClientType::Desktop,
            "browse",
            &QBrowse {
                context: query.get_context(ClientType::Desktop, true, None).await,
                browse_id: channel_id,
                params: Some("EgZ2aWRlb3MYASAAMAE"),
            },
        )
        .await?;
    let channel = serde_json::from_str::<Channel>(&resp)?;
    // Only the first tab (the video grid) is of interest
    let tab = channel
        .contents
        .two_column_browse_results_renderer
        .tabs
        .into_iter()
        .next()
        .unwrap()
        .tab_renderer
        .content
        .rich_grid_renderer;
    tab.contents.into_iter().for_each(|c| {
        let lt = c.rich_item_renderer.content.video_renderer.length_text;
        // "mm:ss" -> seconds; keyed by the spoken/textual label
        let duration = util::parse_video_length(&lt.simple_text).unwrap();
        map.insert(lt.accessibility.accessibility_data.label, duration);
    });
    Ok(())
}
/// Local mirror of `intl_pluralrules::PluralCategory` adding the
/// `Hash`/`Ord` derives needed to store categories in sets and maps.
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)]
enum PluralCategory {
    Zero,
    One,
    Two,
    Few,
    Many,
    Other,
}
impl From<intl_pluralrules::PluralCategory> for PluralCategory {
fn from(value: intl_pluralrules::PluralCategory) -> Self {
match value {
intl_pluralrules::PluralCategory::ZERO => Self::Zero,
intl_pluralrules::PluralCategory::ONE => Self::One,
intl_pluralrules::PluralCategory::TWO => Self::Two,
intl_pluralrules::PluralCategory::FEW => Self::Few,
intl_pluralrules::PluralCategory::MANY => Self::Many,
intl_pluralrules::PluralCategory::OTHER => Self::Other,
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashSet;
    use std::io::BufReader;
    use intl_pluralrules::{PluralRuleType, PluralRules};
    use unic_langid::LanguageIdentifier;
    /// Verify that the duration sample set covers all pluralization variants of the languages
    #[test]
    fn check_video_duration_samples() {
        let json_path = path!(*DICT_DIR / "video_duration_samples.json");
        let json_file = File::open(json_path).unwrap();
        let durations: CollectedDurations =
            serde_json::from_reader(BufReader::new(json_file)).unwrap();
        let mut failed = false;
        for (lang, durations) in durations {
            // Plural rules are looked up by primary language subtag only
            // (e.g. "en" for "en-GB")
            let ul: LanguageIdentifier =
                lang.to_string().split('-').next().unwrap().parse().unwrap();
            let pr = PluralRules::create(ul, PluralRuleType::CARDINAL).expect(&lang.to_string());
            // Every plural category reachable for minute/second values 1..60
            let mut plurals_m: HashSet<PluralCategory> = HashSet::new();
            for n in 1..60 {
                plurals_m.insert(pr.select(n).unwrap().into());
            }
            let mut plurals_s = plurals_m.clone();
            // Remove the categories covered by the samples; what remains is missing
            durations.values().for_each(|v| {
                let (m, s) = split_duration(*v);
                plurals_m.remove(&pr.select(m).unwrap().into());
                plurals_s.remove(&pr.select(s).unwrap().into());
            });
            if !plurals_m.is_empty() {
                println!("{lang}: missing minutes {plurals_m:?}");
                failed = true;
            }
            if !plurals_s.is_empty() {
                // Fix: previously printed `plurals_m` here, which hid the
                // actually missing second categories from the report.
                println!("{lang}: missing seconds {plurals_s:?}");
                failed = true;
            }
        }
        assert!(!failed);
    }
    #[test]
    fn t_split_duration_text() {
        // video duration: Arabic "one minute and two seconds" (digit-less units)
        let res = split_duration_txt("دقيقة وثانيتان", true);
        dbg!(&res);
    }
}

View file

@ -5,7 +5,6 @@ use std::{
sync::Mutex,
};
use path_macro::path;
use rustypipe::{
client::{ClientType, RustyPipe},
param::{
@ -15,54 +14,55 @@ use rustypipe::{
report::{Report, Reporter},
};
use crate::util::TESTFILES_DIR;
pub async fn download_testfiles(project_root: &Path) {
let mut testfiles = project_root.to_path_buf();
testfiles.push("testfiles");
pub async fn download_testfiles() {
player().await;
player_model().await;
playlist().await;
playlist_cont().await;
video_details().await;
comments_top().await;
comments_latest().await;
recommendations().await;
channel_videos().await;
channel_shorts().await;
channel_livestreams().await;
channel_playlists().await;
channel_info().await;
channel_videos_cont().await;
channel_playlists_cont().await;
search().await;
search_cont().await;
search_playlists().await;
search_empty().await;
startpage().await;
startpage_cont().await;
trending().await;
player(&testfiles).await;
player_model(&testfiles).await;
playlist(&testfiles).await;
playlist_cont(&testfiles).await;
video_details(&testfiles).await;
comments_top(&testfiles).await;
comments_latest(&testfiles).await;
recommendations(&testfiles).await;
channel_videos(&testfiles).await;
channel_shorts(&testfiles).await;
channel_livestreams(&testfiles).await;
channel_playlists(&testfiles).await;
channel_info(&testfiles).await;
channel_videos_cont(&testfiles).await;
channel_playlists_cont(&testfiles).await;
search(&testfiles).await;
search_cont(&testfiles).await;
search_playlists(&testfiles).await;
search_empty(&testfiles).await;
startpage(&testfiles).await;
startpage_cont(&testfiles).await;
trending(&testfiles).await;
music_playlist().await;
music_playlist_cont().await;
music_playlist_related().await;
music_album().await;
music_search().await;
music_search_tracks().await;
music_search_albums().await;
music_search_artists().await;
music_search_playlists().await;
music_search_cont().await;
music_search_suggestion().await;
music_artist().await;
music_details().await;
music_lyrics().await;
music_related().await;
music_radio().await;
music_radio_cont().await;
music_new_albums().await;
music_new_videos().await;
music_charts().await;
music_genres().await;
music_genre().await;
music_playlist(&testfiles).await;
music_playlist_cont(&testfiles).await;
music_playlist_related(&testfiles).await;
music_album(&testfiles).await;
music_search(&testfiles).await;
music_search_tracks(&testfiles).await;
music_search_albums(&testfiles).await;
music_search_artists(&testfiles).await;
music_search_playlists(&testfiles).await;
music_search_cont(&testfiles).await;
music_search_suggestion(&testfiles).await;
music_artist(&testfiles).await;
music_details(&testfiles).await;
music_lyrics(&testfiles).await;
music_related(&testfiles).await;
music_radio(&testfiles).await;
music_radio_cont(&testfiles).await;
music_new_albums(&testfiles).await;
music_new_videos(&testfiles).await;
music_charts(&testfiles).await;
music_genres(&testfiles).await;
music_genre(&testfiles).await;
}
const CLIENT_TYPES: [ClientType; 5] = [
@ -136,12 +136,14 @@ fn rp_testfile(json_path: &Path) -> RustyPipe {
.build()
}
async fn player() {
async fn player(testfiles: &Path) {
let video_id = "pPvd8UxmSbQ";
for client_type in CLIENT_TYPES {
let json_path =
path!(*TESTFILES_DIR / "player" / format!("{client_type:?}_video.json").to_lowercase());
let mut json_path = testfiles.to_path_buf();
json_path.push("player");
json_path.push(format!("{client_type:?}_video.json").to_lowercase());
if json_path.exists() {
continue;
}
@ -154,12 +156,14 @@ async fn player() {
}
}
async fn player_model() {
async fn player_model(testfiles: &Path) {
let rp = RustyPipe::builder().strict().build();
for (name, id) in [("multilanguage", "tVWWp1PqDus"), ("hdr", "LXb3EKWsInQ")] {
let json_path =
path!(*TESTFILES_DIR / "player_model" / format!("{name}.json").to_lowercase());
let mut json_path = testfiles.to_path_buf();
json_path.push("player_model");
json_path.push(format!("{name}.json").to_lowercase());
if json_path.exists() {
continue;
}
@ -176,13 +180,15 @@ async fn player_model() {
}
}
async fn playlist() {
async fn playlist(testfiles: &Path) {
for (name, id) in [
("short", "RDCLAK5uy_kFQXdnqMaQCVx2wpUM4ZfbsGCDibZtkJk"),
("long", "PL5dDx681T4bR7ZF1IuWzOv1omlRbE7PiJ"),
("nomusic", "PL1J-6JOckZtE_P9Xx8D3b2O6w0idhuKBe"),
] {
let json_path = path!(*TESTFILES_DIR / "playlist" / format!("playlist_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("playlist");
json_path.push(format!("playlist_{name}.json"));
if json_path.exists() {
continue;
}
@ -192,8 +198,10 @@ async fn playlist() {
}
}
async fn playlist_cont() {
let json_path = path!(*TESTFILES_DIR / "playlist" / "playlist_cont.json");
async fn playlist_cont(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("playlist");
json_path.push("playlist_cont.json");
if json_path.exists() {
return;
}
@ -209,7 +217,7 @@ async fn playlist_cont() {
playlist.videos.next(rp.query()).await.unwrap().unwrap();
}
async fn video_details() {
async fn video_details(testfiles: &Path) {
for (name, id) in [
("music", "XuM2onMGvTI"),
("mv", "ZeerrnuLi5E"),
@ -218,8 +226,9 @@ async fn video_details() {
("live", "86YLFOog4GM"),
("agegate", "HRKu0cvrr_o"),
] {
let json_path =
path!(*TESTFILES_DIR / "video_details" / format!("video_details_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("video_details");
json_path.push(format!("video_details_{name}.json"));
if json_path.exists() {
continue;
}
@ -229,8 +238,10 @@ async fn video_details() {
}
}
async fn comments_top() {
let json_path = path!(*TESTFILES_DIR / "video_details" / "comments_top.json");
async fn comments_top(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("video_details");
json_path.push("comments_top.json");
if json_path.exists() {
return;
}
@ -247,8 +258,10 @@ async fn comments_top() {
.unwrap();
}
async fn comments_latest() {
let json_path = path!(*TESTFILES_DIR / "video_details" / "comments_latest.json");
async fn comments_latest(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("video_details");
json_path.push("comments_latest.json");
if json_path.exists() {
return;
}
@ -265,8 +278,10 @@ async fn comments_latest() {
.unwrap();
}
async fn recommendations() {
let json_path = path!(*TESTFILES_DIR / "video_details" / "recommendations.json");
async fn recommendations(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("video_details");
json_path.push("recommendations.json");
if json_path.exists() {
return;
}
@ -278,7 +293,7 @@ async fn recommendations() {
details.recommended.next(rp.query()).await.unwrap();
}
async fn channel_videos() {
async fn channel_videos(testfiles: &Path) {
for (name, id) in [
("base", "UC2DjFE7Xf11URZqWBigcVOQ"),
("music", "UC_vmjW5e1xEHhYjY2a0kK1A"), // YouTube Music channels have no videos
@ -287,7 +302,9 @@ async fn channel_videos() {
("empty", "UCxBa895m48H5idw5li7h-0g"),
("upcoming", "UCcvfHa-GHSOHFAjU0-Ie57A"),
] {
let json_path = path!(*TESTFILES_DIR / "channel" / format!("channel_videos_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("channel");
json_path.push(format!("channel_videos_{name}.json"));
if json_path.exists() {
continue;
}
@ -297,8 +314,10 @@ async fn channel_videos() {
}
}
async fn channel_shorts() {
let json_path = path!(*TESTFILES_DIR / "channel" / "channel_shorts.json");
async fn channel_shorts(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("channel");
json_path.push("channel_shorts.json");
if json_path.exists() {
return;
}
@ -310,8 +329,10 @@ async fn channel_shorts() {
.unwrap();
}
async fn channel_livestreams() {
let json_path = path!(*TESTFILES_DIR / "channel" / "channel_livestreams.json");
async fn channel_livestreams(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("channel");
json_path.push("channel_livestreams.json");
if json_path.exists() {
return;
}
@ -323,8 +344,10 @@ async fn channel_livestreams() {
.unwrap();
}
async fn channel_playlists() {
let json_path = path!(*TESTFILES_DIR / "channel" / "channel_playlists.json");
async fn channel_playlists(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("channel");
json_path.push("channel_playlists.json");
if json_path.exists() {
return;
}
@ -336,8 +359,10 @@ async fn channel_playlists() {
.unwrap();
}
async fn channel_info() {
let json_path = path!(*TESTFILES_DIR / "channel" / "channel_info.json");
async fn channel_info(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("channel");
json_path.push("channel_info.json");
if json_path.exists() {
return;
}
@ -349,8 +374,10 @@ async fn channel_info() {
.unwrap();
}
async fn channel_videos_cont() {
let json_path = path!(*TESTFILES_DIR / "channel" / "channel_videos_cont.json");
async fn channel_videos_cont(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("channel");
json_path.push("channel_videos_cont.json");
if json_path.exists() {
return;
}
@ -366,8 +393,10 @@ async fn channel_videos_cont() {
videos.content.next(rp.query()).await.unwrap().unwrap();
}
async fn channel_playlists_cont() {
let json_path = path!(*TESTFILES_DIR / "channel" / "channel_playlists_cont.json");
async fn channel_playlists_cont(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("channel");
json_path.push("channel_playlists_cont.json");
if json_path.exists() {
return;
}
@ -383,8 +412,10 @@ async fn channel_playlists_cont() {
playlists.content.next(rp.query()).await.unwrap().unwrap();
}
async fn search() {
let json_path = path!(*TESTFILES_DIR / "search" / "default.json");
async fn search(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("search");
json_path.push("default.json");
if json_path.exists() {
return;
}
@ -393,8 +424,10 @@ async fn search() {
rp.query().search("doobydoobap").await.unwrap();
}
async fn search_cont() {
let json_path = path!(*TESTFILES_DIR / "search" / "cont.json");
async fn search_cont(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("search");
json_path.push("cont.json");
if json_path.exists() {
return;
}
@ -406,8 +439,10 @@ async fn search_cont() {
search.items.next(rp.query()).await.unwrap().unwrap();
}
async fn search_playlists() {
let json_path = path!(*TESTFILES_DIR / "search" / "playlists.json");
async fn search_playlists(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("search");
json_path.push("playlists.json");
if json_path.exists() {
return;
}
@ -419,8 +454,10 @@ async fn search_playlists() {
.unwrap();
}
async fn search_empty() {
let json_path = path!(*TESTFILES_DIR / "search" / "empty.json");
async fn search_empty(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("search");
json_path.push("empty.json");
if json_path.exists() {
return;
}
@ -437,8 +474,10 @@ async fn search_empty() {
.unwrap();
}
async fn startpage() {
let json_path = path!(*TESTFILES_DIR / "trends" / "startpage.json");
async fn startpage(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("trends");
json_path.push("startpage.json");
if json_path.exists() {
return;
}
@ -447,8 +486,10 @@ async fn startpage() {
rp.query().startpage().await.unwrap();
}
async fn startpage_cont() {
let json_path = path!(*TESTFILES_DIR / "trends" / "startpage_cont.json");
async fn startpage_cont(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("trends");
json_path.push("startpage_cont.json");
if json_path.exists() {
return;
}
@ -460,8 +501,10 @@ async fn startpage_cont() {
startpage.next(rp.query()).await.unwrap();
}
async fn trending() {
let json_path = path!(*TESTFILES_DIR / "trends" / "trending_videos.json");
async fn trending(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("trends");
json_path.push("trending.json");
if json_path.exists() {
return;
}
@ -470,13 +513,15 @@ async fn trending() {
rp.query().trending().await.unwrap();
}
async fn music_playlist() {
async fn music_playlist(testfiles: &Path) {
for (name, id) in [
("short", "RDCLAK5uy_kFQXdnqMaQCVx2wpUM4ZfbsGCDibZtkJk"),
("long", "PL5dDx681T4bR7ZF1IuWzOv1omlRbE7PiJ"),
("nomusic", "PL1J-6JOckZtE_P9Xx8D3b2O6w0idhuKBe"),
] {
let json_path = path!(*TESTFILES_DIR / "music_playlist" / format!("playlist_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("music_playlist");
json_path.push(format!("playlist_{name}.json"));
if json_path.exists() {
continue;
}
@ -486,8 +531,10 @@ async fn music_playlist() {
}
}
async fn music_playlist_cont() {
let json_path = path!(*TESTFILES_DIR / "music_playlist" / "playlist_cont.json");
async fn music_playlist_cont(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("music_playlist");
json_path.push("playlist_cont.json");
if json_path.exists() {
return;
}
@ -503,8 +550,10 @@ async fn music_playlist_cont() {
playlist.tracks.next(rp.query()).await.unwrap().unwrap();
}
async fn music_playlist_related() {
let json_path = path!(*TESTFILES_DIR / "music_playlist" / "playlist_related.json");
async fn music_playlist_related(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("music_playlist");
json_path.push("playlist_related.json");
if json_path.exists() {
return;
}
@ -525,7 +574,7 @@ async fn music_playlist_related() {
.unwrap();
}
async fn music_album() {
async fn music_album(testfiles: &Path) {
for (name, id) in [
("one_artist", "MPREb_nlBWQROfvjo"),
("various_artists", "MPREb_8QkDeEIawvX"),
@ -533,7 +582,9 @@ async fn music_album() {
("description", "MPREb_PiyfuVl6aYd"),
("unavailable", "MPREb_AzuWg8qAVVl"),
] {
let json_path = path!(*TESTFILES_DIR / "music_playlist" / format!("album_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("music_playlist");
json_path.push(format!("album_{name}.json"));
if json_path.exists() {
continue;
}
@ -543,14 +594,16 @@ async fn music_album() {
}
}
async fn music_search() {
async fn music_search(testfiles: &Path) {
for (name, query) in [
("default", "black mamba"),
("typo", "liblingsmensch"),
("radio", "pop radio"),
("artist", "taylor swift"),
] {
let json_path = path!(*TESTFILES_DIR / "music_search" / format!("main_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("music_search");
json_path.push(format!("main_{name}.json"));
if json_path.exists() {
continue;
}
@ -560,7 +613,7 @@ async fn music_search() {
}
}
async fn music_search_tracks() {
async fn music_search_tracks(testfiles: &Path) {
for (name, query, videos) in [
("default", "black mamba", false),
("videos", "black mamba", true),
@ -571,7 +624,9 @@ async fn music_search_tracks() {
false,
),
] {
let json_path = path!(*TESTFILES_DIR / "music_search" / format!("tracks_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("music_search");
json_path.push(format!("tracks_{name}.json"));
if json_path.exists() {
continue;
}
@ -585,8 +640,10 @@ async fn music_search_tracks() {
}
}
async fn music_search_albums() {
let json_path = path!(*TESTFILES_DIR / "music_search" / "albums.json");
async fn music_search_albums(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("music_search");
json_path.push("albums.json");
if json_path.exists() {
return;
}
@ -595,8 +652,10 @@ async fn music_search_albums() {
rp.query().music_search_albums("black mamba").await.unwrap();
}
async fn music_search_artists() {
let json_path = path!(*TESTFILES_DIR / "music_search" / "artists.json");
async fn music_search_artists(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("music_search");
json_path.push("artists.json");
if json_path.exists() {
return;
}
@ -608,9 +667,11 @@ async fn music_search_artists() {
.unwrap();
}
async fn music_search_playlists() {
async fn music_search_playlists(testfiles: &Path) {
for (name, community) in [("ytm", false), ("community", true)] {
let json_path = path!(*TESTFILES_DIR / "music_search" / format!("playlists_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("music_search");
json_path.push(format!("playlists_{name}.json"));
if json_path.exists() {
continue;
}
@ -623,8 +684,10 @@ async fn music_search_playlists() {
}
}
async fn music_search_cont() {
let json_path = path!(*TESTFILES_DIR / "music_search" / "tracks_cont.json");
async fn music_search_cont(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("music_search");
json_path.push("tracks_cont.json");
if json_path.exists() {
return;
}
@ -636,9 +699,11 @@ async fn music_search_cont() {
res.items.next(rp.query()).await.unwrap().unwrap();
}
async fn music_search_suggestion() {
async fn music_search_suggestion(testfiles: &Path) {
for (name, query) in [("default", "t"), ("empty", "reujbhevmfndxnjrze")] {
let json_path = path!(*TESTFILES_DIR / "music_search" / format!("suggestion_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("music_search");
json_path.push(format!("suggestion_{name}.json"));
if json_path.exists() {
continue;
}
@ -648,7 +713,7 @@ async fn music_search_suggestion() {
}
}
async fn music_artist() {
async fn music_artist(testfiles: &Path) {
for (name, id, all_albums) in [
("default", "UClmXPfaYhXOYsNn_QUyheWQ", true),
("no_more_albums", "UC_vmjW5e1xEHhYjY2a0kK1A", true),
@ -657,7 +722,9 @@ async fn music_artist() {
("only_more_singles", "UC0aXrjVxG5pZr99v77wZdPQ", true),
("secondary_channel", "UCC9192yGQD25eBZgFZ84MPw", false),
] {
let json_path = path!(*TESTFILES_DIR / "music_artist" / format!("artist_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("music_artist");
json_path.push(format!("artist_{name}.json"));
if json_path.exists() {
continue;
}
@ -667,9 +734,11 @@ async fn music_artist() {
}
}
async fn music_details() {
async fn music_details(testfiles: &Path) {
for (name, id) in [("mv", "ZeerrnuLi5E"), ("track", "7nigXQS1Xb0")] {
let json_path = path!(*TESTFILES_DIR / "music_details" / format!("details_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("music_details");
json_path.push(format!("details_{name}.json"));
if json_path.exists() {
continue;
}
@ -679,8 +748,10 @@ async fn music_details() {
}
}
async fn music_lyrics() {
let json_path = path!(*TESTFILES_DIR / "music_details" / "lyrics.json");
async fn music_lyrics(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("music_details");
json_path.push("lyrics.json");
if json_path.exists() {
return;
}
@ -695,8 +766,10 @@ async fn music_lyrics() {
.unwrap();
}
async fn music_related() {
let json_path = path!(*TESTFILES_DIR / "music_details" / "related.json");
async fn music_related(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("music_details");
json_path.push("related.json");
if json_path.exists() {
return;
}
@ -711,9 +784,11 @@ async fn music_related() {
.unwrap();
}
async fn music_radio() {
async fn music_radio(testfiles: &Path) {
for (name, id) in [("mv", "RDAMVMZeerrnuLi5E"), ("track", "RDAMVM7nigXQS1Xb0")] {
let json_path = path!(*TESTFILES_DIR / "music_details" / format!("radio_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("music_details");
json_path.push(format!("radio_{name}.json"));
if json_path.exists() {
continue;
}
@ -723,8 +798,10 @@ async fn music_radio() {
}
}
async fn music_radio_cont() {
let json_path = path!(*TESTFILES_DIR / "music_details" / "radio_cont.json");
async fn music_radio_cont(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("music_details");
json_path.push("radio_cont.json");
if json_path.exists() {
return;
}
@ -736,8 +813,10 @@ async fn music_radio_cont() {
res.next(rp.query()).await.unwrap().unwrap();
}
async fn music_new_albums() {
let json_path = path!(*TESTFILES_DIR / "music_new" / "albums_default.json");
async fn music_new_albums(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("music_new");
json_path.push("albums_default.json");
if json_path.exists() {
return;
}
@ -746,8 +825,10 @@ async fn music_new_albums() {
rp.query().music_new_albums().await.unwrap();
}
async fn music_new_videos() {
let json_path = path!(*TESTFILES_DIR / "music_new" / "videos_default.json");
async fn music_new_videos(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("music_new");
json_path.push("videos_default.json");
if json_path.exists() {
return;
}
@ -756,9 +837,11 @@ async fn music_new_videos() {
rp.query().music_new_videos().await.unwrap();
}
async fn music_charts() {
async fn music_charts(testfiles: &Path) {
for (name, country) in [("global", Some(Country::Zz)), ("US", Some(Country::Us))] {
let json_path = path!(*TESTFILES_DIR / "music_charts" / format!("charts_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("music_charts");
json_path.push(&format!("charts_{name}.json"));
if json_path.exists() {
continue;
}
@ -768,8 +851,10 @@ async fn music_charts() {
}
}
async fn music_genres() {
let json_path = path!(*TESTFILES_DIR / "music_genres" / "genres.json");
async fn music_genres(testfiles: &Path) {
let mut json_path = testfiles.to_path_buf();
json_path.push("music_genres");
json_path.push("genres.json");
if json_path.exists() {
return;
}
@ -778,12 +863,14 @@ async fn music_genres() {
rp.query().music_genres().await.unwrap();
}
async fn music_genre() {
async fn music_genre(testfiles: &Path) {
for (name, id) in [
("default", "ggMPOg1uX1lMbVZmbzl6NlJ3"),
("mood", "ggMPOg1uX1JOQWZFeDByc2Jm"),
] {
let json_path = path!(*TESTFILES_DIR / "music_genres" / format!("genre_{name}.json"));
let mut json_path = testfiles.to_path_buf();
json_path.push("music_genres");
json_path.push(&format!("genre_{name}.json"));
if json_path.exists() {
continue;
}

View file

@ -1,13 +1,13 @@
use std::fmt::Write;
use std::path::Path;
use once_cell::sync::Lazy;
use path_macro::path;
use regex::Regex;
use rustypipe::timeago::TimeUnit;
use crate::{
model::TimeUnit,
util::{self, SRC_DIR},
};
use crate::util;
const TARGET_PATH: &str = "src/util/dictionary.rs";
fn parse_tu(tu: &str) -> (u8, Option<TimeUnit>) {
static TU_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r"^(\d*)(\w?)$").unwrap());
@ -30,20 +30,23 @@ fn parse_tu(tu: &str) -> (u8, Option<TimeUnit>) {
}
}
pub fn generate_dictionary() {
let dict = util::read_dict();
pub fn generate_dictionary(project_root: &Path) {
let dict = util::read_dict(project_root);
let code_head = r#"// This file is automatically generated. DO NOT EDIT.
// See codegen/gen_dictionary.rs for the generation code.
use crate::{
model::AlbumType,
param::Language,
util::timeago::{DateCmp, TaToken, TimeUnit},
timeago::{DateCmp, TaToken, TimeUnit},
};
/// The dictionary contains the information required to parse dates and numbers
/// in all supported languages.
pub(crate) struct Entry {
/// Should the language be parsed by character instead of by word?
/// (e.g. Chinese/Japanese)
pub by_char: bool,
/// Tokens for parsing timeago strings.
///
/// Format: Parsed token -> \[Quantity\] Identifier
@ -73,10 +76,6 @@ pub(crate) struct Entry {
///
/// Format: Parsed token -> decimal power
pub number_tokens: phf::Map<&'static str, u8>,
/// Tokens for parsing number strings with no digits (e.g. "No videos")
///
/// Format: Parsed token -> value
pub number_nd_tokens: phf::Map<&'static str, u8>,
/// Names of album types (Album, Single, ...)
///
/// Format: Parsed text -> Album type
@ -142,12 +141,6 @@ pub(crate) fn entry(lang: Language) -> Entry {
number_tokens.entry(txt, &mag.to_string());
});
// Number nd tokens
let mut number_nd_tokens = phf_codegen::Map::<&str>::new();
entry.number_nd_tokens.iter().for_each(|(txt, mag)| {
number_nd_tokens.entry(txt, &mag.to_string());
});
// Album types
let mut album_types = phf_codegen::Map::<&str>::new();
entry.album_types.iter().for_each(|(txt, album_type)| {
@ -158,17 +151,17 @@ pub(crate) fn entry(lang: Language) -> Entry {
let code_ta_nd_tokens = &ta_nd_tokens.build().to_string().replace('\n', "\n ");
let code_months = &months.build().to_string().replace('\n', "\n ");
let code_number_tokens = &number_tokens.build().to_string().replace('\n', "\n ");
let code_number_nd_tokens = &number_nd_tokens.build().to_string().replace('\n', "\n ");
let code_album_types = &album_types.build().to_string().replace('\n', "\n ");
write!(code_timeago_tokens, "{} => Entry {{\n timeago_tokens: {},\n date_order: {},\n months: {},\n timeago_nd_tokens: {},\n comma_decimal: {:?},\n number_tokens: {},\n number_nd_tokens: {},\n album_types: {},\n }},\n ",
selector, code_ta_tokens, date_order, code_months, code_ta_nd_tokens, entry.comma_decimal, code_number_tokens, code_number_nd_tokens, code_album_types).unwrap();
let _ = write!(code_timeago_tokens, "{} => Entry {{\n by_char: {:?},\n timeago_tokens: {},\n date_order: {},\n months: {},\n timeago_nd_tokens: {},\n comma_decimal: {:?},\n number_tokens: {},\n album_types: {},\n }},\n ",
selector, entry.by_char, code_ta_tokens, date_order, code_months, code_ta_nd_tokens, entry.comma_decimal, code_number_tokens, code_album_types);
});
code_timeago_tokens = code_timeago_tokens.trim_end().to_owned() + "\n }\n}\n";
let code = format!("{code_head}\n{code_timeago_tokens}");
let target_path = path!(*SRC_DIR / "util" / "dictionary.rs");
let mut target_path = project_root.to_path_buf();
target_path.push(TARGET_PATH);
std::fs::write(target_path, code).unwrap();
}

View file

@ -1,15 +1,14 @@
use std::collections::BTreeMap;
use std::fmt::Write;
use std::path::Path;
use path_macro::path;
use reqwest::header;
use reqwest::Client;
use serde::Deserialize;
use serde_with::serde_as;
use serde_with::VecSkipError;
use crate::model::Text;
use crate::util::SRC_DIR;
use crate::util::Text;
#[serde_as]
#[derive(Clone, Debug, Deserialize)]
@ -138,7 +137,7 @@ struct LanguageCountryCommand {
hl: String,
}
pub async fn generate_locales() {
pub async fn generate_locales(project_root: &Path) {
let (languages, countries) = get_locales().await;
let code_head = r#"// This file is automatically generated. DO NOT EDIT.
@ -289,7 +288,8 @@ pub enum Country {
"{code_head}\n{code_langs}\n{code_countries}\n{code_lang_array}\n{code_country_array}\n{code_lang_names}\n{code_country_names}\n{code_foot}"
);
let target_path = path!(*SRC_DIR / "param" / "locale.rs");
let mut target_path = project_root.to_path_buf();
target_path.push("src/param/locale.rs");
std::fs::write(target_path, code).unwrap();
}

View file

@ -2,19 +2,21 @@ mod abtest;
mod collect_album_types;
mod collect_large_numbers;
mod collect_playlist_dates;
mod collect_video_durations;
mod download_testfiles;
mod gen_dictionary;
mod gen_locales;
mod model;
mod util;
use std::path::PathBuf;
use clap::{Parser, Subcommand};
#[derive(Parser)]
struct Cli {
#[clap(subcommand)]
command: Commands,
#[clap(short = 'd', default_value = "..")]
project_root: PathBuf,
#[clap(short, default_value = "8")]
concurrency: usize,
}
@ -24,11 +26,9 @@ enum Commands {
CollectPlaylistDates,
CollectLargeNumbers,
CollectAlbumTypes,
CollectVideoDurations,
ParsePlaylistDates,
ParseLargeNumbers,
ParseAlbumTypes,
ParseVideoDurations,
GenLocales,
GenDict,
DownloadTestfiles,
@ -47,26 +47,28 @@ async fn main() {
match cli.command {
Commands::CollectPlaylistDates => {
collect_playlist_dates::collect_dates(cli.concurrency).await;
collect_playlist_dates::collect_dates(&cli.project_root, cli.concurrency).await;
}
Commands::CollectLargeNumbers => {
collect_large_numbers::collect_large_numbers(cli.concurrency).await;
collect_large_numbers::collect_large_numbers(&cli.project_root, cli.concurrency).await;
}
Commands::CollectAlbumTypes => {
collect_album_types::collect_album_types(cli.concurrency).await;
collect_album_types::collect_album_types(&cli.project_root, cli.concurrency).await;
}
Commands::CollectVideoDurations => {
collect_video_durations::collect_video_durations(cli.concurrency).await;
Commands::ParsePlaylistDates => {
collect_playlist_dates::write_samples_to_dict(&cli.project_root)
}
Commands::ParsePlaylistDates => collect_playlist_dates::write_samples_to_dict(),
Commands::ParseLargeNumbers => collect_large_numbers::write_samples_to_dict(),
Commands::ParseAlbumTypes => collect_album_types::write_samples_to_dict(),
Commands::ParseVideoDurations => collect_video_durations::parse_video_durations(),
Commands::ParseLargeNumbers => {
collect_large_numbers::write_samples_to_dict(&cli.project_root)
}
Commands::ParseAlbumTypes => collect_album_types::write_samples_to_dict(&cli.project_root),
Commands::GenLocales => {
gen_locales::generate_locales().await;
gen_locales::generate_locales(&cli.project_root).await;
}
Commands::GenDict => gen_dictionary::generate_dictionary(&cli.project_root),
Commands::DownloadTestfiles => {
download_testfiles::download_testfiles(&cli.project_root).await
}
Commands::GenDict => gen_dictionary::generate_dictionary(),
Commands::DownloadTestfiles => download_testfiles::download_testfiles().await,
Commands::AbTest { id, n } => {
match id {
Some(id) => {

View file

@ -1,295 +0,0 @@
use std::collections::BTreeMap;
use rustypipe::{client::YTContext, model::AlbumType, param::Language};
use serde::{Deserialize, Serialize};
use serde_with::{serde_as, DefaultOnError, VecSkipError};
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
pub struct DictEntry {
/// List of languages that should be treated equally (e.g. EnUs/EnGb/EnIn)
pub equivalent: Vec<Language>,
/// Should the language be parsed by character instead of by word?
/// (e.g. Chinese/Japanese)
pub by_char: bool,
/// Tokens for parsing timeago strings.
///
/// Format: Parsed token -> \[Quantity\] Identifier
///
/// Identifiers: `Y`(ear), `M`(month), `W`(eek), `D`(ay),
/// `h`(our), `m`(inute), `s`(econd)
pub timeago_tokens: BTreeMap<String, String>,
/// Order in which to parse numeric date components. Formatted as
/// a string of date identifiers (Y, M, D).
///
/// Examples:
///
/// - 03.01.2020 => `"DMY"`
/// - Jan 3, 2020 => `"DY"`
pub date_order: String,
/// Tokens for parsing month names.
///
/// Format: Parsed token -> Month number (starting from 1)
pub months: BTreeMap<String, u8>,
/// Tokens for parsing date strings with no digits (e.g. Today, Tomorrow)
///
/// Format: Parsed token -> \[Quantity\] Identifier
pub timeago_nd_tokens: BTreeMap<String, String>,
/// Are commas (instead of points) used as decimal separators?
pub comma_decimal: bool,
/// Tokens for parsing decimal prefixes (K, M, B, ...)
///
/// Format: Parsed token -> decimal power
pub number_tokens: BTreeMap<String, u8>,
/// Tokens for parsing number strings with no digits (e.g. "No videos")
///
/// Format: Parsed token -> value
pub number_nd_tokens: BTreeMap<String, u8>,
/// Names of album types (Album, Single, ...)
///
/// Format: Parsed text -> Album type
pub album_types: BTreeMap<String, AlbumType>,
}
/// Parsed TimeAgo string, contains amount and time unit.
///
/// Example: "14 hours ago" => `TimeAgo {n: 14, unit: TimeUnit::Hour}`
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct TimeAgo {
/// Number of time units
pub n: u8,
/// Time unit
pub unit: TimeUnit,
}
impl ToString for TimeAgo {
fn to_string(&self) -> String {
if self.n > 1 {
format!("{}{}", self.n, self.unit.as_str())
} else {
self.unit.as_str().to_owned()
}
}
}
/// Parsed time unit
#[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[serde(rename_all = "lowercase")]
pub enum TimeUnit {
Second,
Minute,
Hour,
Day,
Week,
Month,
Year,
}
impl TimeUnit {
pub fn as_str(&self) -> &str {
match self {
TimeUnit::Second => "s",
TimeUnit::Minute => "m",
TimeUnit::Hour => "h",
TimeUnit::Day => "D",
TimeUnit::Week => "W",
TimeUnit::Month => "M",
TimeUnit::Year => "Y",
}
}
}
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct QBrowse<'a> {
pub context: YTContext<'a>,
pub browse_id: &'a str,
#[serde(skip_serializing_if = "Option::is_none")]
pub params: Option<&'a str>,
}
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct QCont<'a> {
pub context: YTContext<'a>,
pub continuation: &'a str,
}
#[derive(Clone, Debug, Deserialize)]
pub struct TextRuns {
pub runs: Vec<Text>,
}
#[derive(Clone, Debug, Deserialize)]
pub struct Text {
#[serde(alias = "simpleText")]
pub text: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Channel {
pub contents: Contents,
pub header: ChannelHeader,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ChannelHeader {
pub c4_tabbed_header_renderer: HeaderRenderer,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct HeaderRenderer {
pub subscriber_count_text: Text,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Contents {
pub two_column_browse_results_renderer: TabsRenderer,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TabsRenderer {
#[serde_as(as = "VecSkipError<_>")]
pub tabs: Vec<TabRendererWrap>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TabRendererWrap {
pub tab_renderer: TabRenderer,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct TabRenderer {
pub content: RichGridRendererWrap,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RichGridRendererWrap {
pub rich_grid_renderer: RichGridRenderer,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RichGridRenderer {
#[serde_as(as = "VecSkipError<_>")]
pub contents: Vec<RichItemRendererWrap>,
#[serde(default)]
#[serde_as(as = "DefaultOnError")]
pub header: Option<RichGridHeader>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RichItemRendererWrap {
pub rich_item_renderer: RichItemRenderer,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RichItemRenderer {
pub content: VideoRendererWrap,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct VideoRendererWrap {
pub video_renderer: VideoRenderer,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct VideoRenderer {
/// `24,194 views`
pub view_count_text: Text,
/// `19K views`
pub short_view_count_text: Text,
pub length_text: LengthText,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct LengthText {
/// `18 minutes, 26 seconds`
pub accessibility: Accessibility,
/// `18:26`
pub simple_text: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Accessibility {
pub accessibility_data: AccessibilityData,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct AccessibilityData {
pub label: String,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RichGridHeader {
pub feed_filter_chip_bar_renderer: ChipBar,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ChipBar {
pub contents: Vec<Chip>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Chip {
pub chip_cloud_chip_renderer: ChipRenderer,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ChipRenderer {
pub navigation_endpoint: NavigationEndpoint,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct NavigationEndpoint {
pub continuation_command: ContinuationCommand,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ContinuationCommand {
pub token: String,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ContinuationResponse {
pub on_response_received_actions: Vec<ContinuationAction>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ContinuationAction {
pub reload_continuation_items_command: ContinuationItemsWrap,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ContinuationItemsWrap {
#[serde_as(as = "VecSkipError<_>")]
pub continuation_items: Vec<RichItemRendererWrap>,
}

View file

@ -1,75 +1,84 @@
use std::{collections::BTreeMap, fs::File, io::BufReader, path::PathBuf, str::FromStr};
use std::{
collections::BTreeMap,
fs::File,
io::BufReader,
path::{Path, PathBuf},
str::FromStr,
};
use once_cell::sync::Lazy;
use path_macro::path;
use regex::Regex;
use rustypipe::param::Language;
use rustypipe::{model::AlbumType, param::Language};
use serde::{Deserialize, Serialize};
use crate::model::DictEntry;
/// Get the path of the `testfiles` directory
pub static TESTFILES_DIR: Lazy<PathBuf> = Lazy::new(|| {
path!(env!("CARGO_MANIFEST_DIR") / ".." / "testfiles")
.canonicalize()
.unwrap()
});
/// Get the path of the `dict` directory
pub static DICT_DIR: Lazy<PathBuf> = Lazy::new(|| path!(*TESTFILES_DIR / "dict"));
/// Get the path of the `src` directory
pub static SRC_DIR: Lazy<PathBuf> = Lazy::new(|| path!(env!("CARGO_MANIFEST_DIR") / ".." / "src"));
static DICT_PATH: Lazy<PathBuf> = Lazy::new(|| path!("testfiles" / "dict" / "dictionary.json"));
type Dictionary = BTreeMap<Language, DictEntry>;
type DictionaryOverride = BTreeMap<Language, DictOverrideEntry>;
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
struct DictOverrideEntry {
number_tokens: BTreeMap<String, Option<u8>>,
number_nd_tokens: BTreeMap<String, Option<u8>>,
pub struct DictEntry {
/// List of languages that should be treated equally (e.g. EnUs/EnGb/EnIn)
pub equivalent: Vec<Language>,
/// Should the language be parsed by character instead of by word?
/// (e.g. Chinese/Japanese)
pub by_char: bool,
/// Tokens for parsing timeago strings.
///
/// Format: Parsed token -> \[Quantity\] Identifier
///
/// Identifiers: `Y`(ear), `M`(month), `W`(eek), `D`(ay),
/// `h`(our), `m`(inute), `s`(econd)
pub timeago_tokens: BTreeMap<String, String>,
/// Order in which to parse numeric date components. Formatted as
/// a string of date identifiers (Y, M, D).
///
/// Examples:
///
/// - 03.01.2020 => `"DMY"`
/// - Jan 3, 2020 => `"DY"`
pub date_order: String,
/// Tokens for parsing month names.
///
/// Format: Parsed token -> Month number (starting from 1)
pub months: BTreeMap<String, u8>,
/// Tokens for parsing date strings with no digits (e.g. Today, Tomorrow)
///
/// Format: Parsed token -> \[Quantity\] Identifier
pub timeago_nd_tokens: BTreeMap<String, String>,
/// Are commas (instead of points) used as decimal separators?
pub comma_decimal: bool,
/// Tokens for parsing decimal prefixes (K, M, B, ...)
///
/// Format: Parsed token -> decimal power
pub number_tokens: BTreeMap<String, u8>,
/// Names of album types (Album, Single, ...)
///
/// Format: Parsed text -> Album type
pub album_types: BTreeMap<String, AlbumType>,
}
pub fn read_dict() -> Dictionary {
let json_path = path!(*DICT_DIR / "dictionary.json");
#[derive(Clone, Debug, Deserialize)]
pub struct TextRuns {
pub runs: Vec<Text>,
}
#[derive(Clone, Debug, Deserialize)]
pub struct Text {
#[serde(alias = "simpleText")]
pub text: String,
}
pub fn read_dict(project_root: &Path) -> Dictionary {
let json_path = path!(project_root / *DICT_PATH);
let json_file = File::open(json_path).unwrap();
serde_json::from_reader(BufReader::new(json_file)).unwrap()
}
fn read_dict_override() -> DictionaryOverride {
let json_path = path!(*DICT_DIR / "dictionary_override.json");
let json_file = File::open(json_path).unwrap();
serde_json::from_reader(BufReader::new(json_file)).unwrap()
}
pub fn write_dict(dict: Dictionary) {
let dict_override = read_dict_override();
let json_path = path!(*DICT_DIR / "dictionary.json");
pub fn write_dict(project_root: &Path, dict: &Dictionary) {
let json_path = path!(project_root / *DICT_PATH);
let json_file = File::create(json_path).unwrap();
fn apply_map<K: Clone + Ord, V: Clone>(map: &mut BTreeMap<K, V>, or: &BTreeMap<K, Option<V>>) {
or.iter().for_each(|(key, val)| match val {
Some(val) => {
map.insert(key.clone(), val.clone());
}
None => {
map.remove(key);
}
});
}
let dict: Dictionary = dict
.into_iter()
.map(|(lang, mut entry)| {
if let Some(or) = dict_override.get(&lang) {
apply_map(&mut entry.number_tokens, &or.number_tokens);
apply_map(&mut entry.number_nd_tokens, &or.number_nd_tokens);
}
(lang, entry)
})
.collect();
serde_json::to_writer_pretty(json_file, &dict).unwrap();
serde_json::to_writer_pretty(json_file, dict).unwrap();
}
pub fn filter_datestr(string: &str) -> String {
@ -91,20 +100,7 @@ pub fn filter_datestr(string: &str) -> String {
pub fn filter_largenumstr(string: &str) -> String {
string
.chars()
.filter(|c| {
!matches!(
c,
'\u{200b}'
| '\u{202b}'
| '\u{202c}'
| '\u{202e}'
| '\u{200e}'
| '\u{200f}'
| '.'
| ','
) && !c.is_ascii_digit()
})
.flat_map(char::to_lowercase)
.filter(|c| !matches!(c, '\u{200b}' | '.' | ',') && !c.is_ascii_digit())
.collect()
}
@ -144,63 +140,3 @@ where
numbers
}
pub fn parse_largenum_en(string: &str) -> Option<u64> {
let (num, mut exp, filtered) = {
let mut buf = String::new();
let mut filtered = String::new();
let mut exp = 0;
let mut after_point = false;
for c in string.chars() {
if c.is_ascii_digit() {
buf.push(c);
if after_point {
exp -= 1;
}
} else if c == '.' {
after_point = true;
} else if !matches!(c, '\u{200b}' | '.' | ',') {
filtered.push(c);
}
}
(buf.parse::<u64>().ok()?, exp, filtered)
};
let lookup_token = |token: &str| match token {
"K" => Some(3),
"M" => Some(6),
"B" => Some(9),
_ => None,
};
exp += filtered
.split_whitespace()
.filter_map(lookup_token)
.sum::<i32>();
num.checked_mul((10_u64).checked_pow(exp.try_into().ok()?)?)
}
/// Parse textual video length (e.g. `0:49`, `2:02` or `1:48:18`)
/// and return the duration in seconds.
pub fn parse_video_length(text: &str) -> Option<u32> {
static VIDEO_LENGTH_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"(?:(\d+)[:.])?(\d{1,2})[:.](\d{2})"#).unwrap());
VIDEO_LENGTH_REGEX.captures(text).map(|cap| {
let hrs = cap
.get(1)
.and_then(|x| x.as_str().parse::<u32>().ok())
.unwrap_or_default();
let min = cap
.get(2)
.and_then(|x| x.as_str().parse::<u32>().ok())
.unwrap_or_default();
let sec = cap
.get(3)
.and_then(|x| x.as_str().parse::<u32>().ok())
.unwrap_or_default();
hrs * 3600 + min * 60 + sec
})
}

View file

@ -1,34 +0,0 @@
# Parsing localized data from YouTube
Since YouTube's API is outputting the website as it should be rendered by the client,
the data received from the API is already localized. This affects dates, times and
number formats.
To be able to successfully parse them, we need to collect samples in every language and
build a dictionary.
### Timeago
- Relative date format used for video upload dates and comments.
- Examples: "1 hour ago", "3 months ago"
### Playlist dates
- Playlist update dates are always day-accurate, either as textual dates or in the form
of "n days ago"
- Examples: "Last updated on Jan 3, 2020", "Updated today", "Updated yesterday",
"Updated 3 days ago"
### Video duration
- In Danisch ("da") video durations are formatted using dots instead of colons. Example:
"12.31", "3.03.52"
### Numbers
- Large numbers (subscriber/view counts) are rounded and shown using a decimal prefix
- Examples: "1.4M views"
- There is an exception for the value 0 ("no views") and in some languages for the value
1 (pt: "Um vídeo")
- Special case: Language "gu", "જોવાયાની સંખ્યા" = "no views", contains no unique tokens
to parse

View file

@ -179,11 +179,8 @@ impl MapResponse<Channel<Paginator<VideoItem>>> for response::Channel {
lang,
)?;
let mut mapper = response::YouTubeListMapper::<VideoItem>::with_channel(
lang,
&channel_data.c,
channel_data.warnings,
);
let mut mapper =
response::YouTubeListMapper::<VideoItem>::with_channel(lang, &channel_data);
mapper.map_response(content.content);
let p = Paginator::new_ext(
None,
@ -194,7 +191,7 @@ impl MapResponse<Channel<Paginator<VideoItem>>> for response::Channel {
);
Ok(MapResult {
c: combine_channel_data(channel_data.c, p),
c: combine_channel_data(channel_data, p),
warnings: mapper.warnings,
})
}
@ -222,16 +219,13 @@ impl MapResponse<Channel<Paginator<PlaylistItem>>> for response::Channel {
lang,
)?;
let mut mapper = response::YouTubeListMapper::<PlaylistItem>::with_channel(
lang,
&channel_data.c,
channel_data.warnings,
);
let mut mapper =
response::YouTubeListMapper::<PlaylistItem>::with_channel(lang, &channel_data);
mapper.map_response(content.content);
let p = Paginator::new(None, mapper.items, mapper.ctoken);
Ok(MapResult {
c: combine_channel_data(channel_data.c, p),
c: combine_channel_data(channel_data, p),
warnings: mapper.warnings,
})
}
@ -272,7 +266,7 @@ impl MapResponse<Channel<ChannelInfo>> for response::Channel {
});
Ok(MapResult {
c: combine_channel_data(channel_data.c, cinfo),
c: combine_channel_data(channel_data, cinfo),
warnings,
})
}
@ -303,7 +297,7 @@ fn map_channel(
d: MapChannelData,
id: &str,
lang: Language,
) -> Result<MapResult<Channel<()>>, ExtractionError> {
) -> Result<Channel<()>, ExtractionError> {
let header = d
.header
.ok_or(ExtractionError::ContentUnavailable(Cow::Borrowed(
@ -332,35 +326,33 @@ fn map_channel(
.vanity_channel_url
.as_ref()
.and_then(|url| map_vanity_url(url, id));
let mut warnings = Vec::new();
Ok(MapResult {
c: match header {
response::channel::Header::C4TabbedHeaderRenderer(header) => Channel {
id: metadata.external_id,
name: metadata.title,
subscriber_count: header
.subscriber_count_text
.and_then(|txt| util::parse_large_numstr_or_warn(&txt, lang, &mut warnings)),
avatar: header.avatar.into(),
verification: header.badges.into(),
description: metadata.description,
tags: microformat.microformat_data_renderer.tags,
vanity_url,
banner: header.banner.into(),
mobile_banner: header.mobile_banner.into(),
tv_banner: header.tv_banner.into(),
has_shorts: d.has_shorts,
has_live: d.has_live,
visitor_data: d.visitor_data,
content: (),
},
response::channel::Header::CarouselHeaderRenderer(carousel) => {
let hdata = carousel
.contents
.into_iter()
.filter_map(|item| {
match item {
Ok(match header {
response::channel::Header::C4TabbedHeaderRenderer(header) => Channel {
id: metadata.external_id,
name: metadata.title,
subscriber_count: header
.subscriber_count_text
.and_then(|txt| util::parse_large_numstr(&txt, lang)),
avatar: header.avatar.into(),
verification: header.badges.into(),
description: metadata.description,
tags: microformat.microformat_data_renderer.tags,
vanity_url,
banner: header.banner.into(),
mobile_banner: header.mobile_banner.into(),
tv_banner: header.tv_banner.into(),
has_shorts: d.has_shorts,
has_live: d.has_live,
visitor_data: d.visitor_data,
content: (),
},
response::channel::Header::CarouselHeaderRenderer(carousel) => {
let hdata = carousel
.contents
.into_iter()
.filter_map(|item| {
match item {
response::channel::CarouselHeaderRendererItem::TopicChannelDetailsRenderer {
subscriber_count_text,
subtitle,
@ -368,33 +360,32 @@ fn map_channel(
} => Some((subscriber_count_text.or(subtitle), avatar)),
response::channel::CarouselHeaderRendererItem::None => None,
}
})
.next();
})
.next();
Channel {
id: metadata.external_id,
name: metadata.title,
subscriber_count: hdata.as_ref().and_then(|hdata| {
hdata.0.as_ref().and_then(|txt| {
util::parse_large_numstr_or_warn(txt, lang, &mut warnings)
})
}),
avatar: hdata.map(|hdata| hdata.1.into()).unwrap_or_default(),
verification: crate::model::Verification::Verified,
description: metadata.description,
tags: microformat.microformat_data_renderer.tags,
vanity_url,
banner: Vec::new(),
mobile_banner: Vec::new(),
tv_banner: Vec::new(),
has_shorts: d.has_shorts,
has_live: d.has_live,
visitor_data: d.visitor_data,
content: (),
}
Channel {
id: metadata.external_id,
name: metadata.title,
subscriber_count: hdata.as_ref().and_then(|hdata| {
hdata
.0
.as_ref()
.and_then(|txt| util::parse_large_numstr(txt, lang))
}),
avatar: hdata.map(|hdata| hdata.1.into()).unwrap_or_default(),
verification: crate::model::Verification::Verified,
description: metadata.description,
tags: microformat.microformat_data_renderer.tags,
vanity_url,
banner: Vec::new(),
mobile_banner: Vec::new(),
tv_banner: Vec::new(),
has_shorts: d.has_shorts,
has_live: d.has_live,
visitor_data: d.visitor_data,
content: (),
}
},
warnings,
}
})
}
@ -410,7 +401,7 @@ fn map_channel_content(
) -> Result<MappedChannelContent, ExtractionError> {
match contents {
Some(contents) => {
let tabs = contents.two_column_browse_results_renderer.contents;
let tabs = contents.two_column_browse_results_renderer.tabs;
if tabs.is_empty() {
return Err(ExtractionError::ContentUnavailable(
"channel not found".into(),

View file

@ -269,7 +269,7 @@ fn map_artist_page(
}
}
let mut mapped = mapper.group_items();
let mapped = mapper.group_items();
static WIKIPEDIA_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new(r"\(?https://[a-z\d-]+\.wikipedia.org/wiki/[^\s]+").unwrap());
@ -302,10 +302,9 @@ fn map_artist_page(
description: header.description,
wikipedia_url,
subscriber_count: header.subscription_button.and_then(|btn| {
util::parse_large_numstr_or_warn(
util::parse_large_numstr(
&btn.subscribe_button_renderer.subscriber_count_text,
lang,
&mut mapped.warnings,
)
}),
tracks: mapped.c.tracks,

View file

@ -207,25 +207,22 @@ impl MapResponse<TrackDetails> for response::MusicDetails {
response::music_item::PlaylistPanelVideo::None => None,
})
.ok_or(ExtractionError::InvalidData(Cow::Borrowed("no video item")))?;
let mut track = map_queue_item(track_item, lang);
let track = map_queue_item(track_item, lang);
if track.c.id != id {
if track.id != id {
return Err(ExtractionError::WrongResult(format!(
"got wrong video id {}, expected {}",
track.c.id, id
track.id, id
)));
}
let mut warnings = content.contents.warnings;
warnings.append(&mut track.warnings);
Ok(MapResult {
c: TrackDetails {
track: track.c,
track,
lyrics_id,
related_id,
},
warnings,
warnings: content.contents.warnings,
})
}
}
@ -254,17 +251,13 @@ impl MapResponse<Paginator<TrackItem>> for response::MusicDetails {
.content
.playlist_panel_renderer;
let mut warnings = content.contents.warnings;
let tracks = content
.contents
.c
.into_iter()
.filter_map(|item| match item {
response::music_item::PlaylistPanelVideo::PlaylistPanelVideoRenderer(item) => {
let mut track = map_queue_item(item, lang);
warnings.append(&mut track.warnings);
Some(track.c)
Some(map_queue_item(item, lang))
}
response::music_item::PlaylistPanelVideo::None => None,
})
@ -284,7 +277,7 @@ impl MapResponse<Paginator<TrackItem>> for response::MusicDetails {
None,
crate::model::paginator::ContinuationEndpoint::MusicNext,
),
warnings,
warnings: content.contents.warnings,
})
}
}

View file

@ -81,7 +81,7 @@ impl MapResponse<Vec<MusicGenreItem>> for response::MusicGenres {
let genres = content_iter
.enumerate()
.flat_map(|(i, grid)| {
let mut grid = grid.grid_renderer.contents;
let mut grid = grid.grid_renderer.items;
warnings.append(&mut grid.warnings);
grid.c.into_iter().filter_map(move |section| match section {
response::music_genres::NavigationButton::MusicNavigationButtonRenderer(

View file

@ -4,7 +4,7 @@ use crate::{
error::{Error, ExtractionError},
model::{paginator::Paginator, AlbumId, ChannelId, MusicAlbum, MusicPlaylist, TrackItem},
serializer::MapResult,
util::{self, TryRemove, DOT_SEPARATOR},
util::{self, TryRemove},
};
use super::{
@ -160,19 +160,14 @@ impl MapResponse<MusicPlaylist> for response::MusicPlaylist {
.try_swap_remove(0)
.map(|cont| cont.next_continuation_data.continuation);
let track_count = if ctoken.is_some() {
self.header.as_ref().and_then(|h| {
let parts = h
.music_detail_header_renderer
let track_count = match ctoken {
Some(_) => self.header.as_ref().and_then(|h| {
h.music_detail_header_renderer
.second_subtitle
.split(|p| p == DOT_SEPARATOR)
.collect::<Vec<_>>();
parts
.get(if parts.len() > 2 { 1 } else { 0 })
.and_then(|txt| util::parse_numeric::<u64>(&txt[0]).ok())
})
} else {
Some(map_res.c.len() as u64)
.first()
.and_then(|txt| util::parse_numeric::<u64>(txt).ok())
}),
None => Some(map_res.c.len() as u64),
};
let related_ctoken = music_contents
@ -184,7 +179,11 @@ impl MapResponse<MusicPlaylist> for response::MusicPlaylist {
Some(header) => {
let h = header.music_detail_header_renderer;
let from_ytm = h.subtitle.0.iter().any(util::is_ytm);
let from_ytm = h
.subtitle
.0
.iter()
.any(|c| c.as_str() == util::YT_MUSIC_NAME);
let channel = h
.subtitle
.0

View file

@ -157,9 +157,7 @@ impl MapResponse<Paginator<MusicItem>> for response::MusicContinuation {
mapper.add_warnings(&mut panel.contents.warnings);
panel.contents.c.into_iter().for_each(|item| {
if let PlaylistPanelVideo::PlaylistPanelVideoRenderer(item) = item {
let mut track = map_queue_item(item, lang);
mapper.add_item(MusicItem::Track(track.c));
mapper.add_warnings(&mut track.warnings);
mapper.add_item(MusicItem::Track(map_queue_item(item, lang)))
}
});
}

View file

@ -376,24 +376,33 @@ fn map_url(
deobf: &Deobfuscator,
last_nsig: &mut [String; 2],
) -> MapResult<Option<(String, bool)>> {
let x = match url {
Some(url) => util::url_to_params(url).map_err(|_| format!("Could not parse url `{url}`")),
None => match signature_cipher {
Some(signature_cipher) => cipher_to_url_params(signature_cipher, deobf).map_err(|e| {
format!("Could not deobfuscate signatureCipher `{signature_cipher}`: {e}")
}),
None => Err("stream contained neither url or cipher".to_owned()),
},
};
let (url_base, mut url_params) = match x {
Ok(x) => x,
Err(e) => {
return MapResult {
let (url_base, mut url_params) = match url {
Some(url) => ok_or_bail!(
util::url_to_params(url),
MapResult {
c: None,
warnings: vec![e],
warnings: vec![format!("Could not parse url `{url}`")]
}
}
),
None => match signature_cipher {
Some(signature_cipher) => match cipher_to_url_params(signature_cipher, deobf) {
Ok(res) => res,
Err(e) => {
return MapResult {
c: None,
warnings: vec![format!(
"Could not deobfuscate signatureCipher `{signature_cipher}`: {e}"
)],
};
}
},
None => {
return MapResult {
c: None,
warnings: vec!["stream contained neither url nor cipher".to_owned()],
}
}
},
};
let mut warnings = vec![];
@ -405,17 +414,21 @@ fn map_url(
throttled = true;
});
match Url::parse_with_params(url_base.as_str(), url_params.iter()) {
Ok(url) => MapResult {
c: Some((url.to_string(), throttled)),
warnings,
},
Err(_) => MapResult {
c: None,
warnings: vec![format!(
"url could not be joined. url: `{url_base}` params: {url_params:?}"
)],
},
MapResult {
c: Some((
ok_or_bail!(
Url::parse_with_params(url_base.as_str(), url_params.iter()),
MapResult {
c: None,
warnings: vec![format!(
"url could not be joined. url: `{url_base}` params: {url_params:?}"
)],
}
)
.to_string(),
throttled,
)),
warnings,
}
}
@ -424,27 +437,16 @@ fn map_video_stream(
deobf: &Deobfuscator,
last_nsig: &mut [String; 2],
) -> MapResult<Option<VideoStream>> {
let (mtype, codecs) = match parse_mime(&f.mime_type) {
Some(x) => x,
None => {
return MapResult {
c: None,
warnings: vec![format!(
"Invalid mime type `{}` in video format {:?}",
&f.mime_type, &f
)],
}
let (mtype, codecs) = some_or_bail!(
parse_mime(&f.mime_type),
MapResult {
c: None,
warnings: vec![format!(
"Invalid mime type `{}` in video format {:?}",
&f.mime_type, &f
)]
}
};
let format = match get_video_format(mtype) {
Some(f) => f,
None => {
return MapResult {
c: None,
warnings: vec![format!("invalid video format. itag: {}", f.itag)],
}
}
};
);
let map_res = map_url(&f.url, &f.signature_cipher, deobf, last_nsig);
match map_res.c {
@ -467,7 +469,13 @@ fn map_video_stream(
hdr: f.color_info.unwrap_or_default().primaries
== player::Primaries::ColorPrimariesBt2020,
mime: f.mime_type.to_owned(),
format,
format: some_or_bail!(
get_video_format(mtype),
MapResult {
c: None,
warnings: vec![format!("invalid video format. itag: {}", f.itag)]
}
),
codec: get_video_codec(codecs),
throttled,
}),
@ -487,27 +495,16 @@ fn map_audio_stream(
) -> MapResult<Option<AudioStream>> {
static LANG_PATTERN: Lazy<Regex> = Lazy::new(|| Regex::new(r#"^([a-z]{2,3})\."#).unwrap());
let (mtype, codecs) = match parse_mime(&f.mime_type) {
Some(x) => x,
None => {
return MapResult {
c: None,
warnings: vec![format!(
"Invalid mime type `{}` in video format {:?}",
&f.mime_type, &f
)],
}
let (mtype, codecs) = some_or_bail!(
parse_mime(&f.mime_type),
MapResult {
c: None,
warnings: vec![format!(
"Invalid mime type `{}` in video format {:?}",
&f.mime_type, &f
)]
}
};
let format = match get_audio_format(mtype) {
Some(f) => f,
None => {
return MapResult {
c: None,
warnings: vec![format!("invalid audio format. itag: {}", f.itag)],
}
}
};
);
let map_res = map_url(&f.url, &f.signature_cipher, deobf, last_nsig);
match map_res.c {
@ -522,7 +519,13 @@ fn map_audio_stream(
init_range: f.init_range,
duration_ms: f.approx_duration_ms,
mime: f.mime_type.to_owned(),
format,
format: some_or_bail!(
get_audio_format(mtype),
MapResult {
c: None,
warnings: vec![format!("invalid audio format. itag: {}", f.itag)]
}
),
codec: get_audio_codec(codecs),
channels: f.audio_channels,
loudness_db: f.loudness_db,
@ -556,7 +559,7 @@ fn parse_mime(mime: &str) -> Option<(&str, Vec<&str>)> {
static PATTERN: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"(\w+/\w+);\scodecs="([a-zA-Z-0-9.,\s]*)""#).unwrap());
let captures = PATTERN.captures(mime)?;
let captures = some_or_bail!(PATTERN.captures(mime), None);
Some((
captures.get(1).unwrap().as_str(),
captures

View file

@ -5,7 +5,8 @@ use time::OffsetDateTime;
use crate::{
error::{Error, ExtractionError},
model::{paginator::Paginator, ChannelId, Playlist, PlaylistVideo},
util::{self, timeago, TryRemove},
timeago,
util::{self, TryRemove},
};
use super::{response, ClientType, MapResponse, MapResult, QBrowse, QContinuation, RustyPipeQuery};
@ -93,7 +94,7 @@ impl MapResponse<Playlist> for response::Playlist {
let (thumbnails, last_update_txt) = match self.sidebar {
Some(sidebar) => {
let mut sidebar_items = sidebar.playlist_sidebar_renderer.contents;
let mut sidebar_items = sidebar.playlist_sidebar_renderer.items;
let mut primary =
sidebar_items
.try_swap_remove(0)

View file

@ -3,7 +3,7 @@ use serde_with::{rust::deserialize_ignore_any, serde_as, DefaultOnError, VecSkip
use super::{
video_item::YouTubeListRenderer, Alert, ChannelBadge, ContentsRenderer, ResponseContext,
Thumbnails, TwoColumnBrowseResults,
Thumbnails,
};
use crate::serializer::text::Text;
@ -22,7 +22,21 @@ pub(crate) struct Channel {
pub response_context: ResponseContext,
}
pub(crate) type Contents = TwoColumnBrowseResults<TabRendererWrap>;
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct Contents {
pub two_column_browse_results_renderer: TabsRenderer,
}
/// YouTube channel tab view. Contains multiple tabs
/// (Home, Videos, Playlists, About...). We can ignore unknown tabs.
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct TabsRenderer {
#[serde_as(as = "VecSkipError<_>")]
pub tabs: Vec<TabRendererWrap>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]

View file

@ -47,17 +47,12 @@ pub(crate) mod channel_rss;
#[cfg(feature = "rss")]
pub(crate) use channel_rss::ChannelRss;
use std::borrow::Cow;
use std::marker::PhantomData;
use serde::{
de::{IgnoredAny, Visitor},
Deserialize,
};
use serde::Deserialize;
use serde_with::{json::JsonString, serde_as, VecSkipError};
use crate::error::ExtractionError;
use crate::serializer::{text::Text, MapResult, VecSkipErrorWrap};
use crate::serializer::MapResult;
use crate::serializer::{text::Text, VecLogError};
use self::video_item::YouTubeListRenderer;
@ -67,15 +62,11 @@ pub(crate) struct ContentRenderer<T> {
pub content: T,
}
#[derive(Debug)]
pub(crate) struct ContentsRenderer<T> {
pub contents: Vec<T>,
}
#[derive(Debug, Deserialize)]
pub(crate) struct ContentsRendererLogged<T> {
#[serde(alias = "items")]
pub contents: MapResult<Vec<T>>,
#[serde(rename_all = "camelCase")]
pub(crate) struct ContentsRenderer<T> {
#[serde(alias = "tabs")]
pub contents: Vec<T>,
}
#[derive(Debug, Deserialize)]
@ -90,12 +81,6 @@ pub(crate) struct SectionList<T> {
pub section_list_renderer: ContentsRenderer<T>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct TwoColumnBrowseResults<T> {
pub two_column_browse_results_renderer: ContentsRenderer<T>,
}
#[derive(Default, Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ThumbnailsWrap {
@ -222,9 +207,11 @@ pub(crate) struct ContinuationActionWrap {
pub append_continuation_items_action: ContinuationAction,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ContinuationAction {
#[serde_as(as = "VecLogError<_>")]
pub continuation_items: MapResult<Vec<YouTubeListItem>>,
}
@ -261,53 +248,9 @@ pub(crate) struct ErrorResponseContent {
pub message: String,
}
// DESERIALIZER
impl<'de, T> Deserialize<'de> for ContentsRenderer<T>
where
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
struct ItemVisitor<T>(PhantomData<T>);
impl<'de, T> Visitor<'de> for ItemVisitor<T>
where
T: Deserialize<'de>,
{
type Value = ContentsRenderer<T>;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("map")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: serde::de::MapAccess<'de>,
{
let mut contents = None;
while let Some(k) = map.next_key::<Cow<'de, str>>()? {
if k == "contents" || k == "tabs" || k == "items" {
contents = Some(ContentsRenderer {
contents: map.next_value::<VecSkipErrorWrap<T>>()?.0,
});
} else {
map.next_value::<IgnoredAny>()?;
}
}
contents.ok_or(serde::de::Error::missing_field("contents"))
}
}
deserializer.deserialize_map(ItemVisitor(PhantomData::<T>))
}
}
// MAPPING
/*
#MAPPING
*/
impl From<Thumbnail> for crate::model::Thumbnail {
fn from(tn: Thumbnail) -> Self {

View file

@ -1,12 +1,12 @@
use serde::Deserialize;
use serde_with::{rust::deserialize_ignore_any, serde_as};
use crate::serializer::text::Text;
use crate::serializer::{text::Text, MapResult, VecLogError};
use super::{
music_item::{ItemSection, SimpleHeader, SingleColumnBrowseResult},
url_endpoint::BrowseEndpointWrap,
ContentsRendererLogged, SectionList, Tab,
SectionList, Tab,
};
#[derive(Debug, Deserialize)]
@ -18,7 +18,15 @@ pub(crate) struct MusicGenres {
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct Grid {
pub grid_renderer: ContentsRendererLogged<NavigationButton>,
pub grid_renderer: GridRenderer,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct GridRenderer {
#[serde_as(as = "VecLogError<_>")]
pub items: MapResult<Vec<NavigationButton>>,
}
#[derive(Debug, Deserialize)]

View file

@ -9,7 +9,7 @@ use crate::{
param::Language,
serializer::{
text::{Text, TextComponents},
MapResult,
MapResult, VecLogError,
},
util::{self, dictionary, TryRemove},
};
@ -39,6 +39,7 @@ pub(crate) enum ItemSection {
pub(crate) struct MusicShelf {
/// Playlist ID (only for playlists)
pub playlist_id: Option<String>,
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<MusicResponseItem>>,
/// Continuation token for fetching more (>100) playlist items
#[serde(default)]
@ -52,10 +53,12 @@ pub(crate) struct MusicShelf {
/// MusicCarouselShelf represents a horizontal list of music items displayed with
/// large covers.
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct MusicCarouselShelf {
pub header: Option<MusicCarouselShelfHeader>,
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<MusicResponseItem>>,
}
@ -73,6 +76,7 @@ pub(crate) struct MusicCardShelf {
#[serde(default)]
pub thumbnail: MusicThumbnailRenderer,
#[serde(default)]
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<MusicResponseItem>>,
}
@ -223,6 +227,7 @@ pub(crate) struct CoverMusicItem {
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct PlaylistPanelRenderer {
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<PlaylistPanelVideo>>,
/// Continuation token for fetching more radio items
#[serde(default)]
@ -357,7 +362,15 @@ pub(crate) struct ButtonRenderer {
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct MusicItemMenu {
pub menu_renderer: ContentsRenderer<MusicItemMenuEntry>,
pub menu_renderer: MusicItemMenuRenderer,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct MusicItemMenuRenderer {
#[serde_as(as = "VecSkipError<_>")]
pub items: Vec<MusicItemMenuEntry>,
}
#[derive(Debug, Deserialize)]
@ -372,9 +385,11 @@ pub(crate) struct Grid {
pub grid_renderer: GridRenderer,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct GridRenderer {
#[serde_as(as = "VecLogError<_>")]
pub items: MapResult<Vec<MusicResponseItem>>,
pub header: Option<GridHeader>,
}
@ -572,9 +587,7 @@ impl MusicListMapper {
(subtitle_parts.rev().next(), None, None)
} else {
// Skip first part (track type)
if subtitle_parts.len() > 3
|| (is_video && subtitle_parts.len() == 2)
{
if subtitle_parts.len() > 3 {
subtitle_parts.next();
}
@ -605,11 +618,7 @@ impl MusicListMapper {
(FlexColumnDisplayStyle::TwoLines, true) => (
None,
album_p.and_then(|p| {
util::parse_large_numstr_or_warn(
p.first_str(),
self.lang,
&mut self.warnings,
)
util::parse_large_numstr(p.first_str(), self.lang)
}),
),
(_, false) => (
@ -683,11 +692,7 @@ impl MusicListMapper {
match page_type {
MusicPageType::Artist => {
let subscriber_count = subtitle_p2.and_then(|p| {
util::parse_large_numstr_or_warn(
p.first_str(),
self.lang,
&mut self.warnings,
)
util::parse_large_numstr(p.first_str(), self.lang)
});
self.items.push(MusicItem::Artist(ArtistItem {
@ -731,8 +736,7 @@ impl MusicListMapper {
let from_ytm = channel_p
.as_ref()
.and_then(|p| p.0.first())
.map(util::is_ytm)
.map(|p| p.first_str() == util::YT_MUSIC_NAME)
.unwrap_or_default();
let channel = channel_p.and_then(|p| {
p.0.into_iter().find_map(|c| ChannelId::try_from(c).ok())
@ -788,11 +792,7 @@ impl MusicListMapper {
artists,
album: None,
view_count: subtitle_p2.and_then(|c| {
util::parse_large_numstr_or_warn(
c.first_str(),
self.lang,
&mut self.warnings,
)
util::parse_large_numstr(c.first_str(), self.lang)
}),
is_video,
track_nr: None,
@ -801,13 +801,8 @@ impl MusicListMapper {
Ok(Some(MusicItemType::Track))
}
MusicPageType::Artist => {
let subscriber_count = subtitle_p1.and_then(|p| {
util::parse_large_numstr_or_warn(
p.first_str(),
self.lang,
&mut self.warnings,
)
});
let subscriber_count = subtitle_p1
.and_then(|p| util::parse_large_numstr(p.first_str(), self.lang));
self.items.push(MusicItem::Artist(ArtistItem {
id,
@ -873,8 +868,7 @@ impl MusicListMapper {
// (featured on the startpage or in genres)
let from_ytm = subtitle_p2
.as_ref()
.and_then(|p| p.0.first())
.map(util::is_ytm)
.map(|p| p.first_str() == util::YT_MUSIC_NAME)
.unwrap_or(true);
let channel = subtitle_p2.and_then(|p| {
p.0.into_iter().find_map(|c| ChannelId::try_from(c).ok())
@ -933,13 +927,8 @@ impl MusicListMapper {
let item_type = match card.on_tap.music_page() {
Some((page_type, id)) => match page_type {
MusicPageType::Artist => {
let subscriber_count = subtitle_p2.and_then(|p| {
util::parse_large_numstr_or_warn(
p.first_str(),
self.lang,
&mut self.warnings,
)
});
let subscriber_count = subtitle_p2
.and_then(|p| util::parse_large_numstr(p.first_str(), self.lang));
self.items.push(MusicItem::Artist(ArtistItem {
id,
@ -974,13 +963,8 @@ impl MusicListMapper {
let (album, view_count) = if is_video {
(
None,
subtitle_p3.and_then(|p| {
util::parse_large_numstr_or_warn(
p.first_str(),
self.lang,
&mut self.warnings,
)
}),
subtitle_p3
.and_then(|p| util::parse_large_numstr(p.first_str(), self.lang)),
)
} else {
(
@ -1009,8 +993,7 @@ impl MusicListMapper {
MusicPageType::Playlist => {
let from_ytm = subtitle_p2
.as_ref()
.and_then(|p| p.0.first())
.map(util::is_ytm)
.map(|p| p.first_str() == util::YT_MUSIC_NAME)
.unwrap_or(true);
let channel = subtitle_p2
.and_then(|p| p.0.into_iter().find_map(|c| ChannelId::try_from(c).ok()));
@ -1135,7 +1118,7 @@ fn map_artist_id_fallback(
menu: Option<MusicItemMenu>,
fallback_artist: Option<&ArtistId>,
) -> Option<String> {
menu.and_then(|m| map_artist_id(m.menu_renderer.contents))
menu.and_then(|m| map_artist_id(m.menu_renderer.items))
.or_else(|| fallback_artist.and_then(|a| a.id.to_owned()))
}
@ -1166,8 +1149,7 @@ pub(crate) fn map_album_type(txt: &str, lang: Language) -> AlbumType {
.unwrap_or_default()
}
pub(crate) fn map_queue_item(item: QueueMusicItem, lang: Language) -> MapResult<TrackItem> {
let mut warnings = Vec::new();
pub(crate) fn map_queue_item(item: QueueMusicItem, lang: Language) -> TrackItem {
let mut subtitle_parts = item.long_byline_text.split(util::DOT_SEPARATOR).into_iter();
let is_video = !item
@ -1185,8 +1167,7 @@ pub(crate) fn map_queue_item(item: QueueMusicItem, lang: Language) -> MapResult<
let (album, view_count) = if is_video {
(
None,
subtitle_p2
.and_then(|p| util::parse_large_numstr_or_warn(p.first_str(), lang, &mut warnings)),
subtitle_p2.and_then(|p| util::parse_large_numstr(p.first_str(), lang)),
)
} else {
(
@ -1195,23 +1176,20 @@ pub(crate) fn map_queue_item(item: QueueMusicItem, lang: Language) -> MapResult<
)
};
MapResult {
c: TrackItem {
id: item.video_id,
name: item.title,
duration: item
.length_text
.and_then(|txt| util::parse_video_length(&txt)),
cover: item.thumbnail.into(),
artists,
artist_id,
album,
view_count,
is_video,
track_nr: None,
by_va,
},
warnings,
TrackItem {
id: item.video_id,
name: item.title,
duration: item
.length_text
.and_then(|txt| util::parse_video_length(&txt)),
cover: item.thumbnail.into(),
artists,
artist_id,
album,
view_count,
is_video,
track_nr: None,
by_va,
}
}

View file

@ -58,8 +58,6 @@ pub(crate) struct HeaderRenderer {
/// Missing on artist_tracks view.
///
/// `"64 songs", " • ", "3 hours, 40 minutes"`
///
/// `"1B views", " • ", "200 songs", " • ", "6+ hours"`
#[serde(default)]
#[serde_as(as = "Text")]
pub second_subtitle: Vec<String>,

View file

@ -5,7 +5,7 @@ use serde_with::serde_as;
use serde_with::{json::JsonString, DefaultOnError};
use super::{ResponseContext, Thumbnails};
use crate::serializer::{text::Text, MapResult};
use crate::serializer::{text::Text, MapResult, VecLogError};
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
@ -75,8 +75,10 @@ pub(crate) struct StreamingData {
#[serde_as(as = "JsonString")]
pub expires_in_seconds: u32,
#[serde(default)]
#[serde_as(as = "VecLogError<_>")]
pub formats: MapResult<Vec<Format>>,
#[serde(default)]
#[serde_as(as = "VecLogError<_>")]
pub adaptive_formats: MapResult<Vec<Format>>,
/// Only on livestreams
pub dash_manifest_url: Option<String>,

View file

@ -3,22 +3,20 @@ use serde_with::{
json::JsonString, rust::deserialize_ignore_any, serde_as, DefaultOnError, VecSkipError,
};
use crate::serializer::{
text::{Text, TextComponent},
MapResult,
};
use crate::serializer::text::{Text, TextComponent};
use crate::serializer::{MapResult, VecLogError};
use crate::util::MappingError;
use super::{
Alert, ContentsRenderer, ContinuationEndpoint, ResponseContext, SectionList, Tab, Thumbnails,
ThumbnailsWrap, TwoColumnBrowseResults,
ThumbnailsWrap,
};
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct Playlist {
pub contents: Option<TwoColumnBrowseResults<Tab<SectionList<ItemSection>>>>,
pub contents: Option<Contents>,
pub header: Option<Header>,
pub sidebar: Option<Sidebar>,
#[serde_as(as = "Option<DefaultOnError>")]
@ -35,6 +33,12 @@ pub(crate) struct PlaylistCont {
pub on_response_received_actions: Vec<OnResponseReceivedAction>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct Contents {
pub two_column_browse_results_renderer: ContentsRenderer<Tab<SectionList<ItemSection>>>,
}
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ItemSection {
@ -47,9 +51,11 @@ pub(crate) struct PlaylistVideoListRenderer {
pub playlist_video_list_renderer: PlaylistVideoList,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct PlaylistVideoList {
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<PlaylistItem>>,
}
@ -102,7 +108,15 @@ pub(crate) struct BylineRenderer {
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct Sidebar {
pub playlist_sidebar_renderer: ContentsRenderer<SidebarItemPrimary>,
pub playlist_sidebar_renderer: SidebarRenderer,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct SidebarRenderer {
#[serde_as(as = "VecSkipError<_>")]
pub items: Vec<SidebarItemPrimary>,
}
#[derive(Debug, Deserialize)]
@ -185,8 +199,10 @@ pub(crate) struct OnResponseReceivedAction {
pub append_continuation_items_action: AppendAction,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct AppendAction {
#[serde_as(as = "VecLogError<_>")]
pub continuation_items: MapResult<Vec<PlaylistItem>>,
}

View file

@ -1,7 +1,4 @@
use serde::{
de::{IgnoredAny, Visitor},
Deserialize,
};
use serde::{de::IgnoredAny, Deserialize};
use serde_with::{json::JsonString, serde_as};
use super::{video_item::YouTubeListRendererWrap, ResponseContext};
@ -29,40 +26,8 @@ pub(crate) struct TwoColumnSearchResultsRenderer {
}
#[derive(Debug, Deserialize)]
pub(crate) struct SearchSuggestion(IgnoredAny, pub Vec<SearchSuggestionItem>, IgnoredAny);
#[derive(Debug)]
pub(crate) struct SearchSuggestionItem(pub String);
impl<'de> Deserialize<'de> for SearchSuggestionItem {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
struct ItemVisitor;
impl<'de> Visitor<'de> for ItemVisitor {
type Value = SearchSuggestionItem;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("search suggestion item")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: serde::de::SeqAccess<'de>,
{
match seq.next_element::<String>()? {
Some(s) => {
// Ignore the rest of the list
while seq.next_element::<IgnoredAny>()?.is_some() {}
Ok(SearchSuggestionItem(s))
}
None => Err(serde::de::Error::invalid_length(0, &"1")),
}
}
}
deserializer.deserialize_seq(ItemVisitor)
}
}
pub(crate) struct SearchSuggestion(
IgnoredAny,
pub Vec<(String, IgnoredAny, IgnoredAny)>,
IgnoredAny,
);

View file

@ -1,6 +1,7 @@
use serde::Deserialize;
use serde_with::{serde_as, VecSkipError};
use super::{video_item::YouTubeListRendererWrap, ResponseContext, Tab, TwoColumnBrowseResults};
use super::{video_item::YouTubeListRendererWrap, ResponseContext, Tab};
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
@ -15,4 +16,16 @@ pub(crate) struct Trending {
pub contents: Contents,
}
type Contents = TwoColumnBrowseResults<Tab<YouTubeListRendererWrap>>;
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct Contents {
pub two_column_browse_results_renderer: BrowseResults,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct BrowseResults {
#[serde_as(as = "VecSkipError<_>")]
pub tabs: Vec<Tab<YouTubeListRendererWrap>>,
}

View file

@ -6,20 +6,21 @@ use serde_with::{rust::deserialize_ignore_any, serde_as, DefaultOnError, VecSkip
use crate::serializer::text::TextComponent;
use crate::serializer::{
text::{AccessibilityText, AttributedText, Text, TextComponents},
MapResult,
MapResult, VecLogError,
};
use super::{
url_endpoint::BrowseEndpointWrap, ContinuationEndpoint, ContinuationItemRenderer, Icon,
MusicContinuationData, Thumbnails,
};
use super::{ChannelBadge, ContentsRendererLogged, ResponseContext, YouTubeListItem};
use super::{ChannelBadge, ResponseContext, YouTubeListItem};
/*
#VIDEO DETAILS
*/
/// Video details response
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct VideoDetails {
@ -28,6 +29,7 @@ pub(crate) struct VideoDetails {
/// Video ID
pub current_video_endpoint: Option<CurrentVideoEndpoint>,
/// Video chapters + comment section
#[serde_as(as = "VecLogError<_>")]
pub engagement_panels: MapResult<Vec<EngagementPanel>>,
pub response_context: ResponseContext,
}
@ -58,9 +60,11 @@ pub(crate) struct VideoResultsWrap {
}
/// Video metadata items
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct VideoResults {
#[serde_as(as = "Option<VecLogError<_>>")]
pub contents: Option<MapResult<Vec<VideoResultsItem>>>,
}
@ -299,6 +303,7 @@ pub(crate) struct RecommendationResultsWrap {
#[serde(rename_all = "camelCase")]
pub(crate) struct RecommendationResults {
/// Can be `None` for age-restricted videos
#[serde_as(as = "Option<VecLogError<_>>")]
pub results: Option<MapResult<Vec<YouTubeListItem>>>,
#[serde_as(as = "Option<VecSkipError<_>>")]
pub continuations: Option<Vec<MusicContinuationData>>,
@ -336,7 +341,16 @@ pub(crate) enum EngagementPanelRenderer {
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ChapterMarkersContent {
pub macro_markers_list_renderer: ContentsRendererLogged<MacroMarkersListItem>,
pub macro_markers_list_renderer: MacroMarkersListRenderer,
}
/// Chapter markers
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct MacroMarkersListRenderer {
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<MacroMarkersListItem>>,
}
/// Chapter marker
@ -422,6 +436,7 @@ pub(crate) struct CommentItemSectionHeaderMenuItem {
*/
/// Video comments continuation response
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct VideoComments {
@ -435,6 +450,7 @@ pub(crate) struct VideoComments {
/// - Comment replies: appendContinuationItemsAction
/// - n*commentRenderer, continuationItemRenderer:
/// replies + continuation
#[serde_as(as = "VecLogError<_>")]
pub on_response_received_endpoints: MapResult<Vec<CommentsContItem>>,
}
@ -447,9 +463,11 @@ pub(crate) struct CommentsContItem {
}
/// Video comments continuation action
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct AppendComments {
#[serde_as(as = "VecLogError<_>")]
pub continuation_items: MapResult<Vec<CommentListItem>>,
}
@ -518,8 +536,6 @@ pub(crate) struct CommentRenderer {
pub author_comment_badge: Option<AuthorCommentBadge>,
#[serde(default)]
pub reply_count: u64,
#[serde_as(as = "Option<Text>")]
pub vote_count: Option<String>,
/// Buttons for comment interaction (Like/Dislike/Reply)
pub action_buttons: CommentActionButtons,
}
@ -565,6 +581,7 @@ pub(crate) struct CommentActionButtons {
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct CommentActionButtonsRenderer {
pub like_button: ToggleButtonWrap,
pub creator_heart: Option<CreatorHeart>,
}

View file

@ -4,7 +4,7 @@ use serde::Deserialize;
use serde_with::{
json::JsonString, rust::deserialize_ignore_any, serde_as, DefaultOnError, VecSkipError,
};
use time::OffsetDateTime;
use time::{Duration, OffsetDateTime};
use super::{url_endpoint::NavigationEndpoint, ChannelBadge, ContinuationEndpoint, Thumbnails};
use crate::{
@ -15,9 +15,10 @@ use crate::{
param::Language,
serializer::{
text::{AccessibilityText, Text, TextComponent},
MapResult,
MapResult, VecLogError,
},
util::{self, timeago, TryRemove},
timeago,
util::{self, TryRemove},
};
#[serde_as]
@ -68,6 +69,7 @@ pub(crate) enum YouTubeListItem {
#[serde(alias = "expandedShelfContentsRenderer", alias = "gridRenderer")]
ItemSectionRenderer {
#[serde(alias = "items")]
#[serde_as(as = "VecLogError<_>")]
contents: MapResult<Vec<YouTubeListItem>>,
},
@ -204,9 +206,11 @@ pub(crate) struct YouTubeListRendererWrap {
pub section_list_renderer: YouTubeListRenderer,
}
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub(crate) struct YouTubeListRenderer {
#[serde_as(as = "VecLogError<_>")]
pub contents: MapResult<Vec<YouTubeListItem>>,
}
@ -411,7 +415,7 @@ impl<T> YouTubeListMapper<T> {
}
}
pub fn with_channel<C>(lang: Language, channel: &Channel<C>, warnings: Vec<String>) -> Self {
pub fn with_channel<C>(lang: Language, channel: &Channel<C>) -> Self {
Self {
lang,
channel: Some(ChannelTag {
@ -422,7 +426,7 @@ impl<T> YouTubeListMapper<T> {
subscriber_count: channel.subscriber_count,
}),
items: Vec::new(),
warnings,
warnings: Vec::new(),
ctoken: None,
corrected_query: None,
channel_info: None,
@ -501,11 +505,8 @@ impl<T> YouTubeListMapper<T> {
length: video.accessibility.and_then(|acc| {
ACCESSIBILITY_SEP_REGEX.captures(&acc).and_then(|cap| {
cap.get(1).and_then(|c| {
timeago::parse_video_duration_or_warn(
self.lang,
c.as_str(),
&mut self.warnings,
)
timeago::parse_timeago_or_warn(self.lang, c.as_str(), &mut self.warnings)
.map(|ta| Duration::from(ta).whole_seconds() as u32)
})
})
}),
@ -517,7 +518,7 @@ impl<T> YouTubeListMapper<T> {
publish_date_txt: pub_date_txt,
view_count: video
.view_count_text
.and_then(|txt| util::parse_large_numstr_or_warn(&txt, lang, &mut self.warnings)),
.map(|txt| util::parse_large_numstr(&txt, lang).unwrap_or_default()),
is_live: false,
is_short: true,
is_upcoming: false,
@ -571,12 +572,10 @@ impl<T> YouTubeListMapper<T> {
name: channel.title,
avatar: channel.thumbnail.into(),
verification: channel.owner_badges.into(),
subscriber_count: sc_txt.and_then(|txt| {
util::parse_large_numstr_or_warn(&txt, self.lang, &mut self.warnings)
}),
video_count: vc_text.and_then(|txt| {
util::parse_large_numstr_or_warn(&txt, self.lang, &mut self.warnings)
}),
subscriber_count: sc_txt
.and_then(|txt| util::parse_numeric_or_warn(&txt, &mut self.warnings)),
video_count: vc_text
.and_then(|txt| util::parse_numeric_or_warn(&txt, &mut self.warnings)),
short_description: channel.description_snippet,
}
}

View file

@ -22,7 +22,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(582000),
subscriber_count: Some(582),
video_count: None,
short_description: "Music Submissions: https://monstafluff.edmdistrict.com/",
)),
@ -42,7 +42,7 @@ SearchResult(
),
],
verification: Artist,
subscriber_count: Some(4030000),
subscriber_count: Some(403),
video_count: None,
short_description: "Welcome to the official Music Travel Love YouTube channel! We travel the world making music, friends, videos and memories!",
)),
@ -62,7 +62,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(167000),
subscriber_count: Some(167),
video_count: None,
short_description: "MUSIC IN HARMONY WITH YOUR LIFE!!! If any producer, label, artist or photographer has an issue with any of the music or\u{a0}...",
)),
@ -82,7 +82,7 @@ SearchResult(
),
],
verification: Artist,
subscriber_count: Some(411000),
subscriber_count: Some(411),
video_count: None,
short_description: "The official YouTube channel of HAEVN Music. Receiving a piano from his grandfather had a great impact on Jorrit\'s life.",
)),
@ -102,7 +102,7 @@ SearchResult(
),
],
verification: None,
subscriber_count: Some(31200),
subscriber_count: Some(312),
video_count: None,
short_description: "Hello and welcome to \"Artemis Music\"! Music can play an effective role in helping us lead a better and more productive life.",
)),
@ -122,7 +122,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(372000),
subscriber_count: Some(372),
video_count: None,
short_description: "Music is the only language in which you cannot say a mean or sarcastic thing. Have fun listening to music.",
)),
@ -142,7 +142,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(178000),
subscriber_count: Some(178),
video_count: None,
short_description: "S!X - Music is an independent Hip-Hop label. Soundcloud : https://soundcloud.com/s1xmusic Facebook\u{a0}...",
)),
@ -162,7 +162,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(1040000),
subscriber_count: Some(104),
video_count: None,
short_description: "Welcome to Shake Music, a Trap & Bass Channel / Record Label dedicated to bringing you the best tracks. All tracks on Shake\u{a0}...",
)),
@ -182,7 +182,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(822000),
subscriber_count: Some(822),
video_count: None,
short_description: "Welcome to Miracle Music! On this channel you will find a wide variety of different Deep House, Tropical House, Chill Out, EDM,.",
)),
@ -202,7 +202,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(4620000),
subscriber_count: Some(462),
video_count: None,
short_description: "",
)),
@ -222,7 +222,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(1050000),
subscriber_count: Some(105),
video_count: None,
short_description: "BRINGING YOU ONLY THE BEST EDM - TRAP Submit your own track for promotion here:\u{a0}...",
)),
@ -242,7 +242,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(709000),
subscriber_count: Some(709),
video_count: None,
short_description: "Hey there! I am Mr MoMo My channel focus on Japan music, lofi, trap & bass type beat and Japanese instrumental. I mindfully\u{a0}...",
)),
@ -262,7 +262,7 @@ SearchResult(
),
],
verification: None,
subscriber_count: Some(54400),
subscriber_count: Some(544),
video_count: None,
short_description: "",
)),
@ -282,7 +282,7 @@ SearchResult(
),
],
verification: None,
subscriber_count: Some(3590),
subscriber_count: Some(359),
video_count: None,
short_description: "Welcome to our Energy Transformation Relaxing Music . This chakra music channel will focus on developing the best chakra\u{a0}...",
)),
@ -302,7 +302,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(416000),
subscriber_count: Some(416),
video_count: None,
short_description: "Nonstop Music - Home of 1h videos of your favourite songs and mixes. Nonstop Genres: Pop • Chillout • Tropical House • Deep\u{a0}...",
)),
@ -322,7 +322,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(3000000),
subscriber_count: Some(3),
video_count: None,
short_description: "Vibe Music strives to bring the best lyric videos of popular Rap & Hip Hop songs. Be sure to Subscribe to see new videos we\u{a0}...",
)),
@ -342,7 +342,7 @@ SearchResult(
),
],
verification: None,
subscriber_count: Some(120000),
subscriber_count: Some(120),
video_count: None,
short_description: "",
)),
@ -362,7 +362,7 @@ SearchResult(
),
],
verification: None,
subscriber_count: Some(81700),
subscriber_count: Some(817),
video_count: None,
short_description: "",
)),
@ -382,7 +382,7 @@ SearchResult(
),
],
verification: None,
subscriber_count: Some(53000),
subscriber_count: Some(53),
video_count: None,
short_description: "Welcome to my channel - Helios Music. I created this channel to help people have the most relaxing, refreshing and comfortable\u{a0}...",
)),
@ -402,7 +402,7 @@ SearchResult(
),
],
verification: None,
subscriber_count: Some(129000),
subscriber_count: Some(129),
video_count: None,
short_description: "Music On (UNOFFICIAL CHANNEL)",
)),

View file

@ -22,7 +22,7 @@ SearchResult(
),
],
verification: Verified,
subscriber_count: Some(2920000),
subscriber_count: Some(292),
video_count: Some(219),
short_description: "Hi, I\'m Tina, aka Doobydobap! Food is the medium I use to tell stories and connect with people who share the same passion as I\u{a0}...",
)),

View file

@ -56,7 +56,7 @@ impl MapResponse<Paginator<VideoItem>> for response::Startpage {
lang: crate::param::Language,
_deobf: Option<&crate::deobfuscate::DeobfData>,
) -> Result<MapResult<Paginator<VideoItem>>, ExtractionError> {
let mut contents = self.contents.two_column_browse_results_renderer.contents;
let mut contents = self.contents.two_column_browse_results_renderer.tabs;
let grid = contents
.try_swap_remove(0)
.ok_or(ExtractionError::InvalidData(Cow::Borrowed("no contents")))?
@ -80,7 +80,7 @@ impl MapResponse<Vec<VideoItem>> for response::Trending {
lang: crate::param::Language,
_deobf: Option<&crate::deobfuscate::DeobfData>,
) -> Result<MapResult<Vec<VideoItem>>, ExtractionError> {
let mut contents = self.contents.two_column_browse_results_renderer.contents;
let mut contents = self.contents.two_column_browse_results_renderer.tabs;
let items = contents
.try_swap_remove(0)
.ok_or(ExtractionError::InvalidData(Cow::Borrowed("no contents")))?

View file

@ -7,7 +7,8 @@ use crate::{
model::{paginator::Paginator, ChannelTag, Chapter, Comment, VideoDetails, VideoItem},
param::Language,
serializer::MapResult,
util::{self, timeago, TryRemove},
timeago,
util::{self, TryRemove},
};
use super::{
@ -190,10 +191,9 @@ impl MapResponse<VideoDetails> for response::VideoDetails {
};
let comment_count = comment_count_section.and_then(|s| {
util::parse_large_numstr_or_warn::<u64>(
util::parse_large_numstr::<u64>(
&s.comments_entry_point_header_renderer.comment_count,
lang,
&mut warnings,
)
});
@ -331,9 +331,9 @@ impl MapResponse<VideoDetails> for response::VideoDetails {
name: channel_name,
avatar: owner.thumbnail.into(),
verification: owner.badges.into(),
subscriber_count: owner.subscriber_count_text.and_then(|txt| {
util::parse_large_numstr_or_warn(&txt, lang, &mut warnings)
}),
subscriber_count: owner
.subscriber_count_text
.and_then(|txt| util::parse_large_numstr(&txt, lang)),
},
view_count,
like_count,
@ -505,16 +505,16 @@ fn map_comment(
}),
_ => None,
},
publish_date: timeago::parse_timeago_dt_or_warn(
lang,
&c.published_time_text,
publish_date: timeago::parse_timeago_to_dt(lang, &c.published_time_text),
publish_date_txt: c.published_time_text,
like_count: util::parse_numeric_or_warn(
&c.action_buttons
.comment_action_buttons_renderer
.like_button
.toggle_button_renderer
.accessibility_data,
&mut warnings,
),
publish_date_txt: c.published_time_text,
like_count: match c.vote_count {
Some(txt) => util::parse_numeric_or_warn(&txt, &mut warnings),
None => Some(0),
},
reply_count: c.reply_count as u32,
replies: replies
.map(|items| Paginator::new(Some(c.reply_count), items, reply_ctoken))

View file

@ -1,6 +1,9 @@
#![doc = include_str!("../README.md")]
#![warn(missing_docs, clippy::todo, clippy::dbg_macro)]
#[macro_use]
mod macros;
mod deobfuscate;
mod serializer;
mod util;
@ -11,4 +14,5 @@ pub mod error;
pub mod model;
pub mod param;
pub mod report;
pub mod timeago;
pub mod validate;

23
src/macros.rs Normal file
View file

@ -0,0 +1,23 @@
/// Returns an unwrapped Option if Some() otherwise returns the passed expression
macro_rules! some_or_bail {
($opt:expr, $ret:expr $(,)?) => {{
match $opt {
Some(stuff) => stuff,
None => {
return $ret;
}
}
}};
}
/// Returns an unwrapped Result if Ok() otherwise returns the passed expression
macro_rules! ok_or_bail {
($result:expr, $ret:expr $(,)?) => {{
match $result {
Ok(stuff) => stuff,
Err(_) => {
return $ret;
}
}
}};
}

View file

@ -6,7 +6,7 @@ mod vec_log_err;
pub use date::DateYmd;
pub use range::Range;
pub use vec_log_err::VecSkipErrorWrap;
pub use vec_log_err::VecLogError;
use std::fmt::Debug;

View file

@ -1,9 +1,10 @@
use std::{fmt, marker::PhantomData};
use serde::{
de::{IgnoredAny, SeqAccess, Visitor},
de::{SeqAccess, Visitor},
Deserialize,
};
use serde_with::{de::DeserializeAsWrap, DeserializeAs};
use super::MapResult;
@ -12,26 +13,39 @@ use super::MapResult;
///
/// This is similar to `VecSkipError`, but it does not silently ignore
/// faulty items.
impl<'de, T> Deserialize<'de> for MapResult<Vec<T>>
pub struct VecLogError<T>(PhantomData<T>);
impl<'de, T, U> DeserializeAs<'de, MapResult<Vec<T>>> for VecLogError<U>
where
T: Deserialize<'de>,
U: DeserializeAs<'de, T>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
fn deserialize_as<D>(deserializer: D) -> Result<MapResult<Vec<T>>, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(serde::Deserialize)]
#[serde(untagged)]
enum GoodOrError<T> {
Good(T),
Error(serde_json::Value),
#[serde(
untagged,
bound(deserialize = "DeserializeAsWrap<T, TAs>: Deserialize<'de>")
)]
enum GoodOrError<'a, T, TAs>
where
TAs: DeserializeAs<'a, T>,
{
Good(DeserializeAsWrap<T, TAs>),
Error(serde_json::value::Value),
#[serde(skip)]
_JustAMarkerForTheLifetime(PhantomData<&'a u32>),
}
struct SeqVisitor<T>(PhantomData<T>);
struct SeqVisitor<T, U> {
marker: PhantomData<T>,
marker2: PhantomData<U>,
}
impl<'de, T> Visitor<'de> for SeqVisitor<T>
impl<'de, T, U> Visitor<'de> for SeqVisitor<T, U>
where
T: Deserialize<'de>,
U: DeserializeAs<'de, T>,
{
type Value = MapResult<Vec<T>>;
@ -48,15 +62,16 @@ where
while let Some(value) = seq.next_element()? {
match value {
GoodOrError::<T>::Good(value) => {
values.push(value);
GoodOrError::<T, U>::Good(value) => {
values.push(value.into_inner());
}
GoodOrError::<T>::Error(value) => {
GoodOrError::<T, U>::Error(value) => {
warnings.push(format!(
"error deserializing item: {}",
serde_json::to_string(&value).unwrap_or_default()
));
}
_ => {}
}
}
Ok(MapResult {
@ -66,113 +81,43 @@ where
}
}
deserializer.deserialize_seq(SeqVisitor(PhantomData::<T>))
}
}
/// Reimplementation of VecSkipError using a wrapper type
/// to allow use with generics
pub struct VecSkipErrorWrap<T>(pub Vec<T>);
impl<'de, T> Deserialize<'de> for VecSkipErrorWrap<T>
where
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(serde::Deserialize)]
#[serde(untagged)]
enum GoodOrError<T> {
Good(T),
Error(IgnoredAny),
}
struct SeqVisitor<T>(PhantomData<T>);
impl<'de, T> Visitor<'de> for SeqVisitor<T>
where
T: Deserialize<'de>,
{
type Value = VecSkipErrorWrap<T>;
fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
formatter.write_str("a sequence")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = Vec::with_capacity(seq.size_hint().unwrap_or_default());
while let Some(value) = seq.next_element()? {
match value {
GoodOrError::<T>::Good(value) => {
values.push(value);
}
GoodOrError::<T>::Error(_) => {}
}
}
Ok(VecSkipErrorWrap(values))
}
}
deserializer.deserialize_seq(SeqVisitor(PhantomData::<T>))
let visitor = SeqVisitor::<T, U> {
marker: PhantomData,
marker2: PhantomData,
};
deserializer.deserialize_seq(visitor)
}
}
#[cfg(test)]
mod tests {
use serde::Deserialize;
use serde_with::serde_as;
use crate::serializer::MapResult;
use super::VecSkipErrorWrap;
#[serde_as]
#[derive(Debug, Deserialize)]
#[allow(dead_code)]
struct SLog {
struct S {
#[serde_as(as = "crate::serializer::VecLogError<_>")]
items: MapResult<Vec<Item>>,
}
#[derive(Deserialize)]
#[allow(dead_code)]
struct SSkip {
items: VecSkipErrorWrap<Item>,
}
#[derive(Debug, Deserialize)]
#[allow(dead_code)]
struct Item {
name: String,
}
const JSON: &str =
r#"{"items": [{"name": "i1"}, {"xyz": "i2"}, {"name": "i3"}, {"namra": "i4"}]}"#;
#[test]
fn skip_error() {
    // Items that fail to deserialize (wrong key `xyz`/`namra` instead of
    // `name`) are silently dropped by VecSkipErrorWrap; only i1/i3 remain.
    let res = serde_json::from_str::<SSkip>(JSON).unwrap();
    insta::assert_debug_snapshot!(res.items.0, @r###"
    [
        Item {
            name: "i1",
        },
        Item {
            name: "i3",
        },
    ]
    "###);
}
fn test() {
let json = r#"{"items": [{"name": "i1"}, {"xyz": "i2"}, {"name": "i3"}, {"namra": "i4"}]}"#;
#[test]
fn log_error() {
let res = serde_json::from_str::<SLog>(JSON).unwrap();
let res = serde_json::from_str::<S>(json).unwrap();
insta::assert_debug_snapshot!(res, @r###"
SLog {
S {
items: [
Item {
name: "i1",

View file

@ -9,6 +9,11 @@
//!
//! This module can parse these dates using an embedded dictionary which
//! contains date/time unit tokens for all supported languages.
//!
//! Note that this module is public so it can be tested from outside
//! the crate, which is important for including new languages, too.
//!
//! It is not intended to be used to parse textual dates that are not from YouTube.
use std::ops::Mul;
@ -65,37 +70,17 @@ pub enum TimeUnit {
/// Value of a parsed TimeAgo token, used in the dictionary
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TaToken {
pub(crate) struct TaToken {
pub n: u8,
pub unit: Option<TimeUnit>,
}
pub enum DateCmp {
pub(crate) enum DateCmp {
Y,
M,
D,
}
impl TimeUnit {
    /// Length of one unit of this kind in seconds.
    ///
    /// Months and years use rough calendar averages (30 and 365 days),
    /// which is sufficient for approximate "time ago" arithmetic.
    pub fn secs(&self) -> i64 {
        const MINUTE: i64 = 60;
        const HOUR: i64 = 60 * MINUTE;
        const DAY: i64 = 24 * HOUR;
        match *self {
            TimeUnit::Second => 1,
            TimeUnit::Minute => MINUTE,
            TimeUnit::Hour => HOUR,
            TimeUnit::Day => DAY,
            TimeUnit::Week => 7 * DAY,
            TimeUnit::Month => 30 * DAY,
            TimeUnit::Year => 365 * DAY,
        }
    }
}
impl TimeAgo {
    /// Total length of this time span in seconds
    /// (count multiplied by the unit's length).
    fn secs(&self) -> i64 {
        self.unit.secs() * i64::from(self.n)
    }
}
impl Mul<u8> for TimeAgo {
type Output = Self;
@ -109,7 +94,15 @@ impl Mul<u8> for TimeAgo {
impl From<TimeAgo> for Duration {
fn from(ta: TimeAgo) -> Self {
Duration::seconds(ta.secs())
match ta.unit {
TimeUnit::Second => Duration::seconds(ta.n as i64),
TimeUnit::Minute => Duration::minutes(ta.n as i64),
TimeUnit::Hour => Duration::hours(ta.n as i64),
TimeUnit::Day => Duration::days(ta.n as i64),
TimeUnit::Week => Duration::weeks(ta.n as i64),
TimeUnit::Month => Duration::days(ta.n as i64 * 30),
TimeUnit::Year => Duration::days(ta.n as i64 * 365),
}
}
}
@ -149,19 +142,14 @@ fn filter_str(string: &str) -> String {
.collect()
}
fn parse_ta_token(
entry: &dictionary::Entry,
by_char: bool,
nd: bool,
filtered_str: &str,
) -> Option<TimeAgo> {
fn parse_ta_token(entry: &dictionary::Entry, nd: bool, filtered_str: &str) -> Option<TimeAgo> {
let tokens = match nd {
true => &entry.timeago_nd_tokens,
false => &entry.timeago_tokens,
};
let mut qu = 1;
if by_char {
if entry.by_char {
filtered_str.chars().find_map(|word| {
tokens.get(&word.to_string()).and_then(|t| match t.unit {
Some(unit) => Some(TimeAgo { n: t.n * qu, unit }),
@ -184,78 +172,54 @@ fn parse_ta_token(
}
}
fn parse_ta_tokens(
entry: &dictionary::Entry,
by_char: bool,
nd: bool,
filtered_str: &str,
) -> Vec<TimeAgo> {
let tokens = match nd {
true => &entry.timeago_nd_tokens,
false => &entry.timeago_tokens,
};
let mut qu = 1;
if by_char {
filtered_str
.chars()
.filter_map(|word| {
tokens.get(&word.to_string()).and_then(|t| match t.unit {
Some(unit) => Some(TimeAgo { n: t.n * qu, unit }),
None => {
qu = t.n;
None
}
})
})
.collect()
fn parse_textual_month(entry: &dictionary::Entry, filtered_str: &str) -> Option<u8> {
if entry.by_char {
// Chinese/Japanese don't use textual months
None
} else {
filtered_str
.split_whitespace()
.filter_map(|word| {
tokens.get(word).and_then(|t| match t.unit {
Some(unit) => Some(TimeAgo { n: t.n * qu, unit }),
None => {
qu = t.n;
None
}
})
})
.collect()
.find_map(|word| entry.months.get(word).copied())
}
}
fn parse_textual_month(entry: &dictionary::Entry, filtered_str: &str) -> Option<u8> {
filtered_str
.split_whitespace()
.find_map(|word| entry.months.get(word).copied())
}
/// Parse a TimeAgo string (e.g. "29 minutes ago") into a TimeAgo object.
///
/// Returns [`None`] if the date could not be parsed.
/// Returns None if the date could not be parsed.
pub fn parse_timeago(lang: Language, textual_date: &str) -> Option<TimeAgo> {
let entry = dictionary::entry(lang);
let filtered_str = filter_str(textual_date);
let qu: u8 = util::parse_numeric(textual_date).unwrap_or(1);
parse_ta_token(&entry, util::lang_by_char(lang), false, &filtered_str).map(|ta| ta * qu)
parse_ta_token(&entry, false, &filtered_str).map(|ta| ta * qu)
}
/// Parse a TimeAgo string (e.g. "29 minutes ago") into a Chrono DateTime object.
///
/// Returns [`None`] if the date could not be parsed.
pub fn parse_timeago_dt(lang: Language, textual_date: &str) -> Option<OffsetDateTime> {
/// Returns None if the date could not be parsed.
pub fn parse_timeago_to_dt(lang: Language, textual_date: &str) -> Option<OffsetDateTime> {
parse_timeago(lang, textual_date).map(|ta| ta.into())
}
pub fn parse_timeago_dt_or_warn(
pub(crate) fn parse_timeago_or_warn(
lang: Language,
textual_date: &str,
warnings: &mut Vec<String>,
) -> Option<TimeAgo> {
let res = parse_timeago(lang, textual_date);
if res.is_none() {
warnings.push(format!("could not parse timeago `{textual_date}`"));
}
res
}
pub(crate) fn parse_timeago_dt_or_warn(
lang: Language,
textual_date: &str,
warnings: &mut Vec<String>,
) -> Option<OffsetDateTime> {
let res = parse_timeago_dt(lang, textual_date);
let res = parse_timeago_to_dt(lang, textual_date);
if res.is_none() {
warnings.push(format!("could not parse timeago `{textual_date}`"));
}
@ -264,20 +228,19 @@ pub fn parse_timeago_dt_or_warn(
/// Parse a textual date (e.g. "29 minutes ago" or "Jul 2, 2014") into a ParsedDate object.
///
/// Returns [`None`] if the date could not be parsed.
/// Returns None if the date could not be parsed.
pub fn parse_textual_date(lang: Language, textual_date: &str) -> Option<ParsedDate> {
let entry = dictionary::entry(lang);
let by_char = util::lang_by_char(lang);
let filtered_str = filter_str(textual_date);
let nums = util::parse_numeric_vec::<u16>(textual_date);
match nums.len() {
0 => match parse_ta_token(&entry, by_char, true, &filtered_str) {
0 => match parse_ta_token(&entry, true, &filtered_str) {
Some(timeago) => Some(ParsedDate::Relative(timeago)),
None => parse_ta_token(&entry, by_char, false, &filtered_str).map(ParsedDate::Relative),
None => parse_ta_token(&entry, false, &filtered_str).map(ParsedDate::Relative),
},
1 => parse_ta_token(&entry, by_char, false, &filtered_str)
1 => parse_ta_token(&entry, false, &filtered_str)
.map(|timeago| ParsedDate::Relative(timeago * nums[0] as u8)),
2..=3 => {
if nums.len() == entry.date_order.len() {
@ -293,8 +256,7 @@ pub fn parse_textual_date(lang: Language, textual_date: &str) -> Option<ParsedDa
DateCmp::D => d = Some(*n),
});
// Chinese/Japanese don't use textual months
if m.is_none() && !by_char {
if m.is_none() {
m = parse_textual_month(&entry, &filtered_str).map(|n| n as u16);
}
@ -320,7 +282,7 @@ pub fn parse_textual_date_to_dt(lang: Language, textual_date: &str) -> Option<Of
parse_textual_date(lang, textual_date).map(|ta| ta.into())
}
pub fn parse_textual_date_or_warn(
pub(crate) fn parse_textual_date_or_warn(
lang: Language,
textual_date: &str,
warnings: &mut Vec<String>,
@ -332,87 +294,6 @@ pub fn parse_textual_date_or_warn(
res
}
/// Parse a textual video duration (e.g. "11 minutes, 20 seconds")
///
/// Returns None if the duration could not be parsed
pub fn parse_video_duration(lang: Language, video_duration: &str) -> Option<u32> {
    let entry = dictionary::entry(lang);
    let by_char = util::lang_by_char(lang);
    // Sinhala/Swahili put the unit word before the number, so the splitter
    // must start a new segment on a word boundary instead of a digit run.
    let parts = split_duration_txt(video_duration, matches!(lang, Language::Si | Language::Sw));

    let mut secs = 0;
    for part in parts {
        // A segment without digits (e.g. dual/singular unit forms that carry
        // an implicit count) is treated as quantity 1; a non-numeric digit
        // run aborts the whole parse.
        let mut n = if part.digits.is_empty() {
            1
        } else {
            part.digits.parse::<u32>().ok()?
        };
        let tokens = parse_ta_tokens(&entry, by_char, false, &part.word);
        if tokens.is_empty() {
            return None;
        }
        // Only the first token of a segment gets the parsed quantity;
        // any further unit tokens in the same word count once each.
        tokens.iter().for_each(|ta| {
            secs += n * ta.secs() as u32;
            n = 1;
        });
    }
    Some(secs)
}
/// Parse a textual video duration, appending a warning message to
/// `warnings` when the string cannot be parsed.
///
/// Returns the duration in seconds, or None on failure.
pub fn parse_video_duration_or_warn(
    lang: Language,
    video_duration: &str,
    warnings: &mut Vec<String>,
) -> Option<u32> {
    match parse_video_duration(lang, video_duration) {
        Some(secs) => Some(secs),
        None => {
            warnings.push(format!("could not parse video duration `{video_duration}`"));
            None
        }
    }
}
/// One segment of a textual duration: a run of ASCII digits together
/// with the adjacent unit word, as produced by `split_duration_txt`.
#[derive(Default)]
struct DurationTxtSegment {
    // Consecutive ASCII digits (e.g. "11"); empty for an implicit count of 1
    digits: String,
    // Unit word, lowercased, with commas stripped
    word: String,
}
/// Split a textual duration into (digits, word) segments.
///
/// `start_c` marks languages where the unit word precedes the number
/// (e.g. "dakika 1"), which changes where the first segment boundary
/// is placed. Commas are dropped; word characters are lowercased.
fn split_duration_txt(txt: &str, start_c: bool) -> Vec<DurationTxtSegment> {
    let mut segments = Vec::new();
    // 1: parse digits, 2: parse word
    let mut state: u8 = 0;
    let mut seg = DurationTxtSegment::default();
    for c in txt.chars() {
        if c.is_ascii_digit() {
            // A digit after a word ends the current segment — except for
            // the very first segment of a word-first (`start_c == false`
            // here inverted) layout, where the leading word belongs to
            // the upcoming number.
            if state == 2 && (!seg.digits.is_empty() || (!start_c && segments.is_empty())) {
                segments.push(seg);
                seg = DurationTxtSegment::default();
            }
            seg.digits.push(c);
            state = 1;
        } else {
            // A word character after digits ends the segment in the
            // mirrored (word-first) case.
            if (state == 1) && (!seg.word.is_empty() || (start_c && segments.is_empty())) {
                segments.push(seg);
                seg = DurationTxtSegment::default();
            }
            if c != ',' {
                c.to_lowercase().for_each(|c| seg.word.push(c));
            }
            state = 2;
        }
    }
    // Flush the trailing segment if it holds any content
    if !seg.word.is_empty() || !seg.digits.is_empty() {
        segments.push(seg);
    }
    segments
}
#[cfg(test)]
mod tests {
use std::{collections::BTreeMap, fs::File, io::BufReader};
@ -655,11 +536,6 @@ mod tests {
"Last updated on Jun 04, 2003",
Some(ParsedDate::Absolute(date!(2003-6-4)))
)]
#[case(
Language::Bn,
"যোগ দিয়েছেন 24 সেপ, 2013",
Some(ParsedDate::Absolute(date!(2013-9-24)))
)]
fn t_parse_date(
#[case] lang: Language,
#[case] textual_date: &str,
@ -688,7 +564,11 @@ mod tests {
assert_eq!(
parse_textual_date(*lang, samples.get("Yesterday").unwrap()),
Some(ParsedDate::Relative(TimeAgo {
n: 1,
// YT's Singhalese translation has an error (yesterday == today)
n: match lang {
Language::Si => 0,
_ => 1,
},
unit: TimeUnit::Day
})),
"lang: {lang}"
@ -696,7 +576,7 @@ mod tests {
assert_eq!(
parse_textual_date(*lang, samples.get("Ago").unwrap()),
Some(ParsedDate::Relative(TimeAgo {
n: 5,
n: 3,
unit: TimeUnit::Day
})),
"lang: {lang}"
@ -764,36 +644,6 @@ mod tests {
})
}
#[test]
fn t_parse_video_duration() {
    // Run every collected per-language sample through the parser and
    // expect the exact duration in seconds.
    let json_path = path!(*TESTFILES / "dict" / "video_duration_samples.json");
    let json_file = File::open(json_path).unwrap();
    let date_samples: BTreeMap<Language, BTreeMap<String, u32>> =
        serde_json::from_reader(BufReader::new(json_file)).unwrap();

    date_samples.iter().for_each(|(lang, samples)| {
        samples.iter().for_each(|(txt, duration)| {
            assert_eq!(
                parse_video_duration(*lang, txt),
                Some(*duration),
                "lang: {lang}; txt: `{txt}`"
            );
        })
    });
}
// Extra cases exercising implicit counts (dual/singular unit forms)
// and word-before-number languages (Arabic, Swahili).
#[rstest]
#[case(Language::Ar, "19 دقيقة وثانيتان", 1142)]
#[case(Language::Ar, "دقيقة و13 ثانية", 73)]
#[case(Language::Sw, "dakika 1 na sekunde 13", 73)]
fn t_parse_video_duration2(
    #[case] lang: Language,
    #[case] video_duration: &str,
    #[case] expect: u32,
) {
    assert_eq!(parse_video_duration(lang, video_duration), Some(expect));
}
#[test]
fn t_to_datetime() {
// Absolute date

File diff suppressed because it is too large Load diff

View file

@ -2,7 +2,6 @@ mod date;
mod protobuf;
pub mod dictionary;
pub mod timeago;
pub use date::{now_sec, shift_months, shift_years};
pub use protobuf::{string_from_pb, ProtoBuilder};
@ -20,7 +19,7 @@ use rand::Rng;
use regex::Regex;
use url::Url;
use crate::{error::Error, param::Language, serializer::text::TextComponent};
use crate::{error::Error, param::Language};
pub static VIDEO_ID_REGEX: Lazy<Regex> = Lazy::new(|| Regex::new(r"^[A-Za-z0-9_-]{11}$").unwrap());
pub static CHANNEL_ID_REGEX: Lazy<Regex> =
@ -35,6 +34,8 @@ pub static VANITY_PATH_REGEX: Lazy<Regex> = Lazy::new(|| {
/// Separator string for YouTube Music subtitles
pub const DOT_SEPARATOR: &str = "";
/// YouTube Music name (author of official playlists)
pub const YT_MUSIC_NAME: &str = "YouTube Music";
pub const VARIOUS_ARTISTS: &str = "Various Artists";
pub const PLAYLIST_ID_ALBUM_PREFIX: &str = "OLAK";
@ -142,7 +143,7 @@ where
/// and return the duration in seconds.
pub fn parse_video_length(text: &str) -> Option<u32> {
static VIDEO_LENGTH_REGEX: Lazy<Regex> =
Lazy::new(|| Regex::new(r#"(?:(\d+)[:.])?(\d{1,2})[:.](\d{2})"#).unwrap());
Lazy::new(|| Regex::new(r#"(?:(\d+):)?(\d{1,2}):(\d{2})"#).unwrap());
VIDEO_LENGTH_REGEX.captures(text).map(|cap| {
let hrs = cap
.get(1)
@ -192,43 +193,40 @@ pub fn retry_delay(
/// Also strips google analytics tracking parameters
/// (`utm_source`, `utm_medium`, `utm_campaign`, `utm_content`) because google analytics is bad.
pub fn sanitize_yt_url(url: &str) -> String {
fn sanitize_yt_url_inner(url: &str) -> Option<String> {
let mut parsed_url = Url::parse(url).ok()?;
let mut parsed_url = ok_or_bail!(Url::parse(url), url.to_owned());
// Convert redirect url
if parsed_url.host_str().unwrap_or_default() == "www.youtube.com"
&& parsed_url.path() == "/redirect"
{
if let Some((_, url)) = parsed_url.query_pairs().find(|(k, _)| k == "q") {
parsed_url = Url::parse(url.as_ref()).ok()?;
}
// Convert redirect url
if parsed_url.host_str().unwrap_or_default() == "www.youtube.com"
&& parsed_url.path() == "/redirect"
{
if let Some((_, url)) = parsed_url.query_pairs().find(|(k, _)| k == "q") {
parsed_url = ok_or_bail!(Url::parse(url.as_ref()), url.to_string());
}
// Remove GA tracking params
if parsed_url.query().is_some() {
let params = parsed_url
.query_pairs()
.filter_map(|(k, v)| match k.borrow() {
"utm_source" | "utm_medium" | "utm_campaign" | "utm_content" => None,
_ => Some((k.to_string(), v.to_string())),
})
.collect::<Vec<_>>();
// Set empty query string if there are no parameters to prevent urls from ending with /?
if params.is_empty() {
parsed_url.set_query(None);
} else {
parsed_url
.query_pairs_mut()
.clear()
.extend_pairs(params)
.finish();
}
}
Some(parsed_url.to_string())
}
sanitize_yt_url_inner(url).unwrap_or_else(|| url.to_string())
// Remove GA tracking params
if parsed_url.query().is_some() {
let params = parsed_url
.query_pairs()
.filter_map(|(k, v)| match k.borrow() {
"utm_source" | "utm_medium" | "utm_campaign" | "utm_content" => None,
_ => Some((k.to_string(), v.to_string())),
})
.collect::<Vec<_>>();
// Set empty query string if there are no parameters to prevent urls from ending with /?
if params.is_empty() {
parsed_url.set_query(None);
} else {
parsed_url
.query_pairs_mut()
.clear()
.extend_pairs(params)
.finish();
}
}
parsed_url.to_string()
}
pub trait TryRemove<T> {
@ -271,114 +269,63 @@ impl<T> TryRemove<T> for Vec<T> {
}
}
/// Check if a channel name equals "YouTube Music"
/// (the author of original YouTube music playlists)
pub(crate) fn is_ytm(text: &TextComponent) -> bool {
if let TextComponent::Text { text } = text {
text.starts_with("YouTube")
} else {
false
}
}
/// Check if a language should be parsed by character
pub fn lang_by_char(lang: Language) -> bool {
matches!(
lang,
Language::Ja | Language::ZhCn | Language::ZhHk | Language::ZhTw
)
}
/// Parse a large, textual number (e.g. `1.4M subscribers`, `22K views`)
pub fn parse_large_numstr<F>(string: &str, lang: Language) -> Option<F>
where
F: TryFrom<u64>,
{
// Special case for Gujarati: the "no views" text does not contain
// any parseable tokens: the 2 words occur in any view count text.
// This may be a translation error.
if lang == Language::Gu && string == "જોવાયાની સંખ્યા" {
return 0.try_into().ok();
}
let dict_entry = dictionary::entry(lang);
let by_char = lang_by_char(lang) || lang == Language::Ko;
let decimal_point = match dict_entry.comma_decimal {
true => ',',
false => '.',
};
let mut digits = String::new();
let mut filtered = String::new();
let mut exp = 0;
let mut after_point = false;
let (num, mut exp, filtered) = {
let mut buf = String::new();
let mut filtered = String::new();
let mut exp = 0;
let mut after_point = false;
for c in string.chars() {
if c.is_ascii_digit() {
buf.push(c);
for c in string.chars() {
if c.is_ascii_digit() {
digits.push(c);
if after_point {
exp -= 1;
if after_point {
exp -= 1;
}
} else if c == decimal_point {
after_point = true;
} else if !matches!(c, '\u{200b}' | '.' | ',') {
filtered.push(c);
}
} else if c == decimal_point {
after_point = true;
} else if !matches!(
c,
'\u{200b}' | '\u{202b}' | '\u{202c}' | '\u{202e}' | '\u{200e}' | '\u{200f}' | '.' | ','
) {
c.to_lowercase().for_each(|c| filtered.push(c));
}
}
(ok_or_bail!(buf.parse::<u64>(), None), exp, filtered)
};
if digits.is_empty() {
if by_char {
filtered
.chars()
.find_map(|c| dict_entry.number_nd_tokens.get(&c.to_string()))
.and_then(|n| (*n as u64).try_into().ok())
} else {
filtered
.split_whitespace()
.find_map(|token| dict_entry.number_nd_tokens.get(token))
.and_then(|n| (*n as u64).try_into().ok())
}
let lookup_token = |token: &str| match token {
"K" | "k" => Some(3),
_ => dict_entry.number_tokens.get(token).map(|t| *t as i32),
};
if dict_entry.by_char {
exp += filtered
.chars()
.filter_map(|token| lookup_token(&token.to_string()))
.sum::<i32>();
} else {
let num = digits.parse::<u64>().ok()?;
let lookup_token = |token: &str| match token {
"k" => Some(3),
_ => dict_entry.number_tokens.get(token).map(|t| *t as i32),
};
if by_char {
exp += filtered
.chars()
.filter_map(|token| lookup_token(&token.to_string()))
.sum::<i32>();
} else {
exp += filtered
.split_whitespace()
.filter_map(lookup_token)
.sum::<i32>();
}
F::try_from(num.checked_mul((10_u64).checked_pow(exp.try_into().ok()?)?)?).ok()
exp += filtered
.split_whitespace()
.filter_map(lookup_token)
.sum::<i32>();
}
}
pub fn parse_large_numstr_or_warn<F>(
string: &str,
lang: Language,
warnings: &mut Vec<String>,
) -> Option<F>
where
F: TryFrom<u64>,
{
let res = parse_large_numstr::<F>(string, lang);
if res.is_none() {
warnings.push(format!("could not parse numstr `{string}`"));
}
res
F::try_from(some_or_bail!(
num.checked_mul(some_or_bail!(
(10_u64).checked_pow(ok_or_bail!(exp.try_into(), None)),
None
)),
None
))
.ok()
}
/// Replace all html control characters to make a string safe for inserting into HTML.
@ -505,22 +452,24 @@ pub(crate) mod tests {
assert_eq!(res, expect);
}
#[rstest]
#[case(
Language::Iw,
"\u{200f}\u{202b}3.36M\u{200f}\u{202c}\u{200f} \u{200f}מנויים\u{200f}",
3_360_000
)]
#[case(Language::As, "১ জন গ্ৰাহক", 1)]
fn t_parse_large_numstr(#[case] lang: Language, #[case] string: &str, #[case] expect: u64) {
let res = parse_large_numstr::<u64>(string, lang).unwrap();
assert_eq!(res, expect);
}
#[test]
fn t_parse_large_numstr_samples() {
let json_path = path!(*TESTFILES / "dict" / "large_number_samples.json");
let json_file = File::open(json_path).unwrap();
let number_samples: BTreeMap<Language, BTreeMap<u8, (String, u64)>> =
serde_json::from_reader(BufReader::new(json_file)).unwrap();
number_samples.iter().for_each(|(lang, entry)| {
entry.iter().for_each(|(_, (txt, expect))| {
testcase_parse_large_numstr(txt, *lang, *expect);
});
});
}
#[test]
fn t_parse_large_numstr_samples2() {
let json_path = path!(*TESTFILES / "dict" / "large_number_samples_all.json");
let json_file = File::open(json_path).unwrap();
let number_samples: BTreeMap<Language, BTreeMap<String, u64>> =
serde_json::from_reader(BufReader::new(json_file)).unwrap();
@ -536,18 +485,12 @@ pub(crate) mod tests {
// in the string.
let rounded = {
let n_significant_d = string.chars().filter(char::is_ascii_digit).count();
if n_significant_d == 0 {
expect
} else {
let mag = (expect as f64).log10().floor();
let factor = 10_u64.pow(1 + mag as u32 - n_significant_d as u32);
(((expect as f64) / factor as f64).floor() as u64) * factor
}
let mag = (expect as f64).log10().floor();
let factor = 10_u64.pow(1 + mag as u32 - n_significant_d as u32);
(((expect as f64) / factor as f64).floor() as u64) * factor
};
let emsg = format!("{string} (lang: {lang}, exact: {expect})");
let res = parse_large_numstr::<u64>(string, lang).expect(&emsg);
assert_eq!(res, rounded, "{emsg}");
let res = parse_large_numstr::<u64>(string, lang).expect(string);
assert_eq!(res, rounded, "{string} (lang: {lang}, exact: {expect})");
}
}

View file

@ -98,11 +98,11 @@ pub fn string_from_pb<P: IntoIterator<Item = u8>>(pb: P, field: u32) -> Option<S
5 => 4,
// string
2 => {
let len = parse_varint(&mut pb)?;
let len = some_or_bail!(parse_varint(&mut pb), None);
if this_field == field {
let mut buf = Vec::new();
for _ in 0..len {
buf.push(pb.next()?);
buf.push(some_or_bail!(pb.next(), None));
}
return String::from_utf8(buf).ok();
} else {

File diff suppressed because it is too large Load diff

View file

@ -1,5 +1,5 @@
// This code is used to test the deobfuscation javascript extraction.
// Since YouTube's player code is copyrighted, I can't just copy-paste it
// Since YouTube's player code is copyrighted, I can't just copy-paste it
// into my project.
/*

View file

@ -0,0 +1,902 @@
{
"supplemental": {
"version": {
"_unicodeVersion": "13.0.0",
"_cldrVersion": "37"
},
"plurals-type-cardinal": {
"af": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ak": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"am": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"an": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ar": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n % 100 = 3..10 @integer 3~10, 103~110, 1003, … @decimal 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 103.0, 1003.0, …",
"pluralRule-count-many": "n % 100 = 11..99 @integer 11~26, 111, 1011, … @decimal 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 111.0, 1011.0, …",
"pluralRule-count-other": " @integer 100~102, 200~202, 300~302, 400~402, 500~502, 600, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ars": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n % 100 = 3..10 @integer 3~10, 103~110, 1003, … @decimal 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 103.0, 1003.0, …",
"pluralRule-count-many": "n % 100 = 11..99 @integer 11~26, 111, 1011, … @decimal 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 111.0, 1011.0, …",
"pluralRule-count-other": " @integer 100~102, 200~202, 300~302, 400~402, 500~502, 600, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"as": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"asa": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ast": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"az": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"be": {
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 1.0, 21.0, 31.0, 41.0, 51.0, 61.0, 71.0, 81.0, 101.0, 1001.0, …",
"pluralRule-count-few": "n % 10 = 2..4 and n % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 2.0, 3.0, 4.0, 22.0, 23.0, 24.0, 32.0, 33.0, 102.0, 1002.0, …",
"pluralRule-count-many": "n % 10 = 0 or n % 10 = 5..9 or n % 100 = 11..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.1, 1000.1, …"
},
"bem": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bez": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bho": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bm": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bn": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"br": {
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11,71,91 @integer 1, 21, 31, 41, 51, 61, 81, 101, 1001, … @decimal 1.0, 21.0, 31.0, 41.0, 51.0, 61.0, 81.0, 101.0, 1001.0, …",
"pluralRule-count-two": "n % 10 = 2 and n % 100 != 12,72,92 @integer 2, 22, 32, 42, 52, 62, 82, 102, 1002, … @decimal 2.0, 22.0, 32.0, 42.0, 52.0, 62.0, 82.0, 102.0, 1002.0, …",
"pluralRule-count-few": "n % 10 = 3..4,9 and n % 100 != 10..19,70..79,90..99 @integer 3, 4, 9, 23, 24, 29, 33, 34, 39, 43, 44, 49, 103, 1003, … @decimal 3.0, 4.0, 9.0, 23.0, 24.0, 29.0, 33.0, 34.0, 103.0, 1003.0, …",
"pluralRule-count-many": "n != 0 and n % 1000000 = 0 @integer 1000000, … @decimal 1000000.0, 1000000.00, 1000000.000, …",
"pluralRule-count-other": " @integer 0, 5~8, 10~20, 100, 1000, 10000, 100000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, …"
},
"brx": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"bs": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ca": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ce": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ceb": {
"pluralRule-count-one": "v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 @integer 0~3, 5, 7, 8, 10~13, 15, 17, 18, 20, 21, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.3, 0.5, 0.7, 0.8, 1.0~1.3, 1.5, 1.7, 1.8, 2.0, 2.1, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 4, 6, 9, 14, 16, 19, 24, 26, 104, 1004, … @decimal 0.4, 0.6, 0.9, 1.4, 1.6, 1.9, 2.4, 2.6, 10.4, 100.4, 1000.4, …"
},
"cgg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"chr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ckb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"cs": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "i = 2..4 and v = 0 @integer 2~4",
"pluralRule-count-many": "v != 0 @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …"
},
"cy": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n = 3 @integer 3 @decimal 3.0, 3.00, 3.000, 3.0000",
"pluralRule-count-many": "n = 6 @integer 6 @decimal 6.0, 6.00, 6.000, 6.0000",
"pluralRule-count-other": " @integer 4, 5, 7~20, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"da": {
"pluralRule-count-one": "n = 1 or t != 0 and i = 0,1 @integer 1 @decimal 0.1~1.6",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 2.0~3.4, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"de": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"dsb": {
"pluralRule-count-one": "v = 0 and i % 100 = 1 or f % 100 = 1 @integer 1, 101, 201, 301, 401, 501, 601, 701, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-two": "v = 0 and i % 100 = 2 or f % 100 = 2 @integer 2, 102, 202, 302, 402, 502, 602, 702, 1002, … @decimal 0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-few": "v = 0 and i % 100 = 3..4 or f % 100 = 3..4 @integer 3, 4, 103, 104, 203, 204, 303, 304, 403, 404, 503, 504, 603, 604, 703, 704, 1003, … @decimal 0.3, 0.4, 1.3, 1.4, 2.3, 2.4, 3.3, 3.4, 4.3, 4.4, 5.3, 5.4, 6.3, 6.4, 7.3, 7.4, 10.3, 100.3, 1000.3, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"dv": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"dz": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ee": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"el": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"en": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"eo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"es": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"et": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"eu": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fa": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ff": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fi": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fil": {
"pluralRule-count-one": "v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 @integer 0~3, 5, 7, 8, 10~13, 15, 17, 18, 20, 21, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.3, 0.5, 0.7, 0.8, 1.0~1.3, 1.5, 1.7, 1.8, 2.0, 2.1, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 4, 6, 9, 14, 16, 19, 24, 26, 104, 1004, … @decimal 0.4, 0.6, 0.9, 1.4, 1.6, 1.9, 2.4, 2.6, 10.4, 100.4, 1000.4, …"
},
"fo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fr": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fur": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"fy": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ga": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-few": "n = 3..6 @integer 3~6 @decimal 3.0, 4.0, 5.0, 6.0, 3.00, 4.00, 5.00, 6.00, 3.000, 4.000, 5.000, 6.000, 3.0000, 4.0000, 5.0000, 6.0000",
"pluralRule-count-many": "n = 7..10 @integer 7~10 @decimal 7.0, 8.0, 9.0, 10.0, 7.00, 8.00, 9.00, 10.00, 7.000, 8.000, 9.000, 10.000, 7.0000, 8.0000, 9.0000, 10.0000",
"pluralRule-count-other": " @integer 0, 11~25, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gd": {
"pluralRule-count-one": "n = 1,11 @integer 1, 11 @decimal 1.0, 11.0, 1.00, 11.00, 1.000, 11.000, 1.0000",
"pluralRule-count-two": "n = 2,12 @integer 2, 12 @decimal 2.0, 12.0, 2.00, 12.00, 2.000, 12.000, 2.0000",
"pluralRule-count-few": "n = 3..10,13..19 @integer 3~10, 13~19 @decimal 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 3.00",
"pluralRule-count-other": " @integer 0, 20~34, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gl": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gsw": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gu": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"guw": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"gv": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 @integer 1, 11, 21, 31, 41, 51, 61, 71, 101, 1001, …",
"pluralRule-count-two": "v = 0 and i % 10 = 2 @integer 2, 12, 22, 32, 42, 52, 62, 72, 102, 1002, …",
"pluralRule-count-few": "v = 0 and i % 100 = 0,20,40,60,80 @integer 0, 20, 40, 60, 80, 100, 120, 140, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-many": "v != 0 @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 3~10, 13~19, 23, 103, 1003, …"
},
"ha": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"haw": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"he": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-two": "i = 2 and v = 0 @integer 2",
"pluralRule-count-many": "v = 0 and n != 0..10 and n % 10 = 0 @integer 20, 30, 40, 50, 60, 70, 80, 90, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @integer 0, 3~17, 101, 1001, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hi": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hr": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hsb": {
"pluralRule-count-one": "v = 0 and i % 100 = 1 or f % 100 = 1 @integer 1, 101, 201, 301, 401, 501, 601, 701, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-two": "v = 0 and i % 100 = 2 or f % 100 = 2 @integer 2, 102, 202, 302, 402, 502, 602, 702, 1002, … @decimal 0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-few": "v = 0 and i % 100 = 3..4 or f % 100 = 3..4 @integer 3, 4, 103, 104, 203, 204, 303, 304, 403, 404, 503, 504, 603, 604, 703, 704, 1003, … @decimal 0.3, 0.4, 1.3, 1.4, 2.3, 2.4, 3.3, 3.4, 4.3, 4.4, 5.3, 5.4, 6.3, 6.4, 7.3, 7.4, 10.3, 100.3, 1000.3, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hu": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"hy": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ia": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"id": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ig": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ii": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"in": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"io": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"is": {
"pluralRule-count-one": "t = 0 and i % 10 = 1 and i % 100 != 11 or t != 0 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1~1.6, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"it": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"iu": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"iw": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-two": "i = 2 and v = 0 @integer 2",
"pluralRule-count-many": "v = 0 and n != 0..10 and n % 10 = 0 @integer 20, 30, 40, 50, 60, 70, 80, 90, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @integer 0, 3~17, 101, 1001, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ja": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jbo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jgo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ji": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jmc": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jv": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"jw": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ka": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kab": {
"pluralRule-count-one": "i = 0,1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kaj": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kcg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kde": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kea": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kk": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kkj": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kl": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"km": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kn": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ko": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ks": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ksb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ksh": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ku": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"kw": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n % 100 = 2,22,42,62,82 or n % 1000 = 0 and n % 100000 = 1000..20000,40000,60000,80000 or n != 0 and n % 1000000 = 100000 @integer 2, 22, 42, 62, 82, 102, 122, 142, 1000, 10000, 100000, … @decimal 2.0, 22.0, 42.0, 62.0, 82.0, 102.0, 122.0, 142.0, 1000.0, 10000.0, 100000.0, …",
"pluralRule-count-few": "n % 100 = 3,23,43,63,83 @integer 3, 23, 43, 63, 83, 103, 123, 143, 1003, … @decimal 3.0, 23.0, 43.0, 63.0, 83.0, 103.0, 123.0, 143.0, 1003.0, …",
"pluralRule-count-many": "n != 1 and n % 100 = 1,21,41,61,81 @integer 21, 41, 61, 81, 101, 121, 141, 161, 1001, … @decimal 21.0, 41.0, 61.0, 81.0, 101.0, 121.0, 141.0, 161.0, 1001.0, …",
"pluralRule-count-other": " @integer 4~19, 100, 1004, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.1, 1000000.0, …"
},
"ky": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lag": {
"pluralRule-count-zero": "n = 0 @integer 0 @decimal 0.0, 0.00, 0.000, 0.0000",
"pluralRule-count-one": "i = 0,1 and n != 0 @integer 1 @decimal 0.1~1.6",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lg": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lkt": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ln": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lt": {
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11..19 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 1.0, 21.0, 31.0, 41.0, 51.0, 61.0, 71.0, 81.0, 101.0, 1001.0, …",
"pluralRule-count-few": "n % 10 = 2..9 and n % 100 != 11..19 @integer 2~9, 22~29, 102, 1002, … @decimal 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 22.0, 102.0, 1002.0, …",
"pluralRule-count-many": "f != 0 @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 0, 10~20, 30, 40, 50, 60, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"lv": {
"pluralRule-count-zero": "n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19 @integer 0, 10~20, 30, 40, 50, 60, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11 or v = 2 and f % 10 = 1 and f % 100 != 11 or v != 2 and f % 10 = 1 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.0, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 2~9, 22~29, 102, 1002, … @decimal 0.2~0.9, 1.2~1.9, 10.2, 100.2, 1000.2, …"
},
"mas": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mg": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mgo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mk": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.2~1.0, 1.2~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ml": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mo": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "v != 0 or n = 0 or n % 100 = 2..19 @integer 0, 2~16, 102, 1002, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 20~35, 100, 1000, 10000, 100000, 1000000, …"
},
"mr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ms": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"mt": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-few": "n = 0 or n % 100 = 2..10 @integer 0, 2~10, 102~107, 1002, … @decimal 0.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 10.0, 102.0, 1002.0, …",
"pluralRule-count-many": "n % 100 = 11..19 @integer 11~19, 111~117, 1011, … @decimal 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 111.0, 1011.0, …",
"pluralRule-count-other": " @integer 20~35, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"my": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nah": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"naq": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nb": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nd": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ne": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nl": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nnh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"no": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nqo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nso": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ny": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"nyn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"om": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"or": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"os": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"osa": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pa": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pap": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pcm": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pl": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, …",
"pluralRule-count-many": "v = 0 and i != 1 and i % 10 = 0..1 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 12..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"prg": {
"pluralRule-count-zero": "n % 10 = 0 or n % 100 = 11..19 or v = 2 and f % 100 = 11..19 @integer 0, 10~20, 30, 40, 50, 60, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-one": "n % 10 = 1 and n % 100 != 11 or v = 2 and f % 10 = 1 and f % 100 != 11 or v != 2 and f % 10 = 1 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.0, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-other": " @integer 2~9, 22~29, 102, 1002, … @decimal 0.2~0.9, 1.2~1.9, 10.2, 100.2, 1000.2, …"
},
"ps": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pt": {
"pluralRule-count-one": "i = 0..1 @integer 0, 1 @decimal 0.0~1.5",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 2.0~3.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"pt-PT": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"rm": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ro": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "v != 0 or n = 0 or n % 100 = 2..19 @integer 0, 2~16, 102, 1002, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 20~35, 100, 1000, 10000, 100000, 1000000, …"
},
"rof": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"root": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ru": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, …",
"pluralRule-count-many": "v = 0 and i % 10 = 0 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 11..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"rwk": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sah": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"saq": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sat": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sc": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"scn": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sd": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sdh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"se": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"seh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ses": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sg": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sh": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"shi": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-few": "n = 2..10 @integer 2~10 @decimal 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00",
"pluralRule-count-other": " @integer 11~26, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~1.9, 2.1~2.7, 10.1, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"si": {
"pluralRule-count-one": "n = 0,1 or i = 0 and f = 1 @integer 0, 1 @decimal 0.0, 0.1, 1.0, 0.00, 0.01, 1.00, 0.000, 0.001, 1.000, 0.0000, 0.0001, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.2~0.9, 1.1~1.8, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sk": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-few": "i = 2..4 and v = 0 @integer 2~4",
"pluralRule-count-many": "v != 0 @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …"
},
"sl": {
"pluralRule-count-one": "v = 0 and i % 100 = 1 @integer 1, 101, 201, 301, 401, 501, 601, 701, 1001, …",
"pluralRule-count-two": "v = 0 and i % 100 = 2 @integer 2, 102, 202, 302, 402, 502, 602, 702, 1002, …",
"pluralRule-count-few": "v = 0 and i % 100 = 3..4 or v != 0 @integer 3, 4, 103, 104, 203, 204, 303, 304, 403, 404, 503, 504, 603, 604, 703, 704, 1003, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …"
},
"sma": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"smi": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"smj": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"smn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sms": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-two": "n = 2 @integer 2 @decimal 2.0, 2.00, 2.000, 2.0000",
"pluralRule-count-other": " @integer 0, 3~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"so": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sq": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sr": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 or f % 10 = 1 and f % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, … @decimal 0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 10.1, 100.1, 1000.1, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 or f % 10 = 2..4 and f % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, … @decimal 0.2~0.4, 1.2~1.4, 2.2~2.4, 3.2~3.4, 4.2~4.4, 5.2, 10.2, 100.2, 1000.2, …",
"pluralRule-count-other": " @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0, 0.5~1.0, 1.5~2.0, 2.5~2.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ss": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ssy": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"st": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"su": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sv": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"sw": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"syr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ta": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"te": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"teo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"th": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ti": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tig": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tk": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tl": {
"pluralRule-count-one": "v = 0 and i = 1,2,3 or v = 0 and i % 10 != 4,6,9 or v != 0 and f % 10 != 4,6,9 @integer 0~3, 5, 7, 8, 10~13, 15, 17, 18, 20, 21, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.3, 0.5, 0.7, 0.8, 1.0~1.3, 1.5, 1.7, 1.8, 2.0, 2.1, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …",
"pluralRule-count-other": " @integer 4, 6, 9, 14, 16, 19, 24, 26, 104, 1004, … @decimal 0.4, 0.6, 0.9, 1.4, 1.6, 1.9, 2.4, 2.6, 10.4, 100.4, 1000.4, …"
},
"tn": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"to": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tr": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ts": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"tzm": {
"pluralRule-count-one": "n = 0..1 or n = 11..99 @integer 0, 1, 11~24 @decimal 0.0, 1.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0",
"pluralRule-count-other": " @integer 2~10, 100~106, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ug": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"uk": {
"pluralRule-count-one": "v = 0 and i % 10 = 1 and i % 100 != 11 @integer 1, 21, 31, 41, 51, 61, 71, 81, 101, 1001, …",
"pluralRule-count-few": "v = 0 and i % 10 = 2..4 and i % 100 != 12..14 @integer 2~4, 22~24, 32~34, 42~44, 52~54, 62, 102, 1002, …",
"pluralRule-count-many": "v = 0 and i % 10 = 0 or v = 0 and i % 10 = 5..9 or v = 0 and i % 100 = 11..14 @integer 0, 5~19, 100, 1000, 10000, 100000, 1000000, …",
"pluralRule-count-other": " @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ur": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"uz": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"ve": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"vi": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"vo": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"vun": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"wa": {
"pluralRule-count-one": "n = 0..1 @integer 0, 1 @decimal 0.0, 1.0, 0.00, 1.00, 0.000, 1.000, 0.0000, 1.0000",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 0.1~0.9, 1.1~1.7, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"wae": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"wo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"xh": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"xog": {
"pluralRule-count-one": "n = 1 @integer 1 @decimal 1.0, 1.00, 1.000, 1.0000",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~0.9, 1.1~1.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"yi": {
"pluralRule-count-one": "i = 1 and v = 0 @integer 1",
"pluralRule-count-other": " @integer 0, 2~16, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"yo": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"yue": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"zh": {
"pluralRule-count-other": " @integer 0~15, 100, 1000, 10000, 100000, 1000000, … @decimal 0.0~1.5, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
},
"zu": {
"pluralRule-count-one": "i = 0 or n = 1 @integer 0, 1 @decimal 0.0~1.0, 0.00~0.04",
"pluralRule-count-other": " @integer 2~17, 100, 1000, 10000, 100000, 1000000, … @decimal 1.1~2.6, 10.0, 100.0, 1000.0, 10000.0, 100000.0, 1000000.0, …"
}
}
}
}

File diff suppressed because it is too large Load diff

View file

@ -1,163 +0,0 @@
{
"af": {
"number_nd_tokens": {
"geen": null
}
},
"am": {
"number_nd_tokens": {
"ምንም": null
}
},
"as": {
"number_tokens": {
"লা": 5,
"হা": 3,
"শঃ": null
},
"number_nd_tokens": {
"কোনো": null
}
},
"bn": {
"number_tokens": {
"কোটি": 7,
"শত": 2
}
},
"es": {
"number_tokens": {
"m": 6,
"mil": 3
}
},
"es-US": {
"number_tokens": {
"m": 6,
"mil": 3
}
},
"et": {
"number_nd_tokens": {
"vaatamisi": null
}
},
"eu": {
"number_nd_tokens": {
"dago": null,
"ikustaldirik": null
}
},
"fr": {
"number_tokens": {
"dabonnés": null
}
},
"hy": {
"number_nd_tokens": {
"Դիտումներ": null
}
},
"is": {
"number_nd_tokens": {
"áskrifandi": null,
"enn": null
}
},
"iw": {
"number_nd_tokens": {
"מנוי": null
}
},
"ka": {
"number_nd_tokens": {
"არის": null,
"ნახვები": null
}
},
"kk": {
"number_nd_tokens": {
"көрмеген": null
}
},
"kn": {
"number_nd_tokens": {
"ಯಾವುದೇ": null
}
},
"ko": {
"number_nd_tokens": {
"음": null
}
},
"ky": {
"number_nd_tokens": {
"ким": null,
"көрө": null,
"элек": null
}
},
"my": {
"number_tokens": {
"ကုဋေ": 7,
"သောင်း": 4,
"ထ": 3
}
},
"ne": {
"number_nd_tokens": {
"कुनै": null
}
},
"no": {
"number_nd_tokens": {
"avspillinger": null
}
},
"or": {
"number_tokens": {
"ବିଜଣ": 9,
"ବି": 9
},
"number_nd_tokens": {
"କୌଣସି": null
}
},
"pa": {
"number_nd_tokens": {
"ਕਿਸੇ": null,
"ਨੇ": null
}
},
"ro": {
"number_nd_tokens": {
"abonat": null,
"vizionare": null
}
},
"sq": {
"number_nd_tokens": {
"ka": null
}
},
"uk": {
"number_nd_tokens": {
"перегляду": null
}
},
"ur": {
"number_nd_tokens": {
"کوئی": null
}
},
"zh-CN": {
"number_nd_tokens": {
"人": null
}
},
"zu": {
"number_nd_tokens": {
"kubukwa": null
}
}
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,129 +0,0 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_u1I69lSAe5v",
playlist_id: Some("OLAK5uy_lGP_zv0vJDUlecQDzugUJmjcF7pvyVNyY"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
description: "[description]",
album_type: Ep,
year: Some(2016),
by_va: false,
tracks: [
TrackItem(
id: "aGd3VKSOTxY",
name: "[name]",
duration: Some(221),
cover: [],
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
album: Some(AlbumId(
id: "MPREb_u1I69lSAe5v",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "Jz-26iiDuYs",
name: "[name]",
duration: Some(208),
cover: [],
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
album: Some(AlbumId(
id: "MPREb_u1I69lSAe5v",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "Bu26uFtpt58",
name: "[name]",
duration: Some(223),
cover: [],
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
album: Some(AlbumId(
id: "MPREb_u1I69lSAe5v",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "RgwNqqiVqdY",
name: "[name]",
duration: Some(221),
cover: [],
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
album: Some(AlbumId(
id: "MPREb_u1I69lSAe5v",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
TrackItem(
id: "2TuOh30XbCI",
name: "[name]",
duration: Some(197),
cover: [],
artists: [
ArtistId(
id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
name: "[name]",
),
],
artist_id: Some("UCpJyCbFbdTrx0M90HCNBHFQ"),
album: Some(AlbumId(
id: "MPREb_u1I69lSAe5v",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(5),
by_va: false,
),
],
variants: [],
)

View file

@ -1,134 +0,0 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_bqWA6mAZFWS",
playlist_id: Some("OLAK5uy_mUiRbMqeQXFUH6h9KB87RcEmNtm45Qvs0"),
name: "[name]",
cover: "[cover]",
artists: [],
artist_id: None,
description: "[description]",
album_type: Ep,
year: Some(1968),
by_va: false,
tracks: [
TrackItem(
id: "EX7-pOQHPyE",
name: "[name]",
duration: Some(267),
cover: [],
artists: [
ArtistId(
id: Some("UC1C05NyYICFB2mVGn9_ttEw"),
name: "[name]",
),
],
artist_id: Some("UC1C05NyYICFB2mVGn9_ttEw"),
album: Some(AlbumId(
id: "MPREb_bqWA6mAZFWS",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "0AyWB-Quj4A",
name: "[name]",
duration: Some(179),
cover: [],
artists: [
ArtistId(
id: Some("UCDqpyYkgWy2h03HamIfODjw"),
name: "[name]",
),
],
artist_id: Some("UCDqpyYkgWy2h03HamIfODjw"),
album: Some(AlbumId(
id: "MPREb_bqWA6mAZFWS",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "s0Sb-GZLXSM",
name: "[name]",
duration: Some(155),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_bqWA6mAZFWS",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "P4XAaXjlCDA",
name: "[name]",
duration: Some(229),
cover: [],
artists: [
ArtistId(
id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
name: "[name]",
),
],
artist_id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
album: Some(AlbumId(
id: "MPREb_bqWA6mAZFWS",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
],
variants: [
AlbumItem(
id: "MPREb_h8ltx5oKvyY",
name: "Pedha Rasi Peddamma Katha",
cover: [
Thumbnail(
url: "https://lh3.googleusercontent.com/iZtBdPWBGNB-GAWvOp9seuYj5QqKrUYGSe-B5J026yxHqFSWv4zsxHy-LxX5LbFlnepOPRWNLrajO-_-=w226-h226-l90-rj",
width: 226,
height: 226,
),
Thumbnail(
url: "https://lh3.googleusercontent.com/iZtBdPWBGNB-GAWvOp9seuYj5QqKrUYGSe-B5J026yxHqFSWv4zsxHy-LxX5LbFlnepOPRWNLrajO-_-=w544-h544-l90-rj",
width: 544,
height: 544,
),
],
artists: [
ArtistId(
id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
name: "[name]",
),
ArtistId(
id: Some("UCWgAqlYG7mXTUxrFiLyDSsg"),
name: "[name]",
),
],
artist_id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
album_type: Ep,
year: None,
by_va: false,
),
],
)

View file

@ -1,61 +0,0 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_F3Af9UZZVxX",
playlist_id: Some("OLAK5uy_nim4i4eycEtlBtS3Ci6j4SvvTmdfBcRX4"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCAJwa_1l4rHzBJyWbeBtGZw"),
name: "[name]",
),
ArtistId(
id: Some("UCbBaYg2UToDaoOwo-R6xi4g"),
name: "[name]",
),
ArtistId(
id: Some("UCiY3z8HAGD6BlSNKVn2kSvQ"),
name: "[name]",
),
],
artist_id: Some("UCAJwa_1l4rHzBJyWbeBtGZw"),
description: "[description]",
album_type: Single,
year: None,
by_va: false,
tracks: [
TrackItem(
id: "1Sz3lUVGBSM",
name: "[name]",
duration: Some(229),
cover: [],
artists: [
ArtistId(
id: Some("UCAJwa_1l4rHzBJyWbeBtGZw"),
name: "[name]",
),
ArtistId(
id: Some("UCbBaYg2UToDaoOwo-R6xi4g"),
name: "[name]",
),
ArtistId(
id: Some("UCiY3z8HAGD6BlSNKVn2kSvQ"),
name: "[name]",
),
],
artist_id: Some("UCAJwa_1l4rHzBJyWbeBtGZw"),
album: Some(AlbumId(
id: "MPREb_F3Af9UZZVxX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
],
variants: [],
)

View file

@ -1,429 +0,0 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_nlBWQROfvjo",
playlist_id: Some("OLAK5uy_myZkBX2d2TzcrlQhIwLy3hCj2MkAMaPR4"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
description: "[description]",
album_type: Album,
year: Some(2016),
by_va: false,
tracks: [
TrackItem(
id: "g0iRiJ_ck48",
name: "[name]",
duration: Some(216),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "rREEBXp0y9s",
name: "[name]",
duration: Some(224),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "zvU5Y8Q19hU",
name: "[name]",
duration: Some(176),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "ARKLrzzTQA0",
name: "[name]",
duration: Some(215),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
TrackItem(
id: "tstLgN8A_Ng",
name: "[name]",
duration: Some(268),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(5),
by_va: false,
),
TrackItem(
id: "k2DjgQOY3Ts",
name: "[name]",
duration: Some(202),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(6),
by_va: false,
),
TrackItem(
id: "azHwhecxEsI",
name: "[name]",
duration: Some(185),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(7),
by_va: false,
),
TrackItem(
id: "_FcsdYIQ2co",
name: "[name]",
duration: Some(226),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(8),
by_va: false,
),
TrackItem(
id: "27bOWEbshyE",
name: "[name]",
duration: Some(207),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(9),
by_va: false,
),
TrackItem(
id: "riD_3oZwt8w",
name: "[name]",
duration: Some(211),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(10),
by_va: false,
),
TrackItem(
id: "8GNvjF3no9s",
name: "[name]",
duration: Some(179),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(11),
by_va: false,
),
TrackItem(
id: "YHMFzf1uN2U",
name: "[name]",
duration: Some(218),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(12),
by_va: false,
),
TrackItem(
id: "jvV-z5F3oAo",
name: "[name]",
duration: Some(277),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(13),
by_va: false,
),
TrackItem(
id: "u8_9cxlrh8k",
name: "[name]",
duration: Some(204),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(14),
by_va: false,
),
TrackItem(
id: "gSvKcvM1Wk0",
name: "[name]",
duration: Some(202),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(15),
by_va: false,
),
TrackItem(
id: "wQHgKRJ0pDQ",
name: "[name]",
duration: Some(222),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(16),
by_va: false,
),
TrackItem(
id: "Ckz5i6-hzf0",
name: "[name]",
duration: Some(177),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(17),
by_va: false,
),
TrackItem(
id: "y5zuUgyFqrc",
name: "[name]",
duration: Some(220),
cover: [],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album: Some(AlbumId(
id: "MPREb_nlBWQROfvjo",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(18),
by_va: false,
),
],
variants: [
AlbumItem(
id: "MPREb_jk6Msw8izou",
name: "Märchen enden gut (Nyáre Ranta (Märchenedition))",
cover: [
Thumbnail(
url: "https://lh3.googleusercontent.com/BKgnW_-hapCHk599AtRfTYZGdXVIo0C4bJp1Bh7qUpGK7fNAXGW8Bhv2x-ukeFM8cuxKbGqqGaTo8fZASA=w226-h226-l90-rj",
width: 226,
height: 226,
),
Thumbnail(
url: "https://lh3.googleusercontent.com/BKgnW_-hapCHk599AtRfTYZGdXVIo0C4bJp1Bh7qUpGK7fNAXGW8Bhv2x-ukeFM8cuxKbGqqGaTo8fZASA=w544-h544-l90-rj",
width: 544,
height: 544,
),
],
artists: [
ArtistId(
id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
name: "[name]",
),
],
artist_id: Some("UC_vmjW5e1xEHhYjY2a0kK1A"),
album_type: Album,
year: None,
by_va: false,
),
],
)

View file

@ -1,318 +0,0 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_cwzk8EUwypZ",
playlist_id: Some("OLAK5uy_kODvYZ5CEpYdtd4VPsmg0eRTlpazG0dvA"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
description: "[description]",
album_type: Show,
year: Some(2022),
by_va: false,
tracks: [
TrackItem(
id: "lSbKz5LWvKE",
name: "[name]",
duration: Some(229),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "fdO6gu4qjRw",
name: "[name]",
duration: Some(235),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "muCxstXirvY",
name: "[name]",
duration: Some(197),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "aG1N0vo__Ng",
name: "[name]",
duration: Some(186),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
TrackItem(
id: "roHhLNYS9yo",
name: "[name]",
duration: Some(188),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(5),
by_va: false,
),
TrackItem(
id: "nJ49NuLvcAw",
name: "[name]",
duration: Some(205),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(6),
by_va: false,
),
TrackItem(
id: "Me119D570h0",
name: "[name]",
duration: Some(219),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(7),
by_va: false,
),
TrackItem(
id: "YXnRLK-qKG8",
name: "[name]",
duration: Some(240),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(8),
by_va: false,
),
TrackItem(
id: "A61wz1jz9X0",
name: "[name]",
duration: Some(239),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(9),
by_va: false,
),
TrackItem(
id: "u_S08EJOTUg",
name: "[name]",
duration: Some(197),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(10),
by_va: false,
),
TrackItem(
id: "0qwYJihV1EU",
name: "[name]",
duration: Some(201),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(11),
by_va: false,
),
TrackItem(
id: "zjhoyTnEzuQ",
name: "[name]",
duration: Some(187),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(12),
by_va: false,
),
TrackItem(
id: "oDjDd0UBzAY",
name: "[name]",
duration: Some(183),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(13),
by_va: false,
),
TrackItem(
id: "_3-WVmqgi-Q",
name: "[name]",
duration: Some(193),
cover: [],
artists: [
ArtistId(
id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
name: "[name]",
),
],
artist_id: Some("UCNoyEM0e2A7WlsBmP2w3avg"),
album: Some(AlbumId(
id: "MPREb_cwzk8EUwypZ",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(14),
by_va: false,
),
],
variants: [],
)

View file

@ -1,53 +0,0 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_bHfHGoy7vuv",
playlist_id: Some("OLAK5uy_kdSWBZ-9AZDkYkuy0QCc3p0KO9DEHVNH0"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCXGYZ-OhdOpPBamHX3K9YRg"),
name: "[name]",
),
ArtistId(
id: Some("UCFTcSVPYRWlDoHisR-ZKwgw"),
name: "[name]",
),
],
artist_id: Some("UCXGYZ-OhdOpPBamHX3K9YRg"),
description: "[description]",
album_type: Single,
year: Some(2020),
by_va: false,
tracks: [
TrackItem(
id: "VU6lEv0PKAo",
name: "[name]",
duration: Some(183),
cover: [],
artists: [
ArtistId(
id: Some("UCXGYZ-OhdOpPBamHX3K9YRg"),
name: "[name]",
),
ArtistId(
id: Some("UCFTcSVPYRWlDoHisR-ZKwgw"),
name: "[name]",
),
],
artist_id: Some("UCXGYZ-OhdOpPBamHX3K9YRg"),
album: Some(AlbumId(
id: "MPREb_bHfHGoy7vuv",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
],
variants: [],
)

View file

@ -1,271 +0,0 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_AzuWg8qAVVl",
playlist_id: Some("OLAK5uy_mux5ygfN9sbiR1ma3yh1GHTmqNekZNoAI"),
name: "[name]",
cover: "[cover]",
artists: [],
artist_id: None,
description: "[description]",
album_type: Album,
year: Some(2019),
by_va: true,
tracks: [
TrackItem(
id: "R3VIKRtzAdE",
name: "[name]",
duration: Some(205),
cover: [],
artists: [
ArtistId(
id: Some("UCCj0RlDqqahEB5BXVtDcPqg"),
name: "[name]",
),
],
artist_id: Some("UCCj0RlDqqahEB5BXVtDcPqg"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "t0v0UOgOt18",
name: "[name]",
duration: Some(174),
cover: [],
artists: [
ArtistId(
id: Some("UCMrCoizKiBxqeg5pTpBXn1A"),
name: "[name]",
),
],
artist_id: Some("UCMrCoizKiBxqeg5pTpBXn1A"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "HjJYAkUXrxI",
name: "[name]",
duration: Some(199),
cover: [],
artists: [
ArtistId(
id: Some("UCWjoDY2SXJ5dvcdunWI6mjQ"),
name: "[name]",
),
],
artist_id: Some("UCWjoDY2SXJ5dvcdunWI6mjQ"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "Hg0KUOTL06I",
name: "[name]",
duration: Some(187),
cover: [],
artists: [
ArtistId(
id: Some("UChzK2t3sjnQkWzGnyKXOSSg"),
name: "[name]",
),
],
artist_id: Some("UChzK2t3sjnQkWzGnyKXOSSg"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(5),
by_va: false,
),
TrackItem(
id: "c8AfY6yhdkM",
name: "[name]",
duration: Some(159),
cover: [],
artists: [
ArtistId(
id: Some("UCvsgN5NKOzXnAURfaf3TOig"),
name: "[name]",
),
],
artist_id: Some("UCvsgN5NKOzXnAURfaf3TOig"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(6),
by_va: false,
),
TrackItem(
id: "_ZmdHjVvwhc",
name: "[name]",
duration: Some(186),
cover: [],
artists: [
ArtistId(
id: Some("UCI4YNnmHjXFaaKvfdmpWvJQ"),
name: "[name]",
),
],
artist_id: Some("UCI4YNnmHjXFaaKvfdmpWvJQ"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(7),
by_va: false,
),
TrackItem(
id: "wBe1Zi3q1n8",
name: "[name]",
duration: Some(209),
cover: [],
artists: [
ArtistId(
id: Some("UCDaFVUr2n8T7_X1f5yJ1xlw"),
name: "[name]",
),
],
artist_id: Some("UCDaFVUr2n8T7_X1f5yJ1xlw"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(8),
by_va: false,
),
TrackItem(
id: "l8Pj8s9uPGc",
name: "[name]",
duration: Some(209),
cover: [],
artists: [
ArtistId(
id: Some("UCZcc-WkffIMBVGUr6j9e6aQ"),
name: "[name]",
),
],
artist_id: Some("UCZcc-WkffIMBVGUr6j9e6aQ"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(9),
by_va: false,
),
TrackItem(
id: "Kn3cruxYj0c",
name: "[name]",
duration: Some(174),
cover: [],
artists: [
ArtistId(
id: Some("UCQPPz_A65SWYi2wXX8z76AQ"),
name: "[name]",
),
],
artist_id: Some("UCQPPz_A65SWYi2wXX8z76AQ"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(11),
by_va: false,
),
TrackItem(
id: "Sy1lIOl1YN0",
name: "[name]",
duration: Some(185),
cover: [],
artists: [
ArtistId(
id: Some("UChTOXkDhGJ0JftnfMWjpCCg"),
name: "[name]",
),
],
artist_id: Some("UChTOXkDhGJ0JftnfMWjpCCg"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(12),
by_va: false,
),
TrackItem(
id: "njdlNT1RRo4",
name: "[name]",
duration: Some(237),
cover: [],
artists: [
ArtistId(
id: Some("UCMUB52aO4CqrUXmLwbfRWYA"),
name: "[name]",
),
],
artist_id: Some("UCMUB52aO4CqrUXmLwbfRWYA"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(13),
by_va: false,
),
TrackItem(
id: "Si-CXM8CHqQ",
name: "[name]",
duration: Some(246),
cover: [],
artists: [
ArtistId(
id: Some("UC4YvDAbE1EYwZpj6gQ-lpLw"),
name: "[name]",
),
],
artist_id: Some("UC4YvDAbE1EYwZpj6gQ-lpLw"),
album: Some(AlbumId(
id: "MPREb_AzuWg8qAVVl",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(18),
by_va: false,
),
],
variants: [],
)

View file

@ -1,145 +0,0 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_8QkDeEIawvX",
playlist_id: Some("OLAK5uy_mEX9ljZeeEWgTM1xLL1isyiGaWXoPyoOk"),
name: "[name]",
cover: "[cover]",
artists: [],
artist_id: None,
description: "[description]",
album_type: Single,
year: Some(2022),
by_va: true,
tracks: [
TrackItem(
id: "Tzai7JXo45w",
name: "[name]",
duration: Some(274),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "9WYpLYAEub0",
name: "[name]",
duration: Some(216),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "R48tE237bW4",
name: "[name]",
duration: Some(239),
cover: [],
artists: [
ArtistId(
id: Some("UCAKvDuIX3m1AUdPpDSqV_3w"),
name: "[name]",
),
],
artist_id: Some("UCAKvDuIX3m1AUdPpDSqV_3w"),
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "-UzsoR6z-vg",
name: "[name]",
duration: Some(254),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
TrackItem(
id: "kbNVyn8Ex28",
name: "[name]",
duration: Some(187),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(5),
by_va: false,
),
TrackItem(
id: "NJrQZUzWP5Y",
name: "[name]",
duration: Some(224),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: None,
album: Some(AlbumId(
id: "MPREb_8QkDeEIawvX",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(6),
by_va: false,
),
],
variants: [],
)

View file

@ -1,138 +0,0 @@
---
source: tests/youtube.rs
expression: album
---
MusicAlbum(
id: "MPREb_h8ltx5oKvyY",
playlist_id: Some("OLAK5uy_lIDfTi_k8V1RJ54MeJJGK_BduAeYbm-0s"),
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
name: "[name]",
),
ArtistId(
id: Some("UCWgAqlYG7mXTUxrFiLyDSsg"),
name: "[name]",
),
],
artist_id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
description: "[description]",
album_type: Ep,
year: Some(1968),
by_va: false,
tracks: [
TrackItem(
id: "AKJ3IJZKPWc",
name: "[name]",
duration: Some(228),
cover: [],
artists: [
ArtistId(
id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
name: "[name]",
),
ArtistId(
id: Some("UCWgAqlYG7mXTUxrFiLyDSsg"),
name: "[name]",
),
],
artist_id: Some("UCl4iPtukwe7m0kIxUMskkgA"),
album: Some(AlbumId(
id: "MPREb_h8ltx5oKvyY",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(1),
by_va: false,
),
TrackItem(
id: "WnpZuHNB33E",
name: "[name]",
duration: Some(266),
cover: [],
artists: [
ArtistId(
id: Some("UC1C05NyYICFB2mVGn9_ttEw"),
name: "[name]",
),
],
artist_id: Some("UC1C05NyYICFB2mVGn9_ttEw"),
album: Some(AlbumId(
id: "MPREb_h8ltx5oKvyY",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(2),
by_va: false,
),
TrackItem(
id: "pRqoDGXg1-I",
name: "[name]",
duration: Some(154),
cover: [],
artists: [
ArtistId(
id: Some("UC_KQPMiRQl3CFAIKTVfCHwA"),
name: "[name]",
),
],
artist_id: Some("UC_KQPMiRQl3CFAIKTVfCHwA"),
album: Some(AlbumId(
id: "MPREb_h8ltx5oKvyY",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(3),
by_va: false,
),
TrackItem(
id: "20vIKLJxjBY",
name: "[name]",
duration: Some(178),
cover: [],
artists: [
ArtistId(
id: None,
name: "[name]",
),
],
artist_id: Some("UCDqpyYkgWy2h03HamIfODjw"),
album: Some(AlbumId(
id: "MPREb_h8ltx5oKvyY",
name: "[name]",
)),
view_count: None,
is_video: false,
track_nr: Some(4),
by_va: false,
),
],
variants: [
AlbumItem(
id: "MPREb_bqWA6mAZFWS",
name: "Pedha Rasi Peddamma Katha",
cover: [
Thumbnail(
url: "https://lh3.googleusercontent.com/cyKTDdyucqYv8xfv0t3Vs9CkhmvssXRKsGzlWN_DU6A9uapXvovV0Ys2fXc9-r7Jv7V4UB1OD48iYH5z=w226-h226-l90-rj",
width: 226,
height: 226,
),
Thumbnail(
url: "https://lh3.googleusercontent.com/cyKTDdyucqYv8xfv0t3Vs9CkhmvssXRKsGzlWN_DU6A9uapXvovV0Ys2fXc9-r7Jv7V4UB1OD48iYH5z=w544-h544-l90-rj",
width: 544,
height: 544,
),
],
artists: [],
artist_id: None,
album_type: Ep,
year: None,
by_va: true,
),
],
)

View file

@ -1,665 +0,0 @@
---
source: tests/youtube.rs
expression: artist
---
MusicArtist(
id: "UC7cl4MmM6ZZ2TcFyMk_b4pg",
name: "[name]",
header_image: "[header_image]",
description: "[description]",
wikipedia_url: "[wikipedia_url]",
subscriber_count: "[subscriber_count]",
tracks: "[tracks]",
albums: [
AlbumItem(
id: "MPREb_43NWLzXChnh",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_585fV7eqUP8",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_6PEkIQE7sWY",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_7nIPO6oeETY",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_88p7e6nBtgz",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_8rukEzdytkN",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_BJKvCuKo7nJ",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_EAiIEvINDHB",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_HrCgErOdgCv",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2004),
by_va: false,
),
AlbumItem(
id: "MPREb_Md2aZrjaqHX",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2021),
by_va: false,
),
AlbumItem(
id: "MPREb_OW1GOBZ64ap",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2018),
by_va: false,
),
AlbumItem(
id: "MPREb_Oq0WKqNwSVY",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2003),
by_va: false,
),
AlbumItem(
id: "MPREb_QEClJsuO9xM",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_QyGCcLWExXj",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_R3p5kDRIGKL",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2006),
by_va: false,
),
AlbumItem(
id: "MPREb_T4fJMmrfxXk",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2000),
by_va: false,
),
AlbumItem(
id: "MPREb_TiIBQqCFttT",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2016),
by_va: false,
),
AlbumItem(
id: "MPREb_U9HLD8nF7H5",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_U9dMPQUeR9q",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_V0FEmw2pj2u",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_WYx2c0e95TA",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_Wc8Ehka0R0T",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2021),
by_va: false,
),
AlbumItem(
id: "MPREb_Yj49s4xy7fM",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2021),
by_va: false,
),
AlbumItem(
id: "MPREb_baIxpKBcYbF",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2003),
by_va: false,
),
AlbumItem(
id: "MPREb_eiYjUXT1Mn3",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_f4MhYbccbPi",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2006),
by_va: false,
),
AlbumItem(
id: "MPREb_gHlGAdNjEZI",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_kW2NAMSZElX",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_m5U1xZasDSy",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2002),
by_va: false,
),
AlbumItem(
id: "MPREb_n1H3JiFyGkv",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_ohcGTZrqKPZ",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2004),
by_va: false,
),
AlbumItem(
id: "MPREb_pWpeXxATZYb",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_ptO8gh250LP",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2003),
by_va: false,
),
AlbumItem(
id: "MPREb_qbJv3f0ijrk",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2007),
by_va: false,
),
AlbumItem(
id: "MPREb_rHhaDLqalbT",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_rdrfznTDhSX",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_saXgTKNPaSu",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_t6zStv8YrVG",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_vM0cMpn8pHh",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_wgm3k1qxpbF",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_wmSecJVDwPB",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_xCehp2mGhCk",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_y5fUQ2toJwT",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2017),
by_va: false,
),
],
playlists: "[playlists]",
similar_artists: "[artists]",
tracks_playlist_id: Some("OLAK5uy_n6aX-F_lCQxgyTIv4FJhp78bXV93b9NUM"),
videos_playlist_id: Some("OLAK5uy_nrePwvOEzmO7SydszEFfCDu8gAJxKfFtw"),
radio_id: Some("RDEMdgjzN3Qrk_GD7BooQbkJ4A"),
)

View file

@ -1,320 +0,0 @@
---
source: tests/youtube.rs
expression: artist
---
MusicArtist(
id: "UC7cl4MmM6ZZ2TcFyMk_b4pg",
name: "[name]",
header_image: "[header_image]",
description: "[description]",
wikipedia_url: "[wikipedia_url]",
subscriber_count: "[subscriber_count]",
tracks: "[tracks]",
albums: [
AlbumItem(
id: "MPREb_43NWLzXChnh",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_585fV7eqUP8",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_6PEkIQE7sWY",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_88p7e6nBtgz",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2012),
by_va: false,
),
AlbumItem(
id: "MPREb_Md2aZrjaqHX",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2021),
by_va: false,
),
AlbumItem(
id: "MPREb_OW1GOBZ64ap",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2018),
by_va: false,
),
AlbumItem(
id: "MPREb_QyGCcLWExXj",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_R3p5kDRIGKL",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2006),
by_va: false,
),
AlbumItem(
id: "MPREb_TiIBQqCFttT",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2016),
by_va: false,
),
AlbumItem(
id: "MPREb_U9HLD8nF7H5",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_V0FEmw2pj2u",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_WYx2c0e95TA",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2008),
by_va: false,
),
AlbumItem(
id: "MPREb_Yj49s4xy7fM",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2021),
by_va: false,
),
AlbumItem(
id: "MPREb_f4MhYbccbPi",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2006),
by_va: false,
),
AlbumItem(
id: "MPREb_kW2NAMSZElX",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_n1H3JiFyGkv",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2015),
by_va: false,
),
AlbumItem(
id: "MPREb_pWpeXxATZYb",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_rHhaDLqalbT",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Ep,
year: Some(2010),
by_va: false,
),
AlbumItem(
id: "MPREb_saXgTKNPaSu",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_wmSecJVDwPB",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
name: "[name]",
),
],
artist_id: Some("UC7cl4MmM6ZZ2TcFyMk_b4pg"),
album_type: Album,
year: Some(2008),
by_va: false,
),
],
playlists: "[playlists]",
similar_artists: "[artists]",
tracks_playlist_id: Some("OLAK5uy_n6aX-F_lCQxgyTIv4FJhp78bXV93b9NUM"),
videos_playlist_id: Some("OLAK5uy_nrePwvOEzmO7SydszEFfCDu8gAJxKfFtw"),
radio_id: Some("RDEMdgjzN3Qrk_GD7BooQbkJ4A"),
)

View file

@ -1,19 +0,0 @@
---
source: tests/youtube.rs
expression: artist
---
MusicArtist(
id: "UCh8gHdtzO2tXd593_bjErWg",
name: "[name]",
header_image: "[header_image]",
description: "[description]",
wikipedia_url: "[wikipedia_url]",
subscriber_count: "[subscriber_count]",
tracks: "[tracks]",
albums: [],
playlists: "[playlists]",
similar_artists: "[artists]",
tracks_playlist_id: None,
videos_playlist_id: None,
radio_id: None,
)

View file

@ -1,155 +0,0 @@
---
source: tests/youtube.rs
expression: artist
---
MusicArtist(
id: "UCOR4_bSVIXPsGa4BbCSt60Q",
name: "[name]",
header_image: "[header_image]",
description: "[description]",
wikipedia_url: "[wikipedia_url]",
subscriber_count: "[subscriber_count]",
tracks: "[tracks]",
albums: [
AlbumItem(
id: "MPREb_8PsIyll0LFV",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_HPXN9BBzFpV",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_POeT6m0bw9q",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Ep,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_R6EV2L1q0oc",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_UYdRV1nnK2J",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Album,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_bi34SGT1xlc",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Album,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_hcK0fXETEf9",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2017),
by_va: false,
),
AlbumItem(
id: "MPREb_kLvmX2AzYBL",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2014),
by_va: false,
),
AlbumItem(
id: "MPREb_oHieBHkXn3A",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
name: "[name]",
),
],
artist_id: Some("UCOR4_bSVIXPsGa4BbCSt60Q"),
album_type: Single,
year: Some(2014),
by_va: false,
),
],
playlists: "[playlists]",
similar_artists: "[artists]",
tracks_playlist_id: Some("OLAK5uy_miHesZCUQY5S9EwqfoNP2tZR9nZ0NBAeU"),
videos_playlist_id: Some("OLAK5uy_mqbgE6T9uvusUWrAxJGiImf4_P4dM7IvQ"),
radio_id: Some("RDEM7AbogW0cCnElSU0WYm1GqA"),
)

View file

@ -1,35 +0,0 @@
---
source: tests/youtube.rs
expression: artist
---
MusicArtist(
id: "UCfwCE5VhPMGxNPFxtVv7lRw",
name: "[name]",
header_image: "[header_image]",
description: "[description]",
wikipedia_url: "[wikipedia_url]",
subscriber_count: "[subscriber_count]",
tracks: "[tracks]",
albums: [
AlbumItem(
id: "MPREb_vq8dZfFBEdx",
name: "[name]",
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCfwCE5VhPMGxNPFxtVv7lRw"),
name: "[name]",
),
],
artist_id: Some("UCfwCE5VhPMGxNPFxtVv7lRw"),
album_type: Single,
year: Some(2019),
by_va: false,
),
],
playlists: "[playlists]",
similar_artists: "[artists]",
tracks_playlist_id: None,
videos_playlist_id: Some("OLAK5uy_lmH3iVq6lqjsnLkBWzpvRTh0DidLzbU-I"),
radio_id: Some("RDEMYsk_DTFHAng1G7n5toi_oA"),
)

View file

@ -4,30 +4,26 @@ expression: track
---
TrackDetails(
track: TrackItem(
id: "qIZ-vvg-wiU",
name: "Scheiße baut sich nicht von alleine",
duration: Some(232),
id: "7nigXQS1Xb0",
name: "INVU",
duration: Some(205),
cover: "[cover]",
artists: [
ArtistId(
id: Some("UCAbxL0lZcmlaQrzoUbrvS3A"),
name: "SDP",
),
ArtistId(
id: Some("UCVRREKn7V1Cb8qvf43dwZ6w"),
name: "257ers",
id: Some("UCwzCuKxyMY_sT7hr1E8G1XA"),
name: "TAEYEON",
),
],
artist_id: Some("UCAbxL0lZcmlaQrzoUbrvS3A"),
artist_id: Some("UCwzCuKxyMY_sT7hr1E8G1XA"),
album: Some(AlbumId(
id: "MPREb_cjEzeaBgZAq",
name: "Ein Gutes Schlechtes Vorbild",
id: "MPREb_4xbv14CiQJm",
name: "INVU - The 3rd Album",
)),
view_count: "[view_count]",
is_video: false,
track_nr: None,
by_va: false,
),
lyrics_id: Some("MPLYt_cjEzeaBgZAq-2"),
related_id: Some("MPTRt_cjEzeaBgZAq-2"),
lyrics_id: Some("MPLYt_4xbv14CiQJm-1"),
related_id: Some("MPTRt_4xbv14CiQJm-1"),
)

View file

@ -1,5 +1,8 @@
---
source: tests/youtube.rs
expression: lyrics.body
expression: lyrics
---
"Eyes, in the sky, gazing far into the night\nI raise my hand to the fire, but it\'s no use\n\'Cause you can\'t stop it from shining through\nIt\'s true\nBaby let the light shine through\nIf you believe it\'s true\nBaby won\'t you let the light shine through\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nWon\'t you let the light shine through\n\nEyes, in the sky, gazing far into the night\nI raise my hand to the fire, but it\'s no use\n\'Cause you can\'t stop it from shining through\nIt\'s true\nBaby let the light shine through\nIf you believe it\'s true\nBaby won\'t you let the light shine through\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you"
Lyrics(
body: "Eyes, in the sky, gazing far into the night\nI raise my hand to the fire, but it\'s no use\n\'Cause you can\'t stop it from shining through\nIt\'s true\nBaby let the light shine through\nIf you believe it\'s true\nBaby won\'t you let the light shine through\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nWon\'t you let the light shine through\n\nEyes, in the sky, gazing far into the night\nI raise my hand to the fire, but it\'s no use\n\'Cause you can\'t stop it from shining through\nIt\'s true\nBaby let the light shine through\nIf you believe it\'s true\nBaby won\'t you let the light shine through\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you\nFor you",
footer: "Source: Musixmatch",
)

View file

@ -1,10 +1,8 @@
use std::collections::HashSet;
use std::fmt::Display;
use std::str::FromStr;
use rstest::{fixture, rstest};
use rustypipe::model::paginator::ContinuationEndpoint;
use rustypipe::param::Language;
use rustypipe::validate;
use time::macros::date;
use time::OffsetDateTime;
@ -348,14 +346,11 @@ fn get_playlist(
#[case] description: Option<String>,
#[case] channel: Option<(&str, &str)>,
rp: RustyPipe,
unlocalized: bool,
) {
let playlist = tokio_test::block_on(rp.query().playlist(id)).unwrap();
assert_eq!(playlist.id, id);
if unlocalized {
assert_eq!(playlist.name, name);
}
assert_eq!(playlist.name, name);
assert!(!playlist.videos.is_empty());
assert_eq!(!playlist.videos.is_exhausted(), is_long);
assert_gte(
@ -926,46 +921,35 @@ fn assert_channel_eevblog<T>(channel: &Channel<T>) {
}
#[rstest]
#[case::artist("UC_vmjW5e1xEHhYjY2a0kK1A", "Oonagh - Topic", false, false, false)]
#[case::shorts("UCh8gHdtzO2tXd593_bjErWg", "Doobydobap", true, true, true)]
#[case::artist("UC_vmjW5e1xEHhYjY2a0kK1A", "Oonagh - Topic", false, false)]
#[case::shorts("UCh8gHdtzO2tXd593_bjErWg", "Doobydobap", true, true)]
#[case::livestream(
"UChs0pSaEoNLV4mevBFGaoKA",
"The Good Life Radio x Sensual Musique",
true,
true,
true
)]
#[case::music("UC-9-kyTW8ZkZNDHQJ6FgpwQ", "Music", false, false, false)]
#[case::music("UC-9-kyTW8ZkZNDHQJ6FgpwQ", "Music", false, false)]
fn channel_more(
#[case] id: &str,
#[case] name: &str,
#[case] has_videos: bool,
#[case] has_playlists: bool,
#[case] name_unlocalized: bool,
rp: RustyPipe,
unlocalized: bool,
) {
fn assert_channel<T>(channel: &Channel<T>, id: &str, name: &str, unlocalized: bool) {
fn assert_channel<T>(channel: &Channel<T>, id: &str, name: &str) {
assert_eq!(channel.id, id);
if unlocalized {
assert_eq!(channel.name, name);
}
assert_eq!(channel.name, name);
}
let channel_videos = tokio_test::block_on(rp.query().channel_videos(&id)).unwrap();
assert_channel(&channel_videos, id, name, unlocalized || name_unlocalized);
assert_channel(&channel_videos, id, name);
if has_videos {
assert!(!channel_videos.content.items.is_empty(), "got no videos");
}
let channel_playlists = tokio_test::block_on(rp.query().channel_playlists(&id)).unwrap();
assert_channel(
&channel_playlists,
id,
name,
unlocalized || name_unlocalized,
);
assert_channel(&channel_playlists, id, name);
if has_playlists {
assert!(
!channel_playlists.content.items.is_empty(),
@ -974,7 +958,7 @@ fn channel_more(
}
let channel_info = tokio_test::block_on(rp.query().channel_info(&id)).unwrap();
assert_channel(&channel_info, id, name, unlocalized || name_unlocalized);
assert_channel(&channel_info, id, name);
}
#[rstest]
@ -984,7 +968,7 @@ fn channel_more(
#[case::sports("UCEgdi0XIXXZ-qJOFPf4JSKw")]
#[case::learning("UCtFRv9O2AHqOZjjynzrv-xg")]
#[case::live("UC4R8DWoMoI7CAwX8_LjQHig")]
// #[case::news("UCYfdidRxbB8Qhf0Nx7ioOYw")]
#[case::news("UCYfdidRxbB8Qhf0Nx7ioOYw")]
fn channel_not_found(#[case] id: &str, rp: RustyPipe) {
let err = tokio_test::block_on(rp.query().channel_videos(&id)).unwrap_err();
@ -1046,18 +1030,15 @@ mod channel_rss {
//#SEARCH
#[rstest]
fn search(rp: RustyPipe, unlocalized: bool) {
fn search(rp: RustyPipe) {
let result = tokio_test::block_on(rp.query().search("doobydoobap")).unwrap();
assert_gte(
result.items.count.unwrap(),
if unlocalized { 7000 } else { 150 },
"results",
assert!(
result.items.count.unwrap() > 7000,
"expected > 7000 total results, got {}",
result.items.count.unwrap()
);
if unlocalized {
assert_eq!(result.corrected_query.unwrap(), "doobydobap");
}
assert_eq!(result.corrected_query.unwrap(), "doobydobap");
assert_next(result.items, rp.query(), 10, 2);
}
@ -1113,12 +1094,8 @@ fn search_suggestion(rp: RustyPipe) {
#[rstest]
fn search_suggestion_empty(rp: RustyPipe) {
let result = tokio_test::block_on(
rp.query()
.lang(Language::Th)
.search_suggestion("fjew327p4ifjelwfvnewg49"),
)
.unwrap();
let result =
tokio_test::block_on(rp.query().search_suggestion("fjew327%4ifjelwfvnewg49")).unwrap();
assert!(result.is_empty());
}
@ -1237,11 +1214,11 @@ fn music_playlist(
#[case] channel: Option<(&str, &str)>,
#[case] from_ytm: bool,
rp: RustyPipe,
unlocalized: bool,
) {
let playlist = tokio_test::block_on(rp.query().music_playlist(id)).unwrap();
assert_eq!(playlist.id, id);
assert_eq!(playlist.name, name);
assert!(!playlist.tracks.is_empty());
assert_eq!(!playlist.tracks.is_exhausted(), is_long);
assert_gte(
@ -1249,10 +1226,7 @@ fn music_playlist(
if is_long { 100 } else { 10 },
"track count",
);
if unlocalized {
assert_eq!(playlist.name, name);
assert_eq!(playlist.description, description);
}
assert_eq!(playlist.description, description);
if let Some(expect) = channel {
let c = playlist.channel.unwrap();
@ -1322,29 +1296,14 @@ fn music_playlist_not_found(rp: RustyPipe) {
#[case::no_year("no_year", "MPREb_F3Af9UZZVxX")]
#[case::version_no_artist("version_no_artist", "MPREb_h8ltx5oKvyY")]
#[case::no_artist("no_artist", "MPREb_bqWA6mAZFWS")]
fn music_album(#[case] name: &str, #[case] id: &str, rp: RustyPipe, unlocalized: bool) {
fn music_album(#[case] name: &str, #[case] id: &str, rp: RustyPipe) {
let album = tokio_test::block_on(rp.query().music_album(id)).unwrap();
assert!(!album.cover.is_empty(), "got no cover");
if unlocalized {
insta::assert_ron_snapshot!(format!("music_album_{name}"), album,
{".cover" => "[cover]"}
);
} else {
insta::assert_ron_snapshot!(format!("music_album_{name}_intl"), album,
{
".name" => "[name]",
".cover" => "[cover]",
".description" => "[description]",
".artists[].name" => "[name]",
".tracks[].name" => "[name]",
".tracks[].album.name" => "[name]",
".tracks[].artists[].name" => "[name]",
".variants[].artists[].name" => "[name]",
}
);
}
insta::assert_ron_snapshot!(format!("music_album_{name}"), album,
{".cover" => "[cover]"}
);
}
#[rstest]
@ -1361,9 +1320,8 @@ fn music_album_not_found(rp: RustyPipe) {
}
#[rstest]
// TODO: fix this/swap artist
// #[case::basic_all("basic_all", "UC7cl4MmM6ZZ2TcFyMk_b4pg", true, 15, 2)]
// #[case::basic("basic", "UC7cl4MmM6ZZ2TcFyMk_b4pg", false, 15, 2)]
#[case::basic_all("basic_all", "UC7cl4MmM6ZZ2TcFyMk_b4pg", true, 15, 2)]
#[case::basic("basic", "UC7cl4MmM6ZZ2TcFyMk_b4pg", false, 15, 2)]
#[case::no_more_albums("no_more_albums", "UCOR4_bSVIXPsGa4BbCSt60Q", true, 15, 0)]
#[case::only_singles("only_singles", "UCfwCE5VhPMGxNPFxtVv7lRw", false, 13, 0)]
#[case::no_artist("no_artist", "UCh8gHdtzO2tXd593_bjErWg", false, 0, 2)]
@ -1377,7 +1335,6 @@ fn music_artist(
#[case] min_tracks: usize,
#[case] min_playlists: usize,
rp: RustyPipe,
unlocalized: bool,
) {
let mut artist = tokio_test::block_on(rp.query().music_artist(id, all_albums)).unwrap();
@ -1413,30 +1370,14 @@ fn music_artist(
// Sort albums to ensure consistent order
artist.albums.sort_by_key(|a| a.id.to_owned());
if unlocalized {
insta::assert_ron_snapshot!(format!("music_artist_{name}"), artist, {
".header_image" => "[header_image]",
".subscriber_count" => "[subscriber_count]",
".albums[].cover" => "[cover]",
".tracks" => "[tracks]",
".playlists" => "[playlists]",
".similar_artists" => "[artists]",
});
} else {
insta::assert_ron_snapshot!(format!("music_artist_{name}_intl"), artist, {
".name" => "[name]",
".header_image" => "[header_image]",
".description" => "[description]",
".wikipedia_url" => "[wikipedia_url]",
".subscriber_count" => "[subscriber_count]",
".albums[].name" => "[name]",
".albums[].cover" => "[cover]",
".albums[].artists[].name" => "[name]",
".tracks" => "[tracks]",
".playlists" => "[playlists]",
".similar_artists" => "[artists]",
});
}
insta::assert_ron_snapshot!(format!("music_artist_{name}"), artist, {
".header_image" => "[header_image]",
".subscriber_count" => "[subscriber_count]",
".albums[].cover" => "[cover]",
".tracks" => "[tracks]",
".playlists" => "[playlists]",
".similar_artists" => "[artists]",
});
}
#[rstest]
@ -1456,7 +1397,7 @@ fn music_artist_not_found(rp: RustyPipe) {
#[rstest]
#[case::default(false)]
#[case::typo(true)]
fn music_search(#[case] typo: bool, rp: RustyPipe, unlocalized: bool) {
fn music_search(#[case] typo: bool, rp: RustyPipe) {
let res = tokio_test::block_on(rp.query().music_search(match typo {
false => "lieblingsmensch namika",
true => "lieblingsmesch namika",
@ -1470,9 +1411,7 @@ fn music_search(#[case] typo: bool, rp: RustyPipe, unlocalized: bool) {
assert_eq!(res.order[0], MusicItemType::Track);
if typo {
if unlocalized {
assert_eq!(res.corrected_query.unwrap(), "lieblingsmensch namika");
}
assert_eq!(res.corrected_query.unwrap(), "lieblingsmensch namika");
} else {
assert_eq!(res.corrected_query, None);
}
@ -1495,9 +1434,7 @@ fn music_search(#[case] typo: bool, rp: RustyPipe, unlocalized: bool) {
track_artist.id.as_ref().unwrap(),
"UCIh4j8fXWf2U0ro0qnGU8Mg"
);
if unlocalized {
assert_eq!(track_artist.name, "Namika");
}
assert_eq!(track_artist.name, "Namika");
let track_album = track.album.as_ref().unwrap();
assert_eq!(track_album.id, "MPREb_RXHxrUFfrvQ");
@ -1509,7 +1446,7 @@ fn music_search(#[case] typo: bool, rp: RustyPipe, unlocalized: bool) {
}
#[rstest]
fn music_search2(rp: RustyPipe, unlocalized: bool) {
fn music_search2(rp: RustyPipe) {
let res = tokio_test::block_on(rp.query().music_search("taylor swift")).unwrap();
assert!(!res.tracks.is_empty(), "no tracks");
@ -1526,14 +1463,12 @@ fn music_search2(rp: RustyPipe, unlocalized: bool) {
panic!("could not find artist, got {:#?}", &res.artists);
});
if unlocalized {
assert_eq!(artist.name, "Taylor Swift");
}
assert_eq!(artist.name, "Taylor Swift");
assert!(!artist.avatar.is_empty(), "got no avatar");
}
#[rstest]
fn music_search_tracks(rp: RustyPipe, unlocalized: bool) {
fn music_search_tracks(rp: RustyPipe) {
let res = tokio_test::block_on(rp.query().music_search_tracks("black mamba")).unwrap();
let track = &res
@ -1554,9 +1489,7 @@ fn music_search_tracks(rp: RustyPipe, unlocalized: bool) {
track_artist.id.as_ref().unwrap(),
"UCEdZAdnnKqbaHOlv8nM6OtA"
);
if unlocalized {
assert_eq!(track_artist.name, "aespa");
}
assert_eq!(track_artist.name, "aespa");
assert_eq!(track.duration.unwrap(), 175);
@ -1568,7 +1501,7 @@ fn music_search_tracks(rp: RustyPipe, unlocalized: bool) {
}
#[rstest]
fn music_search_videos(rp: RustyPipe, unlocalized: bool) {
fn music_search_videos(rp: RustyPipe) {
let res = tokio_test::block_on(rp.query().music_search_videos("black mamba")).unwrap();
let track = &res
@ -1589,9 +1522,7 @@ fn music_search_videos(rp: RustyPipe, unlocalized: bool) {
track_artist.id.as_ref().unwrap(),
"UCEdZAdnnKqbaHOlv8nM6OtA"
);
if unlocalized {
assert_eq!(track_artist.name, "aespa");
}
assert_eq!(track_artist.name, "aespa");
assert_eq!(track.duration.unwrap(), 230);
assert_eq!(track.album, None);
@ -1600,6 +1531,8 @@ fn music_search_videos(rp: RustyPipe, unlocalized: bool) {
assert_next(res.items, rp.query(), 15, 2);
}
// This podcast was removed from YouTube Music and I could not find another one
/*
#[tokio::test]
async fn music_search_episode() {
let rp = RustyPipe::builder().strict().build();
@ -1621,7 +1554,7 @@ async fn music_search_episode() {
"Blond - Da muss man dabei gewesen sein: Das Hörspiel - Fall #1"
);
assert!(!track.cover.is_empty(), "got no cover");
}
}*/
#[rstest]
#[case::single(
@ -1664,7 +1597,6 @@ fn music_search_albums(
#[case] album_type: AlbumType,
#[case] more: bool,
rp: RustyPipe,
unlocalized: bool,
) {
let res = tokio_test::block_on(rp.query().music_search_albums(query)).unwrap();
@ -1674,9 +1606,7 @@ fn music_search_albums(
assert_eq!(album.artists.len(), 1);
let album_artist = &album.artists[0];
assert_eq!(album_artist.id.as_ref().unwrap(), artist_id);
if unlocalized {
assert_eq!(album_artist.name, artist);
}
assert_eq!(album_artist.name, artist);
assert_eq!(album.artist_id.as_ref().unwrap(), artist_id);
assert!(!album.cover.is_empty(), "got no cover");
@ -1685,13 +1615,13 @@ fn music_search_albums(
assert_eq!(res.corrected_query, None);
if more && unlocalized {
if more {
assert_next(res.items, rp.query(), 15, 1);
}
}
#[rstest]
fn music_search_artists(rp: RustyPipe, unlocalized: bool) {
fn music_search_artists(rp: RustyPipe) {
let res = tokio_test::block_on(rp.query().music_search_artists("namika")).unwrap();
let artist = res
@ -1700,9 +1630,7 @@ fn music_search_artists(rp: RustyPipe, unlocalized: bool) {
.iter()
.find(|a| a.id == "UCIh4j8fXWf2U0ro0qnGU8Mg")
.unwrap();
if unlocalized {
assert_eq!(artist.name, "Namika");
}
assert_eq!(artist.name, "Namika");
assert!(!artist.avatar.is_empty(), "got no avatar");
assert!(
artist.subscriber_count.unwrap() > 735_000,
@ -1723,7 +1651,7 @@ fn music_search_artists_cont(rp: RustyPipe) {
#[rstest]
#[case::ytm(false)]
#[case::default(true)]
fn music_search_playlists(#[case] with_community: bool, rp: RustyPipe, unlocalized: bool) {
fn music_search_playlists(#[case] with_community: bool, rp: RustyPipe) {
let res = if with_community {
tokio_test::block_on(rp.query().music_search_playlists("pop biggest hits")).unwrap()
} else {
@ -1742,9 +1670,7 @@ fn music_search_playlists(#[case] with_community: bool, rp: RustyPipe, unlocaliz
.find(|p| p.id == "RDCLAK5uy_nmS3YoxSwVVQk9lEQJ0UX4ZCjXsW_psU8")
.unwrap();
if unlocalized {
assert_eq!(playlist.name, "Pop's Biggest Hits");
}
assert_eq!(playlist.name, "Pop's Biggest Hits");
assert!(!playlist.thumbnail.is_empty(), "got no thumbnail");
assert_gte(playlist.track_count.unwrap(), 100, "tracks");
assert_eq!(playlist.channel, None);
@ -1835,7 +1761,7 @@ fn music_search_suggestion(
#[rstest]
#[case::mv("mv", "ZeerrnuLi5E")]
#[case::track("track", "qIZ-vvg-wiU")]
#[case::track("track", "7nigXQS1Xb0")]
fn music_details(#[case] name: &str, #[case] id: &str, rp: RustyPipe) {
let track = tokio_test::block_on(rp.query().music_details(id)).unwrap();
@ -1858,12 +1784,7 @@ fn music_details(#[case] name: &str, #[case] id: &str, rp: RustyPipe) {
fn music_lyrics(rp: RustyPipe) {
let track = tokio_test::block_on(rp.query().music_details("60ImQ8DS3Vs")).unwrap();
let lyrics = tokio_test::block_on(rp.query().music_lyrics(&track.lyrics_id.unwrap())).unwrap();
insta::assert_ron_snapshot!(lyrics.body);
assert!(
lyrics.footer.contains("Musixmatch"),
"footer text: {}",
lyrics.footer
)
insta::assert_ron_snapshot!(lyrics);
}
#[rstest]
@ -2083,8 +2004,8 @@ fn music_charts(
assert_eq!(charts.top_playlist_id.unwrap(), plid_top);
assert_eq!(charts.trending_playlist_id.unwrap(), plid_trend);
assert_gte(charts.top_tracks.len(), 30, "top tracks");
assert_gte(charts.artists.len(), 30, "top artists");
assert_gte(charts.top_tracks.len(), 40, "top tracks");
assert_gte(charts.artists.len(), 40, "top artists");
assert_gte(charts.trending_tracks.len(), 15, "trending tracks");
// Chart playlists only available in USA
@ -2120,16 +2041,14 @@ fn music_new_videos(rp: RustyPipe) {
}
#[rstest]
fn music_genres(rp: RustyPipe, unlocalized: bool) {
fn music_genres(rp: RustyPipe) {
let genres = tokio_test::block_on(rp.query().music_genres()).unwrap();
let chill = genres
.iter()
.find(|g| g.id == "ggMPOg1uX1JOQWZFeDByc2Jm")
.unwrap();
if unlocalized {
assert_eq!(chill.name, "Chill");
}
assert_eq!(chill.name, "Chill");
assert!(chill.is_mood);
let pop = genres
@ -2148,19 +2067,12 @@ fn music_genres(rp: RustyPipe, unlocalized: bool) {
#[rstest]
#[case::chill("ggMPOg1uX1JOQWZFeDByc2Jm", "Chill")]
#[case::pop("ggMPOg1uX1lMbVZmbzl6NlJ3", "Pop")]
fn music_genre(#[case] id: &str, #[case] name: &str, rp: RustyPipe, unlocalized: bool) {
fn music_genre(#[case] id: &str, #[case] name: &str, rp: RustyPipe) {
let genre = tokio_test::block_on(rp.query().music_genre(id)).unwrap();
fn check_music_genre(
genre: MusicGenre,
id: &str,
name: &str,
unlocalized: bool,
) -> Vec<(String, String)> {
fn check_music_genre(genre: MusicGenre, id: &str, name: &str) -> Vec<(String, String)> {
assert_eq!(genre.id, id);
if unlocalized {
assert_eq!(genre.name, name);
}
assert_eq!(genre.name, name);
assert_gte(genre.sections.len(), 2, "genre sections");
let mut subgenres = Vec::new();
@ -2193,7 +2105,7 @@ fn music_genre(#[case] id: &str, #[case] name: &str, rp: RustyPipe, unlocalized:
subgenres
}
let subgenres = check_music_genre(genre, id, name, unlocalized);
let subgenres = check_music_genre(genre, id, name);
if name == "Chill" {
assert_gte(subgenres.len(), 2, "subgenres");
@ -2201,7 +2113,7 @@ fn music_genre(#[case] id: &str, #[case] name: &str, rp: RustyPipe, unlocalized:
for (id, name) in subgenres {
let genre = tokio_test::block_on(rp.query().music_genre(&id)).unwrap();
check_music_genre(genre, &id, &name, unlocalized);
check_music_genre(genre, &id, &name);
}
}
@ -2255,25 +2167,10 @@ fn invalid_ctoken(#[case] ep: ContinuationEndpoint, rp: RustyPipe) {
//#TESTUTIL
/// Get the language setting from the environment variable
#[fixture]
fn lang() -> Language {
std::env::var("YT_LANG")
.ok()
.map(|l| Language::from_str(&l).unwrap())
.unwrap_or(Language::En)
}
/// Get a new RustyPipe instance
#[fixture]
fn rp(lang: Language) -> RustyPipe {
RustyPipe::builder().strict().lang(lang).build()
}
/// Get a flag signaling if the language is set to English
#[fixture]
fn unlocalized(lang: Language) -> bool {
lang == Language::En
fn rp() -> RustyPipe {
RustyPipe::builder().strict().build()
}
/// Get a new RustyPipe instance with pre-set visitor data