From 5af28a291c6f1d9b6197649d8ec6394d1582f3fe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 09:39:51 +0000 Subject: [PATCH 01/12] fix: the pedantic clippy warnings --- src/config.rs | 4 ++-- src/protocol/info_hash.rs | 2 +- src/tracker/auth.rs | 2 +- src/udp/handlers.rs | 6 ++++-- tests/api.rs | 6 ++++-- tests/udp.rs | 8 +++++--- 6 files changed, 17 insertions(+), 11 deletions(-) diff --git a/src/config.rs b/src/config.rs index a7e7e9df6..ba99e0f45 100644 --- a/src/config.rs +++ b/src/config.rs @@ -74,7 +74,7 @@ impl std::fmt::Display for Error { Error::ConfigError(e) => e.fmt(f), Error::IOError(e) => e.fmt(f), Error::ParseError(e) => e.fmt(f), - Error::TrackerModeIncompatible => write!(f, "{:?}", self), + Error::TrackerModeIncompatible => write!(f, "{self:?}"), } } } @@ -296,6 +296,6 @@ mod tests { fn configuration_error_could_be_displayed() { let error = Error::TrackerModeIncompatible; - assert_eq!(format!("{}", error), "TrackerModeIncompatible"); + assert_eq!(format!("{error}"), "TrackerModeIncompatible"); } } diff --git a/src/protocol/info_hash.rs b/src/protocol/info_hash.rs index 3b9b2fa35..9a0900063 100644 --- a/src/protocol/info_hash.rs +++ b/src/protocol/info_hash.rs @@ -133,7 +133,7 @@ mod tests { fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - let output = format!("{}", info_hash); + let output = format!("{info_hash}"); assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); } diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs index 7ac6d6939..02450dc82 100644 --- a/src/tracker/auth.rs +++ b/src/tracker/auth.rs @@ -90,7 +90,7 @@ pub enum Error { impl From for Error { fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - eprintln!("{}", e); + eprintln!("{e}"); Error::KeyVerificationError } } diff --git a/src/udp/handlers.rs b/src/udp/handlers.rs index d167b3e6d..001fb2380 100644 --- 
a/src/udp/handlers.rs +++ b/src/udp/handlers.rs @@ -255,8 +255,10 @@ mod tests { } fn default_testing_tracker_configuration() -> Configuration { - let mut config = Configuration::default(); - config.log_level = Some("off".to_owned()); + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; // Ephemeral socket address let port = ephemeral_random_port(); diff --git a/tests/api.rs b/tests/api.rs index 706cd0b8d..84ddac573 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -203,8 +203,10 @@ mod tracker_api { } fn tracker_configuration() -> Arc { - let mut config = Configuration::default(); - config.log_level = Some("off".to_owned()); + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; // Ephemeral socket address let port = ephemeral_random_port(); diff --git a/tests/udp.rs b/tests/udp.rs index 5f7a66856..55384db05 100644 --- a/tests/udp.rs +++ b/tests/udp.rs @@ -28,8 +28,10 @@ mod udp_tracker_server { use crate::common::ephemeral_random_port; fn tracker_configuration() -> Arc { - let mut config = Configuration::default(); - config.log_level = Some("off".to_owned()); + let mut config = Configuration { + log_level: Some("off".to_owned()), + ..Default::default() + }; // Ephemeral socket address let port = ephemeral_random_port(); @@ -181,7 +183,7 @@ mod udp_tracker_server { /// Generates the source address for the UDP client fn source_address(port: u16) -> String { - format!("127.0.0.1:{}", port) + format!("127.0.0.1:{port}") } fn is_error_response(response: &Response, error_message: &str) -> bool { From b23d64b9c3d58a6f4f7dab8a60775fc234aaadbd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 08:49:56 +0000 Subject: [PATCH 02/12] feat: add ssl support for the API New config options have been added to support HTTPS connections to the API: ``` [http_api] ssl_enabled = false ssl_cert_path = "./storage/ssl_certificates/localhost.crt" ssl_key_path = 
"./storage/ssl_certificates/localhost.key" ``` --- src/api/mod.rs | 18 +++ src/api/routes.rs | 307 ++++++++++++++++++++++++++++++++++++ src/api/server.rs | 333 +++------------------------------------ src/config.rs | 41 +++-- src/jobs/http_tracker.rs | 4 +- src/jobs/tracker_api.rs | 26 +-- src/jobs/udp_tracker.rs | 4 +- src/setup.rs | 2 +- tests/api.rs | 2 +- 9 files changed, 393 insertions(+), 344 deletions(-) create mode 100644 src/api/routes.rs diff --git a/src/api/mod.rs b/src/api/mod.rs index 16abb8e27..d254c91ac 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,2 +1,20 @@ pub mod resource; +pub mod routes; pub mod server; + +use serde::{Deserialize, Serialize}; + +#[derive(Deserialize, Debug)] +pub struct TorrentInfoQuery { + offset: Option, + limit: Option, +} + +#[derive(Serialize, Debug)] +#[serde(tag = "status", rename_all = "snake_case")] +enum ActionStatus<'a> { + Ok, + Err { reason: std::borrow::Cow<'a, str> }, +} + +impl warp::reject::Reject for ActionStatus<'static> {} diff --git a/src/api/routes.rs b/src/api/routes.rs new file mode 100644 index 000000000..76b449e9b --- /dev/null +++ b/src/api/routes.rs @@ -0,0 +1,307 @@ +use std::cmp::min; +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; +use std::time::Duration; + +use serde::Deserialize; +use warp::{filters, reply, Filter}; + +use super::resource::auth_key::AuthKey; +use super::resource::peer; +use super::resource::stats::Stats; +use super::resource::torrent::{ListItem, Torrent}; +use super::{ActionStatus, TorrentInfoQuery}; +use crate::protocol::info_hash::InfoHash; +use crate::tracker; + +fn authenticate(tokens: HashMap) -> impl Filter + Clone { + #[derive(Deserialize)] + struct AuthToken { + token: Option, + } + + let tokens: HashSet = tokens.into_values().collect(); + + let tokens = Arc::new(tokens); + warp::filters::any::any() + .map(move || tokens.clone()) + .and(filters::query::query::()) + .and_then(|tokens: Arc>, token: AuthToken| async move { + match token.token { + 
Some(token) => { + if !tokens.contains(&token) { + return Err(warp::reject::custom(ActionStatus::Err { + reason: "token not valid".into(), + })); + } + + Ok(()) + } + None => Err(warp::reject::custom(ActionStatus::Err { + reason: "unauthorized".into(), + })), + } + }) + .untuple_one() +} + +#[allow(clippy::too_many_lines)] +#[must_use] +pub fn routes(tracker: &Arc) -> impl Filter + Clone { + // GET /api/torrents?offset=:u32&limit=:u32 + // View torrent list + let api_torrents = tracker.clone(); + let view_torrent_list = filters::method::get() + .and(filters::path::path("torrents")) + .and(filters::path::end()) + .and(filters::query::query()) + .map(move |limits| { + let tracker = api_torrents.clone(); + (limits, tracker) + }) + .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { + let offset = limits.offset.unwrap_or(0); + let limit = min(limits.limit.unwrap_or(1000), 4000); + + let db = tracker.get_torrents().await; + let results: Vec<_> = db + .iter() + .map(|(info_hash, torrent_entry)| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + ListItem { + info_hash: info_hash.to_string(), + seeders, + completed, + leechers, + peers: None, + } + }) + .skip(offset as usize) + .take(limit as usize) + .collect(); + + Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) + }); + + // GET /api/stats + // View tracker status + let api_stats = tracker.clone(); + let view_stats_list = filters::method::get() + .and(filters::path::path("stats")) + .and(filters::path::end()) + .map(move || api_stats.clone()) + .and_then(|tracker: Arc| async move { + let mut results = Stats { + torrents: 0, + seeders: 0, + completed: 0, + leechers: 0, + tcp4_connections_handled: 0, + tcp4_announces_handled: 0, + tcp4_scrapes_handled: 0, + tcp6_connections_handled: 0, + tcp6_announces_handled: 0, + tcp6_scrapes_handled: 0, + udp4_connections_handled: 0, + udp4_announces_handled: 0, + udp4_scrapes_handled: 0, + udp6_connections_handled: 0, + 
udp6_announces_handled: 0, + udp6_scrapes_handled: 0, + }; + + let db = tracker.get_torrents().await; + + db.values().for_each(|torrent_entry| { + let (seeders, completed, leechers) = torrent_entry.get_stats(); + results.seeders += seeders; + results.completed += completed; + results.leechers += leechers; + results.torrents += 1; + }); + + let stats = tracker.get_stats().await; + + #[allow(clippy::cast_possible_truncation)] + { + results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; + results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; + results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; + results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; + results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; + results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; + results.udp4_connections_handled = stats.udp4_connections_handled as u32; + results.udp4_announces_handled = stats.udp4_announces_handled as u32; + results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; + results.udp6_connections_handled = stats.udp6_connections_handled as u32; + results.udp6_announces_handled = stats.udp6_announces_handled as u32; + results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; + } + + Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) + }); + + // GET /api/torrent/:info_hash + // View torrent info + let t2 = tracker.clone(); + let view_torrent_info = filters::method::get() + .and(filters::path::path("torrent")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |info_hash: InfoHash| { + let tracker = t2.clone(); + (info_hash, tracker) + }) + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + let db = tracker.get_torrents().await; + let torrent_entry_option = db.get(&info_hash); + + let torrent_entry = match torrent_entry_option { + Some(torrent_entry) => torrent_entry, + None => { + return Result::<_, 
warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); + } + }; + let (seeders, completed, leechers) = torrent_entry.get_stats(); + + let peers = torrent_entry.get_peers(None); + + let peer_resources = peers.iter().map(|peer| peer::Peer::from(**peer)).collect(); + + Ok(reply::json(&Torrent { + info_hash: info_hash.to_string(), + seeders, + completed, + leechers, + peers: Some(peer_resources), + })) + }); + + // DELETE /api/whitelist/:info_hash + // Delete info hash from whitelist + let t3 = tracker.clone(); + let delete_torrent = filters::method::delete() + .and(filters::path::path("whitelist")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |info_hash: InfoHash| { + let tracker = t3.clone(); + (info_hash, tracker) + }) + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to remove torrent from whitelist".into(), + })), + } + }); + + // POST /api/whitelist/:info_hash + // Add info hash to whitelist + let t4 = tracker.clone(); + let add_torrent = filters::method::post() + .and(filters::path::path("whitelist")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |info_hash: InfoHash| { + let tracker = t4.clone(); + (info_hash, tracker) + }) + .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { + match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to whitelist torrent".into(), + })), + } + }); + + // POST /api/key/:seconds_valid + // Generate new key + let t5 = tracker.clone(); + let create_key = filters::method::post() + .and(filters::path::path("key")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |seconds_valid: u64| { + let tracker = t5.clone(); + (seconds_valid, tracker) + }) + .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { + match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + Ok(auth_key) => Ok(warp::reply::json(&AuthKey::from(auth_key))), + Err(..) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to generate key".into(), + })), + } + }); + + // DELETE /api/key/:key + // Delete key + let t6 = tracker.clone(); + let delete_key = filters::method::delete() + .and(filters::path::path("key")) + .and(filters::path::param()) + .and(filters::path::end()) + .map(move |key: String| { + let tracker = t6.clone(); + (key, tracker) + }) + .and_then(|(key, tracker): (String, Arc)| async move { + match tracker.remove_auth_key(&key).await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to delete key".into(), + })), + } + }); + + // GET /api/whitelist/reload + // Reload whitelist + let t7 = tracker.clone(); + let reload_whitelist = filters::method::get() + .and(filters::path::path("whitelist")) + .and(filters::path::path("reload")) + .and(filters::path::end()) + .map(move || t7.clone()) + .and_then(|tracker: Arc| async move { + match tracker.load_whitelist().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to reload whitelist".into(), + })), + } + }); + + // GET /api/keys/reload + // Reload whitelist + let t8 = tracker.clone(); + let reload_keys = filters::method::get() + .and(filters::path::path("keys")) + 
.and(filters::path::path("reload")) + .and(filters::path::end()) + .map(move || t8.clone()) + .and_then(|tracker: Arc| async move { + match tracker.load_keys().await { + Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), + Err(_) => Err(warp::reject::custom(ActionStatus::Err { + reason: "failed to reload keys".into(), + })), + } + }); + + let api_routes = filters::path::path("api").and( + view_torrent_list + .or(delete_torrent) + .or(view_torrent_info) + .or(view_stats_list) + .or(add_torrent) + .or(create_key) + .or(delete_key) + .or(reload_whitelist) + .or(reload_keys), + ); + + api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())) +} diff --git a/src/api/server.rs b/src/api/server.rs index 5967a8be4..5d6a3cdfd 100644 --- a/src/api/server.rs +++ b/src/api/server.rs @@ -1,327 +1,32 @@ -use std::cmp::min; -use std::collections::{HashMap, HashSet}; use std::net::SocketAddr; use std::sync::Arc; -use std::time::Duration; -use serde::{Deserialize, Serialize}; -use warp::{filters, reply, serve, Filter}; +use warp::serve; -use super::resource::auth_key::AuthKey; -use super::resource::peer; -use super::resource::stats::Stats; -use super::resource::torrent::{ListItem, Torrent}; -use crate::protocol::info_hash::InfoHash; +use super::routes::routes; use crate::tracker; -#[derive(Deserialize, Debug)] -struct TorrentInfoQuery { - offset: Option, - limit: Option, -} - -#[derive(Serialize, Debug)] -#[serde(tag = "status", rename_all = "snake_case")] -enum ActionStatus<'a> { - Ok, - Err { reason: std::borrow::Cow<'a, str> }, -} - -impl warp::reject::Reject for ActionStatus<'static> {} - -fn authenticate(tokens: HashMap) -> impl Filter + Clone { - #[derive(Deserialize)] - struct AuthToken { - token: Option, - } - - let tokens: HashSet = tokens.into_iter().map(|(_, v)| v).collect(); - - let tokens = Arc::new(tokens); - warp::filters::any::any() - .map(move || tokens.clone()) - .and(filters::query::query::()) - .and_then(|tokens: Arc>, token: AuthToken| async 
move { - match token.token { - Some(token) => { - if !tokens.contains(&token) { - return Err(warp::reject::custom(ActionStatus::Err { - reason: "token not valid".into(), - })); - } - - Ok(()) - } - None => Err(warp::reject::custom(ActionStatus::Err { - reason: "unauthorized".into(), - })), - } - }) - .untuple_one() -} - -#[allow(clippy::too_many_lines)] pub fn start(socket_addr: SocketAddr, tracker: &Arc) -> impl warp::Future { - // GET /api/torrents?offset=:u32&limit=:u32 - // View torrent list - let api_torrents = tracker.clone(); - let view_torrent_list = filters::method::get() - .and(filters::path::path("torrents")) - .and(filters::path::end()) - .and(filters::query::query()) - .map(move |limits| { - let tracker = api_torrents.clone(); - (limits, tracker) - }) - .and_then(|(limits, tracker): (TorrentInfoQuery, Arc)| async move { - let offset = limits.offset.unwrap_or(0); - let limit = min(limits.limit.unwrap_or(1000), 4000); - - let db = tracker.get_torrents().await; - let results: Vec<_> = db - .iter() - .map(|(info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - ListItem { - info_hash: info_hash.to_string(), - seeders, - completed, - leechers, - peers: None, - } - }) - .skip(offset as usize) - .take(limit as usize) - .collect(); - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - }); - - // GET /api/stats - // View tracker status - let api_stats = tracker.clone(); - let view_stats_list = filters::method::get() - .and(filters::path::path("stats")) - .and(filters::path::end()) - .map(move || api_stats.clone()) - .and_then(|tracker: Arc| async move { - let mut results = Stats { - torrents: 0, - seeders: 0, - completed: 0, - leechers: 0, - tcp4_connections_handled: 0, - tcp4_announces_handled: 0, - tcp4_scrapes_handled: 0, - tcp6_connections_handled: 0, - tcp6_announces_handled: 0, - tcp6_scrapes_handled: 0, - udp4_connections_handled: 0, - udp4_announces_handled: 0, - udp4_scrapes_handled: 0, - 
udp6_connections_handled: 0, - udp6_announces_handled: 0, - udp6_scrapes_handled: 0, - }; - - let db = tracker.get_torrents().await; - - db.values().for_each(|torrent_entry| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - results.seeders += seeders; - results.completed += completed; - results.leechers += leechers; - results.torrents += 1; - }); - - let stats = tracker.get_stats().await; - - #[allow(clippy::cast_possible_truncation)] - { - results.tcp4_connections_handled = stats.tcp4_connections_handled as u32; - results.tcp4_announces_handled = stats.tcp4_announces_handled as u32; - results.tcp4_scrapes_handled = stats.tcp4_scrapes_handled as u32; - results.tcp6_connections_handled = stats.tcp6_connections_handled as u32; - results.tcp6_announces_handled = stats.tcp6_announces_handled as u32; - results.tcp6_scrapes_handled = stats.tcp6_scrapes_handled as u32; - results.udp4_connections_handled = stats.udp4_connections_handled as u32; - results.udp4_announces_handled = stats.udp4_announces_handled as u32; - results.udp4_scrapes_handled = stats.udp4_scrapes_handled as u32; - results.udp6_connections_handled = stats.udp6_connections_handled as u32; - results.udp6_announces_handled = stats.udp6_announces_handled as u32; - results.udp6_scrapes_handled = stats.udp6_scrapes_handled as u32; - } - - Result::<_, warp::reject::Rejection>::Ok(reply::json(&results)) - }); - - // GET /api/torrent/:info_hash - // View torrent info - let t2 = tracker.clone(); - let view_torrent_info = filters::method::get() - .and(filters::path::path("torrent")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t2.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - let db = tracker.get_torrents().await; - let torrent_entry_option = db.get(&info_hash); - - let torrent_entry = match torrent_entry_option { - Some(torrent_entry) => torrent_entry, - None => { - return 
Result::<_, warp::reject::Rejection>::Ok(reply::json(&"torrent not known")); - } - }; - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - let peers = torrent_entry.get_peers(None); - - let peer_resources = peers.iter().map(|peer| peer::Peer::from(**peer)).collect(); - - Ok(reply::json(&Torrent { - info_hash: info_hash.to_string(), - seeders, - completed, - leechers, - peers: Some(peer_resources), - })) - }); - - // DELETE /api/whitelist/:info_hash - // Delete info hash from whitelist - let t3 = tracker.clone(); - let delete_torrent = filters::method::delete() - .and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t3.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - match tracker.remove_torrent_from_whitelist(&info_hash).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to remove torrent from whitelist".into(), - })), - } - }); - - // POST /api/whitelist/:info_hash - // Add info hash to whitelist - let t4 = tracker.clone(); - let add_torrent = filters::method::post() - .and(filters::path::path("whitelist")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |info_hash: InfoHash| { - let tracker = t4.clone(); - (info_hash, tracker) - }) - .and_then(|(info_hash, tracker): (InfoHash, Arc)| async move { - match tracker.add_torrent_to_whitelist(&info_hash).await { - Ok(..) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(..) 
=> Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to whitelist torrent".into(), - })), - } - }); - - // POST /api/key/:seconds_valid - // Generate new key - let t5 = tracker.clone(); - let create_key = filters::method::post() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |seconds_valid: u64| { - let tracker = t5.clone(); - (seconds_valid, tracker) - }) - .and_then(|(seconds_valid, tracker): (u64, Arc)| async move { - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { - Ok(auth_key) => Ok(warp::reply::json(&AuthKey::from(auth_key))), - Err(..) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to generate key".into(), - })), - } - }); - - // DELETE /api/key/:key - // Delete key - let t6 = tracker.clone(); - let delete_key = filters::method::delete() - .and(filters::path::path("key")) - .and(filters::path::param()) - .and(filters::path::end()) - .map(move |key: String| { - let tracker = t6.clone(); - (key, tracker) - }) - .and_then(|(key, tracker): (String, Arc)| async move { - match tracker.remove_auth_key(&key).await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to delete key".into(), - })), - } - }); + let (_addr, api_server) = serve(routes(tracker)).bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); + }); - // GET /api/whitelist/reload - // Reload whitelist - let t7 = tracker.clone(); - let reload_whitelist = filters::method::get() - .and(filters::path::path("whitelist")) - .and(filters::path::path("reload")) - .and(filters::path::end()) - .map(move || t7.clone()) - .and_then(|tracker: Arc| async move { - match tracker.load_whitelist().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to reload 
whitelist".into(), - })), - } - }); + api_server +} - // GET /api/keys/reload - // Reload whitelist - let t8 = tracker.clone(); - let reload_keys = filters::method::get() - .and(filters::path::path("keys")) - .and(filters::path::path("reload")) - .and(filters::path::end()) - .map(move || t8.clone()) - .and_then(|tracker: Arc| async move { - match tracker.load_keys().await { - Ok(_) => Ok(warp::reply::json(&ActionStatus::Ok)), - Err(_) => Err(warp::reject::custom(ActionStatus::Err { - reason: "failed to reload keys".into(), - })), - } +pub fn start_tls( + socket_addr: SocketAddr, + ssl_cert_path: String, + ssl_key_path: String, + tracker: &Arc, +) -> impl warp::Future { + let (_addr, api_server) = serve(routes(tracker)) + .tls() + .cert_path(ssl_cert_path) + .key_path(ssl_key_path) + .bind_with_graceful_shutdown(socket_addr, async move { + tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); }); - let api_routes = filters::path::path("api").and( - view_torrent_list - .or(delete_torrent) - .or(view_torrent_info) - .or(view_stats_list) - .or(add_torrent) - .or(create_key) - .or(delete_key) - .or(reload_whitelist) - .or(reload_keys), - ); - - let server = api_routes.and(authenticate(tracker.config.http_api.access_tokens.clone())); - - let (_addr, api_server) = serve(server).bind_with_graceful_shutdown(socket_addr, async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - }); - api_server } diff --git a/src/config.rs b/src/config.rs index ba99e0f45..66def17cd 100644 --- a/src/config.rs +++ b/src/config.rs @@ -30,10 +30,16 @@ pub struct HttpTracker { pub ssl_key_path: Option, } +#[serde_as] #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] pub struct HttpApi { pub enabled: bool, pub bind_address: String, + pub ssl_enabled: bool, + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_cert_path: Option, + #[serde_as(as = "NoneAsEmptyString")] + pub ssl_key_path: Option, pub access_tokens: HashMap, } @@ -81,20 
+87,8 @@ impl std::fmt::Display for Error { impl std::error::Error for Error {} -impl Configuration { - #[must_use] - pub fn get_ext_ip(&self) -> Option { - match &self.external_ip { - None => None, - Some(external_ip) => match IpAddr::from_str(external_ip) { - Ok(external_ip) => Some(external_ip), - Err(_) => None, - }, - } - } - - #[must_use] - pub fn default() -> Configuration { +impl Default for Configuration { + fn default() -> Self { let mut configuration = Configuration { log_level: Option::from(String::from("info")), mode: mode::Mode::Public, @@ -114,6 +108,9 @@ impl Configuration { http_api: HttpApi { enabled: true, bind_address: String::from("127.0.0.1:1212"), + ssl_enabled: false, + ssl_cert_path: None, + ssl_key_path: None, access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] .iter() .cloned() @@ -133,6 +130,19 @@ impl Configuration { }); configuration } +} + +impl Configuration { + #[must_use] + pub fn get_ext_ip(&self) -> Option { + match &self.external_ip { + None => None, + Some(external_ip) => match IpAddr::from_str(external_ip) { + Ok(external_ip) => Some(external_ip), + Err(_) => None, + }, + } + } /// # Errors /// @@ -208,6 +218,9 @@ mod tests { [http_api] enabled = true bind_address = "127.0.0.1:1212" + ssl_enabled = false + ssl_cert_path = "" + ssl_key_path = "" [http_api.access_tokens] admin = "MyAccessToken" diff --git a/src/jobs/http_tracker.rs b/src/jobs/http_tracker.rs index b8f031f5a..c62bc5cc9 100644 --- a/src/jobs/http_tracker.rs +++ b/src/jobs/http_tracker.rs @@ -22,10 +22,10 @@ pub fn start_job(config: &HttpTracker, tracker: Arc) -> JoinHa let http_tracker = Http::new(tracker); if !ssl_enabled { - info!("Starting HTTP server on: {}", bind_addr); + info!("Starting HTTP server on: http://{}", bind_addr); http_tracker.start(bind_addr).await; } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting HTTPS server on: {} (TLS)", bind_addr); + info!("Starting HTTPS server on: 
https://{} (TLS)", bind_addr); http_tracker .start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap()) .await; diff --git a/src/jobs/tracker_api.rs b/src/jobs/tracker_api.rs index 2c00aa453..211174f35 100644 --- a/src/jobs/tracker_api.rs +++ b/src/jobs/tracker_api.rs @@ -5,7 +5,7 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use crate::api::server; -use crate::config::Configuration; +use crate::config::HttpApi; use crate::tracker; #[derive(Debug)] @@ -14,24 +14,30 @@ pub struct ApiServerJobStarted(); /// # Panics /// /// It would panic if unable to send the `ApiServerJobStarted` notice. -pub async fn start_job(config: &Configuration, tracker: Arc) -> JoinHandle<()> { +pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { let bind_addr = config - .http_api .bind_address .parse::() .expect("Tracker API bind_address invalid."); - - info!("Starting Torrust API server on: {}", bind_addr); + let ssl_enabled = config.ssl_enabled; + let ssl_cert_path = config.ssl_cert_path.clone(); + let ssl_key_path = config.ssl_key_path.clone(); let (tx, rx) = oneshot::channel::(); // Run the API server let join_handle = tokio::spawn(async move { - let handel = server::start(bind_addr, &tracker); - - tx.send(ApiServerJobStarted()).expect("the start job dropped"); - - handel.await; + if !ssl_enabled { + info!("Starting Torrust API server on: http://{}", bind_addr); + let handle = server::start(bind_addr, &tracker); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); + handle.await; + } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { + info!("Starting Torrust API server on: https://{}", bind_addr); + let handle = server::start_tls(bind_addr, ssl_cert_path.unwrap(), ssl_key_path.unwrap(), &tracker); + tx.send(ApiServerJobStarted()).expect("the start job dropped"); + handle.await; + } }); // Wait until the API server job is running diff --git a/src/jobs/udp_tracker.rs b/src/jobs/udp_tracker.rs index 
57369f660..d0907c976 100644 --- a/src/jobs/udp_tracker.rs +++ b/src/jobs/udp_tracker.rs @@ -14,11 +14,11 @@ pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHan tokio::spawn(async move { match Udp::new(tracker, &bind_addr).await { Ok(udp_server) => { - info!("Starting UDP server on: {}", bind_addr); + info!("Starting UDP server on: udp://{}", bind_addr); udp_server.start().await; } Err(e) => { - warn!("Could not start UDP tracker on: {}", bind_addr); + warn!("Could not start UDP tracker on: udp://{}", bind_addr); error!("{}", e); } } diff --git a/src/setup.rs b/src/setup.rs index a7b7c5a82..c045310bb 100644 --- a/src/setup.rs +++ b/src/setup.rs @@ -49,7 +49,7 @@ pub async fn setup(config: &Configuration, tracker: Arc) -> Ve // Start HTTP API server if config.http_api.enabled { - jobs.push(tracker_api::start_job(config, tracker.clone()).await); + jobs.push(tracker_api::start_job(&config.http_api, tracker.clone()).await); } // Remove torrents without peers, every interval diff --git a/tests/api.rs b/tests/api.rs index 84ddac573..dfb8d81b3 100644 --- a/tests/api.rs +++ b/tests/api.rs @@ -292,7 +292,7 @@ mod tracker_api { logging::setup(&configuration); // Start the HTTP API job - self.job = Some(tracker_api::start_job(&configuration, tracker).await); + self.job = Some(tracker_api::start_job(&configuration.http_api, tracker).await); self.started.store(true, Ordering::Relaxed); } From 19abf0f31df8fcc0c9332f60ae6ab74c181df776 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 11:06:10 +0000 Subject: [PATCH 03/12] fix: error when udp response can't be written Instead of using a "debug" log level. 
--- src/udp/server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/udp/server.rs b/src/udp/server.rs index 5bd835365..a868cbd10 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -3,7 +3,7 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::Response; -use log::{debug, info}; +use log::{debug, error, info}; use tokio::net::UdpSocket; use crate::tracker; @@ -71,7 +71,7 @@ impl Udp { Udp::send_packet(socket, &remote_addr, &inner[..position]).await; } Err(_) => { - debug!("could not write response to bytes."); + error!("could not write response to bytes."); } } } From b1ec9dfc86643be3463a3b4d7b7cd1ed2bf2a4b4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 11:15:50 +0000 Subject: [PATCH 04/12] feat: change udp tracker console output Using "debug" for sensitive data like IP address and info for generic info we can log even on production. --- src/udp/server.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/udp/server.rs b/src/udp/server.rs index a868cbd10..e85c81e9d 100644 --- a/src/udp/server.rs +++ b/src/udp/server.rs @@ -45,10 +45,12 @@ impl Udp { Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { let payload = data[..valid_bytes].to_vec(); - debug!("Received {} bytes from {}", payload.len(), remote_addr); - debug!("{:?}", payload); + info!("Received {} bytes", payload.len()); + debug!("From: {}", &remote_addr); + debug!("Payload: {:?}", payload); let response = handle_packet(remote_addr, payload, tracker).await; + Udp::send_response(socket, remote_addr, response).await; } } @@ -56,8 +58,6 @@ impl Udp { } async fn send_response(socket: Arc, remote_addr: SocketAddr, response: Response) { - debug!("sending response to: {:?}", &remote_addr); - let buffer = vec![0u8; MAX_PACKET_SIZE]; let mut cursor = Cursor::new(buffer); @@ -67,8 +67,13 @@ impl Udp { let position = cursor.position() as usize; let inner = cursor.get_ref(); - debug!("{:?}", 
&inner[..position]); + info!("Sending {} bytes ...", &inner[..position].len()); + debug!("To: {:?}", &remote_addr); + debug!("Payload: {:?}", &inner[..position]); + Udp::send_packet(socket, &remote_addr, &inner[..position]).await; + + info!("{} bytes sent", &inner[..position].len()); } Err(_) => { error!("could not write response to bytes."); From ca0e8afce4a4b5430631020648894215865fe838 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 11:20:13 +0000 Subject: [PATCH 05/12] feat: change default http tracker port to 7070 Azure Container Instances do not allow you to open the same port as UDP and TCP. --- README.md | 2 +- src/config.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index beb2591ea..4e464dd68 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,7 @@ bind_address = "0.0.0.0:6969" [[http_trackers]] enabled = true -bind_address = "0.0.0.0:6969" +bind_address = "0.0.0.0:7070" ssl_enabled = false ssl_cert_path = "" ssl_key_path = "" diff --git a/src/config.rs b/src/config.rs index 66def17cd..d56c2d34d 100644 --- a/src/config.rs +++ b/src/config.rs @@ -123,7 +123,7 @@ impl Default for Configuration { }); configuration.http_trackers.push(HttpTracker { enabled: false, - bind_address: String::from("0.0.0.0:6969"), + bind_address: String::from("0.0.0.0:7070"), ssl_enabled: false, ssl_cert_path: None, ssl_key_path: None, @@ -210,7 +210,7 @@ mod tests { [[http_trackers]] enabled = false - bind_address = "0.0.0.0:6969" + bind_address = "0.0.0.0:7070" ssl_enabled = false ssl_cert_path = "" ssl_key_path = "" From 269e5f5bb085d08f00edb98305313f7b86471719 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 12:00:26 +0000 Subject: [PATCH 06/12] feat: move default db_path to storage folder Azure Container Intances do not allow you to mount a single file. I've created a storage folder where we can put all the things we want to persist. 
--- .gitignore | 2 ++ src/config.rs | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index ba9ceeb53..b80e2038c 100644 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,6 @@ /config.toml /data.db /.vscode/launch.json +/storage/ + diff --git a/src/config.rs b/src/config.rs index d56c2d34d..fdfcb3d09 100644 --- a/src/config.rs +++ b/src/config.rs @@ -93,7 +93,7 @@ impl Default for Configuration { log_level: Option::from(String::from("info")), mode: mode::Mode::Public, db_driver: Driver::Sqlite3, - db_path: String::from("data.db"), + db_path: String::from("./storage/database/data.db"), announce_interval: 120, min_announce_interval: 120, max_peer_timeout: 900, @@ -193,7 +193,7 @@ mod tests { let config = r#"log_level = "info" mode = "public" db_driver = "Sqlite3" - db_path = "data.db" + db_path = "./storage/database/data.db" announce_interval = 120 min_announce_interval = 120 max_peer_timeout = 900 From 3098ed2c59c420167797b3a6f697c697d440c0f2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 13:26:09 +0000 Subject: [PATCH 07/12] feat: remove strip from Cargo.toml The option "strip = true" in the Cargo.toml file prevetns docker to use the cache for the cargo dependencies. ``` [profile.release] ... 
strip = true ``` More info: https://github.com/LukeMathWalker/cargo-chef/issues/172 --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 80e9009f1..6e835bcb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,6 @@ lto = "thin" debug = 1 opt-level = 3 lto = "fat" -strip = true [dependencies] tokio = { version = "1", features = [ From f8700aacaeaf1fb9d0201377aa35414012e298e4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 13:30:59 +0000 Subject: [PATCH 08/12] feat: allow to inject configuration from env var You can use an env var to pass the configuration instead of using the configuration file in the root folder `config.toml` ``` TORRUST_TRACKER_CONFIG=$(cat config.toml) TORRUST_TRACKER_CONFIG=`cat config.toml` cargo run ``` This allow the applciation to be executed in dockerized environments whithout needing to mount a file or volume for the configuration. --- src/config.rs | 26 +++++++++++++++++++++++--- src/main.rs | 15 +++++++++------ 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/src/config.rs b/src/config.rs index fdfcb3d09..48e28b358 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,10 +1,10 @@ use std::collections::HashMap; -use std::fs; use std::net::IpAddr; use std::path::Path; use std::str::FromStr; +use std::{env, fs}; -use config::{Config, ConfigError, File}; +use config::{Config, ConfigError, File, FileFormat}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, NoneAsEmptyString}; use {std, toml}; @@ -164,7 +164,7 @@ impl Configuration { let config = Configuration::default(); config.save_to_file(path)?; return Err(Error::Message( - "Please edit the config.TOML in the root folder and restart the tracker.".to_string(), + "Please edit the config.TOML and restart the tracker.".to_string(), )); } @@ -173,6 +173,26 @@ impl Configuration { Ok(torrust_config) } + /// # Errors + /// + /// Will return `Err` if the environment variable does not exist or has a bad 
configuration. + pub fn load_from_env_var(config_env_var_name: &str) -> Result { + match env::var(config_env_var_name) { + Ok(config_toml) => { + let config_builder = Config::builder() + .add_source(File::from_str(&config_toml, FileFormat::Toml)) + .build() + .map_err(Error::ConfigError)?; + let config = config_builder.try_deserialize().map_err(Error::ConfigError)?; + Ok(config) + } + Err(_) => Err(Error::Message(format!( + "No environment variable for configuration found: {}", + &config_env_var_name + ))), + } + } + /// # Errors /// /// Will return `Err` if `filename` does not exist or the user does not have diff --git a/src/main.rs b/src/main.rs index a7316cef2..199e8f5c5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,3 +1,4 @@ +use std::env; use std::sync::Arc; use log::info; @@ -7,7 +8,8 @@ use torrust_tracker::{ephemeral_instance_keys, logging, setup, static_time, trac #[tokio::main] async fn main() { - const CONFIG_PATH: &str = "config.toml"; + const CONFIG_PATH: &str = "./config.toml"; + const CONFIG_ENV_VAR_NAME: &str = "TORRUST_TRACKER_CONFIG"; // Set the time of Torrust app starting lazy_static::initialize(&static_time::TIME_AT_APP_START); @@ -16,11 +18,12 @@ async fn main() { lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); // Initialize Torrust config - let config = match Configuration::load_from_file(CONFIG_PATH) { - Ok(config) => Arc::new(config), - Err(error) => { - panic!("{}", error) - } + let config = if env::var(CONFIG_ENV_VAR_NAME).is_ok() { + println!("Loading configuration from env var {CONFIG_ENV_VAR_NAME}"); + Arc::new(Configuration::load_from_env_var(CONFIG_ENV_VAR_NAME).unwrap()) + } else { + println!("Loading configuration from config file {CONFIG_PATH}"); + Arc::new(Configuration::load_from_file(CONFIG_PATH).unwrap()) }; // Initialize statistics From 46e1a37ec08d6ebaee294348e1fa64245e7d5046 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 13:34:57 +0000 Subject: [PATCH 09/12] feat: docker support --- 
.dockerignore | 16 ++ .env.local | 1 + .github/workflows/publish_docker_image.yml | 73 ++++++ .github/workflows/test_docker.yml | 26 +++ .gitignore | 1 + Dockerfile | 80 +++++++ bin/install.sh | 13 ++ cSpell.json | 9 + compose.yaml | 48 ++++ config.toml.local | 34 +++ docker/README.md | 250 +++++++++++++++++++++ docker/bin/build.sh | 13 ++ docker/bin/install.sh | 4 + docker/bin/run.sh | 13 ++ 14 files changed, 581 insertions(+) create mode 100644 .dockerignore create mode 100644 .env.local create mode 100644 .github/workflows/publish_docker_image.yml create mode 100644 .github/workflows/test_docker.yml create mode 100644 Dockerfile create mode 100755 bin/install.sh create mode 100644 compose.yaml create mode 100644 config.toml.local create mode 100644 docker/README.md create mode 100755 docker/bin/build.sh create mode 100755 docker/bin/install.sh create mode 100755 docker/bin/run.sh diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..3d8a25cce --- /dev/null +++ b/.dockerignore @@ -0,0 +1,16 @@ +.git +.git-blame-ignore +.github +.gitignore +.vscode +bin/ +config.toml +config.toml.local +cSpell.json +data.db +docker/ +NOTICE +README.md +rustfmt.toml +storage/ +target/ diff --git a/.env.local b/.env.local new file mode 100644 index 000000000..fefed56c4 --- /dev/null +++ b/.env.local @@ -0,0 +1 @@ +TORRUST_TRACKER_USER_UID=1000 \ No newline at end of file diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml new file mode 100644 index 000000000..b8e3791ed --- /dev/null +++ b/.github/workflows/publish_docker_image.yml @@ -0,0 +1,73 @@ +name: Publish docker image + +on: + push: + branches: + - 'develop' + # todo: only during development of issue 11 + - 'docker' + - 'docker-reorganized-pr' + tags: + - "v*" + +env: + # Azure file share volume mount requires the Linux container run as root + # 
https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations + TORRUST_TRACKER_RUN_AS_USER: root + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: stable + components: llvm-tools-preview + - uses: Swatinem/rust-cache@v1 + - name: Run Tests + run: cargo test + + dockerhub: + needs: test + runs-on: ubuntu-latest + environment: dockerhub-torrust + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v4 + with: + images: | + # For example: torrust/tracker + "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build and push + uses: docker/build-push-action@v3 + with: + context: . + file: ./Dockerfile + build-args: | + RUN_AS_USER=${{ env.TORRUST_TRACKER_RUN_AS_USER }} + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/test_docker.yml b/.github/workflows/test_docker.yml new file mode 100644 index 000000000..2cfa4de5c --- /dev/null +++ b/.github/workflows/test_docker.yml @@ -0,0 +1,26 @@ +name: Test docker build + +on: + push: + pull_request: + +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Build docker image + uses: docker/build-push-action@v3 + with: + context: . 
+ file: ./Dockerfile + push: false + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Build docker-compose images + run: docker compose build diff --git a/.gitignore b/.gitignore index b80e2038c..d574298da 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +.env /target **/*.rs.bk /database.json.bz2 diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..96d21fa84 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,80 @@ +FROM clux/muslrust:stable AS chef +WORKDIR /app +RUN cargo install cargo-chef + + +FROM chef AS planner +WORKDIR /app +COPY . . +RUN cargo chef prepare --recipe-path recipe.json + + +FROM chef as development +WORKDIR /app +ARG UID=1000 +ARG RUN_AS_USER=appuser +ARG TRACKER_UDP_PORT=6969 +ARG TRACKER_HTTP_PORT=7070 +ARG TRACKER_API_PORT=1212 +# Add the app user for development +ENV USER=appuser +ENV UID=$UID +RUN adduser --uid "${UID}" "${USER}" +# Build dependencies +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --recipe-path recipe.json +# Build the application +COPY . . +RUN cargo build --bin torrust-tracker +USER $RUN_AS_USER:$RUN_AS_USER +EXPOSE $TRACKER_UDP_PORT/udp +EXPOSE $TRACKER_HTTP_PORT/tcp +EXPOSE $TRACKER_API_PORT/tcp +CMD ["cargo", "run"] + + +FROM chef AS builder +WORKDIR /app +ARG UID=1000 +# Add the app user for production +ENV USER=appuser +ENV UID=$UID +RUN adduser \ + --disabled-password \ + --gecos "" \ + --home "/nonexistent" \ + --shell "/sbin/nologin" \ + --no-create-home \ + --uid "${UID}" \ + "${USER}" +# Build dependencies +COPY --from=planner /app/recipe.json recipe.json +RUN cargo chef cook --release --target x86_64-unknown-linux-musl --recipe-path recipe.json +# Build the application +COPY . . 
+RUN cargo build --release --target x86_64-unknown-linux-musl --bin torrust-tracker +# Strip the binary +# More info: https://github.com/LukeMathWalker/cargo-chef/issues/149 +RUN strip /app/target/x86_64-unknown-linux-musl/release/torrust-tracker + + +FROM alpine:latest +WORKDIR /app +ARG RUN_AS_USER=appuser +ARG TRACKER_UDP_PORT=6969 +ARG TRACKER_HTTP_PORT=7070 +ARG TRACKER_API_PORT=1212 +RUN apk --no-cache add ca-certificates +ENV TZ=Etc/UTC +ENV RUN_AS_USER=$RUN_AS_USER +COPY --from=builder /etc/passwd /etc/passwd +COPY --from=builder /etc/group /etc/group +COPY --from=builder --chown=$RUN_AS_USER \ + /app/target/x86_64-unknown-linux-musl/release/torrust-tracker \ + /app/torrust-tracker +RUN chown -R $RUN_AS_USER:$RUN_AS_USER /app +USER $RUN_AS_USER:$RUN_AS_USER +EXPOSE $TRACKER_UDP_PORT/udp +EXPOSE $TRACKER_HTTP_PORT/tcp +EXPOSE $TRACKER_API_PORT/tcp +ENTRYPOINT ["/app/torrust-tracker"] \ No newline at end of file diff --git a/bin/install.sh b/bin/install.sh new file mode 100755 index 000000000..d4314ce93 --- /dev/null +++ b/bin/install.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# Generate the default settings file if it does not exist +if ! [ -f "./config.toml" ]; then + cp ./config.toml.local ./config.toml +fi + +# Generate the sqlite database if it does not exist +if ! 
[ -f "./storage/database/data.db" ]; then + # todo: it should get the path from config.toml and only do it when we use sqlite + touch ./storage/database/data.db + echo ";" | sqlite3 ./storage/database/data.db +fi diff --git a/cSpell.json b/cSpell.json index cc3359d58..5bc67a0c8 100644 --- a/cSpell.json +++ b/cSpell.json @@ -9,31 +9,38 @@ "Bitflu", "bools", "bufs", + "Buildx", "byteorder", "canonicalize", "canonicalized", "chrono", "clippy", "completei", + "dockerhub", "downloadedi", "filesd", "Freebox", "hasher", "hexlify", + "hlocalhost", "Hydranode", "incompletei", + "infoschema", "intervali", "leecher", "leechers", "libtorrent", "Lphant", "mockall", + "myacicontext", "nanos", "nextest", "nocapture", "oneshot", "ostr", "Pando", + "proot", + "Quickstart", "Rasterbar", "repr", "reqwest", @@ -50,9 +57,11 @@ "thiserror", "Torrentstorm", "torrust", + "torrustracker", "typenum", "Unamed", "untuple", + "uroot", "Vagaa", "Xtorrent", "Xunlei" diff --git a/compose.yaml b/compose.yaml new file mode 100644 index 000000000..d11f9c8ae --- /dev/null +++ b/compose.yaml @@ -0,0 +1,48 @@ +name: torrust +services: + + tracker: + build: + context: . 
+ target: development + user: ${TORRUST_TRACKER_USER_UID:-1000}:${TORRUST_TRACKER_USER_UID:-1000} + tty: true + networks: + - server_side + ports: + - 6969:6969/udp + - 7070:7070 + - 1212:1212 + volumes: + - ./:/app + - ~/.cargo:/home/appuser/.cargo + depends_on: + - mysql + + mysql: + image: mysql:8.0 + command: '--default-authentication-plugin=mysql_native_password' + restart: always + healthcheck: + test: ['CMD-SHELL', 'mysqladmin ping -h 127.0.0.1 --password="$$(cat /run/secrets/db-password)" --silent'] + interval: 3s + retries: 5 + start_period: 30s + environment: + - MYSQL_ROOT_HOST=% + - MYSQL_ROOT_PASSWORD=root_secret_password + - MYSQL_DATABASE=torrust_tracker + - MYSQL_USER=db_user + - MYSQL_PASSWORD=db_user_secret_password + networks: + - server_side + ports: + - 3306:3306 + volumes: + - mysql_data:/var/lib/mysql + +networks: + server_side: {} + +volumes: + mysql_data: {} \ No newline at end of file diff --git a/config.toml.local b/config.toml.local new file mode 100644 index 000000000..baf272d5a --- /dev/null +++ b/config.toml.local @@ -0,0 +1,34 @@ +log_level = "info" +mode = "public" +db_driver = "Sqlite3" +db_path = "./storage/database/data.db" +announce_interval = 120 +min_announce_interval = 120 +max_peer_timeout = 900 +on_reverse_proxy = false +external_ip = "0.0.0.0" +tracker_usage_statistics = true +persistent_torrent_completed_stat = false +inactive_peer_cleanup_interval = 600 +remove_peerless_torrents = true + +[[udp_trackers]] +enabled = false +bind_address = "0.0.0.0:6969" + +[[http_trackers]] +enabled = false +bind_address = "0.0.0.0:7070" +ssl_enabled = false +ssl_cert_path = "" +ssl_key_path = "" + +[http_api] +enabled = true +bind_address = "127.0.0.1:1212" +ssl_enabled = false +ssl_cert_path = "" +ssl_key_path = "" + +[http_api.access_tokens] +admin = "MyAccessToken" diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 000000000..8646b952e --- /dev/null +++ b/docker/README.md @@ -0,0 +1,250 @@ +# Docker + +## 
Requirements + +- Docker version 20.10.21 +- You need to create the `storage` directory with this structure and files: + +```s +$ tree storage/ +storage/ +├── database +│   └── data.db +└── ssl_certificates + ├── localhost.crt + └── localhost.key +``` + +> NOTE: you only need the `ssl_certificates` directory and certificates in case you have enabled SSL for the one HTTP tracker or the API. + +## Dev environment + +### With docker + +Build and run locally: + +```s +docker context use default +export TORRUST_TRACKER_USER_UID=1000 +./docker/bin/build.sh $TORRUST_TRACKER_USER_UID +./bin/install.sh +./docker/bin/run.sh $TORRUST_TRACKER_USER_UID +``` + +Run using the pre-built public docker image: + +```s +export TORRUST_TRACKER_USER_UID=1000 +docker run -it \ + --user="$TORRUST_TRACKER_USER_UID" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume "$(pwd)/storage":"/app/storage" \ + torrust/torrust-tracker +``` + +> NOTES: +> +> - You have to create the SQLite DB (`data.db`) and configuration (`config.toml`) before running the tracker. See `bin/install.sh`. +> - You have to replace the user UID (`1000`) with yours. +> - Remember to switch to your default docker context `docker context use default`. + +### With docker-compose + +The docker-compose configuration includes the MySQL service configuration. If you want to use MySQL instead of SQLite you have to change your `config.toml` configuration: + +```toml +db_driver = "MySQL" +db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" +``` + +If you want to inject an environment variable into docker-compose you can use the file `.env`. There is a template `.env.local`. 
+ +Build and run it locally: + +```s +docker compose up --build +``` + +After running the "up" command you will have two running containers: + +```s +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +06feacb91a9e torrust-tracker "cargo run" 18 minutes ago Up 4 seconds 0.0.0.0:1212->1212/tcp, :::1212->1212/tcp, 0.0.0.0:7070->7070/tcp, :::7070->7070/tcp, 0.0.0.0:6969->6969/udp, :::6969->6969/udp torrust-tracker-1 +34d29e792ee2 mysql:8.0 "docker-entrypoint.s…" 18 minutes ago Up 5 seconds (healthy) 0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp torrust-mysql-1 +``` + +And you should be able to use the application, for example making a request to the API: + + + +You can stop the containers with: + +```s +docker compose down +``` + +Additionally, you can delete all resources (containers, volumes, networks) with: + +```s +docker compose down -v +``` + +### Access Mysql with docker + +These are some useful commands for MySQL. + +Open a shell in the MySQL container using docker or docker-compose. + +```s +docker exec -it torrust-mysql-1 /bin/bash +docker compose exec mysql /bin/bash +``` + +Connect to MySQL from inside the MySQL container or from the host: + +```s +mysql -h127.0.0.1 -uroot -proot_secret_password +``` + +The when MySQL container is started the first time, it creates the database, user, and permissions needed. +If you see the error "Host is not allowed to connect to this MySQL server" you can check that users have the right permissions in the database. Make sure the user `root` and `db_user` can connect from any host (`%`). 
+ +```s +mysql> SELECT host, user FROM mysql.user; ++-----------+------------------+ +| host | user | ++-----------+------------------+ +| % | db_user | +| % | root | +| localhost | mysql.infoschema | +| localhost | mysql.session | +| localhost | mysql.sys | +| localhost | root | ++-----------+------------------+ +6 rows in set (0.00 sec) +``` + +If the database, user or permissions are not created the reason could be the MySQL container volume can be corrupted. Delete it and start again the containers. + +### SSL Certificates + +You can use a certificate for localhost. You can create your [localhost certificate](https://letsencrypt.org/docs/certificates-for-localhost/#making-and-trusting-your-own-certificates) and use it in the `storage` folder and the configuration file (`config.toml`). For example: + +The storage folder must contain your certificates: + +```s +$ tree storage/ +storage/ +├── database +│   └── data.db +└── ssl_certificates + ├── localhost.crt + └── localhost.key +``` + +You have not enabled it in your `config.toml` file: + +```toml +... +[[http_trackers]] +enabled = true +bind_address = "0.0.0.0:7070" +ssl_enabled = true +ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +ssl_key_path = "./storage/ssl_certificates/localhost.key" + +[http_api] +enabled = true +bind_address = "0.0.0.0:1212" +ssl_enabled = true +ssl_cert_path = "./storage/ssl_certificates/localhost.crt" +ssl_key_path = "./storage/ssl_certificates/localhost.key" +... +``` + +> NOTE: you can enable it independently for each HTTP tracker or the API. + +If you enable the SSL certificate for the API, for example, you can load the API with this URL: + + + +## Prod environment + +In this section, you will learn how to deploy the tracker to a single docker container in Azure Container Instances. + +> NOTE: Azure Container Instances is a solution when you want to run an isolated container. 
If you need full container orchestration, including service discovery across multiple containers, automatic scaling, and coordinated application upgrades, we recommend [Kubernetes](https://kubernetes.io/). + +Deploy to Azure Container Instance following [docker documentation](https://docs.docker.com/cloud/aci-integration/). + +You have to create the ACI context and the storage: + +```s +docker context create aci myacicontext +docker context use myacicontext +docker volume create test-volume --storage-account torrustracker +``` + +You need to create all the files needed by the application in the storage dir `storage/database`. + +And finally, you can run the container: + +```s +docker run \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --volume torrustracker/test-volume:/app/storage \ + registry.hub.docker.com/torrust/torrust-tracker:latest +``` + +Detach from container logs when the container starts. By default, the command line stays attached and follows container logs. + +```s +docker run \ + --detach + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \latest + --volume torrustracker/test-volume:/app/storage \ + registry.hub.docker.com/torrust/torrust-tracker:latest +``` + +You should see something like this: + +```s +[+] Running 2/2 + ⠿ Group intelligent-hawking Created 5.0s + ⠿ intelligent-hawking Created 41.7s +2022-12-08T18:39:19.697869300+00:00 [torrust_tracker::logging][INFO] logging initialized. 
+2022-12-08T18:39:19.712651100+00:00 [torrust_tracker::jobs::udp_tracker][INFO] Starting UDP server on: 0.0.0.0:6969 +2022-12-08T18:39:19.712792700+00:00 [torrust_tracker::jobs::tracker_api][INFO] Starting Torrust API server on: 0.0.0.0:1212 +2022-12-08T18:39:19.725124+00:00 [torrust_tracker::jobs::tracker_api][INFO] Torrust API server started +``` + +You can see the container with: + +```s +$ docker ps +CONTAINER ID IMAGE COMMAND STATUS PORTS +intelligent-hawking registry.hub.docker.com/torrust/torrust-tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp +``` + +After a while, you can use the tracker API `http://4.236.213.57:1212/api/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. + +> NOTES: +> +> - [There is no support for mounting a single file](https://docs.docker.com/cloud/aci-container-features/#persistent-volumes), or mounting a subfolder from an `Azure File Share`. +> - [ACI does not allow port mapping](https://docs.docker.com/cloud/aci-integration/#exposing-ports). +> - [Azure file share volume mount requires the Linux container run as root](https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations). +> - It can take some minutes until the public IP for the ACI container is available. +> - You can use the Azure web UI to download files from the storage. For example, the SQLite database. +> - [It seems you can only expose web interfaces on port 80 on Azure Container Instances](https://stackoverflow.com/a/56768087/3012842). Not official documentation! + +## Links + +- [Deploying Docker containers on Azure](https://docs.docker.com/cloud/aci-integration/). +- [Docker run options for ACI containers](https://docs.docker.com/cloud/aci-container-features/). 
+- [Quickstart: Deploy a container instance in Azure using the Docker CLI](https://learn.microsoft.com/en-us/azure/container-instances/quickstart-docker-cli). diff --git a/docker/bin/build.sh b/docker/bin/build.sh new file mode 100755 index 000000000..d77d1ad34 --- /dev/null +++ b/docker/bin/build.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} +TORRUST_TRACKER_RUN_AS_USER=${TORRUST_TRACKER_RUN_AS_USER:-appuser} + +echo "Building docker image ..." +echo "TORRUST_TRACKER_USER_UID: $TORRUST_TRACKER_USER_UID" +echo "TORRUST_TRACKER_RUN_AS_USER: $TORRUST_TRACKER_RUN_AS_USER" + +docker build \ + --build-arg UID="$TORRUST_TRACKER_USER_UID" \ + --build-arg RUN_AS_USER="$TORRUST_TRACKER_RUN_AS_USER" \ + -t torrust-tracker . diff --git a/docker/bin/install.sh b/docker/bin/install.sh new file mode 100755 index 000000000..a58969378 --- /dev/null +++ b/docker/bin/install.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +./docker/bin/build.sh +./bin/install.sh diff --git a/docker/bin/run.sh b/docker/bin/run.sh new file mode 100755 index 000000000..86465baeb --- /dev/null +++ b/docker/bin/run.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +TORRUST_TRACKER_USER_UID=${TORRUST_TRACKER_USER_UID:-1000} +TORRUST_TRACKER_CONFIG=$(cat config.toml) + +docker run -it \ + --user="$TORRUST_TRACKER_USER_UID" \ + --publish 6969:6969/udp \ + --publish 7070:7070/tcp \ + --publish 1212:1212/tcp \ + --env TORRUST_TRACKER_CONFIG="$TORRUST_TRACKER_CONFIG" \ + --volume "$(pwd)/storage":"/app/storage" \ + torrust-tracker From 032f6a63af2c7ad95f1426e7fdba409569170b89 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 15:58:06 +0000 Subject: [PATCH 10/12] fix: docker repo name in README --- docker/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/README.md b/docker/README.md index 8646b952e..e5b4dfe74 100644 --- a/docker/README.md +++ b/docker/README.md @@ -41,7 +41,7 @@ docker run -it \ --publish 7070:7070/tcp \ --publish 
1212:1212/tcp \ --volume "$(pwd)/storage":"/app/storage" \ - torrust/torrust-tracker + torrust/tracker ``` > NOTES: @@ -197,7 +197,7 @@ docker run \ --publish 7070:7070/tcp \ --publish 1212:1212/tcp \ --volume torrustracker/test-volume:/app/storage \ - registry.hub.docker.com/torrust/torrust-tracker:latest + registry.hub.docker.com/torrust/tracker:latest ``` Detach from container logs when the container starts. By default, the command line stays attached and follows container logs. @@ -209,7 +209,7 @@ docker run \ --publish 7070:7070/tcp \ --publish 1212:1212/tcp \latest --volume torrustracker/test-volume:/app/storage \ - registry.hub.docker.com/torrust/torrust-tracker:latest + registry.hub.docker.com/torrust/tracker:latest ``` You should see something like this: @@ -229,7 +229,7 @@ You can see the container with: ```s $ docker ps CONTAINER ID IMAGE COMMAND STATUS PORTS -intelligent-hawking registry.hub.docker.com/torrust/torrust-tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp +intelligent-hawking registry.hub.docker.com/torrust/tracker:latest Running 4.236.213.57:6969->6969/udp, 4.236.213.57:1212->1212/tcp ``` After a while, you can use the tracker API `http://4.236.213.57:1212/api/stats?token=MyAccessToken` and the UDP tracker with your BitTorrent client using this tracker announce URL `udp://4.236.213.57:6969`. 
From 171a37d3c48cd365b987eaf280b73a5f35855e20 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 Dec 2022 15:58:53 +0000 Subject: [PATCH 11/12] feat: publish docker image for tags, develop aand main branches --- .github/workflows/publish_docker_image.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index b8e3791ed..c6a103931 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -3,10 +3,8 @@ name: Publish docker image on: push: branches: + - 'main' - 'develop' - # todo: only during development of issue 11 - - 'docker' - - 'docker-reorganized-pr' tags: - "v*" From 6851ec5fc1adf206c0baaa20d6201e119c85a4af Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 Dec 2022 11:52:20 +0000 Subject: [PATCH 12/12] fix: docker image run as non root The published docker image runs by deafult as non root user. Some services like ACI could require run the container as root but for those cases they can run their customs builds or change the user while launching the container. 
--- .github/workflows/publish_docker_image.yml | 7 ++++--- cSpell.json | 1 + 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/publish_docker_image.yml b/.github/workflows/publish_docker_image.yml index c6a103931..7593fb680 100644 --- a/.github/workflows/publish_docker_image.yml +++ b/.github/workflows/publish_docker_image.yml @@ -3,15 +3,16 @@ name: Publish docker image on: push: branches: - - 'main' - - 'develop' + - "main" + - "develop" tags: - "v*" env: # Azure file share volume mount requires the Linux container run as root # https://learn.microsoft.com/en-us/azure/container-instances/container-instances-volume-azure-files#limitations - TORRUST_TRACKER_RUN_AS_USER: root + # TORRUST_TRACKER_RUN_AS_USER: root + TORRUST_TRACKER_RUN_AS_USER: appuser jobs: test: diff --git a/cSpell.json b/cSpell.json index 5bc67a0c8..57b9f3b67 100644 --- a/cSpell.json +++ b/cSpell.json @@ -1,5 +1,6 @@ { "words": [ + "appuser", "AUTOINCREMENT", "automock", "Avicora",