// NewPipeExtractor/extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/SoundcloudParsingHelper.java
package org.schabi.newpipe.extractor.services.soundcloud;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import org.schabi.newpipe.extractor.Downloader;
import org.schabi.newpipe.extractor.NewPipe;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemsCollector;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.utils.Parser;
import org.schabi.newpipe.extractor.utils.Parser.RegexException;

import javax.annotation.Nonnull;

import java.io.IOException;
import java.net.URLEncoder;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;

import static org.schabi.newpipe.extractor.utils.Utils.replaceHttpWithHttps;
public class SoundcloudParsingHelper {
2017-08-06 22:20:15 +02:00
private static String clientId;
private SoundcloudParsingHelper() {
}
2017-08-06 22:20:15 +02:00
public static String clientId() throws ReCaptchaException, IOException, RegexException {
if (clientId != null && !clientId.isEmpty()) return clientId;
Downloader dl = NewPipe.getDownloader();
String response = dl.download("https://soundcloud.com");
Document doc = Jsoup.parse(response);
2017-08-06 22:20:15 +02:00
// TODO: Find a less heavy way to get the client_id
// Currently we are downloading a 1MB file (!) just to get the client_id,
// youtube-dl don't have a way too, they are just hardcoding and updating it when it becomes invalid.
// The embed mode has a way to get it, but we still have to download a heavy file (~800KB).
Element jsElement = doc.select("script[src^=https://a-v2.sndcdn.com/assets/app]").first();
String js = dl.download(jsElement.attr("src"));
return clientId = Parser.matchGroup1(",client_id:\"(.*?)\"", js);
}
2017-08-06 22:20:15 +02:00
public static String toDateString(String time) throws ParsingException {
try {
2017-08-06 22:20:15 +02:00
Date date;
// Have two date formats, one for the 'api.soundc...' and the other 'api-v2.soundc...'.
try {
date = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'").parse(time);
} catch (Exception e) {
date = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss +0000").parse(time);
}
2017-08-06 22:20:15 +02:00
SimpleDateFormat newDateFormat = new SimpleDateFormat("yyyy-MM-dd");
return newDateFormat.format(date);
} catch (ParseException e) {
throw new ParsingException(e.getMessage(), e);
}
}
2017-08-06 22:20:15 +02:00
/**
* Call the endpoint "/resolve" of the api.<p>
*
2017-08-06 22:20:15 +02:00
* See https://developers.soundcloud.com/docs/api/reference#resolve
*/
2017-11-28 13:37:01 +01:00
public static JsonObject resolveFor(Downloader downloader, String url) throws IOException, ReCaptchaException, ParsingException {
2017-08-06 22:20:15 +02:00
String apiUrl = "https://api.soundcloud.com/resolve"
+ "?url=" + URLEncoder.encode(url, "UTF-8")
+ "&client_id=" + clientId();
try {
2017-11-28 13:37:01 +01:00
return JsonParser.object().from(downloader.download(apiUrl));
} catch (JsonParserException e) {
throw new ParsingException("Could not parse json response", e);
}
2017-08-06 22:20:15 +02:00
}
2017-08-06 22:20:15 +02:00
/**
* Fetch the embed player with the apiUrl and return the canonical url (like the permalink_url from the json api).
2017-08-06 22:20:15 +02:00
*
* @return the url resolved
*/
public static String resolveUrlWithEmbedPlayer(String apiUrl) throws IOException, ReCaptchaException, ParsingException {
String response = NewPipe.getDownloader().download("https://w.soundcloud.com/player/?url="
+ URLEncoder.encode(apiUrl, "UTF-8"));
return Jsoup.parse(response).select("link[rel=\"canonical\"]").first().attr("abs:href");
}
/**
* Fetch the embed player with the url and return the id (like the id from the json api).
2017-08-06 22:20:15 +02:00
*
* @return the resolved id
2017-08-06 22:20:15 +02:00
*/
public static String resolveIdWithEmbedPlayer(String url) throws IOException, ReCaptchaException, ParsingException {
String response = NewPipe.getDownloader().download("https://w.soundcloud.com/player/?url="
+ URLEncoder.encode(url, "UTF-8"));
return Parser.matchGroup1(",\"id\":(.*?),", response);
}
/**
* Fetch the users from the given api and commit each of them to the collector.
* <p>
2018-02-24 22:20:50 +01:00
* This differ from {@link #getUsersFromApi(ChannelInfoItemsCollector, String)} in the sense that they will always
* get MIN_ITEMS or more.
*
* @param minItems the method will return only when it have extracted that many items (equal or more)
*/
2018-02-24 22:20:50 +01:00
public static String getUsersFromApiMinItems(int minItems, ChannelInfoItemsCollector collector, String apiUrl) throws IOException, ReCaptchaException, ParsingException {
String nextPageUrl = SoundcloudParsingHelper.getUsersFromApi(collector, apiUrl);
while (!nextPageUrl.isEmpty() && collector.getItems().size() < minItems) {
2018-02-24 22:20:50 +01:00
nextPageUrl = SoundcloudParsingHelper.getUsersFromApi(collector, nextPageUrl);
}
2018-02-24 22:20:50 +01:00
return nextPageUrl;
}
/**
* Fetch the user items from the given api and commit each of them to the collector.
*
* @return the next streams url, empty if don't have
*/
2018-02-24 22:20:50 +01:00
public static String getUsersFromApi(ChannelInfoItemsCollector collector, String apiUrl) throws IOException, ReCaptchaException, ParsingException {
String response = NewPipe.getDownloader().download(apiUrl);
JsonObject responseObject;
try {
responseObject = JsonParser.object().from(response);
} catch (JsonParserException e) {
throw new ParsingException("Could not parse json response", e);
}
JsonArray responseCollection = responseObject.getArray("collection");
for (Object o : responseCollection) {
if (o instanceof JsonObject) {
JsonObject object = (JsonObject) o;
collector.commit(new SoundcloudChannelInfoItemExtractor(object));
}
}
2018-02-24 22:20:50 +01:00
String nextPageUrl;
try {
2018-02-24 22:20:50 +01:00
nextPageUrl = responseObject.getString("next_href");
if (!nextPageUrl.contains("client_id=")) nextPageUrl += "&client_id=" + SoundcloudParsingHelper.clientId();
} catch (Exception ignored) {
2018-02-24 22:20:50 +01:00
nextPageUrl = "";
}
2018-02-24 22:20:50 +01:00
return nextPageUrl;
}
2017-08-06 22:20:15 +02:00
/**
* Fetch the streams from the given api and commit each of them to the collector.
* <p>
2018-02-24 22:20:50 +01:00
* This differ from {@link #getStreamsFromApi(StreamInfoItemsCollector, String)} in the sense that they will always
2017-08-06 22:20:15 +02:00
* get MIN_ITEMS or more items.
*
* @param minItems the method will return only when it have extracted that many items (equal or more)
*/
2018-02-24 22:20:50 +01:00
public static String getStreamsFromApiMinItems(int minItems, StreamInfoItemsCollector collector, String apiUrl) throws IOException, ReCaptchaException, ParsingException {
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, apiUrl);
2017-08-06 22:20:15 +02:00
while (!nextPageUrl.isEmpty() && collector.getItems().size() < minItems) {
2018-02-24 22:20:50 +01:00
nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, nextPageUrl);
}
2017-08-06 22:20:15 +02:00
2018-02-24 22:20:50 +01:00
return nextPageUrl;
}
2017-08-06 22:20:15 +02:00
/**
* Fetch the streams from the given api and commit each of them to the collector.
*
* @return the next streams url, empty if don't have
*/
2018-02-24 22:20:50 +01:00
public static String getStreamsFromApi(StreamInfoItemsCollector collector, String apiUrl, boolean charts) throws IOException, ReCaptchaException, ParsingException {
2017-08-06 22:20:15 +02:00
String response = NewPipe.getDownloader().download(apiUrl);
JsonObject responseObject;
try {
responseObject = JsonParser.object().from(response);
} catch (JsonParserException e) {
throw new ParsingException("Could not parse json response", e);
}
2017-08-06 22:20:15 +02:00
JsonArray responseCollection = responseObject.getArray("collection");
for (Object o : responseCollection) {
if (o instanceof JsonObject) {
JsonObject object = (JsonObject) o;
collector.commit(new SoundcloudStreamInfoItemExtractor(charts ? object.getObject("track") : object));
}
2017-08-06 22:20:15 +02:00
}
2018-02-24 22:20:50 +01:00
String nextPageUrl;
try {
2018-02-24 22:20:50 +01:00
nextPageUrl = responseObject.getString("next_href");
if (!nextPageUrl.contains("client_id=")) nextPageUrl += "&client_id=" + SoundcloudParsingHelper.clientId();
2017-08-06 22:20:15 +02:00
} catch (Exception ignored) {
2018-02-24 22:20:50 +01:00
nextPageUrl = "";
}
2018-02-24 22:20:50 +01:00
return nextPageUrl;
2017-08-06 22:20:15 +02:00
}
2017-08-20 10:03:41 +02:00
2018-02-24 22:20:50 +01:00
public static String getStreamsFromApi(StreamInfoItemsCollector collector, String apiUrl) throws ReCaptchaException, ParsingException, IOException {
2017-08-20 10:03:41 +02:00
return getStreamsFromApi(collector, apiUrl, false);
}
@Nonnull
static String getUploaderUrl(JsonObject object) {
String url = object.getObject("user").getString("permalink_url", "");
return replaceHttpWithHttps(url);
}
@Nonnull
static String getAvatarUrl(JsonObject object) {
String url = object.getObject("user", new JsonObject()).getString("avatar_url", "");
return replaceHttpWithHttps(url);
}
public static String getUploaderName(JsonObject object) {
return object.getObject("user").getString("username", "");
}
}