Throw IllegalArgumentException when Page is invalid

wb9688 2020-05-11 15:25:18 +02:00
parent 4cc312086a
commit 9b6fe1dea6
19 changed files with 107 additions and 18 deletions
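Every paginating getPage(Page) implementation touched below now starts with the same guard: a null Page, or one without a URL (the SoundCloud playlist extractor checks page.getIds() instead), is rejected with an IllegalArgumentException before any request is made. The non-paginating YouTube feed and trending kiosk extractors return InfoItemsPage.emptyPage() instead of null. A minimal sketch of the shared guard, lifted from the diffs below; this is not a complete extractor, and the service-specific fetch and parse code that follows the guard is elided:

import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;

@Override
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
    // Fail fast: a Page without a URL is a caller bug, not an empty result.
    if (page == null || isNullOrEmpty(page.getUrl())) {
        throw new IllegalArgumentException("Page doesn't contain an URL");
    }
    // ... service-specific request against page.getUrl() and item collection follow ...
}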

View File

@@ -80,7 +80,6 @@ public class MediaCCCConferenceExtractor extends ChannelExtractor {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(final Page page) {
return InfoItemsPage.emptyPage();
}

View File

@@ -24,6 +24,7 @@ import java.io.IOException;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
public class PeertubeAccountExtractor extends ChannelExtractor {
private JsonObject json;
@@ -110,7 +111,12 @@ public class PeertubeAccountExtractor extends ChannelExtractor {
@Override
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {

View File

@@ -24,6 +24,8 @@ import java.io.IOException;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
public class PeertubeChannelExtractor extends ChannelExtractor {
private JsonObject json;
@@ -116,7 +118,12 @@ public class PeertubeChannelExtractor extends ChannelExtractor {
@Override
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {

View File

@@ -23,6 +23,7 @@ import java.io.IOException;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
public class PeertubeCommentsExtractor extends CommentsExtractor {
public PeertubeCommentsExtractor(final StreamingService service, final ListLinkHandler uiHandler) {
@@ -51,7 +52,12 @@ public class PeertubeCommentsExtractor extends CommentsExtractor {
@Override
public InfoItemsPage<CommentsInfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {

View File

@@ -26,6 +26,7 @@ import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
public class PeertubePlaylistExtractor extends PlaylistExtractor {
private JsonObject playlistInfo;
@@ -108,7 +109,12 @@ public class PeertubePlaylistExtractor extends PlaylistExtractor {
@Override
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {

View File

@@ -25,6 +25,7 @@ import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
public class PeertubeSearchExtractor extends SearchExtractor {
public PeertubeSearchExtractor(StreamingService service, SearchQueryHandler linkHandler) {
@@ -68,7 +69,12 @@ public class PeertubeSearchExtractor extends SearchExtractor {
@Override
public InfoItemsPage<InfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {

View File

@@ -20,9 +20,12 @@ import org.schabi.newpipe.extractor.utils.Utils;
import java.io.IOException;
import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.COUNT_KEY;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.ITEMS_PER_PAGE;
import static org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper.START_KEY;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
public class PeertubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
public PeertubeTrendingExtractor(final StreamingService streamingService, final ListLinkHandler linkHandler, final String kioskId) {
@@ -60,7 +63,12 @@ public class PeertubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
@Override
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
final Response response = getDownloader().get(page.getUrl());
JsonObject json = null;
if (response != null && !Utils.isBlank(response.responseBody())) {
try {
@@ -84,5 +92,5 @@ public class PeertubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
}
@Override
public void onFetchPage(Downloader downloader) throws IOException, ExtractionException { }
public void onFetchPage(@Nonnull final Downloader downloader) throws IOException, ExtractionException { }
}

View File

@@ -1,9 +1,9 @@
package org.schabi.newpipe.extractor.services.soundcloud.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
@@ -15,9 +15,10 @@ import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudParsingHelper;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull;
import java.io.IOException;
import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.utils.JsonUtils.EMPTY_STRING;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
@@ -118,6 +119,10 @@ public class SoundcloudChannelExtractor extends ChannelExtractor {
@Override
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApiMinItems(15, collector, page.getUrl());

View File

@@ -10,9 +10,10 @@ import org.schabi.newpipe.extractor.services.soundcloud.SoundcloudParsingHelper;
import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import javax.annotation.Nonnull;
import java.io.IOException;
import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.ServiceList.SoundCloud;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
@@ -34,7 +35,11 @@ public class SoundcloudChartsExtractor extends KioskExtractor<StreamInfoItem> {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(Page page) throws IOException, ExtractionException {
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
String nextPageUrl = SoundcloudParsingHelper.getStreamsFromApi(collector, page.getUrl(), true);

View File

@@ -21,6 +21,8 @@ import java.io.IOException;
import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
public class SoundcloudCommentsExtractor extends CommentsExtractor {
public SoundcloudCommentsExtractor(final StreamingService service, final ListLinkHandler uiHandler) {
super(service, uiHandler);
@@ -48,6 +50,10 @@ public class SoundcloudCommentsExtractor extends CommentsExtractor {
@Override
public InfoItemsPage<CommentsInfoItem> getPage(final Page page) throws ExtractionException, IOException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
final Downloader downloader = NewPipe.getDownloader();
final Response response = downloader.get(page.getUrl());

View File

@@ -155,8 +155,11 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(Page page) throws IOException, ExtractionException {
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getIds())) {
throw new IllegalArgumentException("Page doesn't contain IDs");
}
final List<String> currentIds;
final List<String> nextIds;
if (page.getIds().size() <= STREAMS_PER_REQUESTED_PAGE) {

View File

@@ -4,6 +4,7 @@ import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import com.grack.nanojson.JsonParser;
import com.grack.nanojson.JsonParserException;
import org.schabi.newpipe.extractor.InfoItem;
import org.schabi.newpipe.extractor.InfoItemExtractor;
import org.schabi.newpipe.extractor.InfoItemsCollector;
@@ -17,14 +18,16 @@ import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.utils.Parser;
import javax.annotation.Nonnull;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.MalformedURLException;
import java.net.URL;
import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.services.soundcloud.linkHandler.SoundcloudSearchQueryHandlerFactory.ITEMS_PER_PAGE;
import static org.schabi.newpipe.extractor.utils.JsonUtils.EMPTY_STRING;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
public class SoundcloudSearchExtractor extends SearchExtractor {
private JsonArray searchCollection;
@@ -51,7 +54,11 @@ public class SoundcloudSearchExtractor extends SearchExtractor {
}
@Override
public InfoItemsPage<InfoItem> getPage(Page page) throws IOException, ExtractionException {
public InfoItemsPage<InfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
final Downloader dl = getDownloader();
try {
final String response = dl.get(page.getUrl(), getExtractorLocalization()).responseBody();

View File

@@ -2,6 +2,7 @@ package org.schabi.newpipe.extractor.services.youtube.extractors;
import com.grack.nanojson.JsonArray;
import com.grack.nanojson.JsonObject;
import org.schabi.newpipe.extractor.Page;
import org.schabi.newpipe.extractor.StreamingService;
import org.schabi.newpipe.extractor.channel.ChannelExtractor;
@@ -17,10 +18,13 @@ import org.schabi.newpipe.extractor.stream.StreamInfoItem;
import org.schabi.newpipe.extractor.stream.StreamInfoItemsCollector;
import org.schabi.newpipe.extractor.utils.Utils;
import javax.annotation.Nonnull;
import java.io.IOException;
import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.*;
import javax.annotation.Nonnull;
import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.fixThumbnailUrl;
import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getJsonResponse;
import static org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper.getTextFromObject;
import static org.schabi.newpipe.extractor.utils.JsonUtils.EMPTY_STRING;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
@@ -241,7 +245,11 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(Page page) throws IOException, ExtractionException {
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
// Unfortunately, we have to fetch the page even if we are only getting next streams,
// as they don't deliver enough information on their own (the channel name, for example).
fetchPage();

View File

@@ -30,6 +30,7 @@ import java.util.regex.Pattern;
import javax.annotation.Nonnull;
import static java.util.Collections.singletonList;
import static org.schabi.newpipe.extractor.utils.Utils.isNullOrEmpty;
public class YoutubeCommentsExtractor extends CommentsExtractor {
// using the mobile site for comments because it loads faster and uses get requests instead of post
@@ -83,7 +84,11 @@ public class YoutubeCommentsExtractor extends CommentsExtractor {
}
@Override
public InfoItemsPage<CommentsInfoItem> getPage(Page page) throws IOException, ExtractionException {
public InfoItemsPage<CommentsInfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
String ajaxResponse = makeAjaxRequest(page.getUrl());
JsonObject ajaxJson;
try {

View File

@@ -68,7 +68,7 @@ public class YoutubeFeedExtractor extends FeedExtractor {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(Page page) {
return null;
public InfoItemsPage<StreamInfoItem> getPage(final Page page) {
return InfoItemsPage.emptyPage();
}
}

View File

@@ -187,6 +187,10 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor {
@Override
public InfoItemsPage<InfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
final String[] youtubeMusicKeys = YoutubeParsingHelper.getYoutubeMusicKeys();

View File

@@ -200,7 +200,11 @@ public class YoutubePlaylistExtractor extends PlaylistExtractor {
@Override
public InfoItemsPage<StreamInfoItem> getPage(final Page page) throws IOException, ExtractionException {
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
final StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
final JsonArray ajaxJson = getJsonResponse(page.getUrl(), getExtractorLocalization());
final JsonObject sectionListContinuation = ajaxJson.getObject(1).getObject("response")

View File

@@ -117,6 +117,10 @@ public class YoutubeSearchExtractor extends SearchExtractor {
@Override
public InfoItemsPage<InfoItem> getPage(final Page page) throws IOException, ExtractionException {
if (page == null || isNullOrEmpty(page.getUrl())) {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
final JsonArray ajaxJson = getJsonResponse(page.getUrl(), getExtractorLocalization());

View File

@@ -62,8 +62,8 @@ public class YoutubeTrendingExtractor extends KioskExtractor<StreamInfoItem> {
}
@Override
public InfoItemsPage<StreamInfoItem> getPage(Page page) {
return null;
public InfoItemsPage<StreamInfoItem> getPage(final Page page) {
return InfoItemsPage.emptyPage();
}
@Nonnull