[Mix] Create MultiInfoItemsCollector

It is a collector that can handle many extractor types, to be used when a list contains items of different types (e.g. search). It was renamed from InfoItemsSearchCollector so that it can now be used not just for search but for any extractor that needs it. It supports streams, channels, playlists and *mixes*.
Stypox 2022-02-02 19:18:20 +01:00
parent 53673d64c6
commit 638da1756c
7 changed files with 23 additions and 25 deletions
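
For context, a minimal sketch of how a service's search extractor might use the renamed collector after this change. The class name, helper methods and the missing next page are illustrative placeholders, not code from this commit; only MultiInfoItemsCollector, commit(), getServiceId() and InfoItemsPage come from the extractor API touched here.

// A hypothetical service's search extractor using the renamed collector
// (class and helper names are illustrative, not part of this commit).
public class SomeServiceSearchExtractor extends SearchExtractor {
    // ... constructor and other required overrides omitted ...

    @Nonnull
    @Override
    public InfoItemsPage<InfoItem> getInitialPage() throws IOException, ExtractionException {
        final MultiInfoItemsCollector collector = new MultiInfoItemsCollector(getServiceId());
        // commit() accepts stream, channel and playlist item extractors alike
        collector.commit(parseStreamResult());    // a StreamInfoItemExtractor
        collector.commit(parseChannelResult());   // a ChannelInfoItemExtractor
        collector.commit(parsePlaylistResult());  // a PlaylistInfoItemExtractor
        return new InfoItemsPage<>(collector, null); // no next page in this sketch
    }
}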

View File

@@ -1,8 +1,5 @@
-package org.schabi.newpipe.extractor.search;
+package org.schabi.newpipe.extractor;
-import org.schabi.newpipe.extractor.InfoItem;
-import org.schabi.newpipe.extractor.InfoItemExtractor;
-import org.schabi.newpipe.extractor.InfoItemsCollector;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemExtractor;
import org.schabi.newpipe.extractor.channel.ChannelInfoItemsCollector;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
@@ -36,7 +33,8 @@ import java.util.List;
*/
/**
-* Collector for search results
+* A collector that can handle many extractor types, to be used when a list contains items of
+* different types (e.g. search)
* <p>
* This collector can handle the following extractor types:
* <ul>
@@ -44,15 +42,15 @@ import java.util.List;
* <li>{@link ChannelInfoItemExtractor}</li>
* <li>{@link PlaylistInfoItemExtractor}</li>
* </ul>
-* Calling {@link #extract(InfoItemExtractor)} or {@link #commit(Object)} with any
+* Calling {@link #extract(InfoItemExtractor)} or {@link #commit(InfoItemExtractor)} with any
* other extractor type will raise an exception.
*/
-public class InfoItemsSearchCollector extends InfoItemsCollector<InfoItem, InfoItemExtractor> {
+public class MultiInfoItemsCollector extends InfoItemsCollector<InfoItem, InfoItemExtractor> {
private final StreamInfoItemsCollector streamCollector;
private final ChannelInfoItemsCollector userCollector;
private final PlaylistInfoItemsCollector playlistCollector;
-public InfoItemsSearchCollector(int serviceId) {
+public MultiInfoItemsCollector(int serviceId) {
super(serviceId);
streamCollector = new StreamInfoItemsCollector(serviceId);
userCollector = new ChannelInfoItemsCollector(serviceId);
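
The hunk above is truncated before the method bodies. Conceptually, the overridden extract() forwards each item extractor to the matching specialized collector shown in the fields above; the following simplified sketch illustrates that dispatch and is an assumption for illustration, not the exact method body from this commit.

@Override
public InfoItem extract(final InfoItemExtractor extractor) throws ParsingException {
    // Forward each item extractor to the matching specialized collector
    if (extractor instanceof StreamInfoItemExtractor) {
        return streamCollector.extract((StreamInfoItemExtractor) extractor);
    } else if (extractor instanceof ChannelInfoItemExtractor) {
        return userCollector.extract((ChannelInfoItemExtractor) extractor);
    } else if (extractor instanceof PlaylistInfoItemExtractor) {
        return playlistCollector.extract((PlaylistInfoItemExtractor) extractor);
    } else {
        // Any other extractor type is rejected, as documented in the Javadoc
        throw new IllegalArgumentException("Invalid extractor type: " + extractor);
    }
}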

View File

@@ -15,7 +15,7 @@ import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
-import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector;
+import org.schabi.newpipe.extractor.MultiInfoItemsCollector;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.services.bandcamp.extractors.streaminfoitem.BandcampSearchStreamInfoItemExtractor;
@@ -50,7 +50,7 @@ public class BandcampSearchExtractor extends SearchExtractor {
public InfoItemsPage<InfoItem> getPage(final Page page) throws IOException, ExtractionException {
final String html = getDownloader().get(page.getUrl()).responseBody();
-final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
+final MultiInfoItemsCollector collector = new MultiInfoItemsCollector(getServiceId());
final Document d = Jsoup.parse(html);

View File

@@ -15,7 +15,7 @@ import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
-import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector;
+import org.schabi.newpipe.extractor.MultiInfoItemsCollector;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.services.media_ccc.extractors.infoItems.MediaCCCStreamInfoItemExtractor;
import org.schabi.newpipe.extractor.services.media_ccc.linkHandler.MediaCCCConferencesListLinkHandlerFactory;
@@ -66,7 +66,7 @@ public class MediaCCCSearchExtractor extends SearchExtractor {
@Nonnull
@Override
public InfoItemsPage<InfoItem> getInitialPage() {
-final InfoItemsSearchCollector searchItems = new InfoItemsSearchCollector(getServiceId());
+final MultiInfoItemsCollector searchItems = new MultiInfoItemsCollector(getServiceId());
if (getLinkHandler().getContentFilters().contains(CONFERENCES)
|| getLinkHandler().getContentFilters().contains(ALL)
@@ -122,7 +122,7 @@ public class MediaCCCSearchExtractor extends SearchExtractor {
private void searchConferences(final String searchString,
final List<ChannelInfoItem> channelItems,
-final InfoItemsSearchCollector collector) {
+final MultiInfoItemsCollector collector) {
for (final ChannelInfoItem item : channelItems) {
if (item.getName().toUpperCase().contains(
searchString.toUpperCase())) {

View File

@@ -12,7 +12,7 @@ import org.schabi.newpipe.extractor.downloader.Response;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
-import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector;
+import org.schabi.newpipe.extractor.MultiInfoItemsCollector;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.services.peertube.PeertubeParsingHelper;
import org.schabi.newpipe.extractor.utils.Utils;
@@ -87,7 +87,7 @@ public class PeertubeSearchExtractor extends SearchExtractor {
PeertubeParsingHelper.validate(json);
final long total = json.getLong("total");
-final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
+final MultiInfoItemsCollector collector = new MultiInfoItemsCollector(getServiceId());
collectStreamsFrom(collector, json, getBaseUrl(), sepia);
return new InfoItemsPage<>(collector, PeertubeParsingHelper.getNextPage(page.getUrl(), total));

View File

@@ -9,7 +9,7 @@ import org.schabi.newpipe.extractor.downloader.Downloader;
import org.schabi.newpipe.extractor.exceptions.ExtractionException;
import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
-import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector;
+import org.schabi.newpipe.extractor.MultiInfoItemsCollector;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.utils.Parser;
@@ -100,7 +100,7 @@ public class SoundcloudSearchExtractor extends SearchExtractor {
private InfoItemsCollector<InfoItem, InfoItemExtractor> collectItems(
final JsonArray searchCollection) {
-final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
+final MultiInfoItemsCollector collector = new MultiInfoItemsCollector(getServiceId());
for (final Object result : searchCollection) {
if (!(result instanceof JsonObject)) continue;

View File

@@ -12,7 +12,7 @@ import org.schabi.newpipe.extractor.exceptions.ReCaptchaException;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
import org.schabi.newpipe.extractor.localization.DateWrapper;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
-import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector;
+import org.schabi.newpipe.extractor.MultiInfoItemsCollector;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper;
import org.schabi.newpipe.extractor.utils.JsonUtils;
@@ -177,7 +177,7 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor {
@Nonnull
@Override
public InfoItemsPage<InfoItem> getInitialPage() throws IOException, ExtractionException {
-final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
+final MultiInfoItemsCollector collector = new MultiInfoItemsCollector(getServiceId());
final JsonArray contents = JsonUtils.getArray(JsonUtils.getArray(initialData,
"contents.tabbedSearchResultsRenderer.tabs").getObject(0),
@@ -206,7 +206,7 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor {
throw new IllegalArgumentException("Page doesn't contain an URL");
}
-final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
+final MultiInfoItemsCollector collector = new MultiInfoItemsCollector(getServiceId());
final String[] youtubeMusicKeys = YoutubeParsingHelper.getYoutubeMusicKey();
@@ -264,7 +264,7 @@ public class YoutubeMusicSearchExtractor extends SearchExtractor {
return new InfoItemsPage<>(collector, getNextPageFrom(continuations));
}
-private void collectMusicStreamsFrom(final InfoItemsSearchCollector collector,
+private void collectMusicStreamsFrom(final MultiInfoItemsCollector collector,
@Nonnull final JsonArray videos) {
final TimeAgoParser timeAgoParser = getTimeAgoParser();

View File

@@ -11,7 +11,7 @@ import org.schabi.newpipe.extractor.exceptions.ParsingException;
import org.schabi.newpipe.extractor.linkhandler.SearchQueryHandler;
import org.schabi.newpipe.extractor.localization.Localization;
import org.schabi.newpipe.extractor.localization.TimeAgoParser;
-import org.schabi.newpipe.extractor.search.InfoItemsSearchCollector;
+import org.schabi.newpipe.extractor.MultiInfoItemsCollector;
import org.schabi.newpipe.extractor.search.SearchExtractor;
import org.schabi.newpipe.extractor.services.youtube.YoutubeParsingHelper;
import org.schabi.newpipe.extractor.utils.JsonUtils;
@@ -132,7 +132,7 @@ public class YoutubeSearchExtractor extends SearchExtractor {
@Nonnull
@Override
public InfoItemsPage<InfoItem> getInitialPage() throws IOException, ExtractionException {
-final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
+final MultiInfoItemsCollector collector = new MultiInfoItemsCollector(getServiceId());
final JsonArray sections = initialData.getObject("contents")
.getObject("twoColumnSearchResultsRenderer").getObject("primaryContents")
@@ -163,7 +163,7 @@ public class YoutubeSearchExtractor extends SearchExtractor {
}
final Localization localization = getExtractorLocalization();
-final InfoItemsSearchCollector collector = new InfoItemsSearchCollector(getServiceId());
+final MultiInfoItemsCollector collector = new MultiInfoItemsCollector(getServiceId());
// @formatter:off
final byte[] json = JsonWriter.string(prepareDesktopJsonBuilder(localization,
@@ -195,7 +195,7 @@ public class YoutubeSearchExtractor extends SearchExtractor {
.getObject("continuationItemRenderer")));
}
-private void collectStreamsFrom(final InfoItemsSearchCollector collector,
+private void collectStreamsFrom(final MultiInfoItemsCollector collector,
final JsonArray contents) throws NothingFoundException,
ParsingException {
final TimeAgoParser timeAgoParser = getTimeAgoParser();