Skip to content

Commit 5e4ddb3

Browse files
committed
[SoundCloud] Fix extractors built from next playlist pages
Extractors built from a next-page URL lacked the information needed to compute the following next-page URL. Now `nextPageUrl` contains a full link with all remaining track IDs, and `getPage` consumes the first part of that URL (covering 15 streams) and produces a new `nextPageUrl` with the remaining streams. Also add a test for this.
1 parent 0e1b4bb commit 5e4ddb3

2 files changed

Lines changed: 39 additions & 32 deletions

File tree

extractor/src/main/java/org/schabi/newpipe/extractor/services/soundcloud/SoundcloudPlaylistExtractor.java

Lines changed: 30 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -30,8 +30,6 @@ public class SoundcloudPlaylistExtractor extends PlaylistExtractor {
3030
private JsonObject playlist;
3131

3232
private StreamInfoItemsCollector streamInfoItemsCollector;
33-
private List<Integer> nextTrackIds;
34-
private int nextTrackIdsIndex;
3533
private String nextPageUrl;
3634

3735
public SoundcloudPlaylistExtractor(StreamingService service, ListLinkHandler linkHandler) {
@@ -121,15 +119,16 @@ public long getStreamCount() {
121119
@Override
122120
public InfoItemsPage<StreamInfoItem> getInitialPage() throws IOException, ExtractionException {
123121
if (streamInfoItemsCollector == null) {
124-
computeInitialTracksAndNextIds();
122+
computeInitialTracksAndNextPageUrl();
125123
}
126-
return new InfoItemsPage<>(streamInfoItemsCollector, getNextPageUrl());
124+
return new InfoItemsPage<>(streamInfoItemsCollector, nextPageUrl);
127125
}
128126

129-
private void computeInitialTracksAndNextIds() {
127+
private void computeInitialTracksAndNextPageUrl() throws IOException, ExtractionException {
130128
streamInfoItemsCollector = new StreamInfoItemsCollector(getServiceId());
131-
nextTrackIds = new ArrayList<>();
132-
nextTrackIdsIndex = 0;
129+
StringBuilder nextPageUrlBuilder = new StringBuilder("https://api-v2.soundcloud.com/tracks?client_id=");
130+
nextPageUrlBuilder.append(SoundcloudParsingHelper.clientId());
131+
nextPageUrlBuilder.append("&ids=");
133132

134133
JsonArray tracks = playlist.getArray("tracks");
135134
for (Object o : tracks) {
@@ -138,39 +137,23 @@ private void computeInitialTracksAndNextIds() {
138137
if (track.has("title")) { // i.e. if full info is available
139138
streamInfoItemsCollector.commit(new SoundcloudStreamInfoItemExtractor(track));
140139
} else {
141-
nextTrackIds.add(track.getInt("id"));
140+
// %09d would be enough, but a 0 before the number does not create problems, so let's be sure
141+
nextPageUrlBuilder.append(String.format("%010d,", track.getInt("id")));
142142
}
143143
}
144144
}
145-
}
146-
147-
private void computeAnotherNextPageUrl() throws IOException, ExtractionException {
148-
if (nextTrackIds == null || nextTrackIdsIndex >= nextTrackIds.size()) {
149-
nextPageUrl = ""; // there are no more tracks
150-
return;
151-
}
152-
153-
StringBuilder urlBuilder = new StringBuilder("https://api-v2.soundcloud.com/tracks?client_id=");
154-
urlBuilder.append(SoundcloudParsingHelper.clientId());
155-
urlBuilder.append("&ids=");
156145

157-
int upperIndex = Math.min(nextTrackIdsIndex + streamsPerRequestedPage, nextTrackIds.size());
158-
for (int i = nextTrackIdsIndex; i < upperIndex; ++i) {
159-
urlBuilder.append(nextTrackIds.get(i));
160-
urlBuilder.append(","); // a , at the end is ok
146+
nextPageUrl = nextPageUrlBuilder.toString();
147+
if (nextPageUrl.endsWith("&ids=")) {
148+
// there are no other videos
149+
nextPageUrl = "";
161150
}
162-
163-
nextTrackIdsIndex = upperIndex;
164-
nextPageUrl = urlBuilder.toString();
165151
}
166152

167153
@Override
168154
public String getNextPageUrl() throws IOException, ExtractionException {
169155
if (nextPageUrl == null) {
170-
if (nextTrackIds == null) {
171-
computeInitialTracksAndNextIds();
172-
}
173-
computeAnotherNextPageUrl();
156+
computeInitialTracksAndNextPageUrl();
174157
}
175158
return nextPageUrl;
176159
}
@@ -181,8 +164,24 @@ public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) throws IOException,
181164
throw new ExtractionException(new IllegalArgumentException("Page url is empty or null"));
182165
}
183166

167+
// see computeInitialTracksAndNextPageUrl
168+
final int lengthFirstPartOfUrl = ("https://api-v2.soundcloud.com/tracks?client_id="
169+
+ SoundcloudParsingHelper.clientId()
170+
+ "&ids=").length();
171+
final int lengthOfEveryStream = 11;
172+
173+
String currentPageUrl;
174+
int lengthMaxStreams = lengthFirstPartOfUrl + lengthOfEveryStream * streamsPerRequestedPage;
175+
if (pageUrl.length() <= lengthMaxStreams) {
176+
currentPageUrl = pageUrl; // fetch every remaining video, there are less than the max
177+
nextPageUrl = ""; // afterwards the list is complete
178+
} else {
179+
currentPageUrl = pageUrl.substring(0, lengthMaxStreams);
180+
nextPageUrl = pageUrl.substring(0, lengthFirstPartOfUrl) + pageUrl.substring(lengthMaxStreams);
181+
}
182+
184183
StreamInfoItemsCollector collector = new StreamInfoItemsCollector(getServiceId());
185-
String response = NewPipe.getDownloader().get(pageUrl, getExtractorLocalization()).responseBody();
184+
String response = NewPipe.getDownloader().get(currentPageUrl, getExtractorLocalization()).responseBody();
186185

187186
try {
188187
JsonArray tracks = JsonParser.array().from(response);
@@ -195,7 +194,6 @@ public InfoItemsPage<StreamInfoItem> getPage(String pageUrl) throws IOException,
195194
throw new ParsingException("Could not parse json response", e);
196195
}
197196

198-
computeAnotherNextPageUrl();
199197
return new InfoItemsPage<>(collector, nextPageUrl);
200198
}
201199
}

extractor/src/test/java/org/schabi/newpipe/extractor/services/soundcloud/SoundcloudPlaylistExtractorTest.java

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -234,6 +234,15 @@ public static void setUp() throws Exception {
234234
public void testGetPageInNewExtractor() throws Exception {
235235
final PlaylistExtractor newExtractor = SoundCloud.getPlaylistExtractor(extractor.getUrl());
236236
defaultTestGetPageInNewExtractor(extractor, newExtractor);
237+
String page1 = newExtractor.getNextPageUrl();
238+
defaultTestMoreItems(newExtractor); // there has to be another page
239+
String page2 = newExtractor.getNextPageUrl();
240+
defaultTestMoreItems(newExtractor); // and another one
241+
String page3 = newExtractor.getNextPageUrl();
242+
243+
assertNotEquals("Same pages", page1, page2);
244+
assertNotEquals("Same pages", page2, page3);
245+
assertNotEquals("Same pages", page3, page1);
237246
}
238247

239248
/*//////////////////////////////////////////////////////////////////////////

0 commit comments

Comments
 (0)