pollymc/api/logic/net/NetJob.cpp

/* Copyright 2013-2018 MultiMC Contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "NetJob.h"
#include "Download.h"
#include <QDebug>
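
// Slot invoked when the download at `index` finishes successfully:
// record full progress for that part, move it from the active set to
// the done set, detach its signals and try to start more parts.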
void NetJob::partSucceeded(int index)
{
    // do progress. all slots are 1 in size at least
    auto &slot = parts_progress[index];
    partProgress(index, slot.total_progress, slot.total_progress);

    m_doing.remove(index);
    m_done.insert(index);
    downloads[index].get()->disconnect(this);
    startMoreParts();
}
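
// Slot invoked when the download at `index` fails: the part is retried
// up to three times by putting it back into the queue, and only after
// that is it marked as permanently failed.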
void NetJob::partFailed(int index)
{
    m_doing.remove(index);
    auto &slot = parts_progress[index];
    if (slot.failures == 3)
    {
        m_failed.insert(index);
    }
    else
    {
        slot.failures++;
        m_todo.enqueue(index);
    }
    downloads[index].get()->disconnect(this);
    startMoreParts();
}
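
// Slot invoked when the download at `index` is aborted: the whole job is
// flagged as aborted and the part is treated like a failed one.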
void NetJob::partAborted(int index)
{
    m_aborted = true;
    m_doing.remove(index);
    m_failed.insert(index);
    downloads[index].get()->disconnect(this);
    startMoreParts();
}
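
// Aggregates per-part progress into overall job progress. Each part is
// worth 1000 "points": finished parts count in full, while active parts
// contribute according to the aggregate fraction of bytes received
// across all running parts with a known total size.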
void NetJob::partProgress(int index, qint64 bytesReceived, qint64 bytesTotal)
{
    auto &slot = parts_progress[index];
    slot.current_progress = bytesReceived;
    slot.total_progress = bytesTotal;

    int done = m_done.size();
    int doing = m_doing.size();
    int all = parts_progress.size();

    qint64 bytesAll = 0;
    qint64 bytesTotalAll = 0;
    for(auto & partIdx: m_doing)
    {
        auto part = parts_progress[partIdx];
        // do not count parts with unknown/nonsensical total size
        if(part.total_progress <= 0)
        {
            continue;
        }
        bytesAll += part.current_progress;
        bytesTotalAll += part.total_progress;
    }

    qint64 inprogress = (bytesTotalAll == 0) ? 0 : (bytesAll * 1000) / bytesTotalAll;
    auto current = done * 1000 + doing * inprogress;
    auto current_total = all * 1000;
    // HACK: make sure it never jumps backwards.
    if(m_current_progress > current)
    {
        current = m_current_progress;
    }
    m_current_progress = current;
    setProgress(current, current_total);
}
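
// Entry point of the task. Starting the parts is deferred through the
// event loop (queued invocation of startMoreParts) so that parts failing
// immediately are still reported after the job counts as running.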
void NetJob::executeTask()
{
    // hack that delays early failures so they can be caught easier
    QMetaObject::invokeMethod(this, "startMoreParts", Qt::QueuedConnection);
}
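
// Core scheduler: when the queue is empty it checks terminal conditions
// (success, abort or failure); otherwise it starts queued parts until up
// to six downloads are running concurrently.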
void NetJob::startMoreParts()
{
    if(!isRunning())
    {
        // this actually makes sense. You can put running downloads into a NetJob and then not start it until much later.
        return;
    }

    // OK. We are actively processing tasks, proceed.
    // Check for final conditions if there's nothing in the queue.
    if(!m_todo.size())
    {
        if(!m_doing.size())
        {
            if(!m_failed.size())
            {
                emitSucceeded();
            }
            else if(m_aborted)
            {
                emitAborted();
            }
            else
            {
                emitFailed(tr("Job '%1' failed to process:\n%2").arg(objectName()).arg(getFailedFiles().join("\n")));
            }
        }
        return;
    }

    // There's work to do, try to start more parts.
    while (m_doing.size() < 6)
    {
        if(!m_todo.size())
            return;
        int doThis = m_todo.dequeue();
        m_doing.insert(doThis);
        auto part = downloads[doThis];

        // connect signals :D
        connect(part.get(), SIGNAL(succeeded(int)), SLOT(partSucceeded(int)));
        connect(part.get(), SIGNAL(failed(int)), SLOT(partFailed(int)));
        connect(part.get(), SIGNAL(aborted(int)), SLOT(partAborted(int)));
        connect(part.get(), SIGNAL(netActionProgress(int, qint64, qint64)),
                SLOT(partProgress(int, qint64, qint64)));
        part->start();
    }
}
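
// Returns the URLs of all parts that ended up in the failed set, sorted
// alphabetically, for use in the failure message.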
QStringList NetJob::getFailedFiles()
{
    QStringList failed;
    for (auto index: m_failed)
    {
        failed.push_back(downloads[index]->url().toString());
    }
    failed.sort();
    return failed;
}
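
// The job can only be fully aborted if every waiting part and every
// active part reports that it can be aborted.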
bool NetJob::canAbort() const
{
    bool canFullyAbort = true;
    // can abort the waiting?
    for(auto index: m_todo)
    {
        auto part = downloads[index];
        canFullyAbort &= part->canAbort();
    }
    // can abort the active?
    for(auto index: m_doing)
    {
        auto part = downloads[index];
        canFullyAbort &= part->canAbort();
    }
    return canFullyAbort;
}
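
// Aborts the job: waiting parts are moved straight into the failed set
// and each active part is asked to abort. Returns true only if every
// active part aborted successfully.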
bool NetJob::abort()
{
    bool fullyAborted = true;
    // fail all waiting
    m_failed.unite(m_todo.toSet());
    m_todo.clear();
    // abort active
    auto toKill = m_doing.toList();
    for(auto index: toKill)
    {
        auto part = downloads[index];
        fullyAborted &= part->abort();
    }
    return fullyAborted;
}
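
// Adds a network action as a new part of this job. Its progress slot is
// seeded with the action's current state; actions that are already
// running are connected to the result/progress slots immediately, while
// idle ones are queued until startMoreParts() picks them up.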
bool NetJob::addNetAction(NetActionPtr action)
{
    action->m_index_within_job = downloads.size();
    downloads.append(action);
    part_info pi;
    parts_progress.append(pi);
    partProgress(parts_progress.count() - 1, action->currentProgress(), action->totalProgress());

    if(action->isRunning())
    {
        connect(action.get(), SIGNAL(succeeded(int)), SLOT(partSucceeded(int)));
        connect(action.get(), SIGNAL(failed(int)), SLOT(partFailed(int)));
        connect(action.get(), SIGNAL(netActionProgress(int, qint64, qint64)), SLOT(partProgress(int, qint64, qint64)));
    }
    else
    {
        m_todo.append(parts_progress.size() - 1);
    }
    return true;
}