From 5cca78e718f15522cc3db9aec76aa910dd696aa8 Mon Sep 17 00:00:00 2001 From: Monty Brandenberg Date: Mon, 23 Jun 2014 14:23:33 -0400 Subject: First HTTP pipelining viewer. Enable pipelining for GetTexture and GetMesh2 at a pipeline depth of 5. Create global debug option, HttpPipelining, to enable and disable HTTP pipelining (defaults to true). Tweak texture and mesh low- and high-water request levels based on pipelining status and depth. Fixup texture console which was damaged in a recent release. Split logging of the no-request HTTP error case into two cases: one for missing URL in HTTP request, one for HTTP request not created. A refactor in llcorehttp is coming: I will be moving all libcurl- using code into libcurl-specific modules. --- indra/newview/llappcorehttp.cpp | 169 ++++++++++++++++++++++++++++------------ 1 file changed, 119 insertions(+), 50 deletions(-) (limited to 'indra/newview/llappcorehttp.cpp') diff --git a/indra/newview/llappcorehttp.cpp b/indra/newview/llappcorehttp.cpp index 70dcffefb2..deda0ccb41 100755 --- a/indra/newview/llappcorehttp.cpp +++ b/indra/newview/llappcorehttp.cpp @@ -4,7 +4,7 @@ * * $LicenseInfo:firstyear=2012&license=viewerlgpl$ * Second Life Viewer Source Code - * Copyright (C) 2012-2013, Linden Research, Inc. + * Copyright (C) 2012-2014, Linden Research, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -40,6 +40,8 @@ // be open at a time. const F64 LLAppCoreHttp::MAX_THREAD_WAIT_TIME(10.0); +const long LLAppCoreHttp::PIPELINING_DEPTH(5L); + static const struct { LLAppCoreHttp::EAppPolicy mPolicy; @@ -47,42 +49,43 @@ static const struct U32 mMin; U32 mMax; U32 mRate; + bool mPipelined; std::string mKey; const char * mUsage; } init_data[] = // Default and dynamic values for classes { { - LLAppCoreHttp::AP_DEFAULT, 8, 8, 8, 0, + LLAppCoreHttp::AP_DEFAULT, 8, 8, 8, 0, false, "", "other" }, { - LLAppCoreHttp::AP_TEXTURE, 8, 1, 12, 0, + LLAppCoreHttp::AP_TEXTURE, 8, 1, 12, 0, true, "TextureFetchConcurrency", "texture fetch" }, { - LLAppCoreHttp::AP_MESH1, 32, 1, 128, 100, + LLAppCoreHttp::AP_MESH1, 32, 1, 128, 100, false, "MeshMaxConcurrentRequests", "mesh fetch" }, { - LLAppCoreHttp::AP_MESH2, 8, 1, 32, 100, + LLAppCoreHttp::AP_MESH2, 8, 1, 32, 100, true, "Mesh2MaxConcurrentRequests", "mesh2 fetch" }, { - LLAppCoreHttp::AP_LARGE_MESH, 2, 1, 8, 0, + LLAppCoreHttp::AP_LARGE_MESH, 2, 1, 8, 0, false, "", "large mesh fetch" }, { - LLAppCoreHttp::AP_UPLOADS, 2, 1, 8, 0, + LLAppCoreHttp::AP_UPLOADS, 2, 1, 8, 0, false, "", "asset upload" }, { - LLAppCoreHttp::AP_LONG_POLL, 32, 32, 32, 0, + LLAppCoreHttp::AP_LONG_POLL, 32, 32, 32, 0, false, "", "long poll" } @@ -91,18 +94,20 @@ static const struct static void setting_changed(); +LLAppCoreHttp::HttpClass::HttpClass() + : mPolicy(LLCore::HttpRequest::DEFAULT_POLICY_ID), + mConnLimit(0U), + mPipelined(false) +{} + + LLAppCoreHttp::LLAppCoreHttp() : mRequest(NULL), mStopHandle(LLCORE_HTTP_HANDLE_INVALID), mStopRequested(0.0), - mStopped(false) -{ - for (int i(0); i < LL_ARRAY_SIZE(mPolicies); ++i) - { - mPolicies[i] = LLCore::HttpRequest::DEFAULT_POLICY_ID; - mSettings[i] = 0U; - } -} + mStopped(false), + mPipelined(true) +{} LLAppCoreHttp::~LLAppCoreHttp() @@ -121,6 +126,14 @@ void LLAppCoreHttp::init() << LL_ENDL; } + // Global pipelining preference from settings + static const std::string http_pipelining("HttpPipelining"); + if (gSavedSettings.controlExists(http_pipelining)) + { + // Default to true if absent. 
+ mPipelined = gSavedSettings.getBOOL(http_pipelining); + } + // Point to our certs or SSH/https: will fail on connect status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_CA_FILE, LLCore::HttpRequest::GLOBAL_POLICY_ID, @@ -157,27 +170,27 @@ void LLAppCoreHttp::init() } // Setup default policy and constrain if directed to - mPolicies[AP_DEFAULT] = LLCore::HttpRequest::DEFAULT_POLICY_ID; + mHttpClasses[AP_DEFAULT].mPolicy = LLCore::HttpRequest::DEFAULT_POLICY_ID; // Setup additional policies based on table and some special rules for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) { - const EAppPolicy policy(init_data[i].mPolicy); + const EAppPolicy app_policy(init_data[i].mPolicy); - if (AP_DEFAULT == policy) + if (AP_DEFAULT == app_policy) { // Pre-created continue; } - mPolicies[policy] = LLCore::HttpRequest::createPolicyClass(); - if (! mPolicies[policy]) + mHttpClasses[app_policy].mPolicy = LLCore::HttpRequest::createPolicyClass(); + if (! mHttpClasses[app_policy].mPolicy) { // Use default policy (but don't accidentally modify default) LL_WARNS("Init") << "Failed to create HTTP policy class for " << init_data[i].mUsage << ". Using default policy." << LL_ENDL; - mPolicies[policy] = mPolicies[AP_DEFAULT]; + mHttpClasses[app_policy].mPolicy = mHttpClasses[AP_DEFAULT].mPolicy; continue; } } @@ -196,6 +209,9 @@ void LLAppCoreHttp::init() << LL_ENDL; } + // *NOTE: Pipelining isn't dynamic yet. When it is, add a global + // signal for the setting here. + // Register signals for settings and state changes for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) { @@ -209,7 +225,7 @@ void LLAppCoreHttp::init() } else { - mSettingsSignal[i] = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed)); + mHttpClasses[i].mSettingsSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed)); } } } @@ -261,9 +277,9 @@ void LLAppCoreHttp::cleanup() } } - for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) + for (int i(0); i < LL_ARRAY_SIZE(mHttpClasses); ++i) { - mSettingsSignal[i].disconnect(); + mHttpClasses[i].mSettingsSignal.disconnect(); } delete mRequest; @@ -278,30 +294,57 @@ void LLAppCoreHttp::cleanup() } } + void LLAppCoreHttp::refreshSettings(bool initial) { LLCore::HttpStatus status; for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) { - const EAppPolicy policy(init_data[i].mPolicy); + const EAppPolicy app_policy(init_data[i].mPolicy); - // Set any desired throttle - if (initial && init_data[i].mRate) + if (initial) { - // Init-time only, can use the static setters here - status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_THROTTLE_RATE, - mPolicies[policy], - init_data[i].mRate, - NULL); - if (! status) + // Init-time only settings, can use the static setters here + + if (init_data[i].mRate) { - LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage - << " throttle rate. Reason: " << status.toString() - << LL_ENDL; + // Set any desired throttle + status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_THROTTLE_RATE, + mHttpClasses[app_policy].mPolicy, + init_data[i].mRate, + NULL); + if (! status) + { + LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage + << " throttle rate. Reason: " << status.toString() + << LL_ENDL; + } + } + + mHttpClasses[app_policy].mPipelined = false; + if (mPipelined && init_data[i].mPipelined) + { + // Pipelining election is currently static (init-time). 
+ status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH, + mHttpClasses[app_policy].mPolicy, + PIPELINING_DEPTH, + NULL); + if (! status) + { + LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage + << " to pipelined mode. Reason: " << status.toString() + << LL_ENDL; + } + else + { + mHttpClasses[app_policy].mPipelined = true; + } } } + // Init- or run-time settings + // Get target connection concurrency value U32 setting(init_data[i].mDefault); if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey)) @@ -314,19 +357,31 @@ void LLAppCoreHttp::refreshSettings(bool initial) } } - if (! initial && setting == mSettings[policy]) + if (! initial && setting == mHttpClasses[app_policy].mConnLimit) { // Unchanged, try next setting continue; } - // Set it and report - // *TODO: These are intended to be per-host limits when we can - // support that in llcorehttp/libcurl. + // Set it and report. Strategies depend on pipelining: + // + // No Pipelining. Llcorehttp manages connections itself based + // on the PO_CONNECTION_LIMIT setting. Set both limits to the + // same value for logical consistency. In the future, may + // hand over connection management to libcurl after the + // connection cache has been better vetted. + // + // Pipelining. Libcurl is allowed to manage connections to a + // great degree. Steady state will connection limit based on + // the per-host setting. Transitions (region crossings, new + // avatars, etc.) can request additional outbound connections + // to other servers via 2X total connection limit. + // LLCore::HttpHandle handle; handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT, - mPolicies[policy], - setting, NULL); + mHttpClasses[app_policy].mPolicy, + (mHttpClasses[app_policy].mPipelined ? 2 * setting : setting), + NULL); if (LLCORE_HTTP_HANDLE_INVALID == handle) { status = mRequest->getStatus(); @@ -336,16 +391,30 @@ void LLAppCoreHttp::refreshSettings(bool initial) } else { - LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage - << " concurrency. New value: " << setting - << LL_ENDL; - mSettings[policy] = setting; - if (initial && setting != init_data[i].mDefault) + handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT, + mHttpClasses[app_policy].mPolicy, + setting, + NULL); + if (LLCORE_HTTP_HANDLE_INVALID == handle) { - LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage - << " concurrency. New value: " << setting + status = mRequest->getStatus(); + LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage + << " per-host concurrency. Reason: " << status.toString() << LL_ENDL; } + else + { + LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage + << " concurrency. New value: " << setting + << LL_ENDL; + mHttpClasses[app_policy].mConnLimit = setting; + if (initial && setting != init_data[i].mDefault) + { + LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage + << " concurrency. New value: " << setting + << LL_ENDL; + } + } } } } -- cgit v1.2.3 From 17da4cf57aadcf1987b48af298d8b2742089a35c Mon Sep 17 00:00:00 2001 From: Monty Brandenberg Date: Fri, 27 Jun 2014 17:25:39 -0400 Subject: Cleanup and tuning. Use a consistent index on some initialization data so their isn't an opportunity for gaps over overruns (init_data). Start some preliminary tweaking of policy class numbers. It looks like I can easily drop the default connection count to '4' and still hit the throttles. 
Did some experiments running pipeline deeper which was mostly fine for textures but tended to slow meshes. Reason uncertain but a depth of '5' seems generally healthy for mesh. I had one run of 52.6S with a theoretical minimum of 51.2S. That's as good as I've ever seen. --- indra/newview/llappcorehttp.cpp | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) (limited to 'indra/newview/llappcorehttp.cpp') diff --git a/indra/newview/llappcorehttp.cpp b/indra/newview/llappcorehttp.cpp index deda0ccb41..d097f18d61 100755 --- a/indra/newview/llappcorehttp.cpp +++ b/indra/newview/llappcorehttp.cpp @@ -42,9 +42,9 @@ const F64 LLAppCoreHttp::MAX_THREAD_WAIT_TIME(10.0); const long LLAppCoreHttp::PIPELINING_DEPTH(5L); +// Default and dynamic values for classes static const struct { - LLAppCoreHttp::EAppPolicy mPolicy; U32 mDefault; U32 mMin; U32 mMax; @@ -52,40 +52,40 @@ static const struct bool mPipelined; std::string mKey; const char * mUsage; -} init_data[] = // Default and dynamic values for classes +} init_data[LLAppCoreHttp::AP_COUNT] = { - { - LLAppCoreHttp::AP_DEFAULT, 8, 8, 8, 0, false, + { // AP_DEFAULT + 8, 8, 8, 0, false, "", "other" }, - { - LLAppCoreHttp::AP_TEXTURE, 8, 1, 12, 0, true, + { // AP_TEXTURE + 4, 1, 12, 0, true, "TextureFetchConcurrency", "texture fetch" }, - { - LLAppCoreHttp::AP_MESH1, 32, 1, 128, 100, false, + { // AP_MESH1 + 32, 1, 128, 100, false, "MeshMaxConcurrentRequests", "mesh fetch" }, - { - LLAppCoreHttp::AP_MESH2, 8, 1, 32, 100, true, + { // AP_MESH2 + 4, 1, 32, 100, true, "Mesh2MaxConcurrentRequests", "mesh2 fetch" }, - { - LLAppCoreHttp::AP_LARGE_MESH, 2, 1, 8, 0, false, + { // AP_LARGE_MESH + 2, 1, 8, 0, false, "", "large mesh fetch" }, - { - LLAppCoreHttp::AP_UPLOADS, 2, 1, 8, 0, false, + { // AP_UPLOADS + 2, 1, 8, 0, false, "", "asset upload" }, - { - LLAppCoreHttp::AP_LONG_POLL, 32, 32, 32, 0, false, + { // AP_LONG_POLL + 32, 32, 32, 0, false, "", "long poll" } @@ -173,9 +173,10 @@ void LLAppCoreHttp::init() mHttpClasses[AP_DEFAULT].mPolicy = LLCore::HttpRequest::DEFAULT_POLICY_ID; // Setup additional policies based on table and some special rules + llassert(LL_ARRAY_SIZE(init_data) == AP_COUNT); for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) { - const EAppPolicy app_policy(init_data[i].mPolicy); + const EAppPolicy app_policy(static_cast(i)); if (AP_DEFAULT == app_policy) { @@ -301,7 +302,7 @@ void LLAppCoreHttp::refreshSettings(bool initial) for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) { - const EAppPolicy app_policy(init_data[i].mPolicy); + const EAppPolicy app_policy(static_cast(i)); if (initial) { @@ -326,6 +327,10 @@ void LLAppCoreHttp::refreshSettings(bool initial) if (mPipelined && init_data[i].mPipelined) { // Pipelining election is currently static (init-time). + // Making it dynamic isn't too hard in the SL code but verifying + // that libcurl handles the on-to-off transition while holding + // outstanding requests is something that should be tested. + status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH, mHttpClasses[app_policy].mPolicy, PIPELINING_DEPTH, -- cgit v1.2.3 From e79a88c8ccfadcd260892000d4dec2ae921b26de Mon Sep 17 00:00:00 2001 From: Monty Brandenberg Date: Tue, 12 Aug 2014 18:21:26 -0400 Subject: Better support for dynamic option changes in llcorehttp. Libcurl has some problems disabling pipelining on a multi handle with outstanding requests so build a more conservative system that allows requests to drain before setting curl multi options. 
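For reference, the libcurl-level shape of that "drain first, then set" approach is roughly the following. This is a minimal sketch, not llcorehttp code: TransportState and apply_pending are illustrative names, and llcorehttp routes the change through its own queued policy-option request rather than touching the multi handle directly.

    #include <curl/curl.h>

    struct TransportState
    {
        CURLM * mMulti;             // libcurl multi handle for one policy class
        bool    mPipelined;         // state currently applied to libcurl
        bool    mPendingPipelined;  // state most recently requested
        bool    mDirty;             // requested state differs from applied state
    };

    // Called from the service loop after curl_multi_perform() reports how
    // many transfers are still running.  The pipelining change is applied
    // only once the handle has drained, since toggling CURLMOPT_PIPELINING
    // with requests outstanding is what proved unreliable.
    static void apply_pending(TransportState & xport, int still_running)
    {
        if (! xport.mDirty || still_running > 0)
        {
            return;
        }
        curl_multi_setopt(xport.mMulti, CURLMOPT_PIPELINING,
                          xport.mPendingPipelined ? 1L : 0L);
        curl_multi_setopt(xport.mMulti, CURLMOPT_MAX_PIPELINE_LENGTH,
                          xport.mPendingPipelined ? 5L : 0L);
        xport.mPipelined = xport.mPendingPipelined;
        xport.mDirty = false;
    }
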
Would rather not have this but it is significantly safer. "HttpPipelining" debug setting is now fully dynamic. Connection limits can also be made dynamic in the near future. Upped the default connection count back to 8 for now but will revisit this in the tuning phase. It might be time to combine mesh and textures into a single asset class. For normal server operations that would be a clear path, but for server under load, the current scheme may be better. Minor cleanup in logging to elminate some redundant strings. Might add some more tracing to the stall logic 'just in case'. --- indra/newview/llappcorehttp.cpp | 174 +++++++++++++++++++++++----------------- 1 file changed, 101 insertions(+), 73 deletions(-) (limited to 'indra/newview/llappcorehttp.cpp') diff --git a/indra/newview/llappcorehttp.cpp b/indra/newview/llappcorehttp.cpp index d097f18d61..464e60948a 100755 --- a/indra/newview/llappcorehttp.cpp +++ b/indra/newview/llappcorehttp.cpp @@ -60,7 +60,7 @@ static const struct "other" }, { // AP_TEXTURE - 4, 1, 12, 0, true, + 8, 1, 12, 0, true, "TextureFetchConcurrency", "texture fetch" }, @@ -70,7 +70,7 @@ static const struct "mesh fetch" }, { // AP_MESH2 - 4, 1, 32, 100, true, + 8, 1, 32, 100, true, "Mesh2MaxConcurrentRequests", "mesh2 fetch" }, @@ -126,14 +126,6 @@ void LLAppCoreHttp::init() << LL_ENDL; } - // Global pipelining preference from settings - static const std::string http_pipelining("HttpPipelining"); - if (gSavedSettings.controlExists(http_pipelining)) - { - // Default to true if absent. - mPipelined = gSavedSettings.getBOOL(http_pipelining); - } - // Point to our certs or SSH/https: will fail on connect status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_CA_FILE, LLCore::HttpRequest::GLOBAL_POLICY_ID, @@ -210,12 +202,27 @@ void LLAppCoreHttp::init() << LL_ENDL; } - // *NOTE: Pipelining isn't dynamic yet. When it is, add a global - // signal for the setting here. - + // Signal for global pipelining preference from settings + static const std::string http_pipelining("HttpPipelining"); + if (gSavedSettings.controlExists(http_pipelining)) + { + LLPointer cntrl_ptr = gSavedSettings.getControl(http_pipelining); + if (cntrl_ptr.isNull()) + { + LL_WARNS("Init") << "Unable to set signal on global setting '" << http_pipelining + << "'" << LL_ENDL; + } + else + { + mPipelinedSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed)); + } + } + // Register signals for settings and state changes for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) { + const EAppPolicy app_policy(static_cast(i)); + if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey)) { LLPointer cntrl_ptr = gSavedSettings.getControl(init_data[i].mKey); @@ -226,7 +233,7 @@ void LLAppCoreHttp::init() } else { - mHttpClasses[i].mSettingsSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed)); + mHttpClasses[app_policy].mSettingsSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed)); } } } @@ -282,6 +289,7 @@ void LLAppCoreHttp::cleanup() { mHttpClasses[i].mSettingsSignal.disconnect(); } + mPipelinedSignal.disconnect(); delete mRequest; mRequest = NULL; @@ -299,6 +307,20 @@ void LLAppCoreHttp::cleanup() void LLAppCoreHttp::refreshSettings(bool initial) { LLCore::HttpStatus status; + + // Global pipelining setting + bool pipeline_changed(false); + static const std::string http_pipelining("HttpPipelining"); + if (gSavedSettings.controlExists(http_pipelining)) + { + // Default to true (in ctor) if absent. 
+ bool pipelined(gSavedSettings.getBOOL(http_pipelining)); + if (pipelined != mPipelined) + { + mPipelined = pipelined; + pipeline_changed = true; + } + } for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) { @@ -323,33 +345,42 @@ void LLAppCoreHttp::refreshSettings(bool initial) } } - mHttpClasses[app_policy].mPipelined = false; - if (mPipelined && init_data[i].mPipelined) + } + + // Init- or run-time settings. Must use the queued request API. + + // Pipelining changes + if (initial || pipeline_changed) + { + const bool to_pipeline(mPipelined && init_data[i].mPipelined); + if (to_pipeline != mHttpClasses[app_policy].mPipelined) { - // Pipelining election is currently static (init-time). - // Making it dynamic isn't too hard in the SL code but verifying - // that libcurl handles the on-to-off transition while holding - // outstanding requests is something that should be tested. + // Pipeline election changing, set dynamic option via request + + LLCore::HttpHandle handle; + const long new_depth(to_pipeline ? PIPELINING_DEPTH : 0); - status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH, - mHttpClasses[app_policy].mPolicy, - PIPELINING_DEPTH, - NULL); - if (! status) + handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH, + mHttpClasses[app_policy].mPolicy, + new_depth, + NULL); + if (LLCORE_HTTP_HANDLE_INVALID == handle) { + status = mRequest->getStatus(); LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage - << " to pipelined mode. Reason: " << status.toString() + << " pipelining. Reason: " << status.toString() << LL_ENDL; } else { - mHttpClasses[app_policy].mPipelined = true; + LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage + << " pipelining. New value: " << new_depth + << LL_ENDL; + mHttpClasses[app_policy].mPipelined = to_pipeline; } } } - - // Init- or run-time settings - + // Get target connection concurrency value U32 setting(init_data[i].mDefault); if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey)) @@ -362,63 +393,60 @@ void LLAppCoreHttp::refreshSettings(bool initial) } } - if (! initial && setting == mHttpClasses[app_policy].mConnLimit) + if (initial || setting != mHttpClasses[app_policy].mConnLimit || pipeline_changed) { - // Unchanged, try next setting - continue; - } - - // Set it and report. Strategies depend on pipelining: - // - // No Pipelining. Llcorehttp manages connections itself based - // on the PO_CONNECTION_LIMIT setting. Set both limits to the - // same value for logical consistency. In the future, may - // hand over connection management to libcurl after the - // connection cache has been better vetted. - // - // Pipelining. Libcurl is allowed to manage connections to a - // great degree. Steady state will connection limit based on - // the per-host setting. Transitions (region crossings, new - // avatars, etc.) can request additional outbound connections - // to other servers via 2X total connection limit. - // - LLCore::HttpHandle handle; - handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT, - mHttpClasses[app_policy].mPolicy, - (mHttpClasses[app_policy].mPipelined ? 2 * setting : setting), - NULL); - if (LLCORE_HTTP_HANDLE_INVALID == handle) - { - status = mRequest->getStatus(); - LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage - << " concurrency. Reason: " << status.toString() - << LL_ENDL; - } - else - { - handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT, + // Set it and report. 
Strategies depend on pipelining: + // + // No Pipelining. Llcorehttp manages connections itself based + // on the PO_CONNECTION_LIMIT setting. Set both limits to the + // same value for logical consistency. In the future, may + // hand over connection management to libcurl after the + // connection cache has been better vetted. + // + // Pipelining. Libcurl is allowed to manage connections to a + // great degree. Steady state will connection limit based on + // the per-host setting. Transitions (region crossings, new + // avatars, etc.) can request additional outbound connections + // to other servers via 2X total connection limit. + // + LLCore::HttpHandle handle; + handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT, mHttpClasses[app_policy].mPolicy, - setting, + (mHttpClasses[app_policy].mPipelined ? 2 * setting : setting), NULL); if (LLCORE_HTTP_HANDLE_INVALID == handle) { status = mRequest->getStatus(); LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage - << " per-host concurrency. Reason: " << status.toString() + << " concurrency. Reason: " << status.toString() << LL_ENDL; } else { - LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage - << " concurrency. New value: " << setting - << LL_ENDL; - mHttpClasses[app_policy].mConnLimit = setting; - if (initial && setting != init_data[i].mDefault) + handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT, + mHttpClasses[app_policy].mPolicy, + setting, + NULL); + if (LLCORE_HTTP_HANDLE_INVALID == handle) { - LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage - << " concurrency. New value: " << setting + status = mRequest->getStatus(); + LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage + << " per-host concurrency. Reason: " << status.toString() << LL_ENDL; } + else + { + LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage + << " concurrency. New value: " << setting + << LL_ENDL; + mHttpClasses[app_policy].mConnLimit = setting; + if (initial && setting != init_data[i].mDefault) + { + LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage + << " concurrency. New value: " << setting + << LL_ENDL; + } + } } } } -- cgit v1.2.3 From 7fa382937679a9937fd7b09e33b6c2f39ec680ff Mon Sep 17 00:00:00 2001 From: Monty Brandenberg Date: Thu, 21 Aug 2014 15:20:31 -0400 Subject: Remove viewer-side throttles on mesh requests. --- indra/newview/llappcorehttp.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'indra/newview/llappcorehttp.cpp') diff --git a/indra/newview/llappcorehttp.cpp b/indra/newview/llappcorehttp.cpp index 464e60948a..e9274c5c1e 100755 --- a/indra/newview/llappcorehttp.cpp +++ b/indra/newview/llappcorehttp.cpp @@ -65,12 +65,12 @@ static const struct "texture fetch" }, { // AP_MESH1 - 32, 1, 128, 100, false, + 32, 1, 128, 0, false, "MeshMaxConcurrentRequests", "mesh fetch" }, { // AP_MESH2 - 8, 1, 32, 100, true, + 8, 1, 32, 0, true, "Mesh2MaxConcurrentRequests", "mesh2 fetch" }, -- cgit v1.2.3 From 85cba58ad473ed28efda7f645af20d56229e8637 Mon Sep 17 00:00:00 2001 From: Monty Brandenberg Date: Fri, 22 Aug 2014 18:04:27 -0400 Subject: Add an HTTP policy class for inventory operations using four (4) connections. Convert background and foreground fetches, both items and folders/inventory and library, to use new HTTP. Non-fetch inventory operations continue to use LLHTTPClient (at least for now). Error handling and retry on fetches wasn't 100% previously and that's still the case. 
I'll rip through this again to clean that up. Cleaned up logging in much of the inventory code with consistent labels on logging events and correct macros (removed deprecation warnings). This started as an attempt to get libcurl to do pipelining on POSTs and PUTs. Discovered that this is going to be very difficult to support in general in libcurl. May look at that again in the future. --- indra/newview/llappcorehttp.cpp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'indra/newview/llappcorehttp.cpp') diff --git a/indra/newview/llappcorehttp.cpp b/indra/newview/llappcorehttp.cpp index 464e60948a..2b7d91b5ff 100755 --- a/indra/newview/llappcorehttp.cpp +++ b/indra/newview/llappcorehttp.cpp @@ -60,7 +60,7 @@ static const struct "other" }, { // AP_TEXTURE - 8, 1, 12, 0, true, + 8, 1, 12, 0, true, "TextureFetchConcurrency", "texture fetch" }, @@ -70,7 +70,7 @@ static const struct "mesh fetch" }, { // AP_MESH2 - 8, 1, 32, 100, true, + 8, 1, 32, 100, true, "Mesh2MaxConcurrentRequests", "mesh2 fetch" }, @@ -88,6 +88,11 @@ static const struct 32, 32, 32, 0, false, "", "long poll" + }, + { // AP_INVENTORY + 4, 1, 4, 0, false, + "", + "inventory" } }; -- cgit v1.2.3
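For reference, the per-class knobs these patches set through LLCore::HttpRequest::setPolicyOption correspond roughly to the plain-libcurl configuration below. This is a minimal sketch under that assumption, not the viewer's actual code path; in the non-pipelined case llcorehttp keeps connection management on its own side rather than handing it to libcurl.

    #include <curl/curl.h>

    // conn_limit mirrors the per-class concurrency setting from the table
    // above.  When a class is pipelined, the total connection cap is doubled
    // and the pipeline depth of 5 (PIPELINING_DEPTH) is applied; otherwise
    // the total and per-host caps are kept equal.
    static void configure_class(CURLM * multi, long conn_limit, bool pipelined)
    {
        const long depth(pipelined ? 5L : 0L);
        const long total(pipelined ? 2L * conn_limit : conn_limit);

        curl_multi_setopt(multi, CURLMOPT_PIPELINING, pipelined ? 1L : 0L);
        curl_multi_setopt(multi, CURLMOPT_MAX_PIPELINE_LENGTH, depth);
        curl_multi_setopt(multi, CURLMOPT_MAX_HOST_CONNECTIONS, conn_limit);
        curl_multi_setopt(multi, CURLMOPT_MAX_TOTAL_CONNECTIONS, total);
    }

    // Defaults from the table above, e.g.:
    //   configure_class(multi, 8L, true);    // AP_TEXTURE / AP_MESH2
    //   configure_class(multi, 4L, false);   // AP_INVENTORY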