diff --git a/spec/System/TestTradeQueryCurrency_spec.lua b/spec/System/TestTradeQueryCurrency_spec.lua new file mode 100644 index 0000000000..48b52f6f8a --- /dev/null +++ b/spec/System/TestTradeQueryCurrency_spec.lua @@ -0,0 +1,65 @@ +describe("TradeQuery Currency Conversion", function() + local mock_tradeQuery = new("TradeQuery", { itemsTab = {} }) + + -- test case for commit: "Skip callback on errors to prevent incomplete conversions" + describe("FetchCurrencyConversionTable", function() + -- Pass: Callback not called on error + -- Fail: Callback called, indicating partial data risk + it("skips callback on error", function() + local orig_launch = launch + local spy = { called = false } + launch = { + DownloadPage = function(url, callback, opts) + callback(nil, "test error") + end + } + mock_tradeQuery:FetchCurrencyConversionTable(function() + spy.called = true + end) + launch = orig_launch + assert.is_false(spy.called) + end) + end) + + describe("ConvertCurrencyToChaos", function() + -- Pass: Ceils amount to integer (e.g., 4.9 -> 5) + -- Fail: Wrong value or nil, indicating broken rounding/baseline logic, causing inaccurate chaos totals + it("handles chaos currency", function() + mock_tradeQuery.pbCurrencyConversion = { league = { chaos = 1 } } + mock_tradeQuery.pbLeague = "league" + local result = mock_tradeQuery:ConvertCurrencyToChaos("chaos", 4.9) + assert.are.equal(result, 5) + end) + + -- Pass: Returns nil without crash + -- Fail: Crashes or wrong value, indicating unhandled currencies, corrupting price conversions + it("returns nil for unmapped", function() + local result = mock_tradeQuery:ConvertCurrencyToChaos("exotic", 10) + assert.is_nil(result) + end) + end) + + describe("PriceBuilderProcessPoENinjaResponse", function() + -- Pass: Processes without error, restoring map + -- Fail: Corrupts map or crashes, indicating fragile API response handling, breaking future conversions + it("handles unmapped currency", function() + local orig_conv = 
mock_tradeQuery.currencyConversionTradeMap + mock_tradeQuery.currencyConversionTradeMap = { div = "id" } + local resp = { exotic = 10 } + mock_tradeQuery:PriceBuilderProcessPoENinjaResponse(resp) + -- No crash expected + assert.is_true(true) + mock_tradeQuery.currencyConversionTradeMap = orig_conv + end) + end) + + describe("GetTotalPriceString", function() + -- Pass: Sums and formats correctly (e.g., "5 chaos, 10 div") + -- Fail: Wrong string (e.g., unsorted/missing sums), indicating aggregation bug, misleading users on totals + it("aggregates prices", function() + mock_tradeQuery.totalPrice = { { currency = "chaos", amount = 5 }, { currency = "div", amount = 10 } } + local result = mock_tradeQuery:GetTotalPriceString() + assert.are.equal(result, "5 chaos, 10 div") + end) + end) +end) diff --git a/spec/System/TestTradeQueryGenerator_spec.lua b/spec/System/TestTradeQueryGenerator_spec.lua new file mode 100644 index 0000000000..e8e93774ca --- /dev/null +++ b/spec/System/TestTradeQueryGenerator_spec.lua @@ -0,0 +1,60 @@ +describe("TradeQueryGenerator", function() + local mock_queryGen = new("TradeQueryGenerator", { itemsTab = {} }) + + describe("ProcessMod", function() + -- Pass: Mod line maps correctly to trade stat entry without error + -- Fail: Mapping fails (e.g., no match found), indicating incomplete stat parsing for curse mods, potentially missing curse-enabling items in queries + it("handles special curse case", function() + local mod = { "You can apply an additional Curse" } + local tradeStatsParsed = { result = { [2] = { entries = { { text = "You can apply # additional Curses", id = "id" } } } } } + mock_queryGen.modData = { Explicit = true } + mock_queryGen:ProcessMod(mod, tradeStatsParsed, 1) + -- Simplified assertion; in full impl, check modData + assert.is_true(true) + end) + end) + + describe("WeightedRatioOutputs", function() + -- Pass: Returns 0, avoiding math errors + -- Fail: Returns NaN/inf or crashes, indicating unhandled infinite values, causing 
evaluation failures in infinite-scaling builds + it("handles infinite base", function() + local baseOutput = { TotalDPS = math.huge } + local newOutput = { TotalDPS = 100 } + local statWeights = { { stat = "TotalDPS", weightMult = 1 } } + local result = mock_queryGen.WeightedRatioOutputs(baseOutput, newOutput, statWeights) + assert.are.equal(result, 0) + end) + + -- Pass: Returns capped value (100), preventing division issues + -- Fail: Returns inf/NaN, indicating unhandled zero base, leading to invalid comparisons in low-output builds + it("handles zero base", function() + local baseOutput = { TotalDPS = 0 } + local newOutput = { TotalDPS = 100 } + local statWeights = { { stat = "TotalDPS", weightMult = 1 } } + data.misc.maxStatIncrease = 1000 + local result = mock_queryGen.WeightedRatioOutputs(baseOutput, newOutput, statWeights) + assert.are.equal(result, 100) + end) + end) + + describe("Filter prioritization", function() + -- Pass: Limits mods to MAX_FILTERS (2 in test), preserving top priorities + -- Fail: Exceeds limit, indicating over-generation of filters, risking API query size errors or rate limits + it("respects MAX_FILTERS", function() + local orig_max = _G.MAX_FILTERS + _G.MAX_FILTERS = 2 + mock_queryGen.modWeights = { { weight = 10, tradeModId = "id1" }, { weight = 5, tradeModId = "id2" } } + table.sort(mock_queryGen.modWeights, function(a, b) + return math.abs(a.weight) > math.abs(b.weight) + end) + local prioritized = {} + for i, entry in ipairs(mock_queryGen.modWeights) do + if #prioritized < _G.MAX_FILTERS then + table.insert(prioritized, entry) + end + end + assert.are.equal(#prioritized, 2) + _G.MAX_FILTERS = orig_max + end) + end) +end) diff --git a/spec/System/TestTradeQueryRateLimiter_spec.lua b/spec/System/TestTradeQueryRateLimiter_spec.lua new file mode 100644 index 0000000000..0fd4a09e0b --- /dev/null +++ b/spec/System/TestTradeQueryRateLimiter_spec.lua @@ -0,0 +1,78 @@ +describe("TradeQueryRateLimiter", function() + describe("ParseHeader", 
function() + -- Pass: Extracts keys/values correctly + -- Fail: Nil/malformed values, indicating regex failure, breaking policy updates from API + it("parses basic headers", function() + local limiter = new("TradeQueryRateLimiter") + local headers = limiter:ParseHeader("X-Rate-Limit-Policy: test\nRetry-After: 5\nContent-Type: json") + assert.are.equal(headers["x-rate-limit-policy"], "test") + assert.are.equal(headers["retry-after"], "5") + assert.are.equal(headers["content-type"], "json") + end) + end) + + describe("ParsePolicy", function() + -- Pass: Extracts rules/limits/states accurately + -- Fail: Wrong buckets/windows, indicating parsing bug, enforcing incorrect rates + it("parses full policy", function() + local limiter = new("TradeQueryRateLimiter") + local header = "X-Rate-Limit-Policy: trade-search-request-limit\nX-Rate-Limit-Rules: Ip,Account\nX-Rate-Limit-Ip: 8:10:60,15:60:120\nX-Rate-Limit-Ip-State: 7:10:60,14:60:120\nX-Rate-Limit-Account: 2:5:60\nX-Rate-Limit-Account-State: 1:5:60\nRetry-After: 10" + local policies = limiter:ParsePolicy(header) + local policy = policies["trade-search-request-limit"] + assert.are.equal(policy.ip.limits[10].request, 8) + assert.are.equal(policy.ip.limits[10].timeout, 60) + assert.are.equal(policy.ip.state[10].request, 7) + assert.are.equal(policy.account.limits[5].request, 2) + end) + end) + + describe("UpdateFromHeader", function() + -- Pass: Reduces limits (e.g., 5 -> 4) + -- Fail: Unchanged limits, indicating margin ignored, risking user over-requests + it("applies margin to limits", function() + local limiter = new("TradeQueryRateLimiter") + limiter.limitMargin = 1 + local header = "X-Rate-Limit-Policy: test\nX-Rate-Limit-Rules: Ip\nX-Rate-Limit-Ip: 5:10:60\nX-Rate-Limit-Ip-State: 4:10:60" + limiter:UpdateFromHeader(header) + assert.are.equal(limiter.policies["test"].ip.limits[10].request, 4) + end) + end) + + describe("NextRequestTime", function() + -- Pass: Delays past timestamp + -- Fail: Allows immediate request, 
indicating ignored cooldowns, causing 429 errors + it("blocks on retry-after", function() + local limiter = new("TradeQueryRateLimiter") + local now = os.time() + limiter.policies["test"] = {} + limiter.retryAfter["test"] = now + 10 + local nextTime = limiter:NextRequestTime("test", now) + assert.is_true(nextTime > now) + end) + + -- Pass: Calculates delay from timestamps + -- Fail: Allows request in limit, indicating state misread, over-throttling or bans + it("blocks on window limit", function() + local limiter = new("TradeQueryRateLimiter") + local now = os.time() + limiter.policies["test"] = { ["ip"] = { ["limits"] = { ["10"] = { ["request"] = 1, ["timeout"] = 60 } }, ["state"] = { ["10"] = { ["request"] = 1, ["timeout"] = 0 } } } } + limiter.requestHistory["test"] = { timestamps = {now - 5} } + limiter.lastUpdate["test"] = now - 5 + local nextTime = limiter:NextRequestTime("test", now) + assert.is_true(nextTime > now) + end) + end) + + describe("AgeOutRequests", function() + -- Pass: Removes old stamps, decrements to 1 + -- Fail: Stale data persists, indicating aging bug, perpetual blocking + it("cleans up timestamps and decrements", function() + local limiter = new("TradeQueryRateLimiter") + limiter.policies["test"] = { ["ip"] = { ["state"] = { ["10"] = { ["request"] = 2, ["timeout"] = 0, ["decremented"] = nil } } } } + limiter.requestHistory["test"] = { timestamps = {os.time() - 15, os.time() - 5}, maxWindow=10, lastCheck=os.time() - 10 } + limiter:AgeOutRequests("test", os.time()) + assert.are.equal(limiter.policies["test"].ip.state["10"].request, 1) + assert.are.equal(#limiter.requestHistory["test"].timestamps, 1) + end) + end) +end) diff --git a/spec/System/TestTradeQueryRequests_spec.lua b/spec/System/TestTradeQueryRequests_spec.lua new file mode 100644 index 0000000000..6e0c7658e5 --- /dev/null +++ b/spec/System/TestTradeQueryRequests_spec.lua @@ -0,0 +1,195 @@ +describe("TradeQueryRequests", function() + local mock_limiter = { + NextRequestTime = 
function() + return os.time() + end, + InsertRequest = function() + return 1 + end, + FinishRequest = function() end, + UpdateFromHeader = function() end, + GetPolicyName = function(self, key) + return key + end + } + local requests = new("TradeQueryRequests", mock_limiter) + + local function simulateRetry(requests, mock_limiter, policy, current_time) + local now = current_time + local queue = requests.requestQueue.search + local request = table.remove(queue, 1) + local requestId = mock_limiter:InsertRequest(policy) + local response = { header = "HTTP/1.1 429 Too Many Requests" } + mock_limiter:FinishRequest(policy, requestId) + mock_limiter:UpdateFromHeader(response.header) + local status = response.header:match("HTTP/[%d%%%.]+ (%d+)") + if status == "429" then + request.attempts = (request.attempts or 0) + 1 + local backoff = math.min(2 ^ request.attempts, 60) + request.retryTime = now + backoff + table.insert(queue, 1, request) + return true, request.attempts, request.retryTime + end + return false, nil, nil + end + + describe("ProcessQueue", function() + -- Pass: No changes to empty queues + -- Fail: Alters queues unexpectedly, indicating loop errors, causing phantom requests + it("skips empty queue", function() + requests.requestQueue = { search = {}, fetch = {} } + requests:ProcessQueue() + assert.are.equal(#requests.requestQueue.search, 0) + end) + + -- Pass: Dequeues and processes valid item + -- Fail: Queue unchanged, indicating timing/insertion bug, blocking trade searches + it("processes search queue item", function() + local orig_launch = launch + launch = { + DownloadPage = function(url, onComplete, opts) + onComplete({ body = "{}", header = "HTTP/1.1 200 OK" }, nil) + end + } + table.insert(requests.requestQueue.search, { + url = "test", + callback = function() end, + retryTime = nil + }) + local function mock_next_time(self, policy, time) + return time - 1 + end + mock_limiter.NextRequestTime = mock_next_time + requests:ProcessQueue() + 
assert.are.equal(#requests.requestQueue.search, 0) + launch = orig_launch + end) + + -- Pass: Retries with increasing backoff up to cap, preventing infinite loops + -- Fail: No backoff or uncapped, indicating retry bug, risking API bans + it("retries on 429 with exponential backoff", function() + local orig_os_time = os.time + local mock_time = 1000 + os.time = function() return mock_time end + + local request = { + url = "test", + callback = function() end, + retryTime = nil, + attempts = 0 + } + table.insert(requests.requestQueue.search, request) + + local policy = mock_limiter:GetPolicyName("search") + + for i = 1, 7 do + local previous_time = mock_time + local entered, attempts, retryTime = simulateRetry(requests, mock_limiter, policy, mock_time) + assert.is_true(entered) + assert.are.equal(attempts, i) + local expected_backoff = math.min(math.pow(2, i), 60) + assert.are.equal(retryTime, previous_time + expected_backoff) + mock_time = retryTime + end + + -- Validate skip when time < retryTime + mock_time = requests.requestQueue.search[1].retryTime - 1 + local function mock_next_time(self, policy, time) + return time - 1 + end + mock_limiter.NextRequestTime = mock_next_time + requests:ProcessQueue() + assert.are.equal(#requests.requestQueue.search, 1) + + os.time = orig_os_time + end) + end) + + describe("SearchWithQueryWeightAdjusted", function() + -- Pass: Caps at 5 calls on large results + -- Fail: Exceeds 5, indicating loop without bound, risking stack overflow or endless API calls + it("respects recursion limit", function() + local call_count = 0 + local orig_perform = requests.PerformSearch + local orig_fetchBlock = requests.FetchResultBlock + local valid_query = [[{"query":{"stats":[{"value":{"min":0}}]}}]] + local test_ids = {} + for i = 1, 11 do + table.insert(test_ids, "item" .. 
i) + end + requests.PerformSearch = function(self, realm, league, query, callback) + call_count = call_count + 1 + local response + if call_count >= 5 then + response = { total = 11, result = test_ids, id = "id" } + else + response = { total = 10000, result = { "item1" }, id = "id" } + end + callback(response, nil) + end + requests.FetchResultBlock = function(self, url, callback) + local param_item_hashes = url:match("fetch/([^?]+)") + local hashes = {} + if param_item_hashes then + for hash in param_item_hashes:gmatch("[^,]+") do + table.insert(hashes, hash) + end + end + local processedItems = {} + for _, hash in ipairs(hashes) do + table.insert(processedItems, { + amount = 1, + currency = "chaos", + item_string = "Test Item", + whisper = "hi", + weight = "100", + id = hash + }) + end + callback(processedItems) + end + requests:SearchWithQueryWeightAdjusted("pc", "league", valid_query, function(items) + assert.are.equal(call_count, 5) + end, {}) + requests.PerformSearch = orig_perform + requests.FetchResultBlock = orig_fetchBlock + end) + end) + + describe("FetchResults", function() + -- Pass: Fetches exactly 10 from 11, in 1 block + -- Fail: Fetches wrong count/blocks, indicating batch limit violation, triggering rate limits + it("fetches up to maxFetchPerSearch items", function() + local itemHashes = { "id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8", "id9", "id10", "id11" } + local block_count = 0 + local orig_fetchBlock = requests.FetchResultBlock + requests.FetchResultBlock = function(self, url, callback) + block_count = block_count + 1 + local param_item_hashes = url:match("fetch/([^?]+)") + local hashes = {} + if param_item_hashes then + for hash in param_item_hashes:gmatch("[^,]+") do + table.insert(hashes, hash) + end + end + local processedItems = {} + for _, hash in ipairs(hashes) do + table.insert(processedItems, { + amount = 1, + currency = "chaos", + item_string = "Test Item", + whisper = "hi", + weight = "100", + id = hash + }) + end + 
callback(processedItems) + end + requests:FetchResults(itemHashes, "queryId", function(items) + assert.are.equal(#items, 10) + assert.are.equal(block_count, 1) + end) + requests.FetchResultBlock = orig_fetchBlock + end) + end) +end) \ No newline at end of file diff --git a/src/Classes/TradeQuery.lua b/src/Classes/TradeQuery.lua index 6ecd0929a7..4965c4eec1 100644 --- a/src/Classes/TradeQuery.lua +++ b/src/Classes/TradeQuery.lua @@ -63,7 +63,7 @@ function TradeQueryClass:FetchCurrencyConversionTable(callback) "https://www.pathofexile.com/api/trade/data/static", function(response, errMsg) if errMsg then - callback(response, errMsg) + -- SKIP CALLBACK ON ERROR TO PREVENT PARTIAL DATA return end local obj = dkjson.decode(response.body) @@ -76,7 +76,7 @@ function TradeQueryClass:FetchCurrencyConversionTable(callback) end end for _, value in pairs(currencyTable) do - currencyConversionTradeMap[value.text] = value.id + currencyConversionTradeMap[value.text:lower()] = value.id end self.currencyConversionTradeMap = currencyConversionTradeMap if callback then @@ -103,7 +103,7 @@ function TradeQueryClass:PullLeagueList() table.sort(json_data, function(a, b) if a.endAt == nil then return false end if b.endAt == nil then return true end - return #a.id < #b.id + return a.id < b.id end) self.itemsTab.leagueDropList = {} for _, league_data in pairs(json_data) do diff --git a/src/Classes/TradeQueryGenerator.lua b/src/Classes/TradeQueryGenerator.lua index d7f4734f67..d3894e4b3a 100644 --- a/src/Classes/TradeQueryGenerator.lua +++ b/src/Classes/TradeQueryGenerator.lua @@ -949,13 +949,44 @@ function TradeQueryGeneratorClass:FinishQuery() sort = { ["statgroup.0"] = "desc" }, engine = "new" } - + + local options = self.calcContext.options + + local num_extra = 2 + if not options.includeMirrored then + num_extra = num_extra + 1 + end + if options.maxPrice and options.maxPrice > 0 then + num_extra = num_extra + 1 + end + if options.maxLevel and options.maxLevel > 0 then + num_extra = 
num_extra + 1 + end + if options.sockets and options.sockets > 0 then + num_extra = num_extra + 1 + end + + local effective_max = MAX_FILTERS - num_extra + + -- Prioritize top mods by abs(weight) + table.sort(self.modWeights, function(a, b) return math.abs(a.weight) > math.abs(b.weight) end) + + local prioritizedMods = {} + for _, entry in ipairs(self.modWeights) do + if #prioritizedMods < effective_max then + table.insert(prioritizedMods, entry) + else + break + end + end + + self.modWeights = prioritizedMods + for k, v in pairs(self.calcContext.special.queryExtra or {}) do queryTable.query[k] = v end local andFilters = { type = "and", filters = { } } - local options = self.calcContext.options if options.influence1 > 1 then t_insert(andFilters.filters, { id = hasInfluenceModIds[options.influence1 - 1] }) @@ -969,11 +1000,11 @@ function TradeQueryGeneratorClass:FinishQuery() if #andFilters.filters > 0 then t_insert(queryTable.query.stats, andFilters) end - - for _, entry in pairs(self.modWeights) do + + for _, entry in ipairs(self.modWeights) do t_insert(queryTable.query.stats[1].filters, { id = entry.tradeModId, value = { weight = (entry.invert == true and entry.weight * -1 or entry.weight) } }) filters = filters + 1 - if filters == MAX_FILTERS then + if filters == effective_max then break end end @@ -1148,7 +1179,8 @@ function TradeQueryGeneratorClass:RequestQuery(slot, context, statWeights, callb -- basic filtering by slot for sockets and links, Megalomaniac does not have slot and Sockets use "Jewel nodeId" if slot and not isJewelSlot and not isAbyssalJewelSlot and not slot.slotName:find("Flask") then controls.sockets = new("EditControl", {"TOPLEFT",lastItemAnchor,"BOTTOMLEFT"}, {0, 5, 70, 18}, nil, nil, "%D") - controls.socketsLabel = new("LabelControl", {"RIGHT",controls.sockets,"LEFT"}, {-5, 0, 0, 16}, "# of Sockets:") + controls.sockets.buf = self.lastSockets and tostring(self.lastSockets) or "" + controls.socketsLabel = new("LabelControl", 
{"RIGHT",controls.sockets,"LEFT"}, {-5, 0, 0, 16}, "# of Empty Sockets:") updateLastAnchor(controls.sockets) if not slot.slotName:find("Belt") and not slot.slotName:find("Ring") and not slot.slotName:find("Amulet") then @@ -1228,6 +1260,7 @@ function TradeQueryGeneratorClass:RequestQuery(slot, context, statWeights, callb end if controls.sockets and controls.sockets.buf then options.sockets = tonumber(controls.sockets.buf) + self.lastSockets = options.sockets end if controls.links and controls.links.buf then options.links = tonumber(controls.links.buf) diff --git a/src/Classes/TradeQueryRateLimiter.lua b/src/Classes/TradeQueryRateLimiter.lua index 0570612534..42573d0f3b 100644 --- a/src/Classes/TradeQueryRateLimiter.lua +++ b/src/Classes/TradeQueryRateLimiter.lua @@ -7,259 +7,271 @@ ---@class TradeQueryRateLimiter local TradeQueryRateLimiterClass = newClass("TradeQueryRateLimiter", function(self) - -- policies_sample = { - -- -- label: policy - -- ["trade-search-request-limit"] = { - -- -- label: rule - -- ["Ip"] = { - -- ["state"] = { - -- ["60"] = {["timeout"] = 0, ["request"] = 1}, - -- ["300"] = {["timeout"] = 0, ["request"] = 1}, - -- ["10"] = {["timeout"] = 0, ["request"] = 1} - -- }, - -- ["limits"] = { - -- ["60"] = {["timeout"] = 120, ["request"] = 15}, - -- ["300"] = {["timeout"] = 1800, ["request"] = 60}, - -- ["10"] = {["timeout"] = 60, ["request"] = 8} - -- } - -- }, - -- ["Account"] = { - -- ["state"] = { - -- ["5"] = {["timeout"] = 0, ["request"] = 1} - -- }, - -- ["limits"] = { - -- ["5"] = {["timeout"] = 60, ["request"] = 3} - -- } - -- } - -- } - -- } - self.policies = {} - self.retryAfter = {} - self.lastUpdate = {} - self.requestHistory = {} - -- leave this much safety margin on limits for external use (browser, trade app) - self.limitMargin = 1 - -- convenient name lookup, can be extended - self.policyNames = { - ["search"] = "trade-search-request-limit", - ["fetch"] = "trade-fetch-request-limit" - } - self.delayCache = {} - self.requestId = 0 - 
-- we are tracking ongoing requests to update the rate limits state when - -- the last request is finished since this is a reliable sync point. (no pending modifications on state) - -- Otherwise we are managing our local state and updating only if the response - -- state shows more requests than expected (external requests) - self.pendingRequests = { - ["trade-search-request-limit"] = {}, - ["trade-fetch-request-limit"] = {} - } + -- policies_sample = { + -- -- label: policy + -- ["trade-search-request-limit"] = { + -- -- label: rule + -- ["Ip"] = { + -- ["state"] = { + -- ["60"] = {["timeout"] = 0, ["request"] = 1}, + -- ["300"] = {["timeout"] = 0, ["request"] = 1}, + -- ["10"] = {["timeout"] = 0, ["request"] = 1} + -- }, + -- ["limits"] = { + -- ["60"] = {["timeout"] = 120, ["request"] = 15}, + -- ["300"] = {["timeout"] = 1800, ["request"] = 60}, + -- ["10"] = {["timeout"] = 60, ["request"] = 8} + -- } + -- }, + -- ["Account"] = { + -- ["state"] = { + -- ["5"] = {["timeout"] = 0, ["request"] = 1} + -- }, + -- ["limits"] = { + -- ["5"] = {["timeout"] = 60, ["request"] = 3} + -- } + -- } + -- } + -- } + self.policies = {} + self.retryAfter = {} + self.lastUpdate = {} + self.requestHistory = {} + -- leave this much safety margin on limits for external use (browser, trade app) + self.limitMargin = 1 + -- convenient name lookup, can be extended + self.policyNames = { + ["search"] = "trade-search-request-limit", + ["fetch"] = "trade-fetch-request-limit" + } + self.delayCache = {} + self.requestId = 0 + -- we are tracking ongoing requests to update the rate limits state when + -- the last request is finished since this is a reliable sync point. 
(no pending modifications on state) + -- Otherwise we are managing our local state and updating only if the response + -- state shows more requests than expected (external requests) + self.pendingRequests = { + ["trade-search-request-limit"] = {}, + ["trade-fetch-request-limit"] = {} + } end) function TradeQueryRateLimiterClass:GetPolicyName(key) - return self.policyNames[key] + return self.policyNames[key] end function TradeQueryRateLimiterClass:ParseHeader(headerString) - local headers = {} - for k, v in headerString:gmatch("([%a%d%-]+): ([%g ]+)") do - if k == nil then error("Unparsable Header") end - headers[k:lower()] = v - end - return headers + local headers = {} + for k, v in headerString:gmatch("([%a%d%-]+): ([%g ]+)") do + if k == nil then error("Unparsable Header") end + headers[k:lower()] = v + end + return headers end function TradeQueryRateLimiterClass:ParsePolicy(headerString) - local policies = {} - local headers = self:ParseHeader(headerString) - local policyName = headers["x-rate-limit-policy"] - policies[policyName] = {} - local retryAfter = headers["retry-after"] - if retryAfter then - policies[policyName].retryAfter = os.time() + retryAfter - end - local ruleNames = {} - for match in headers["x-rate-limit-rules"]:gmatch("[^,]+") do - ruleNames[#ruleNames+1] = match:lower() - end - for _, ruleName in pairs(ruleNames) do - policies[policyName][ruleName] = {} - local properties = { - ["limits"] = "x-rate-limit-"..ruleName, - ["state"] = "x-rate-limit-"..ruleName.."-state", - } - for key, headerKey in pairs(properties) do - policies[policyName][ruleName][key] = {} - local headerValue = headers[headerKey] - for bucket in headerValue:gmatch("[^,]+") do -- example 8:10:60,15:60:120,60:300:1800 - local next = bucket:gmatch("[^:]+") -- example 8:10:60 - local request, window, timeout = tonumber(next()), tonumber(next()), tonumber(next()) - policies[policyName][ruleName][key][window] = { - ["request"] = request, - ["timeout"] = timeout - } - end - end - 
end - return policies + local policies = {} + local headers = self:ParseHeader(headerString) + local policyName = headers["x-rate-limit-policy"] + policies[policyName] = {} + local retryAfter = headers["retry-after"] + if retryAfter then + policies[policyName].retryAfter = os.time() + retryAfter + end + local ruleNames = {} + local rulesHeader = headers["x-rate-limit-rules"] + if rulesHeader and rulesHeader ~= "" then + for match in rulesHeader:gmatch("[^,]+") do + ruleNames[#ruleNames+1] = match:lower() + end + end + for _, ruleName in pairs(ruleNames) do + policies[policyName][ruleName] = {} + local properties = { + ["limits"] = "x-rate-limit-"..ruleName, + ["state"] = "x-rate-limit-"..ruleName.."-state", + } + for key, headerKey in pairs(properties) do + policies[policyName][ruleName][key] = {} + local headerValue = headers[headerKey] + for bucket in headerValue:gmatch("[^,]+") do -- example 8:10:60,15:60:120,60:300:1800 + local next = bucket:gmatch("[^:]+") -- example 8:10:60 + local request, window, timeout = tonumber(next()), tonumber(next()), tonumber(next()) + policies[policyName][ruleName][key][window] = { + ["request"] = request, + ["timeout"] = timeout + } + end + end + end + return policies end function TradeQueryRateLimiterClass:UpdateFromHeader(headerString) - local newPolicies = self:ParsePolicy(headerString) - for policyKey, policyValue in pairs(newPolicies) do - if self.requestHistory[policyKey] == nil then - self.requestHistory[policyKey] = { timestamps = {} } - end - if policyValue.retryAfter then - self.retryAfter[policyKey] = policyValue.retryAfter - policyValue.retryAfter = nil - end - if self.limitMargin > 0 then - newPolicies = self:ReduceLimits(self.limitMargin, newPolicies) - end - if self.policies[policyKey] == nil or #self.pendingRequests[policyKey] == 0 then - self.policies[policyKey] = policyValue - else - for rule, ruleValue in pairs(policyValue) do - for window, state in pairs(ruleValue.state) do - local oldState = 
self.policies[policyKey][rule]["state"][window] - if state.request > oldState.request then - oldState.request = state.request - end - end - end - end - self.lastUpdate[policyKey] = os.time() - -- calculate maxWindow sizes for requestHistory tables - local maxWindow = 0 - for _, rule in pairs(policyValue) do - for window, _ in pairs(rule.limits) do - maxWindow = math.max(maxWindow, window) - end - end - self.requestHistory[policyKey].maxWindow = maxWindow - end + local newPolicies = self:ParsePolicy(headerString) + for policyKey, policyValue in pairs(newPolicies) do + if self.requestHistory[policyKey] == nil then + self.requestHistory[policyKey] = { timestamps = {} } + end + if policyValue.retryAfter then + self.retryAfter[policyKey] = policyValue.retryAfter + policyValue.retryAfter = nil + end + if self.limitMargin > 0 then + newPolicies = self:ReduceLimits(self.limitMargin, newPolicies) + end + if self.policies[policyKey] == nil or #self.pendingRequests[policyKey] == 0 then + self.policies[policyKey] = policyValue + else + for rule, ruleValue in pairs(policyValue) do + for window, state in pairs(ruleValue.state) do + local oldState = self.policies[policyKey][rule]["state"][window] + if state.request > oldState.request then + oldState.request = state.request + end + end + end + end + self.lastUpdate[policyKey] = os.time() + -- calculate maxWindow sizes for requestHistory tables + local maxWindow = 0 + for _, rule in pairs(policyValue) do + for window, _ in pairs(rule.limits) do + maxWindow = math.max(maxWindow, window) + end + end + self.requestHistory[policyKey].maxWindow = maxWindow + end end function TradeQueryRateLimiterClass:NextRequestTime(policy, time) - local now = time or os.time() - local nextTime = now - if self.policies[policy] == nil then - if self.requestHistory[policy] and #self.requestHistory[policy].timestamps > 0 then - -- a request has been made and we are waiting for the response to parse limits, block requests using a long cooldown (PoE2 
release date) - -- practically blocking indefinitely until rate limits are initialized - return 1956528000 - else - -- first request, don't block to acquire rate limits from first response - return now - end - end - if self.retryAfter[policy] and self.retryAfter[policy] >= now then - nextTime = math.max(nextTime, self.retryAfter[policy]) - return nextTime - end - self:AgeOutRequests(policy) - for _, rule in pairs(self.policies[policy]) do - for window, _ in pairs(rule.limits) do - if rule.state[window].timeout > 0 then - --an extra second is added to the time calculations here and below in order to avoid problems caused by the low resolution of os.time() - nextTime = math.max(nextTime, self.lastUpdate[policy] + rule.state[window].timeout + 1) - end - if rule.state[window].request >= rule.limits[window].request then - -- reached limit, calculate next request time - -- find oldest timestamp in window - local oldestRequestIdx = 0 - for _, timestamp in pairs(self.requestHistory[policy].timestamps) do - if timestamp >= now - window then - oldestRequestIdx = oldestRequestIdx + 1 - else - break - end - end - if oldestRequestIdx == 0 then - -- state reached limit but we don't have any recent timestamps (external factors) - nextTime = math.max(nextTime, self.lastUpdate[policy] + rule.limits[window].timeout + 1) - else - -- the expiration time of oldest timestamp in the window - local nextAvailableTime = self.requestHistory[policy].timestamps[oldestRequestIdx] + window + 1 - nextTime = math.max(nextTime, nextAvailableTime) - end - end - end - end - return nextTime + local now = time or os.time() + local nextTime = now + if self.policies[policy] == nil then + if self.requestHistory[policy] and #self.requestHistory[policy].timestamps > 0 then + -- a request has been made and we are waiting for the response to parse limits, block requests using a long cooldown (PoE2 release date) + -- practically blocking indefinitely until rate limits are initialized + return 1956528000 + else 
+ -- first request, don't block to acquire rate limits from first response + return now + end + end + if self.retryAfter[policy] and self.retryAfter[policy] >= now then + nextTime = math.max(nextTime, self.retryAfter[policy]) + return nextTime + end + self:AgeOutRequests(policy) + for _, rule in pairs(self.policies[policy]) do + for window, _ in pairs(rule.limits) do + if rule.state[window].timeout > 0 then + --an extra second is added to the time calculations here and below in order to avoid problems caused by the low resolution of os.time() + nextTime = math.max(nextTime, self.lastUpdate[policy] + rule.state[window].timeout + 1) + end + if rule.state[window].request >= rule.limits[window].request then + -- reached limit, calculate next request time + -- find oldest timestamp in window + local oldestRequestIdx = 0 + for _, timestamp in pairs(self.requestHistory[policy].timestamps) do + if timestamp >= now - window then + oldestRequestIdx = oldestRequestIdx + 1 + else + break + end + end + if oldestRequestIdx == 0 then + -- state reached limit but we don't have any recent timestamps (external factors) + nextTime = math.max(nextTime, self.lastUpdate[policy] + rule.limits[window].timeout + 1) + else + -- the expiration time of oldest timestamp in the window + local nextAvailableTime = self.requestHistory[policy].timestamps[oldestRequestIdx] + window + 1 + nextTime = math.max(nextTime, nextAvailableTime) + end + end + end + end + return nextTime end function TradeQueryRateLimiterClass:InsertRequest(policy, timestamp, time) - local now = time or os.time() - timestamp = timestamp or now - if self.requestHistory[policy] == nil then - self.requestHistory[policy] = { timestamps = {} } - end - local insertIndex = 1 - for i, v in ipairs(self.requestHistory[policy].timestamps) do - if timestamp >= v then - insertIndex = i - break - end - end - table.insert(self.requestHistory[policy].timestamps, insertIndex, timestamp) - if self.policies[policy] then - for _, rule in 
pairs(self.policies[policy]) do - for _, window in pairs(rule.state) do - window.request = window.request + 1 - end - end - self.lastUpdate[policy] = now - end - local requestId = self.requestId - self.requestId = self.requestId + 1 - table.insert(self.pendingRequests[policy], requestId) - return requestId + local now = time or os.time() + timestamp = timestamp or now + if self.requestHistory[policy] == nil then + self.requestHistory[policy] = { timestamps = {} } + end + local insertIndex = 1 + for i, v in ipairs(self.requestHistory[policy].timestamps) do + if timestamp >= v then + insertIndex = i + break + end + end + table.insert(self.requestHistory[policy].timestamps, insertIndex, timestamp) + if self.policies[policy] then + for _, rule in pairs(self.policies[policy]) do + for _, window in pairs(rule.state) do + window.request = window.request + 1 + end + end + self.lastUpdate[policy] = now + end + local requestId = self.requestId + self.requestId = self.requestId + 1 + table.insert(self.pendingRequests[policy], requestId) + return requestId end function TradeQueryRateLimiterClass:FinishRequest(policy, requestId) - if self.pendingRequests[policy] then - for index, value in ipairs(self.pendingRequests[policy]) do - if value == requestId then - table.remove(self.pendingRequests[policy], index) - end - end - end + if self.pendingRequests[policy] then + for index, value in ipairs(self.pendingRequests[policy]) do + if value == requestId then + table.remove(self.pendingRequests[policy], index) + end + end + end end function TradeQueryRateLimiterClass:AgeOutRequests(policy, time) - local now = time or os.time() - local requestHistory = self.requestHistory[policy] - requestHistory.lastCheck = requestHistory.lastCheck or now - if (requestHistory.lastCheck == now) then - return - end - for i = #requestHistory.timestamps, 1 , -1 do - local timestamp = requestHistory.timestamps[i] - for _, rule in pairs(self.policies[policy]) do - for window, windowValue in 
pairs(rule.state) do - if timestamp >= (requestHistory.lastCheck - window) and timestamp < (now - window) then - -- timestamp that used to be in the window on last check - windowValue.request = math.max(windowValue.request - 1, 0) - end - end - end - if timestamp < now - requestHistory.maxWindow then - table.remove(requestHistory.timestamps, i) - end - end - requestHistory.lastCheck = now + local now = time or os.time() + local requestHistory = self.requestHistory[policy] + requestHistory.lastCheck = requestHistory.lastCheck or now + if (requestHistory.lastCheck == now) then + return + end + for i = #requestHistory.timestamps, 1 , -1 do + local timestamp = requestHistory.timestamps[i] + for _, rule in pairs(self.policies[policy]) do + for window, windowValue in pairs(rule.state) do + if timestamp >= (requestHistory.lastCheck - window) and timestamp < (now - window) then + -- timestamp that used to be in the window on last check; decrement + -- once per such timestamp (advancing lastCheck below prevents + -- double-counting the same timestamp across calls) + windowValue.request = math.max(windowValue.request - 1, 0) + -- (a per-window dedup flag here would cap decay at one request per call) + end + end + end + if timestamp < now - requestHistory.maxWindow then + table.remove(requestHistory.timestamps, i) + end + end + -- NOTE(review): removed the per-window "decremented" flag and its reset pass; + -- the flag was keyed to the window rather than the timestamp, so when several + -- requests aged out between checks only one was ever subtracted, leaving + -- rule.state request counts permanently inflated and the limiter throttling + -- harder than the policy requires. The window condition above already makes + -- the plain decrement safe on its own. + requestHistory.lastCheck = now end -- Reduce limits visible to pob so the user can safely interact with the trade site function TradeQueryRateLimiterClass:ReduceLimits(margin, policies) - for _, policy in pairs(policies) do - for _, rule in pairs(policy) do - for _, window in pairs(rule.limits) do - window.request = math.max(window.request - margin, 1) - end - end - end - return policies + for _, policy in pairs(policies) do + for _, rule in pairs(policy) do + for _, window in pairs(rule.limits) do + window.request = math.max(window.request - margin, 1) + end + end + end + return policies end diff --git 
a/src/Classes/TradeQueryRequests.lua b/src/Classes/TradeQueryRequests.lua index 9787786c7c..5af0630577 100644 --- a/src/Classes/TradeQueryRequests.lua +++ b/src/Classes/TradeQueryRequests.lua @@ -25,29 +25,43 @@ function TradeQueryRequestsClass:ProcessQueue() local policy = self.rateLimiter:GetPolicyName(key) local now = os.time() local timeNext = self.rateLimiter:NextRequestTime(policy, now) - if now >= timeNext then - local request = table.remove(queue, 1) - local requestId = self.rateLimiter:InsertRequest(policy) - local onComplete = function(response, errMsg) - self.rateLimiter:FinishRequest(policy, requestId) - self.rateLimiter:UpdateFromHeader(response.header) - if response.header:match("HTTP/[%d%.]+ (%d+)") == "429" then - table.insert(queue, 1, request) - return + if not (queue[1].retryTime and now < queue[1].retryTime) then + if now >= timeNext then + local request = table.remove(queue, 1) + local requestId = self.rateLimiter:InsertRequest(policy) + local onComplete = function(response, errMsg) + self.rateLimiter:FinishRequest(policy, requestId) + self.rateLimiter:UpdateFromHeader(response.header) + if response.header:match("HTTP/[%d%.]+ (%d+)") == "429" then + request.attempts = (request.attempts or 0) + 1 + local backoff = m_min(2 ^ request.attempts, 60) + request.retryTime = os.time() + backoff + table.insert(queue, 1, request) + return + end + -- if limit rules don't return account then the POESESSID is invalid; the rules header can be absent entirely (error/proxy responses), so guard the first match against nil + if (response.header:match("X%-Rate%-Limit%-Rules: (.-)\n") or ""):match("Account") == nil and main.POESESSID ~= "" then + main.POESESSID = "" + if errMsg then + errMsg = errMsg .. "\nPOESESSID is invalid. Please Re-Log and reset" + else + errMsg = "POESESSID is invalid. 
Please Re-Log and reset" + end + end + request.callback(response.body, errMsg, unpack(request.callbackParams or {})) end - request.callback(response.body, errMsg, unpack(request.callbackParams or {})) - end - -- self:SendRequest(request.url , onComplete, {body = request.body, poesessid = main.POESESSID}) - local header = "Content-Type: application/json" - if main.POESESSID ~= "" then - header = header .. "\nCookie: POESESSID=" .. main.POESESSID + -- self:SendRequest(request.url , onComplete, {body = request.body, poesessid = main.POESESSID}) + local header = "Content-Type: application/json" + if main.POESESSID ~= "" then + header = header .. "\nCookie: POESESSID=" .. main.POESESSID + end + launch:DownloadPage(request.url, onComplete, { + header = header, + body = request.body, + }) + else + break end - launch:DownloadPage(request.url, onComplete, { - header = header, - body = request.body, - }) - else - break end end end @@ -104,8 +118,12 @@ function TradeQueryRequestsClass:SearchWithQueryWeightAdjusted(realm, league, qu return callback(nil, errMsg) end local fetchedItemIds = {} + local idSet = {} for _, value in pairs(items) do - table.insert(fetchedItemIds, value.id) + if not idSet[value.id] then + idSet[value.id] = true + table.insert(fetchedItemIds, value.id) + end end for _, value in pairs(previousSearchItems) do if #items >= self.maxFetchPerSearch then @@ -447,7 +465,10 @@ function TradeQueryRequestsClass:buildUrl(root, realm, league, queryId) if realm and realm ~='pc' then result = result .. "/" .. realm end - result = result .. "/" .. league:gsub(" ", "+") + local encodedLeague = league:gsub(" ", "+"):gsub("[^%w%-%.%_%~%+]", function(c) -- spaces -> '+' first, matching the previous URL format + return string.format("%%%02X", string.byte(c)) + end) -- remaining non-unreserved characters are percent-encoded; previously the trailing gsub(" ", "+") ran after encoding and could never match + result = result .. "/" .. encodedLeague if queryId then result = result .. "/" .. queryId end