// This source code is subject to the terms of the Mozilla Public License 2.0 at https://mozilla.org/MPL/2.0/
// © Hiubris_Indicators
//@version=5
strategy(title = "The Hiubris", overlay = true, default_qty_value = 100, initial_capital=100000, default_qty_type=strategy.percent_of_equity, pyramiding=0, process_orders_on_close=true, max_bars_back=3000)

// ============================================================================
// Entry-kernel selection and take-profit toggles
// ============================================================================
entry_kernelL = input.string('Nadaraya', title='Kernel Used LONGS' , options=['Nadaraya', 'GBM', 'KRVM', 'AVG', 'TikTok'])
entry_kernelS = input.string('Nadaraya', title='Kernel Used SHORTS', options=['Nadaraya', 'GBM', 'KRVM', 'AVG', 'TikTok'])
use1 = input(true, title="Use TP: Premium/Discount")
use2 = input(false, title="Use TP: Kernel Reverse")
use3 = input(true, title="Use TP: HTF Liquitidy")
use4 = input(false, title="Use TP: Predictive Range R1")
use5 = input(true, title="Use TP: Predictive Range R2")
use6 = input(true, title="Use TP: Exit after X Candles")
use7 = input(true, title="Use TP: PeakDetection")
bars = input(10, title='Max bars between the 2 entry conditions')
exitBars = input(15, title='Exit after X Candles')

// ============================================================================
// GBM (Geometric Brownian Motion) kernel
// ============================================================================
gbm_grp = 'GBM'
Len2 = input(title='Length', defval=20, group=gbm_grp)
K2 = input(title='Volatility Multiple', defval=2.0, group=gbm_grp)
lookback = input.int(title='Lookback Window', defval=1, minval=1, group=gbm_grp)

// Source selection
sourceType = input.string(defval='close', title='Source', options=['open', 'high', 'low', 'close', 'hl2', 'hlc3', 'ohlc4', 'hlcc4'], group=gbm_grp)
source = close
source := sourceType == 'open' ? open : sourceType == 'high' ? high : sourceType == 'low' ? low : sourceType == 'hl2' ? (high + low) / 2 : sourceType == 'hlc3' ? (high + low + close) / 3 : sourceType == 'ohlc4' ? (open + high + low + close) / 4 : sourceType == 'hlcc4' ? (high + low + close + close) / 4 : source

h3 = input(8.0, 'Lookback Window Kernel Regression', group=gbm_grp)
r2 = input(8.0, 'Relative Weighting for Kernel Regression', group=gbm_grp)
x_02 = input(25, 'Start Regression at Bar for Kernel Regression', group=gbm_grp)

// Kernel Regression function from first script.
// Rational-quadratic weighted average of `_src` over recent bars.
// NOTE(review): array.size(array.from(_src)) is always 1 for a series argument,
// so the loop effectively spans 1 + x_02 bars — preserved as in the original.
kernel_regression2(_src, _h) =>
    _size = array.size(array.from(_src))
    _currentWeight = 0.0
    _cumulativeWeight = 0.0
    for i = 0 to _size + x_02 by 1
        y = _src[i]
        w = math.pow(1 + math.pow(i, 2) / (math.pow(_h, 2) * 2 * r2), -r2)
        _currentWeight += y * w
        _cumulativeWeight += w
        _cumulativeWeight
    _currentWeight / _cumulativeWeight

// Apply kernel regression, then project a GBM-style volatility band off it
yhat12 = kernel_regression2(source, h3)
xCr2 = math.log(yhat12 / yhat12[lookback])
gbm_0 = ta.sma(yhat12[lookback], Len2) * math.exp(K2 * ta.stdev(xCr2, Len2 - 1) * math.sqrt(Len2))

// Plot GBM 0
plot(gbm_0, title='GBM 0', color=color.new(#f44336, 0), linewidth=1)

// ============================================================================
// Nadaraya-Watson rational quadratic kernel estimate
// ============================================================================
src1 = input.source(close, 'Nadaraya Source', group='Nadaraya')
h1 = input.float(50, 'Nadaraya Lookback Window', minval=3., tooltip='The number of bars used for the estimation. This is a sliding value that represents the most recent historical bars. Recommended range: 3-50', group='Nadaraya')
r1 = input.float(8, 'Nadaraya Relative Weighting', step=0.25, tooltip='Relative weighting of time frames. As this value approaches zero, the longer time frames will exert more influence on the estimation. As this value approaches infinity, the behavior of the Rational Quadratic Kernel will become identical to the Gaussian kernel. Recommended range: 0.25-25', group='Nadaraya')
x_0 = input.int(25, "Start Regression at Bar", tooltip='Bar index on which to start regression. The first bars of a chart are often highly volatile, and omission of these initial bars often leads to a better overall fit. Recommended range: 5-25', group='Nadaraya')
smoothColors = input.bool(false, "Smooth Colors", tooltip="Uses a crossover based mechanism to determine colors. This often results in less color transitions overall.", inline='1', group='Colors')
lag1 = input.int(2, "Lag", tooltip="Lag for crossover detection. Lower values result in earlier crossovers. Recommended range: 1-2", inline='1', group='Colors')

size1 = array.size(array.from(src1)) // size of the data series (always 1 for a series — see note above)

// Rational-quadratic Nadaraya-Watson estimator (weights use global r1)
kernel_regression(_src, _size, _h, x_0) =>
    float _currentWeight = 0.
    float _cumulativeWeight = 0.
    for i = 0 to _size + x_0
        y = _src[i]
        w = math.pow(1 + (math.pow(i, 2) / ((math.pow(_h, 2) * 2 * r1))), -r1)
        _currentWeight += y*w
        _cumulativeWeight += w
    _currentWeight / _cumulativeWeight

// Estimations (nad2 uses a shorter window for crossover detection)
nad1 = kernel_regression(src1, size1, h1, x_0)
nad2 = kernel_regression(src1, size1, h1-lag1, x_0)

// Rates of Change
bool wasBearish = nad1[2] > nad1[1]
bool wasBullish = nad1[2] < nad1[1]
bool isBearish = nad1[1] > nad1
bool isBullish = nad1[1] < nad1
bool isBearishChange = isBearish and wasBullish
bool isBullishChange = isBullish and wasBearish

// Crossovers
bool isBullishCross = ta.crossover(nad2, nad1)
bool isBearishCross = ta.crossunder(nad2, nad1)
bool isBullishSmooth = nad2 > nad1
bool isBearishSmooth = nad2 < nad1

// Colors
color c_bullish = input.color(#3AFF17, 'Bullish Color', group='Colors')
color c_bearish = input.color(#FD1707, 'Bearish Color', group='Colors')
color colorByCross = isBullishSmooth ? c_bullish : c_bearish
color colorByRate = isBullish ? c_bullish : c_bearish
color plotColor = smoothColors ? colorByCross : colorByRate

// ------------------------------------------
// Kernel Regression Volatility Multiple (KRVM)
// ------------------------------------------
krvm_grp = 'Kernel Regression Volatility Multiple'

// Nadaraya-Watson kernel regression (self-contained variant: r and x_0 passed in)
kernel_regression22(_src, _h, r, x_0) =>
    _currentWeight = 0.
    _cumulativeWeight = 0.
    for i = 0 to _h + x_0
        y = _src[i]
        w = math.pow(1 + (math.pow(i, 2) / (math.pow(_h, 2) * 2 * r)), -r)
        _currentWeight += y * w
        _cumulativeWeight += w
    _currentWeight / _cumulativeWeight

// K-means clustering of recent prices, kernel-smoothed and GBM-adjusted.
// Returns the volatility-adjusted kernel average of the selected centroid.
krvm() =>
    src = input.source(close, title='Source', group=krvm_grp)
    length = input.int(30, title='Window Size', group=krvm_grp)
    K = input.int(5, title='Clusters', minval=3, maxval=10, group=krvm_grp)
    maxIter = input.int(500, title='Maximum Iteration Steps', group=krvm_grp)
    maxData = input.int(5000, title='Historical Bars Calculation', group=krvm_grp)
    h = input.float(8., title='Lookback Window for Kernel Regression', minval=3., group=krvm_grp)
    r = input.float(8., title='Relative Weighting for Kernel Regression', step=0.25, group=krvm_grp)
    x_0 = input.int(25, title="Start Regression at Bar", group=krvm_grp)
    K_volatility = input.float(2.0, title="Volatility Multiple", group=krvm_grp)
    lookback = input.int(1, title="Lookback Window", minval=1, group=krvm_grp)
    // K-means clustering
    data = array.new_float(0)
    if (ta.barssince(true) <= maxData)
        for i = 0 to length-1
            array.push(data, src[i])
    centroids = array.new_float(0)
    lowest = array.min(data)
    highest = array.max(data)
    step = (highest - lowest) / (K+1)
    for i = 1 to K
        array.push(centroids, lowest + step * i)
    if (ta.barssince(true) <= maxData)
        for _ = 0 to maxIter
            new_centroids = array.new_float(K)
            for value in data
                dist = array.new_float(0)
                for centroid in centroids
                    array.push(dist, math.abs(value - centroid))
                idx = array.indexof(dist, array.min(dist))
                if idx != -1
                    array.set(new_centroids, idx, value)
            change = false
            for i = 0 to K-1
                if array.get(new_centroids, i) != array.get(centroids, i)
                    change := true
                    break
            if (not change)
                break
            centroids := new_centroids
    // Average of centroids (middle centroid, falling back to last value if na)
    var float avg = src
    if (not na(centroids))
        avg := nz(array.get(centroids, int(math.avg(0, K-1))), avg)
    // Apply kernel regression on the average
    kernel_avg = kernel_regression22(avg, h, r, x_0)
    // Incorporating GBM with Volatility Multiple
    xCr = math.log(kernel_avg / kernel_avg[lookback])
    gbm_adjusted_avg = kernel_avg * math.exp(K_volatility * ta.stdev(xCr, length - 1) * math.sqrt(length))
    gbm_adjusted_avg

gbm_adjusted_avg = krvm()

// LORENZIAN ===========================================================================================================
import jdehorty/MLExtensions/2 as ml
import jdehorty/KernelFunctions/2 as kernels

// NOTE(review): the array fields below lost their type arguments in a formatting
// mangle; restored as array<float>/array<int> to match their usage
// (array.get against FeatureSeries floats, y_train labels are ints).
type Settings
    float source
    int neighborsCount
    int maxBarsBack
    int featureCount
    int colorCompression
    bool showExits
    bool useDynamicExits

type Label
    int long
    int short
    int neutral

type FeatureArrays
    array<float> f1
    array<float> f2
    array<float> f3
    array<float> f4
    array<float> f5

type FeatureSeries
    float f1
    float f2
    float f3
    float f4
    float f5

type MLModel
    int firstBarIndex
    array<int> trainingLabels
    int loopSize
    float lastDistance
    array<float> distancesArray
    array<int> predictionsArray
    int prediction

type FilterSettings
    bool useVolatilityFilter
    bool useRegimeFilter
    bool useAdxFilter
    float regimeThreshold
    int adxThreshold

type Filter
    bool volatility
    bool regime
    bool adx

// ==========================
// ==== Helper Functions ====
// ==========================

// Map a feature name to its normalized indicator series
series_from(feature_string, _close, _high, _low, _hlc3, f_paramA, f_paramB) =>
    switch feature_string
        "RSI" => ml.n_rsi(_close, f_paramA, f_paramB)
        "WT" => ml.n_wt(_hlc3, f_paramA, f_paramB)
        "CCI" => ml.n_cci(_close, f_paramA, f_paramB)
        "ADX" => ml.n_adx(_high, _low, _close, f_paramA)

// Lorentzian distance between the current feature vector and historical bar i
get_lorentzian_distance(int i, int featureCount, FeatureSeries featureSeries, FeatureArrays featureArrays) =>
    switch featureCount
        5 => math.log(1+math.abs(featureSeries.f1 - array.get(featureArrays.f1, i))) + math.log(1+math.abs(featureSeries.f2 - array.get(featureArrays.f2, i))) + math.log(1+math.abs(featureSeries.f3 - array.get(featureArrays.f3, i))) + math.log(1+math.abs(featureSeries.f4 - array.get(featureArrays.f4, i))) + math.log(1+math.abs(featureSeries.f5 - array.get(featureArrays.f5, i)))
        4 => math.log(1+math.abs(featureSeries.f1 - array.get(featureArrays.f1, i))) + math.log(1+math.abs(featureSeries.f2 - array.get(featureArrays.f2, i))) + math.log(1+math.abs(featureSeries.f3 - array.get(featureArrays.f3, i))) + math.log(1+math.abs(featureSeries.f4 - array.get(featureArrays.f4, i)))
        3 => math.log(1+math.abs(featureSeries.f1 - array.get(featureArrays.f1, i))) + math.log(1+math.abs(featureSeries.f2 - array.get(featureArrays.f2, i))) + math.log(1+math.abs(featureSeries.f3 - array.get(featureArrays.f3, i)))
        2 => math.log(1+math.abs(featureSeries.f1 - array.get(featureArrays.f1, i))) + math.log(1+math.abs(featureSeries.f2 - array.get(featureArrays.f2, i)))

// ================
// ==== Inputs ====
// ================

// Settings Object: General User-Defined Inputs
Settings settings = Settings.new(
     input.source(title='General Settings Source', defval=close, group="General Settings", tooltip="Source of the input data"),
     input.int(title='Neighbors Count', defval=8, group="General Settings", minval=1, maxval=100, step=1, tooltip="Number of neighbors to consider"),
     input.int(title="Max Bars Back", defval=2000, group="General Settings"),
     input.int(title="Feature Count", defval=5, group="Feature Engineering", minval=2, maxval=5, tooltip="Number of features to use for ML predictions."),
     input.int(title="Color Compression", defval=1, group="General Settings", minval=1, maxval=10, tooltip="Compression factor for adjusting the intensity of the color scale."),
     input.bool(title="Show Default Exits", defval=false, group="General Settings", tooltip="Default exits occur exactly 4 bars after an entry signal. This corresponds to the predefined length of a trade during the model's training process.", inline="exits"),
     input.bool(title="Use Dynamic Exits", defval=false, group="General Settings", tooltip="Dynamic exits attempt to let profits ride by dynamically adjusting the exit threshold based on kernel regression logic.", inline="exits"))

// Trade Stats Settings
// Note: The trade stats section is NOT intended to be used as a replacement for proper backtesting. It is intended to be used for calibration purposes only.
showTradeStats = input.bool(true, 'Show Trade Stats', tooltip='Displays the trade stats for a given configuration. Useful for optimizing the settings in the Feature Engineering section. This should NOT replace backtesting and should be used for calibration purposes only. Early Signal Flips represent instances where the model changes signals before 4 bars elapses; high values can indicate choppy (ranging) market conditions.', group="General Settings")
useWorstCase = input.bool(false, "Use Worst Case Estimates", tooltip="Whether to use the worst case scenario for backtesting. This option can be useful for creating a conservative estimate that is based on close prices only, thus avoiding the effects of intrabar repainting. This option assumes that the user does not enter when the signal first appears and instead waits for the bar to close as confirmation. On larger timeframes, this can mean entering after a large move has already occurred. Leaving this option disabled is generally better for those that use this indicator as a source of confluence and prefer estimates that demonstrate discretionary mid-bar entries. Leaving this option enabled may be more consistent with traditional backtesting results.", group="General Settings")

// Settings object for user-defined settings
FilterSettings filterSettings = FilterSettings.new(
     input.bool(title="Use Volatility Filter", defval=true, tooltip="Whether to use the volatility filter.", group="Filters"),
     input.bool(title="Use Regime Filter", defval=true, group="Filters", inline="regime"),
     input.bool(title="Use ADX Filter", defval=false, group="Filters", inline="adx"),
     input.float(title="Threshold", defval=-0.1, minval=-10, maxval=10, step=0.1, tooltip="Whether to use the trend detection filter. Threshold for detecting Trending/Ranging markets.", group="Filters", inline="regime"),
     input.int(title="Threshold", defval=20, minval=0, maxval=100, step=1, tooltip="Whether to use the ADX filter. Threshold for detecting Trending/Ranging markets.", group="Filters", inline="adx"))

// Filter object for filtering the ML predictions
Filter filter = Filter.new(
     ml.filter_volatility(1, 10, filterSettings.useVolatilityFilter),
     ml.regime_filter(ohlc4, filterSettings.regimeThreshold, filterSettings.useRegimeFilter),
     ml.filter_adx(settings.source, 14, filterSettings.adxThreshold, filterSettings.useAdxFilter))

// Feature Variables: User-Defined Inputs for calculating Feature Series.
f1_string = input.string(title="Feature 1", options=["RSI", "WT", "CCI", "ADX"], defval="RSI", inline = "01", tooltip="The first feature to use for ML predictions.", group="Feature Engineering")
f1_paramA = input.int(title="Parameter A", tooltip="The primary parameter of feature 1.", defval=14, inline = "02", group="Feature Engineering")
f1_paramB = input.int(title="Parameter B", tooltip="The secondary parameter of feature 2 (if applicable).", defval=1, inline = "02", group="Feature Engineering")
f2_string = input.string(title="Feature 2", options=["RSI", "WT", "CCI", "ADX"], defval="WT", inline = "03", tooltip="The second feature to use for ML predictions.", group="Feature Engineering")
f2_paramA = input.int(title="Parameter A", tooltip="The primary parameter of feature 2.", defval=10, inline = "04", group="Feature Engineering")
f2_paramB = input.int(title="Parameter B", tooltip="The secondary parameter of feature 2 (if applicable).", defval=11, inline = "04", group="Feature Engineering")
f3_string = input.string(title="Feature 3", options=["RSI", "WT", "CCI", "ADX"], defval="CCI", inline = "05", tooltip="The third feature to use for ML predictions.", group="Feature Engineering")
f3_paramA = input.int(title="Parameter A", tooltip="The primary parameter of feature 3.", defval=20, inline = "06", group="Feature Engineering")
f3_paramB = input.int(title="Parameter B", tooltip="The secondary parameter of feature 3 (if applicable).", defval=1, inline = "06", group="Feature Engineering")
f4_string = input.string(title="Feature 4", options=["RSI", "WT", "CCI", "ADX"], defval="ADX", inline = "07", tooltip="The fourth feature to use for ML predictions.", group="Feature Engineering")
f4_paramA = input.int(title="Parameter A", tooltip="The primary parameter of feature 4.", defval=20, inline = "08", group="Feature Engineering")
f4_paramB = input.int(title="Parameter B", tooltip="The secondary parameter of feature 4 (if applicable).", defval=2, inline = "08", group="Feature Engineering")
f5_string = input.string(title="Feature 5", options=["RSI", "WT", "CCI", "ADX"], defval="RSI", inline = "09", tooltip="The fifth feature to use for ML predictions.", group="Feature Engineering")
f5_paramA = input.int(title="Parameter A", tooltip="The primary parameter of feature 5.", defval=9, inline = "10", group="Feature Engineering")
f5_paramB = input.int(title="Parameter B", tooltip="The secondary parameter of feature 5 (if applicable).", defval=1, inline = "10", group="Feature Engineering")

// FeatureSeries Object: Calculated Feature Series based on Feature Variables
featureSeries = FeatureSeries.new(
     series_from(f1_string, close, high, low, hlc3, f1_paramA, f1_paramB), // f1
     series_from(f2_string, close, high, low, hlc3, f2_paramA, f2_paramB), // f2
     series_from(f3_string, close, high, low, hlc3, f3_paramA, f3_paramB), // f3
     series_from(f4_string, close, high, low, hlc3, f4_paramA, f4_paramB), // f4
     series_from(f5_string, close, high, low, hlc3, f5_paramA, f5_paramB)) // f5

// FeatureArrays Variables: Storage of Feature Series as Feature Arrays Optimized for ML
// Note: These arrays cannot be dynamically created within the FeatureArrays Object Initialization and thus must be set-up in advance.
var f1Array = array.new_float()
var f2Array = array.new_float()
var f3Array = array.new_float()
var f4Array = array.new_float()
var f5Array = array.new_float()
array.push(f1Array, featureSeries.f1)
array.push(f2Array, featureSeries.f2)
array.push(f3Array, featureSeries.f3)
array.push(f4Array, featureSeries.f4)
array.push(f5Array, featureSeries.f5)

// FeatureArrays Object: Storage of the calculated FeatureArrays into a single object
featureArrays = FeatureArrays.new(
     f1Array, // f1
     f2Array, // f2
     f3Array, // f3
     f4Array, // f4
     f5Array) // f5

// Label Object: Used for classifying historical data as training data for the ML Model
Label direction = Label.new(long=1, short=-1, neutral=0)

// Derived from General Settings
maxBarsBackIndex = last_bar_index >= settings.maxBarsBack ? last_bar_index - settings.maxBarsBack : 0

// EMA Settings
useEmaFilter = input.bool(title="Use EMA Filter", defval=false, group="Filters", inline="ema")
emaPeriod = input.int(title="Period", defval=200, minval=1, step=1, group="Filters", inline="ema", tooltip="The period of the EMA used for the EMA Filter.")
isEmaUptrend = useEmaFilter ? close > ta.ema(close, emaPeriod) : true
isEmaDowntrend = useEmaFilter ? close < ta.ema(close, emaPeriod) : true
useSmaFilter = input.bool(title="Use SMA Filter", defval=false, group="Filters", inline="sma")
smaPeriod = input.int(title="Period", defval=200, minval=1, step=1, group="Filters", inline="sma", tooltip="The period of the SMA used for the SMA Filter.")
isSmaUptrend = useSmaFilter ? close > ta.sma(close, smaPeriod) : true
isSmaDowntrend = useSmaFilter ? close < ta.sma(close, smaPeriod) : true

// Nadaraya-Watson Kernel Regression Settings
useKernelFilter = input.bool(true, "Trade with Kernel", group="Kernel Settings", inline="kernel")
showKernelEstimate = input.bool(true, "Show Kernel Estimate", group="Kernel Settings", inline="kernel")
useKernelSmoothing = input.bool(false, "Enhance Kernel Smoothing", tooltip="Uses a crossover based mechanism to smoothen kernel color changes. This often results in less color transitions overall and may result in more ML entry signals being generated.", inline='1', group='Kernel Settings')
h = input.int(5, 'Kernel Lookback Window', minval=3, tooltip='The number of bars used for the estimation. This is a sliding value that represents the most recent historical bars. Recommended range: 3-50', group="Kernel Settings", inline="kernel")
r = input.float(8., 'Kernel Relative Weighting', step=0.25, tooltip='Relative weighting of time frames. As this value approaches zero, the longer time frames will exert more influence on the estimation. As this value approaches infinity, the behavior of the Rational Quadratic Kernel will become identical to the Gaussian kernel. Recommended range: 0.25-25', group="Kernel Settings", inline="kernel")
x = input.int(20, "Regression Level", tooltip='Bar index on which to start regression. Controls how tightly fit the kernel estimate is to the data. Smaller values are a tighter fit. Larger values are a looser fit. Recommended range: 2-25', group="Kernel Settings", inline="kernel")
lag = input.int(2, "Lag", tooltip="Lag for crossover detection. Lower values result in earlier crossovers. Recommended range: 1-2", inline='1', group='Kernel Settings')

// Display Settings
showBarColors = input.bool(true, "Show Bar Colors", tooltip="Whether to show the bar colors.", group="Display Settings")
showBarPredictions = input.bool(defval = true, title = "Show Bar Prediction Values", tooltip = "Will show the ML model's evaluation of each bar as an integer.", group="Display Settings")
useAtrOffset = input.bool(defval = false, title = "Use ATR Offset", tooltip = "Will use the ATR offset instead of the bar prediction offset.", group="Display Settings")
barPredictionsOffset = input.float(0, "Bar Prediction Offset", minval=0, tooltip="The offset of the bar predictions as a percentage from the bar high or close.", group="Display Settings")

src = settings.source
// Training label: direction of price 4 bars after each historical bar
y_train_series = src[4] < src[0] ? direction.short : src[4] > src[0] ? direction.long : direction.neutral
var y_train_array = array.new_int(0)

// Variables used for ML Logic
var predictions = array.new_float(0)
var prediction = 0.
var signal = direction.neutral
var distances = array.new_float(0)

array.push(y_train_array, y_train_series)

// Approximate Nearest Neighbors search with a Lorentzian distance metric:
// keeps the k nearest (chronologically down-sampled, i%4) neighbors and sums their labels.
lastDistance = -1.0
size = math.min(settings.maxBarsBack-1, array.size(y_train_array)-1)
sizeLoop = math.min(settings.maxBarsBack-1, size)

if bar_index >= maxBarsBackIndex //{
    for i = 0 to sizeLoop //{
        d = get_lorentzian_distance(i, settings.featureCount, featureSeries, featureArrays)
        if d >= lastDistance and i%4 //{
            lastDistance := d
            array.push(distances, d)
            array.push(predictions, math.round(array.get(y_train_array, i)))
            if array.size(predictions) > settings.neighborsCount //{
                // Boost accuracy: reset the distance threshold to the 75th percentile
                lastDistance := array.get(distances, math.round(settings.neighborsCount*3/4))
                array.shift(distances)
                array.shift(predictions)
            //}
        //}
    //}
    prediction := array.sum(predictions)
//}

// User Defined Filters: Used for adjusting the frequency of the ML Model's predictions
filter_all = filter.volatility and filter.regime and filter.adx

// Filtered Signal: The model's prediction of future price movement direction with user-defined filters applied
signal := prediction > 0 and filter_all ? direction.long : prediction < 0 and filter_all ? direction.short : nz(signal[1])

// Bar-Count Filters: Represents strict filters based on a pre-defined holding period of 4 bars
var int barsHeld = 0
barsHeld := ta.change(signal) ? 0 : barsHeld + 1
isHeldFourBars = barsHeld == 4
isHeldLessThanFourBars = 0 < barsHeld and barsHeld < 4

// Fractal Filters: Derived from relative appearances of signals in a given time series fractal/segment with a default length of 4 bars
isDifferentSignalType = ta.change(signal)
isEarlySignalFlip = ta.change(signal) and (ta.change(signal[1]) or ta.change(signal[2]) or ta.change(signal[3]))
isBuySignal = signal == direction.long and isEmaUptrend and isSmaUptrend
isSellSignal = signal == direction.short and isEmaDowntrend and isSmaDowntrend
isLastSignalBuy = signal[4] == direction.long and isEmaUptrend[4] and isSmaUptrend[4]
isLastSignalSell = signal[4] == direction.short and isEmaDowntrend[4] and isSmaDowntrend[4]
isNewBuySignal = isBuySignal and isDifferentSignalType
isNewSellSignal = isSellSignal and isDifferentSignalType

c_green = color.new(#009988, 20)
c_red = color.new(#CC3311, 20)
transparent = color.new(#000000, 100)
yhat1 = kernels.rationalQuadratic(settings.source, h, r, x)
yhat2 = kernels.gaussian(settings.source, h-lag, x)
kernelEstimate = yhat1

// Kernel Rates of Change
bool wasBearishRate2 = yhat1[2] > yhat1[1]
bool wasBullishRate2 = yhat1[2] < yhat1[1]
bool isBearishRate2 = yhat1[1] > yhat1
bool isBullishRate2 = yhat1[1] < yhat1
isBearishChange2 = isBearishRate2 and wasBullishRate2
isBullishChange2 = isBullishRate2 and wasBearishRate2
bool isBullishSmooth2 = yhat2 >= yhat1
bool isBearishSmooth2 = yhat2 <= yhat1

// Kernel Colors
color colorByCross2 = isBullishSmooth2 ? c_green : c_red
color colorByRate2 = isBullishRate2 ? c_green : c_red
color plotColor2 = showKernelEstimate ? (useKernelSmoothing ? colorByCross2 : colorByRate2) : transparent

// ============================================================================
// PpSIgnal Random Walk
// ============================================================================
lengthL = input(34, title='Longs Length' , group='PpSIgnal Random Walk ')
lengthS = input(34, title='Shorts Length', group='PpSIgnal Random Walk ')
useCurrentRes = input(true, title='Corrent Resolution?', group='PpSIgnal Random Walk ')
resCustom = input.timeframe(title='Different Timeframe? Uncheck Box Above', defval='240', group='PpSIgnal Random Walk ')
overide = input(false, title='for NOT use of the recommended time, turn on box', group='PpSIgnal Random Walk ')
newresCustom1 = overide ? resCustom : timeframe.period == '1' ? '5' : timeframe.period == '5' ? '15' : timeframe.period == '15' ? '60' : timeframe.period == '30' ? '120' : timeframe.period == '60' ? '240' : timeframe.period == 'D' ? 'W' : timeframe.period == 'W' ? 'M' : '240'
res = useCurrentRes ? timeframe.period : newresCustom1

//Close
Close = request.security(syminfo.tickerid, res, close, lookahead=barmerge.lookahead_on)
change_1 = ta.change(Close)
newSession = change_1 ? 1 : 0
//Opnen
Open = request.security(syminfo.tickerid, res, open, lookahead=barmerge.lookahead_on)
change_2 = ta.change(Open)
newSessionho = change_2 ? 1 : 0
//High
High = request.security(syminfo.tickerid, res, high, lookahead=barmerge.lookahead_on)
change_3 = ta.change(High)
newSessionh = change_3 ? 1 : 0
//low
Low = request.security(syminfo.tickerid, res, low, lookahead=barmerge.lookahead_on)
change_4 = ta.change(Low)
newSessionl = change_4 ? 1 : 0

// Bars elapsed in the current HTF interval, tracked per OHLC component
barsInIntervalc() =>
    sinceNewSession = 0
    barssince_1 = ta.barssince(newSession)
    offset = useCurrentRes ? 0 : barssince_1
    sinceNewSession := na(sinceNewSession[1]) or offset > nz(sinceNewSession[1]) ? offset : nz(sinceNewSession[1])
    sinceNewSession

barsInIntervalcho() =>
    sinceNewSessionho = 0
    barssince_1 = ta.barssince(newSessionho)
    offsetho = useCurrentRes ? 0 : barssince_1
    sinceNewSessionho := na(sinceNewSessionho[1]) or offsetho > nz(sinceNewSessionho[1]) ? offsetho : nz(sinceNewSessionho[1])
    sinceNewSessionho

barsInIntervalch() =>
    sinceNewSessionh = 0
    barssince_1 = ta.barssince(newSessionh)
    offseth = useCurrentRes ? 0 : barssince_1
    sinceNewSessionh := na(sinceNewSessionh[1]) or offseth > nz(sinceNewSessionh[1]) ? offseth : nz(sinceNewSessionh[1])
    sinceNewSessionh

barsInIntervalcl() =>
    sinceNewSessionl = 0
    // FIX(review): original tracked newSession (close changes); the low variant must track newSessionl
    barssince_1 = ta.barssince(newSessionl)
    offsetl = useCurrentRes ? 0 : barssince_1
    sinceNewSessionl := na(sinceNewSessionl[1]) or offsetl > nz(sinceNewSessionl[1]) ? offsetl : nz(sinceNewSessionl[1])
    sinceNewSessionl

barsInIntcl = barsInIntervalcl()
barsInIntch = barsInIntervalch()
barsInIntcho = barsInIntervalcho()
barsInIntc = barsInIntervalc()

// Random Walk Index of the HTF highs/lows over `_length` bars
f_rwi(_length) =>
    _range = High - Low
    _rwi_of_high = (High - Low[_length]) / (_range[_length] * math.sqrt(_length))
    _rwi_of_low = (High[_length] - Low) / (_range[_length] * math.sqrt(_length))
    [_rwi_of_high, _rwi_of_low]

// Returns [buy, sell] impulses derived from RWI candle signals
pp(length) =>
    [rwi_of_high, rwi_of_low] = f_rwi(length)
    overlay_barcolors = true//input(true)
    overlay_barsignal = true//input(true)
    candle_signal = 0
    if overlay_barsignal
        if rwi_of_high > rwi_of_low
            candle_signal := 1
            candle_signal
        if rwi_of_high < rwi_of_low
            candle_signal := -1
            candle_signal
    else
        if Close > Open
            candle_signal := 1
            candle_signal
        if Close < Open
            candle_signal := -1
            candle_signal
    bs = candle_signal == 1 and candle_signal[1] == -1 or candle_signal[1] == 0 and candle_signal[2] == -1 ? 1 : 0
    ss = candle_signal == -1 and candle_signal[1] == 1 or candle_signal[1] == 0 and candle_signal[2] == 1 ? 1 : 0
    [bs, ss]

[bs, ss_] = pp(lengthL)
[bs_, ss] = pp(lengthS)
plotshape(bs, style=shape.triangleup, color=color.new(color.lime, 0), location=location.bottom)
plotshape(ss, style=shape.triangledown, color=color.new(color.orange, 0), location=location.top)

// Plotting
plot(gbm_adjusted_avg, 'Kernel Regression Average adjusted with GBM Volatility', color=#ff5d00)
plot(nad1, "Rational Quadratic Kernel Estimate", color=plotColor, linewidth=2)
plot(kernelEstimate, color=plotColor2, linewidth=2, title="Kernel Regression Estimate")

//--------------------------------------------------------------------
//#region Constants
//--------------------------------------------------------------------

int LINE_OFFSET_START = 0
int LINE_OFFSET_END = 25

//#endregion
//--------------------------------------------------------------------
//#region Inputs
//--------------------------------------------------------------------

group1 = "Liquidity Levels"
group2 = "Purged Levels"

purgeTimeframeTooltip = "Clear all the purged levels on a new timeframe rotation."
isEnabledInput1 = input (true, "", inline="Level1", group=group1) timeframeInput1 = input.timeframe ("M", "", inline="Level1", group=group1) upperColorInput1 = input (color.rgb(135, 254, 7, 90), "", inline="Level1", group=group1) lowerColorInput1 = input (color.new(color.orange, 90), "", inline="Level1", group=group1) widthInput1 = input (8, "Width", inline="Level1", group=group1, display=display.none) isEnabledInput2 = input (true, "", inline="Level2", group=group1) timeframeInput2 = input.timeframe ("W", "", inline="Level2", group=group1) upperColorInput2 = input (color.new(color.lime, 70), "", inline="Level2", group=group1) lowerColorInput2 = input (color.new(color.red, 70), "", inline="Level2", group=group1) widthInput2 = input (6, "Width", inline="Level2", group=group1, display=display.none) isEnabledInput3 = input (true, "", inline="Level3", group=group1) timeframeInput3 = input.timeframe ("D", "", inline="Level3", group=group1) upperColorInput3 = input (color.new(color.green, 70), "", inline="Level3", group=group1) lowerColorInput3 = input (color.rgb(242, 54, 69, 70), "", inline="Level3", group=group1) widthInput3 = input (4, "Width", inline="Level3", group=group1, display=display.none) isEnabledInput4 = input (true, "", inline="Level4", group=group1) timeframeInput4 = input.timeframe ("240", "", inline="Level4", group=group1) upperColorInput4 = input (color.rgb(0, 151, 167, 70), "", inline="Level4", group=group1) lowerColorInput4 = input (color.rgb(123, 31, 162, 70), "", inline="Level4", group=group1) widthInput4 = input (2, "Width", inline="Level4", group=group1, display=display.none) isEnabledInput5 = input (true, "", inline="Level5", group=group1) timeframeInput5 = input.timeframe ("60", "", inline="Level5", group=group1) upperColorInput5 = input (color.rgb(0, 96, 100, 70), "", inline="Level5", group=group1) lowerColorInput5 = input (color.rgb(74, 20, 140, 70), "", inline="Level5", group=group1) widthInput5 = input (1, "Width", inline="Level5", 
group=group1, display=display.none) purgedColorInput = input (color.new(color.gray, 70), "Color", group=group2) purgedStyleInput = input.string ("Dashed", "Style", ["Solid", "Dashed", "Dotted"], group=group2, display=display.none) purgeTimeframeInput = input.timeframe ("D", "Removal", tooltip=purgeTimeframeTooltip, group=group2, display=display.none) //#endregion //-------------------------------------------------------------------- //#region Types //-------------------------------------------------------------------- type Level float price line line //#endregion //-------------------------------------------------------------------- //#region Variables declarations //-------------------------------------------------------------------- var highsArray = array.new() var lowsArray = array.new() var purgedArray = array.new() [prevHigh1, prevLow1] = request.security(syminfo.tickerid, timeframeInput1, [high[1], low[1]], lookahead=barmerge.lookahead_on) [prevHigh2, prevLow2] = request.security(syminfo.tickerid, timeframeInput2, [high[1], low[1]], lookahead=barmerge.lookahead_on) [prevHigh3, prevLow3] = request.security(syminfo.tickerid, timeframeInput3, [high[1], low[1]], lookahead=barmerge.lookahead_on) [prevHigh4, prevLow4] = request.security(syminfo.tickerid, timeframeInput4, [high[1], low[1]], lookahead=barmerge.lookahead_on) [prevHigh5, prevLow5] = request.security(syminfo.tickerid, timeframeInput5, [high[1], low[1]], lookahead=barmerge.lookahead_on) //#endregion //-------------------------------------------------------------------- //#region Functions & methods //-------------------------------------------------------------------- // @function Check if a given timeframe is equal or higher than the chart's timeframe // @returns bool f_isHigherTimeframe(string timeframe) => timeframe.in_seconds(timeframe) >= timeframe.in_seconds() // @function Produce the line style argument for the `style` parameter from the input settings // @returns (const string) `line.style_*` 
// built-in constants
f_getLineStyle() =>
    switch purgedStyleInput
        "Solid"  => line.style_solid
        "Dotted" => line.style_dotted
        "Dashed" => line.style_dashed

// @function Draw a liquidity level (zero-length line at the current bar;
//           its x-extents are stretched later by `updatePosition()`)
// @returns (line) A new `line` object
f_drawLine(float y, color color, int width) =>
    line.new(bar_index, y, bar_index, y, color=color, width=width)

// @function Create and store new upper and lower liquidity levels
// @returns void
f_createLevels(float h, float l, color upperColor, color lowerColor, int width) =>
    highsArray.push(Level.new(h, f_drawLine(h, upperColor, width)))
    lowsArray.push(Level.new(l, f_drawLine(l, lowerColor, width)))

// @function Update the levels' starting and ending positions
// FIX: the element type was missing from every `array` parameter of the
// methods below (`array<Level>`), which does not compile in Pine v5 —
// the `<Level>` type arguments were lost in a bad merge/paste.
// @returns void
method updatePosition(array<Level> this) =>
    // LINE_OFFSET_START / LINE_OFFSET_END are declared earlier in the file (not shown here)
    _x1 = bar_index + LINE_OFFSET_START
    _x2 = bar_index + LINE_OFFSET_END
    for _level in this
        _level.line.set_x1(_x1)
        _level.line.set_x2(_x2)

// @function Transfer a level from an array to another
// @returns void
method transferTo(array<Level> this, array<Level> dest, int index) =>
    dest.push(this.remove(index))

// @function Highlight a level that has its liquidity "purged"
// @returns void
method highlightPurgedLevel(line this) =>
    var _style = f_getLineStyle()
    this.set_color(purgedColorInput)
    this.set_style(_style)

// @function Update the levels that got their liquidity "purged"
// @returns (bool) If at least one level was purged
method updateLevels(array<Level> this, array<Level> purgedArray, bool isUpperLevel) =>
    _hasPurgedSome = false
    _size = this.size()
    if _size > 0
        // iterate backwards so `remove()` inside `transferTo()` keeps indexes valid
        for i = _size - 1 to 0
            _level = this.get(i)
            if isUpperLevel ? (high > _level.price) : (low < _level.price)
                _level.line.highlightPurgedLevel()
                this.transferTo(purgedArray, i)
                _hasPurgedSome := true
    _hasPurgedSome

// @function Remove the levels in the array and delete their lines
// @returns void
method clearLevels(array<Level> this) =>
    _size = this.size()
    if _size > 0
        for i = _size - 1 to 0
            _level = this.remove(i)
            _level.line.delete()
//#endregion

//--------------------------------------------------------------------
//#region Plotting & styling
//--------------------------------------------------------------------
// Create levels on historical bars (lowest timeframe first so higher TFs draw on top)
if isEnabledInput5 and f_isHigherTimeframe(timeframeInput5) and timeframe.change(timeframeInput5)
    f_createLevels(prevHigh5, prevLow5, upperColorInput5, lowerColorInput5, widthInput5)
if isEnabledInput4 and f_isHigherTimeframe(timeframeInput4) and timeframe.change(timeframeInput4)
    f_createLevels(prevHigh4, prevLow4, upperColorInput4, lowerColorInput4, widthInput4)
if isEnabledInput3 and f_isHigherTimeframe(timeframeInput3) and timeframe.change(timeframeInput3)
    f_createLevels(prevHigh3, prevLow3, upperColorInput3, lowerColorInput3, widthInput3)
if isEnabledInput2 and f_isHigherTimeframe(timeframeInput2) and timeframe.change(timeframeInput2)
    f_createLevels(prevHigh2, prevLow2, upperColorInput2, lowerColorInput2, widthInput2)
if isEnabledInput1 and f_isHigherTimeframe(timeframeInput1) and timeframe.change(timeframeInput1)
    f_createLevels(prevHigh1, prevLow1, upperColorInput1, lowerColorInput1, widthInput1)

// Update the level positions to "float" at the right of the chart's last bar
if barstate.islast
    highsArray.updatePosition()
    lowsArray.updatePosition()
    purgedArray.updatePosition()

// Update the levels that got their liquidity taken
hasPurgedSomeHighs = highsArray.updateLevels(purgedArray, true)
hasPurgedSomeLows  = lowsArray.updateLevels(purgedArray, false)

// Clean up on a new resolution, the levels that had their liquidity taken
if timeframe.change(purgeTimeframeInput)
purgedArray.clearLevels() alertcondition(hasPurgedSomeHighs, "Purging Up", "{{ticker}} Purging Up Liquidity") alertcondition(hasPurgedSomeLows , "Purging Down", "{{ticker}} Purging Down Liquidity") //Premium/Discount zones TRANSP_CSS = #ffffff00 show_sd = input(false, 'Premium/Discount Zones' , group = 'Premium & Discount Zones') premium_css = input.color(#f23645, 'Premium Zone' , group = 'Premium & Discount Zones') eq_css = input.color(#b2b5be, 'Equilibrium Zone' , group = 'Premium & Discount Zones') discount_css = input.color(#089981, 'Discount Zone' , group = 'Premium & Discount Zones') lengthx = input.int(50, title='', group = 'Premium & Discount Zones') n = bar_index var top_x = 0 var btm_x = 0 var trail_up = high, var trail_dn = low //Premium/Discount/Equilibrium zones var premium = box.new(na, na, na, na , bgcolor = color.new(premium_css, 80) , border_color = na) var premium_lbl = label.new(na, na , text = 'Premium' , color = TRANSP_CSS , textcolor = premium_css , style = label.style_label_down , size = size.small) var discount = box.new(na, na, na, na , bgcolor = color.new(discount_css, 80) , border_color = na) var discount_lbl = label.new(na, na , text = 'Discount' , color = TRANSP_CSS , textcolor = discount_css , style = label.style_label_up , size = size.small) //Swings detection/measurements swings(len)=> var os = 0 upper = ta.highest(len) lower = ta.lowest(len) os := high[len] > upper ? 0 : low[len] < lower ? 1 : os[1] top = os == 0 and os[1] != 0 ? high[len] : 0 btm = os == 1 and os[1] != 1 ? 
low[len] : 0 [top, btm] [top, btm] = swings(lengthx) //Pivot High if top top_x := n - lengthx trail_up := top trail_up := math.max(high, trail_up) //Pivot Low if btm btm_x := n-lengthx trail_dn := btm trail_dn := math.min(low, trail_dn) //Show Premium/Discount Areas box.set_lefttop(premium, math.max(top_x, btm_x), trail_up) box.set_rightbottom(premium, n, .95 * trail_up + .05 * trail_dn) label.set_xy(premium_lbl, int(math.avg(math.max(top_x, btm_x), n)), trail_up) box.set_lefttop(discount, math.max(top_x, btm_x), .95 * trail_dn + .05 * trail_up) box.set_rightbottom(discount, n, trail_dn) label.set_xy(discount_lbl, int(math.avg(math.max(top_x, btm_x), n)), trail_dn) touch_discount = low <=box.get_top (discount) touch_premium = high>=box.get_bottom(premium ) // Peak Detection N = input(5, title='Number of Neighbors', group='Peak Detection') useROC = input(true, title='Use Rate of Change', group='Peak Detection') useSavitzkyGolay = input(true, title='Use Savitzky-Golay Filter', group='Peak Detection') useCC = input(true, title="Use Cross-Correlation", group='Peak Detection') // Inputs for each method roc_length = input(14, title='ROC Length', group='Peak Detection') cc_length = input.int(30, minval=1, title="Cross-Correlation Length", group='Peak Detection') cc_shift = input.int(1, minval=1, title="Cross-Correlation Shift Amount", group='Peak Detection') // Savitzky-Golay Filter Function savgol_filter(src) => window_length = 5 poly_order = 2 sum = 0.0 if poly_order == 2 and window_length == 5 sum += src[0] * 3 + src[1] * 12 + src[2] * 17 + src[3] * 12 + src[4] * 3 sum / 47 else src // Apply the Savitzky-Golay filter close_smoothed = useSavitzkyGolay ? 
savgol_filter(close) : close // ROC Calculation roc = ((close_smoothed - close_smoothed[roc_length]) / close_smoothed[roc_length]) * 100 roc_smoothed = savgol_filter(roc) // Cross-Correlation Function cross_correlation(src) => lag = cc_shift sum1 = 0.0 sum2 = 0.0 for i = 0 to cc_length - 1 sum1 := sum1 + src[i] * src[i + lag] sum2 := sum2 + src[i + lag] sum1 / (cc_length * ta.stdev(src, cc_length) * ta.stdev(src[lag], cc_length)) - (ta.sma(src, cc_length) * ta.sma(src[lag], cc_length)) / (ta.stdev(src, cc_length) * ta.stdev(src[lag], cc_length)) cc = useCC ? cross_correlation(close) : na // Peak Detection Function is_peak(src, n, method) => is_high_peak = true is_low_peak = true for i = 1 to n is_high_peak := is_high_peak and src > src[i] is_low_peak := is_low_peak and src < src[i] is_high_peak ? 1 : is_low_peak ? -1 : 0 peak_ROC = useROC ? is_peak(roc_smoothed, N, "ROC") : na peak_CC = useCC ? is_peak(cc , N, "CC") : na peakUp = (peak_ROC == 1 or peak_CC == 1) peakDn = (peak_ROC == -1 or peak_CC == -1) // Predictive Ranges length3 = input.int(200, 'Predictive Range Length', minval = 2, group='Predictive Ranges') mult3 = input.float(6., 'Factor', minval = 0, step = .5, group='Predictive Ranges') tf3 = input.timeframe('', 'Timeframe', group='Predictive Ranges') src3 = input(close, 'Predictive Ranges Source', group='Predictive Ranges') pred_ranges(length, mult)=> var avg = src var hold_atr = 0. atr = nz(ta.atr(length)) * mult avg := src - avg > atr ? avg + atr : avg - src > atr ? avg - atr : avg hold_atr := avg != avg[1] ? atr / 2 : hold_atr [avg + hold_atr * 2, avg + hold_atr, avg, avg - hold_atr, avg - hold_atr * 2] [prR2, prR1, avg, prS1, prS2] = request.security(syminfo.tickerid, tf3, pred_ranges(length3, mult3)) plot_pru2 = plot(prR2, 'PR Upper 2', avg != avg[1] ? na : #f23645) plot_pru1 = plot(prR1, 'PR Upper 1', avg != avg[1] ? na : #f23645) plot_pravg = plot(avg , 'PR Average', avg != avg[1] ? na : #5b9cf6) plot_prl1 = plot(prS1, 'PR Lower 1', avg != avg[1] ? 
na : #089981) plot_prl2 = plot(prS2, 'PR Lower 2', avg != avg[1] ? na : #089981) fill(plot_pru2, plot_pru1, avg != avg[1] ? na : color.new(#f23645, 95)) fill(plot_prl1, plot_prl2, avg != avg[1] ? na : color.new(#089981, 95)) // Custom clamp function clamp(x, minVal, maxVal) => math.min(math.max(x, minVal), maxVal) // Kernel Regression calculation kernel_regressionxxx(_src, _h, x_0, r) => _currentWeight = 0. _cumulativeWeight = 0. for i = 0 to _h + x_0 y = _src[i] w = math.pow(1 + (math.pow(i, 2) / (math.pow(_h, 2) * 2 * r)), -r) _currentWeight += y * w _cumulativeWeight += w _currentWeight / _cumulativeWeight tktk() => // Input settings rsi_length = input.int(14, "RSI Length", group='TikTok Kernel') rsi_smooth = input.int(3, "RSI Smoothing Periods", group='TikTok Kernel') volume_length = input.int(30, "Average Volume Length", group='TikTok Kernel') // Calculate average volume avg_volume = ta.sma(volume, volume_length) highest_avg_volume = ta.highest(avg_volume, volume_length) // Calculate multiple ATR series atr_20 = ta.atr(20) atr_14 = ta.atr(14) atr_10 = ta.atr(10) // Conditionally select appropriate ATR based on average volume atr = avg_volume > highest_avg_volume * 0.8 ? atr_20 : avg_volume > highest_avg_volume * 0.5 ? atr_14 : atr_10 atr_abs_diff = atr - atr[1] atr_roc = (atr - atr[1]) / atr[1] * 100 // RSI calculation and smoothing rsi_val = ta.rsi(close, rsi_length) smoothed_rsi = ta.sma(rsi_val, rsi_smooth) // Using smoothed RSI to influence the weight rsi_factor = smoothed_rsi / 50 - 1 // Weight calculation for AMA base_weight = volume > ta.sma(volume, volume_length) ? 0.7 : 0.3 normalized_abs_diff = atr_abs_diff / atr[1] combined_metric = (normalized_abs_diff + atr_roc / 100 + rsi_factor) / 3 final_weight = clamp(base_weight + combined_metric, 0.1, 0.9) // AMA calculation var float ama = close ama := na(ama) ? 
close : ama * (1 - final_weight) + close * final_weight // Kernel Regression settings hXX = input.float(8, 'Lookback Window for Kernel Regression', minval=3, group='TikTok Kernel') rXX = input.float(8, 'Relative Weighting for Kernel Regression', step=0.25, group='TikTok Kernel') x_0XX = input.int(25, "Start Regression at Bar", group='TikTok Kernel') kernel_ama = kernel_regressionxxx(ama, hXX, x_0XX, rXX) length_gbm = input.int(30, title='Window Size', group='TikTok Kernel') K_volatility = input.float(2.0, title="Volatility Multiple", group='TikTok Kernel') lookback = input.int(1, title="Lookback Window", minval=1, group='TikTok Kernel') xCr = math.log(kernel_ama / kernel_ama[lookback]) gbm_adjusted_ama = kernel_ama * math.exp(K_volatility * ta.stdev(xCr, length_gbm - 1) * math.sqrt(length_gbm)) gbm_adjusted_ama gbm_adjusted_ama = tktk() avg_kern_sw1 = input.string('Nadaraya', title='Kernel Used LONGS' , options=['Nadaraya', 'GBM', 'KRVM', 'TikTok'], group='Average Kernels') avg_kern_sw2 = input.string('Nadaraya', title='Kernel Used SHORTS', options=['Nadaraya', 'GBM', 'KRVM', 'TikTok'], group='Average Kernels') // --- Kernel Regression Settings --- kernel_regression33(_src, _h) => float _currentWeight = 0. float _cumulativeWeight = 0. for i = 0 to array.size(array.from(_src)) + x_0 y = _src[i] w = math.pow(1 + (math.pow(i, 2) / ((math.pow(_h, 2) * 2 * r))), -r) _currentWeight += y*w _cumulativeWeight += w _currentWeight / _cumulativeWeight avg_k() => h = input.float(8., 'Lookback Window', minval=3., group='Average Kernels') r = input.float(8., 'Relative Weighting', step=0.25, group='Average Kernels') x_0 = input.int(25, "Start Regression at Bar", group='Average Kernels') sensitivity_length = input.int(50, "Sensitivity Between Sources", group='Average Kernels') avg_kern_src1 = avg_kern_sw1=='Nadaraya' ? nad1 : avg_kern_sw1=='GBM' ? gbm_0 : avg_kern_sw1=='KRVM' ? gbm_adjusted_avg : gbm_adjusted_ama avg_kern_src2 = avg_kern_sw2=='Nadaraya' ? 
nad1 : avg_kern_sw2=='GBM' ? gbm_0 : avg_kern_sw2=='KRVM' ? gbm_adjusted_avg : gbm_adjusted_ama avg_of_sources = (avg_kern_src1 + avg_kern_src2) / 2 ma_of_avg_sources = ta.sma(avg_of_sources, sensitivity_length) AVG = kernel_regression33(ma_of_avg_sources, h) AVG AVG = avg_k() plot(AVG, title="Nadaraya-Watson Kernel Regression of Moving Avg of Avg Sources", color=color.blue) nad_finalL = entry_kernelL=='Nadaraya' ? nad1 : entry_kernelL=='KRVM' ? gbm_adjusted_avg : entry_kernelL=='AVG' ? AVG : entry_kernelL=='GBM' ? gbm_0 :gbm_adjusted_ama nad_finalS = entry_kernelS=='Nadaraya' ? nad1 : entry_kernelS=='KRVM' ? gbm_adjusted_avg : entry_kernelS=='AVG' ? AVG : entry_kernelS=='GBM' ? gbm_0 :gbm_adjusted_ama cond2(x,y) => rr1 = x and ta.barssince(y)<=bars rr2 = y and ta.barssince(x)<=bars rr = rr1 or rr2 rr and not rr[1] long = cond2(bs, (ta.cross(nad_finalL, kernelEstimate) and nad_finalLkernelEstimate)) // Position Management Tools pos = 0.0 pos:= long? 1 : short? -1 : pos[1] longCond = long and (pos[1]!= 1 or na(pos[1])) shortCond = short and (pos[1]!=-1 or na(pos[1])) long_tp1 = use1 and touch_premium // Premium/Discount long_tp2 = use2 and isBearish // Kernel Reverse long_tp3 = use3 and hasPurgedSomeHighs // HTF Liquitidy long_tp4 = use4 and high>=prR1 // Predictive Range 1 long_tp5 = use5 and high>=prR2 // Predictive Range 2 long_tp6 = use6 and ta.barssince(longCond) == exitBars // Exit Bars long_tp7 = use7 and peakUp // PEAK short_tp1 = use1 and touch_discount short_tp2 = use2 and isBullish short_tp3 = use3 and hasPurgedSomeLows short_tp4 = use4 and low<= prS1 short_tp5 = use5 and low<= prS2 short_tp6 = use6 and ta.barssince(shortCond) == exitBars // Predictive Range 2 short_tp7 = use7 and peakDn // PEAK long_exit = (long_tp1 or long_tp2 or long_tp3 or long_tp4 or long_tp5 or long_tp6 or long_tp7 ) and pos[1]==1 short_exit= (short_tp1 or short_tp2 or short_tp3 or short_tp4 or short_tp5 or short_tp6 or short_tp7) and pos[1]==-1 if (long_exit and not shortCond) or 
(short_exit and not longCond) pos:=0 // Chart Plot & Alerts plotshape(longCond, textcolor=color.lime, color=color.lime, style=shape.triangleup , title="Buy" , text="Buy" , location=location.belowbar, offset=0, size=size.small) plotshape(shortCond, textcolor=color.red, color=color.red, style=shape.triangledown, title="Sell", text="Sell", location=location.abovebar, offset=0, size=size.small) plotshape(long_exit, textcolor=color.purple, color=color.purple, style=shape.circle, text="X" , title="Long Exit" , location=location.abovebar, offset=0, size=size.tiny) plotshape(short_exit, textcolor=color.purple, color=color.purple, style=shape.circle, text="X", title="Short Exit", location=location.belowbar, offset=0, size=size.tiny) // EXIT FUNCTIONS // i_sl = input.float(0.0, title="Stop Loss $     ", minval=0, step=0.1, inline='sl ') i_tp = input.float(0.0, title="Take Profit $   ", minval=0, step=0.1, inline='tp ') sl = i_sl >0? i_sl : 99999 tp = i_tp >0? i_tp : 99999 long_entry = ta.valuewhen(longCond , close, 0) short_entry = ta.valuewhen(shortCond, close, 0) // Simple Stop Loss and Take Profit sl_long = long_entry - sl sl_short = short_entry + sl tp_long = long_entry + tp tp_short = short_entry - tp // Position Adjustment long_sl = low sl_short[1] and pos[1]==-1 final_long_tp = high>tp_long[1] and pos[1]==1 final_short_tp = low i_startTime) and (time < i_endTime) equity = strategy.initial_capital + strategy.netprofit if equity>0 and timeCond if longCond strategy.entry("long" , strategy.long ) if shortCond strategy.entry("short", strategy.short) strategy.exit("SL/TP", from_entry = "long" , stop=sl_long , limit=tp_long , comment_profit ='TP', comment_loss='SL') strategy.exit("SL/TP", from_entry = "short", stop=sl_short, limit=tp_short, comment_profit ='TP', comment_loss='SL') if long_exit and timeCond strategy.close("long" , comment="Exit") if short_exit and timeCond strategy.close("short", comment="Exit")