A lesser-known, step-like function approximation method: isotonic regression fits a monotonic piecewise function to the data. Note that the default assumes a non-decreasing (increasing) fit, so to approximate the decreasing cosine segment below you need to pass `increasing=False`.

        import numpy as np
        import matplotlib.pyplot as plt
        from matplotlib.collections import LineCollection
        %matplotlib inline
        from sklearn.linear_model import LinearRegression
        from sklearn.isotonic import IsotonicRegression
        from sklearn.utils import check_random_state

        n = 100
        x = np.arange(n)
        rs = check_random_state(0)
        # noisy samples of a decreasing cosine segment; randint(-1, 1) yields -1 or 0
        y = rs.randint(-1, 1, size=(n,)) + 50. * np.cos(np.arange(n) / n)


        ir = IsotonicRegression(increasing=False)

        y_ = ir.fit_transform(x, y)

        lr = LinearRegression()
        # LinearRegression expects a 2-D feature array; np.newaxis adds the column axis
        lr.fit(x[:, np.newaxis], y)



        # thin vertical segments connecting each observation to its isotonic fit
        segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
        lc = LineCollection(segments, zorder=0)
        lc.set_array(np.ones(len(y)))
        lc.set_linewidths(np.full(n, 0.5))

        fig = plt.figure()
        plt.plot(x, y, 'r.', markersize=12)
        plt.plot(x, y_, 'g.-', markersize=12)
        plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
        plt.gca().add_collection(lc)
        plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
        plt.title('Isotonic regression')
        plt.show()
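
Because the fitted function is piecewise linear between the training points, the estimator can also evaluate unseen inputs via `ir.predict`. A minimal follow-up sketch (the query points below are arbitrary); sklearn can also infer the direction for you with `increasing='auto'`:

        # Evaluate the fitted step-like function at new (fractional) inputs;
        # sklearn interpolates linearly between the fitted points.
        x_new = np.linspace(0, n - 1, 5)
        print(ir.predict(x_new))

        # Instead of hard-coding the direction, let sklearn infer it from the
        # data (based on the sign of the Spearman rank correlation):
        ir_auto = IsotonicRegression(increasing='auto')
        y_auto = ir_auto.fit_transform(x, y)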