Lines 130-142
typedef int l_tick_t(struct iop_stats *);
Link Here
|
130 |
|
130 |
|
131 |
/* |
131 |
/* |
132 |
* Called to see if the limiter thinks this IOP can be allowed to |
132 |
* Called to see if the limiter thinks this IOP can be allowed to |
133 |
* proceed. If so, the limiter assumes that the while IOP proceeded |
133 |
* proceed. If so, the limiter assumes that the IOP proceeded |
134 |
* and makes any accounting of it that's needed. |
134 |
* and makes any accounting of it that's needed. |
135 |
*/ |
135 |
*/ |
136 |
typedef int l_iop_t(struct iop_stats *, struct bio *); |
136 |
typedef int l_iop_t(struct iop_stats *, struct bio *); |
137 |
|
137 |
|
138 |
/* |
138 |
/* |
139 |
* Called when an I/O completes so the limiter can updates its |
139 |
* Called when an I/O completes so the limiter can update its |
140 |
* accounting. Pending I/Os may complete in any order (even when |
140 |
* accounting. Pending I/Os may complete in any order (even when |
141 |
* sent to the hardware at the same time), so the limiter may not |
141 |
* sent to the hardware at the same time), so the limiter may not |
142 |
* make any assumptions other than this I/O has completed. If it |
142 |
* make any assumptions other than this I/O has completed. If it |
Lines 493-500
cam_iosched_bw_caniop(struct iop_stats *ios, struct bio *bp)
Link Here
|
493 |
{ |
493 |
{ |
494 |
/* |
494 |
/* |
495 |
* So if we have any more bw quota left, allow it, |
495 |
* So if we have any more bw quota left, allow it, |
496 |
* otherwise wait. Not, we'll go negative and that's |
496 |
* otherwise wait. Note, we'll go negative and that's |
497 |
* OK. We'll just get a lettle less next quota. |
497 |
* OK. We'll just get a little less next quota. |
498 |
* |
498 |
* |
499 |
* Note on going negative: that allows us to process |
499 |
* Note on going negative: that allows us to process |
500 |
* requests in order better, since we won't allow |
500 |
* requests in order better, since we won't allow |
Lines 639-645
cam_iosched_cl_maybe_steer(struct control_loop *clp)
Link Here
|
639 |
* ~10. At .25 it only takes ~8. However some preliminary data |
639 |
* ~10. At .25 it only takes ~8. However some preliminary data |
640 |
* from the SSD drives suggests a reasponse time in 10's of |
640 |
* from the SSD drives suggests a response time in 10's of |
641 |
* seconds before latency drops regardless of the new write |
641 |
* seconds before latency drops regardless of the new write |
642 |
* rate. Careful observation will be reqiured to tune this |
642 |
* rate. Careful observation will be required to tune this |
643 |
* effectively. |
643 |
* effectively. |
644 |
* |
644 |
* |
645 |
* Also, when there's no read traffic, we jack up the write |
645 |
* Also, when there's no read traffic, we jack up the write |
Lines 1214-1220
cam_iosched_put_back_trim(struct cam_iosched_softc *isc, struct bio *bp)
Link Here
|
1214 |
* gets the next trim from the trim queue. |
1214 |
* gets the next trim from the trim queue. |
1215 |
* |
1215 |
* |
1216 |
* Assumes we're called with the periph lock held. It removes this |
1216 |
* Assumes we're called with the periph lock held. It removes this |
1217 |
* trim from the queue and the device must explicitly reinstert it |
1217 |
* trim from the queue and the device must explicitly reinsert it |
1218 |
* should the need arise. |
1218 |
* should the need arise. |
1219 |
*/ |
1219 |
*/ |
1220 |
struct bio * |
1220 |
struct bio * |
Lines 1235-1243
cam_iosched_next_trim(struct cam_iosched_softc *isc)
Link Here
|
1235 |
} |
1235 |
} |
1236 |
|
1236 |
|
1237 |
/* |
1237 |
/* |
1238 |
* gets the an available trim from the trim queue, if there's no trim |
1238 |
* gets an available trim from the trim queue, if there's no trim |
1239 |
* already pending. It removes this trim from the queue and the device |
1239 |
* already pending. It removes this trim from the queue and the device |
1240 |
* must explicitly reinstert it should the need arise. |
1240 |
* must explicitly reinsert it should the need arise. |
1241 |
* |
1241 |
* |
1242 |
* Assumes we're called with the periph lock held. |
1242 |
* Assumes we're called with the periph lock held. |
1243 |
*/ |
1243 |
*/ |
Lines 1476-1482
cam_iosched_clr_work_flags(struct cam_iosched_softc *isc, uint32_t flags)
Link Here
|
1476 |
#ifdef CAM_IOSCHED_DYNAMIC |
1476 |
#ifdef CAM_IOSCHED_DYNAMIC |
1477 |
/* |
1477 |
/* |
1478 |
* After the method presented in Jack Crenshaw's 1998 article "Integer |
1478 |
* After the method presented in Jack Crenshaw's 1998 article "Integer |
1479 |
* Suqare Roots," reprinted at |
1479 |
* Square Roots," reprinted at |
1480 |
* http://www.embedded.com/electronics-blogs/programmer-s-toolbox/4219659/Integer-Square-Roots |
1480 |
* http://www.embedded.com/electronics-blogs/programmer-s-toolbox/4219659/Integer-Square-Roots |
1481 |
* and well worth the read. Briefly, we find the power of 4 that's the |
1481 |
* and well worth the read. Briefly, we find the power of 4 that's the |
1482 |
* largest smaller than val. We then check each smaller power of 4 to |
1482 |
* largest smaller than val. We then check each smaller power of 4 to |
Lines 1485-1491
cam_iosched_clr_work_flags(struct cam_iosched_softc *isc, uint32_t flags)
Link Here
|
1485 |
* accumulating the right answer. It could also have been accumulated |
1485 |
* accumulating the right answer. It could also have been accumulated |
1486 |
* using a separate root counter, but this code is smaller and faster |
1486 |
* using a separate root counter, but this code is smaller and faster |
1487 |
* than that method. This method is also integer size invariant. |
1487 |
* than that method. This method is also integer size invariant. |
1488 |
* It returns floor(sqrt((float)val)), or the larget integer less than |
1488 |
* It returns floor(sqrt((float)val)), or the largest integer less than |
1489 |
* or equal to the square root. |
1489 |
* or equal to the square root. |
1490 |
*/ |
1490 |
*/ |
1491 |
static uint64_t |
1491 |
static uint64_t |
Lines 1553-1559
cam_iosched_update(struct iop_stats *iop, sbintime_t sim_latency)
Link Here
|
1553 |
iop->latencies[i]++; /* Put all > 1024ms values into the last bucket. */ |
1553 |
iop->latencies[i]++; /* Put all > 1024ms values into the last bucket. */ |
1554 |
|
1554 |
|
1555 |
/* |
1555 |
/* |
1556 |
* Classic expoentially decaying average with a tiny alpha |
1556 |
* Classic exponentially decaying average with a tiny alpha |
1557 |
* (2 ^ -alpha_bits). For more info see the NIST statistical |
1557 |
* (2 ^ -alpha_bits). For more info see the NIST statistical |
1558 |
* handbook. |
1558 |
* handbook. |
1559 |
* |
1559 |
* |
1560 |
- |
|
|