The DRBD driver
Philipp Reisner [Fri, 25 Sep 2009 23:07:19 +0000 (16:07 -0700)]
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>

35 files changed:
Documentation/blockdev/drbd/DRBD-8.3-data-packets.svg [new file with mode: 0644]
Documentation/blockdev/drbd/DRBD-data-packets.svg [new file with mode: 0644]
Documentation/blockdev/drbd/README.txt [new file with mode: 0644]
Documentation/blockdev/drbd/conn-states-8.dot [new file with mode: 0644]
Documentation/blockdev/drbd/disk-states-8.dot [new file with mode: 0644]
Documentation/blockdev/drbd/drbd-connection-state-overview.dot [new file with mode: 0644]
Documentation/blockdev/drbd/node-states-8.dot [new file with mode: 0644]
MAINTAINERS
drivers/block/Kconfig
drivers/block/Makefile
drivers/block/drbd/Kconfig [new file with mode: 0644]
drivers/block/drbd/Makefile [new file with mode: 0644]
drivers/block/drbd/drbd_actlog.c [new file with mode: 0644]
drivers/block/drbd/drbd_bitmap.c [new file with mode: 0644]
drivers/block/drbd/drbd_int.h [new file with mode: 0644]
drivers/block/drbd/drbd_main.c [new file with mode: 0644]
drivers/block/drbd/drbd_nl.c [new file with mode: 0644]
drivers/block/drbd/drbd_proc.c [new file with mode: 0644]
drivers/block/drbd/drbd_receiver.c [new file with mode: 0644]
drivers/block/drbd/drbd_req.c [new file with mode: 0644]
drivers/block/drbd/drbd_req.h [new file with mode: 0644]
drivers/block/drbd/drbd_strings.c [new file with mode: 0644]
drivers/block/drbd/drbd_tracing.c [new file with mode: 0644]
drivers/block/drbd/drbd_tracing.h [new file with mode: 0644]
drivers/block/drbd/drbd_vli.h [new file with mode: 0644]
drivers/block/drbd/drbd_worker.c [new file with mode: 0644]
drivers/block/drbd/drbd_wrappers.h [new file with mode: 0644]
include/linux/drbd.h [new file with mode: 0644]
include/linux/drbd_limits.h [new file with mode: 0644]
include/linux/drbd_nl.h [new file with mode: 0644]
include/linux/drbd_tag_magic.h [new file with mode: 0644]
include/linux/lru_cache.h [new file with mode: 0644]
lib/Kconfig
lib/Makefile
lib/lru_cache.c [new file with mode: 0644]

diff --git a/Documentation/blockdev/drbd/DRBD-8.3-data-packets.svg b/Documentation/blockdev/drbd/DRBD-8.3-data-packets.svg
new file mode 100644 (file)
index 0000000..f87cfa0
--- /dev/null
@@ -0,0 +1,588 @@
[588 lines of Inkscape-generated SVG omitted.  The drawing, titled
 "DRBD-8.3 data flow", contains three annotated message-sequence diagrams:
   - "Checksum based Resync, case not in sync": packets CsumRSRequest,
     RSDataReply, WriteAck; functions w_make_resync_request(),
     w_e_send_csum(), receive_DataRequest(), drbd_endio_read_sec(),
     w_e_end_csum_rs_req(), receive_RSDataReply(), drbd_endio_write_sec(),
     e_end_resync_block(), got_BlockAck()
   - "Checksum based Resync, case in sync": packets CsumRSRequest,
     RSIsInSync; same request-side functions, completion via got_IsInSync()
   - "Online verify": packets OVRequest, OVReply, OVResult; functions
     w_make_ov_request(), receive_OVRequest(), w_e_end_ov_req(),
     receive_OVReply(), w_e_end_ov_reply(), got_OVResult()
 Each exchange is bracketed by rs_begin_io()/rs_complete_io() on both nodes.]
diff --git a/Documentation/blockdev/drbd/DRBD-data-packets.svg b/Documentation/blockdev/drbd/DRBD-data-packets.svg
new file mode 100644 (file)
index 0000000..48a1e21
--- /dev/null
@@ -0,0 +1,459 @@
[459 lines of Inkscape-generated SVG omitted.  The drawing, titled
 "DRBD 8 data flow", contains three annotated message-sequence diagrams:
   - "Regular mirrored write, 512-32K": packets Data, WriteAck; functions
     drbd_make_request(), w_send_dblock(), receive_Data(),
     drbd_endio_write_sec(), e_end_block(), got_BlockAck(),
     drbd_endio_write_pri(); bracketed by al_begin_io()/al_complete_io()
   - "Diskless read, 512-32K": packets DataRequest, DataReply; functions
     drbd_make_request(), w_send_read_req(), receive_DataRequest(),
     drbd_endio_read_sec(), w_e_end_data_req(), receive_DataReply()
   - "Resync blocks, 4-32K": packets RSDataRequest, RSDataReply, WriteAck;
     functions w_make_resync_request(), receive_DataRequest(),
     drbd_endio_read_sec(), w_e_end_rsdata_req(), receive_RSDataReply(),
     drbd_endio_write_sec(), e_end_resync_block(), got_BlockAck();
     bracketed by rs_begin_io()/rs_complete_io()]
diff --git a/Documentation/blockdev/drbd/README.txt b/Documentation/blockdev/drbd/README.txt
new file mode 100644 (file)
index 0000000..627b0a1
--- /dev/null
@@ -0,0 +1,16 @@
+Description
+
+  DRBD is a shared-nothing, synchronously replicated block device. It
+  is designed to serve as a building block for high availability
+  clusters and, in this context, is a "drop-in" replacement for shared
+  storage. Simplistically, you could see it as a network RAID 1.
+
+  Please visit http://www.drbd.org to find out more.
+
+The files included here are intended to help in understanding the implementation.
+
+DRBD-8.3-data-packets.svg, DRBD-data-packets.svg
+  relate some of the driver's functions to the data packets on the wire.
+
+conn-states-8.dot, disk-states-8.dot, node-states-8.dot
+  the sub-graphs of DRBD's state transitions
diff --git a/Documentation/blockdev/drbd/conn-states-8.dot b/Documentation/blockdev/drbd/conn-states-8.dot
new file mode 100644 (file)
index 0000000..025e8cf
--- /dev/null
@@ -0,0 +1,18 @@
+digraph conn_states {
+       StandAlone   -> WFConnection   [ label = "ioctl_set_net()" ]
+       WFConnection -> Unconnected    [ label = "unable to bind()" ]
+       WFConnection -> WFReportParams [ label = "in connect() after accept" ]
+       WFReportParams -> StandAlone   [ label = "checks in receive_param()" ]
+       WFReportParams -> Connected    [ label = "in receive_param()" ]
+       WFReportParams -> WFBitMapS    [ label = "sync_handshake()" ]
+       WFReportParams -> WFBitMapT    [ label = "sync_handshake()" ]
+       WFBitMapS -> SyncSource        [ label = "receive_bitmap()" ]
+       WFBitMapT -> SyncTarget        [ label = "receive_bitmap()" ]
+       SyncSource -> Connected
+       SyncTarget -> Connected
+       SyncSource -> PausedSyncS
+       SyncTarget -> PausedSyncT
+       PausedSyncS -> SyncSource
+       PausedSyncT -> SyncTarget
+       Connected   -> WFConnection    [ label = "* on network error" ]
+}
diff --git a/Documentation/blockdev/drbd/disk-states-8.dot b/Documentation/blockdev/drbd/disk-states-8.dot
new file mode 100644 (file)
index 0000000..d06cfb4
--- /dev/null
@@ -0,0 +1,16 @@
+digraph disk_states {
+       Diskless -> Inconsistent       [ label = "ioctl_set_disk()" ]
+       Diskless -> Consistent         [ label = "ioctl_set_disk()" ]
+       Diskless -> Outdated           [ label = "ioctl_set_disk()" ]
+       Consistent -> Outdated         [ label = "receive_param()" ]
+       Consistent -> UpToDate         [ label = "receive_param()" ]
+       Consistent -> Inconsistent     [ label = "start resync" ]
+       Outdated   -> Inconsistent     [ label = "start resync" ]
+       UpToDate   -> Inconsistent     [ label = "ioctl_replicate" ]
+       Inconsistent -> UpToDate       [ label = "resync completed" ]
+       Consistent -> Failed           [ label = "io completion error" ]
+       Outdated   -> Failed           [ label = "io completion error" ]
+       UpToDate   -> Failed           [ label = "io completion error" ]
+       Inconsistent -> Failed         [ label = "io completion error" ]
+       Failed -> Diskless             [ label = "sending notify to peer" ]
+}
diff --git a/Documentation/blockdev/drbd/drbd-connection-state-overview.dot b/Documentation/blockdev/drbd/drbd-connection-state-overview.dot
new file mode 100644 (file)
index 0000000..6d9cf0a
--- /dev/null
@@ -0,0 +1,85 @@
+// vim: set sw=2 sts=2 :
+digraph {
+  rankdir=BT
+  bgcolor=white
+
+  node [shape=plaintext]
+  node [fontcolor=black]
+
+  StandAlone     [ style=filled,fillcolor=gray,label=StandAlone ]
+
+  node [fontcolor=lightgray]
+
+  Unconnected    [ label=Unconnected ]
+
+  CommTrouble [ shape=record,
+    label="{communication loss|{Timeout|BrokenPipe|NetworkFailure}}" ]
+
+  node [fontcolor=gray]
+
+  subgraph cluster_try_connect {
+    label="try to connect, handshake"
+    rank=max
+    WFConnection   [ label=WFConnection ]
+    WFReportParams [ label=WFReportParams ]
+  }
+
+  TearDown       [ label=TearDown ]
+
+  Connected      [ label=Connected,style=filled,fillcolor=green,fontcolor=black ]
+
+  node [fontcolor=lightblue]
+
+  StartingSyncS  [ label=StartingSyncS ]
+  StartingSyncT  [ label=StartingSyncT ]
+
+  subgraph cluster_bitmap_exchange {
+    node [fontcolor=red]
+    fontcolor=red
+    label="new application (WRITE?) requests blocked\lwhile bitmap is exchanged"
+
+    WFBitMapT      [ label=WFBitMapT ]
+    WFSyncUUID     [ label=WFSyncUUID ]
+    WFBitMapS      [ label=WFBitMapS ]
+  }
+
+  node [fontcolor=blue]
+
+  cluster_resync [ shape=record,label="{<any>resynchronisation process running\l'concurrent' application requests allowed|{{<T>PausedSyncT\nSyncTarget}|{<S>PausedSyncS\nSyncSource}}}" ]
+
+  node [shape=box,fontcolor=black]
+
+  // drbdadm [label="drbdadm connect"]
+  // handshake [label="drbd_connect()\ndrbd_do_handshake\ndrbd_sync_handshake() etc."]
+  // comm_error [label="communication trouble"]
+
+  //
+  // edges
+  // --------------------------------------
+
+  StandAlone -> Unconnected [ label="drbdadm connect" ]
+  Unconnected -> StandAlone  [ label="drbdadm disconnect\lor serious communication trouble" ]
+  Unconnected -> WFConnection [ label="receiver thread is started" ]
+  WFConnection -> WFReportParams [ headlabel="accept()\land/or                        \lconnect()\l" ]
+
+  WFReportParams -> StandAlone [ label="during handshake\lpeers do not agree\labout something essential" ]
+  WFReportParams -> Connected [ label="data identical\lno sync needed",color=green,fontcolor=green ]
+
+    WFReportParams -> WFBitMapS
+    WFReportParams -> WFBitMapT
+    WFBitMapT -> WFSyncUUID [minlen=0.1,constraint=false]
+
+      WFBitMapS -> cluster_resync:S
+      WFSyncUUID -> cluster_resync:T
+
+  edge [color=green]
+  cluster_resync:any -> Connected [ label="resync done",fontcolor=green ]
+
+  edge [color=red]
+  WFReportParams -> CommTrouble
+  Connected -> CommTrouble
+  cluster_resync:any -> CommTrouble
+  edge [color=black]
+  CommTrouble -> Unconnected [label="receiver thread is stopped" ]
+
+}
diff --git a/Documentation/blockdev/drbd/node-states-8.dot b/Documentation/blockdev/drbd/node-states-8.dot
new file mode 100644 (file)
index 0000000..4a2b00c
--- /dev/null
@@ -0,0 +1,14 @@
+digraph node_states {
+       Secondary -> Primary           [ label = "ioctl_set_state()" ]
+       Primary   -> Secondary         [ label = "ioctl_set_state()" ]
+}
+
+digraph peer_states {
+       Secondary -> Primary           [ label = "recv state packet" ]
+       Primary   -> Secondary         [ label = "recv state packet" ]
+       Primary   -> Unknown           [ label = "connection lost" ]
+       Secondary  -> Unknown          [ label = "connection lost" ]
+       Unknown   -> Primary           [ label = "connected" ]
+       Unknown   -> Secondary         [ label = "connected" ]
+}
+
diff --git a/MAINTAINERS b/MAINTAINERS
index c450f3a..ea56bd7 100644 (file)
@@ -1758,6 +1758,19 @@ S:       Maintained
 F:     drivers/scsi/dpt*
 F:     drivers/scsi/dpt/
 
+DRBD DRIVER
+P:     Philipp Reisner
+P:     Lars Ellenberg
+M:     drbd-dev@lists.linbit.com
+L:     drbd-user@lists.linbit.com
+W:     http://www.drbd.org
+T:     git git://git.drbd.org/linux-2.6-drbd.git drbd
+T:     git git://git.drbd.org/drbd-8.3.git
+S:     Supported
+F:     drivers/block/drbd/
+F:     lib/lru_cache.c
+F:     Documentation/blockdev/drbd/
+
 DRIVER CORE, KOBJECTS, AND SYSFS
 M:     Greg Kroah-Hartman <gregkh@suse.de>
 T:     quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 1d886e0..77bfce5 100644 (file)
@@ -271,6 +271,8 @@ config BLK_DEV_CRYPTOLOOP
          instead, which can be configured to be on-disk compatible with the
          cryptoloop device.
 
+source "drivers/block/drbd/Kconfig"
+
 config BLK_DEV_NBD
        tristate "Network block device support"
        depends on NET
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index cdaa3f8..aff5ac9 100644 (file)
@@ -36,5 +36,6 @@ obj-$(CONFIG_BLK_DEV_UB)      += ub.o
 obj-$(CONFIG_BLK_DEV_HD)       += hd.o
 
 obj-$(CONFIG_XEN_BLKDEV_FRONTEND)      += xen-blkfront.o
+obj-$(CONFIG_BLK_DEV_DRBD)     += drbd/
 
 swim_mod-objs  := swim.o swim_asm.o
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
new file mode 100644 (file)
index 0000000..4e6f90f
--- /dev/null
@@ -0,0 +1,82 @@
+#
+# DRBD device driver configuration
+#
+
+comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
+       depends on !PROC_FS || !INET || !CONNECTOR
+
+config BLK_DEV_DRBD
+       tristate "DRBD Distributed Replicated Block Device support"
+       depends on PROC_FS && INET && CONNECTOR
+       select LRU_CACHE
+       default n
+       help
+
+         NOTE: In order to authenticate connections you have to select
+         CRYPTO_HMAC and a hash function as well.
+
+         DRBD is a shared-nothing, synchronously replicated block device. It
+         is designed to serve as a building block for high availability
+         clusters and, in this context, is a "drop-in" replacement for shared
+         storage. Simplistically, you could see it as a network RAID 1.
+
+         Each minor device has a role, which can be 'primary' or 'secondary'.
+         On the node with the primary device the application is supposed to
+         run and to access the device (/dev/drbdX). Every write is sent to
+         the local 'lower level block device' and, across the network, to the
+         node with the device in 'secondary' state.  The secondary device
+         simply writes the data to its lower level block device.
+
+         DRBD can also be used in dual-Primary mode (device writable on both
+         nodes), which means it can exhibit shared disk semantics in a
+         shared-nothing cluster.  Needless to say, a cluster file system
+         must be used on top of dual-Primary DRBD to maintain cache
+         coherency.
+
+         For automatic failover you need a cluster manager (e.g. heartbeat).
+         See also: http://www.drbd.org/, http://www.linux-ha.org
+
+         If unsure, say N.
+
+config DRBD_TRACE
+       tristate "DRBD tracing"
+       depends on BLK_DEV_DRBD
+       select TRACEPOINTS
+       default n
+       help
+
+         Say Y here if you want to be able to trace various events in DRBD.
+
+         If unsure, say N.
+
+config DRBD_FAULT_INJECTION
+       bool "DRBD fault injection"
+       depends on BLK_DEV_DRBD
+       help
+
+         Say Y here if you want to simulate IO errors, in order to test DRBD's
+         behavior.
+
+         The actual simulation of IO errors is done by writing 3 values to
+         /sys/module/drbd/parameters/
+
+         enable_faults: bitmask of...
+         1     meta data write
+         2     meta data read
+         4     resync data write
+         8     resync data read
+         16    data write
+         32    data read
+         64    read ahead
+         128   kmalloc of bitmap
+         256   allocation of EE (epoch_entries)
+
+         fault_devs: bitmask of minor numbers
+         fault_rate: frequency in percent
+
+         Example: Simulate data write errors on /dev/drbd0 with a probability of 5%.
+               echo 16 > /sys/module/drbd/parameters/enable_faults
+               echo 1 > /sys/module/drbd/parameters/fault_devs
+               echo 5 > /sys/module/drbd/parameters/fault_rate
+
+         If unsure, say N.
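As a purely illustrative aside (not part of the patch), the fault-injection
interface described in the help text above can also be driven from a small
user-space program rather than from the shell.  The sketch below rests on
assumptions: that the parameters accept plain decimal strings, as the echo
examples suggest, and the helper name write_param() and the chosen fault
mask (data write | data read = 48) are invented for the example.

#include <stdio.h>

/* Hypothetical helper: write one decimal value to a drbd module parameter. */
static int write_param(const char *name, int value)
{
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/module/drbd/parameters/%s", name);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%d\n", value);
        return fclose(f);
}

int main(void)
{
        /* 16 | 32 = 48: inject faults on both data writes and data reads */
        if (write_param("enable_faults", 16 | 32))
                return 1;
        if (write_param("fault_devs", 1 << 0))  /* bit 0: minor 0, /dev/drbd0 */
                return 1;
        if (write_param("fault_rate", 5))       /* 5 percent of matching IOs */
                return 1;
        return 0;
}

The three echo commands in the help text achieve the same effect; a program
like this is only convenient when a test harness switches fault patterns
frequently.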
diff --git a/drivers/block/drbd/Makefile b/drivers/block/drbd/Makefile
new file mode 100644 (file)
index 0000000..7d86ef8
--- /dev/null
@@ -0,0 +1,8 @@
+drbd-y := drbd_bitmap.o drbd_proc.o
+drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
+drbd-y += drbd_main.o drbd_strings.o drbd_nl.o
+
+drbd_trace-y := drbd_tracing.o
+
+obj-$(CONFIG_BLK_DEV_DRBD)     += drbd.o
+obj-$(CONFIG_DRBD_TRACE)       += drbd_trace.o
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
new file mode 100644 (file)
index 0000000..74b4835
--- /dev/null
@@ -0,0 +1,1484 @@
+/*
+   drbd_actlog.c
+
+   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
+   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
+   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
+
+   drbd is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+
+   drbd is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with drbd; see the file COPYING.  If not, write to
+   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
+#include <linux/slab.h>
+#include <linux/drbd.h>
+#include "drbd_int.h"
+#include "drbd_tracing.h"
+#include "drbd_wrappers.h"
+
+/* We maintain a trivial checksum in our on-disk activity log.
+ * With that we can ensure correct operation even when the storage
+ * device might do a partial (last) sector write while losing power.
+ */
+struct __packed al_transaction {
+       u32       magic;
+       u32       tr_number;
+       struct __packed {
+               u32 pos;
+               u32 extent; } updates[1 + AL_EXTENTS_PT];
+       u32       xor_sum;
+};
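/*
 * Editorial sketch, not part of this patch: one plausible way the xor_sum
 * above could be filled in and verified.  The exact set of fields covered
 * by the real driver is defined later in this file; here the sum simply
 * XORs the extent numbers of all updates[] slots, so a torn write of the
 * sector holding the transaction shows up as a mismatch on the next read.
 */
static u32 al_tr_xor_sum_sketch(const struct al_transaction *tr)
{
        u32 sum = 0;
        int i;

        for (i = 0; i < 1 + AL_EXTENTS_PT; i++)
                sum ^= tr->updates[i].extent;
        return sum;
}
/* writer: tr->xor_sum = al_tr_xor_sum_sketch(tr);
 * reader: bad = (tr->xor_sum != al_tr_xor_sum_sketch(tr)); */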
+
+struct update_odbm_work {
+       struct drbd_work w;
+       unsigned int enr;
+};
+
+struct update_al_work {
+       struct drbd_work w;
+       struct lc_element *al_ext;
+       struct completion event;
+       unsigned int enr;
+       /* if old_enr != LC_FREE, write corresponding bitmap sector, too */
+       unsigned int old_enr;
+};
+
+struct drbd_atodb_wait {
+       atomic_t           count;
+       struct completion  io_done;
+       struct drbd_conf   *mdev;
+       int                error;
+};
+
+
+int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
+
+/* The actual tracepoint needs to have a constant number of known arguments...
+ */
+void trace_drbd_resync(struct drbd_conf *mdev, int level, const char *fmt, ...)
+{
+       va_list ap;
+
+       va_start(ap, fmt);
+       trace__drbd_resync(mdev, level, fmt, ap);
+       va_end(ap);
+}
+
+static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
+                                struct drbd_backing_dev *bdev,
+                                struct page *page, sector_t sector,
+                                int rw, int size)
+{
+       struct bio *bio;
+       struct drbd_md_io md_io;
+       int ok;
+
+       md_io.mdev = mdev;
+       init_completion(&md_io.event);
+       md_io.error = 0;
+
+       if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
+               rw |= (1 << BIO_RW_BARRIER);
+       rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO));
+
+ retry:
+       bio = bio_alloc(GFP_NOIO, 1);
+       bio->bi_bdev = bdev->md_bdev;
+       bio->bi_sector = sector;
+       ok = (bio_add_page(bio, page, size, 0) == size);
+       if (!ok)
+               goto out;
+       bio->bi_private = &md_io;
+       bio->bi_end_io = drbd_md_io_complete;
+       bio->bi_rw = rw;
+
+       trace_drbd_bio(mdev, "Md", bio, 0, NULL);
+
+       if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+               bio_endio(bio, -EIO);
+       else
+               submit_bio(rw, bio);
+       wait_for_completion(&md_io.event);
+       ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
+
+       /* check for unsupported barrier op.
+        * would rather check on EOPNOTSUPP, but that is not reliable.
+        * don't try again for ANY return value != 0 */
+       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) {
+               /* Try again with no barrier */
+               dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
+               set_bit(MD_NO_BARRIER, &mdev->flags);
+               rw &= ~(1 << BIO_RW_BARRIER);
+               bio_put(bio);
+               goto retry;
+       }
+ out:
+       bio_put(bio);
+       return ok;
+}
+
+int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+                        sector_t sector, int rw)
+{
+       int logical_block_size, mask, ok;
+       int offset = 0;
+       struct page *iop = mdev->md_io_page;
+
+       D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));
+
+       BUG_ON(!bdev->md_bdev);
+
+       logical_block_size = bdev_logical_block_size(bdev->md_bdev);
+       if (logical_block_size == 0)
+               logical_block_size = MD_SECTOR_SIZE;
+
+       /* in case logical_block_size != 512 [ s390 only? ] */
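+       /* Example (with 512 byte meta data sectors): on a device with 4096
+        * byte logical blocks, mask is 7; a write to meta data sector 13 turns
+        * into a read-modify-write of sectors 8..15 via md_io_tmpp, with
+        * offset 5 selecting the 512 byte piece within that block (a read
+        * simply copies that piece back out of the full block). */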
+       if (logical_block_size != MD_SECTOR_SIZE) {
+               mask = (logical_block_size / MD_SECTOR_SIZE) - 1;
+               D_ASSERT(mask == 1 || mask == 3 || mask == 7);
+               D_ASSERT(logical_block_size == (mask+1) * MD_SECTOR_SIZE);
+               offset = sector & mask;
+               sector = sector & ~mask;
+               iop = mdev->md_io_tmpp;
+
+               if (rw & WRITE) {
+                       /* these are GFP_KERNEL pages, pre-allocated
+                        * on device initialization */
+                       void *p = page_address(mdev->md_io_page);
+                       void *hp = page_address(mdev->md_io_tmpp);
+
+                       ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector,
+                                       READ, logical_block_size);
+
+                       if (unlikely(!ok)) {
+                               dev_err(DEV, "drbd_md_sync_page_io(,%llus,"
+                                   "READ [logical_block_size!=512]) failed!\n",
+                                   (unsigned long long)sector);
+                               return 0;
+                       }
+
+                       memcpy(hp + offset*MD_SECTOR_SIZE, p, MD_SECTOR_SIZE);
+               }
+       }
+
+       if (sector < drbd_md_first_sector(bdev) ||
+           sector > drbd_md_last_sector(bdev))
+               dev_alert(DEV, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
+                    current->comm, current->pid, __func__,
+                    (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
+
+       ok = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, logical_block_size);
+       if (unlikely(!ok)) {
+               dev_err(DEV, "drbd_md_sync_page_io(,%llus,%s) failed!\n",
+                   (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ");
+               return 0;
+       }
+
+       if (logical_block_size != MD_SECTOR_SIZE && !(rw & WRITE)) {
+               void *p = page_address(mdev->md_io_page);
+               void *hp = page_address(mdev->md_io_tmpp);
+
+               memcpy(p, hp + offset*MD_SECTOR_SIZE, MD_SECTOR_SIZE);
+       }
+
+       return ok;
+}
+
+static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
+{
+       struct lc_element *al_ext;
+       struct lc_element *tmp;
+       unsigned long     al_flags = 0;
+
+       spin_lock_irq(&mdev->al_lock);
+       tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
+       if (unlikely(tmp != NULL)) {
+               struct bm_extent  *bm_ext = lc_entry(tmp, struct bm_extent, lce);
+               if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
+                       spin_unlock_irq(&mdev->al_lock);
+                       return NULL;
+               }
+       }
+       al_ext   = lc_get(mdev->act_log, enr);
+       al_flags = mdev->act_log->flags;
+       spin_unlock_irq(&mdev->al_lock);
+
+       /*
+       if (!al_ext) {
+               if (al_flags & LC_STARVING)
+                       dev_warn(DEV, "Have to wait for LRU element (AL too small?)\n");
+               if (al_flags & LC_DIRTY)
+                       dev_warn(DEV, "Ongoing AL update (AL device too slow?)\n");
+       }
+       */
+
+       return al_ext;
+}
+
+void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
+{
+       unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
+       struct lc_element *al_ext;
+       struct update_al_work al_work;
+
+       D_ASSERT(atomic_read(&mdev->local_cnt) > 0);
+
+       trace_drbd_actlog(mdev, sector, "al_begin_io");
+
+       wait_event(mdev->al_wait, (al_ext = _al_get(mdev, enr)));
+
+       if (al_ext->lc_number != enr) {
+               /* drbd_al_write_transaction(mdev,al_ext,enr);
+                * recurses into generic_make_request(), which
+                * disallows recursion, bios being serialized on the
+                * current->bio_tail list now.
+                * we have to delegate updates to the activity log
+                * to the worker thread. */
+               init_completion(&al_work.event);
+               al_work.al_ext = al_ext;
+               al_work.enr = enr;
+               al_work.old_enr = al_ext->lc_number;
+               al_work.w.cb = w_al_write_transaction;
+               drbd_queue_work_front(&mdev->data.work, &al_work.w);
+               wait_for_completion(&al_work.event);
+
+               mdev->al_writ_cnt++;
+
+               spin_lock_irq(&mdev->al_lock);
+               lc_changed(mdev->act_log, al_ext);
+               spin_unlock_irq(&mdev->al_lock);
+               wake_up(&mdev->al_wait);
+       }
+}
+
+void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
+{
+       unsigned int enr = (sector >> (AL_EXTENT_SHIFT-9));
+       struct lc_element *extent;
+       unsigned long flags;
+
+       trace_drbd_actlog(mdev, sector, "al_complete_io");
+
+       spin_lock_irqsave(&mdev->al_lock, flags);
+
+       extent = lc_find(mdev->act_log, enr);
+
+       if (!extent) {
+               spin_unlock_irqrestore(&mdev->al_lock, flags);
+               dev_err(DEV, "al_complete_io() called on inactive extent %u\n", enr);
+               return;
+       }
+
+       if (lc_put(mdev->act_log, extent) == 0)
+               wake_up(&mdev->al_wait);
+
+       spin_unlock_irqrestore(&mdev->al_lock, flags);
+}
+
+int
+w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+{
+       struct update_al_work *aw = container_of(w, struct update_al_work, w);
+       struct lc_element *updated = aw->al_ext;
+       const unsigned int new_enr = aw->enr;
+       const unsigned int evicted = aw->old_enr;
+       struct al_transaction *buffer;
+       sector_t sector;
+       int i, n, mx;
+       unsigned int extent_nr;
+       u32 xor_sum = 0;
+
+       if (!get_ldev(mdev)) {
+               dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
+               complete(&((struct update_al_work *)w)->event);
+               return 1;
+       }
+       /* do we have to do a bitmap write, first?
+        * TODO reduce maximum latency:
+        * submit both bios, then wait for both,
+        * instead of doing two synchronous sector writes. */
+       if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
+               drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
+
+       mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
+       buffer = (struct al_transaction *)page_address(mdev->md_io_page);
+
+       buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
+       buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
+
+       n = lc_index_of(mdev->act_log, updated);
+
+       buffer->updates[0].pos = cpu_to_be32(n);
+       buffer->updates[0].extent = cpu_to_be32(new_enr);
+
+       xor_sum ^= new_enr;
+
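+       /* Besides the just-updated slot in updates[0], each transaction dumps
+        * the next AL_EXTENTS_PT slots of the in-core activity log (cycling
+        * through it via al_tr_cycle), so consecutive transactions redundantly
+        * contain the complete AL and it can be restored from the on-disk log. */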
+       mx = min_t(int, AL_EXTENTS_PT,
+                  mdev->act_log->nr_elements - mdev->al_tr_cycle);
+       for (i = 0; i < mx; i++) {
+               unsigned idx = mdev->al_tr_cycle + i;
+               extent_nr = lc_element_by_index(mdev->act_log, idx)->lc_number;
+               buffer->updates[i+1].pos = cpu_to_be32(idx);
+               buffer->updates[i+1].extent = cpu_to_be32(extent_nr);
+               xor_sum ^= extent_nr;
+       }
+       for (; i < AL_EXTENTS_PT; i++) {
+               buffer->updates[i+1].pos = __constant_cpu_to_be32(-1);
+               buffer->updates[i+1].extent = __constant_cpu_to_be32(LC_FREE);
+               xor_sum ^= LC_FREE;
+       }
+       mdev->al_tr_cycle += AL_EXTENTS_PT;
+       if (mdev->al_tr_cycle >= mdev->act_log->nr_elements)
+               mdev->al_tr_cycle = 0;
+
+       buffer->xor_sum = cpu_to_be32(xor_sum);
+
+       sector =  mdev->ldev->md.md_offset
+               + mdev->ldev->md.al_offset + mdev->al_tr_pos;
+
+       if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
+               drbd_chk_io_error(mdev, 1, TRUE);
+
+       if (++mdev->al_tr_pos >
+           div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
+               mdev->al_tr_pos = 0;
+
+       D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
+       mdev->al_tr_number++;
+
+       mutex_unlock(&mdev->md_io_mutex);
+
+       complete(&((struct update_al_work *)w)->event);
+       put_ldev(mdev);
+
+       return 1;
+}
+
+/**
+ * drbd_al_read_tr() - Read a single transaction from the on disk activity log
+ * @mdev:      DRBD device.
+ * @bdev:      Block device to read from.
+ * @b:         pointer to an al_transaction.
+ * @index:     On disk slot of the transaction to read.
+ *
+ * Returns -1 on IO error, 0 on checksum error and 1 upon success.
+ */
+static int drbd_al_read_tr(struct drbd_conf *mdev,
+                          struct drbd_backing_dev *bdev,
+                          struct al_transaction *b,
+                          int index)
+{
+       sector_t sector;
+       int rv, i;
+       u32 xor_sum = 0;
+
+       sector = bdev->md.md_offset + bdev->md.al_offset + index;
+
+       /* Don't process errors normally,
+        * as this is done before the disk is attached! */
+       if (!drbd_md_sync_page_io(mdev, bdev, sector, READ))
+               return -1;
+
+       rv = (be32_to_cpu(b->magic) == DRBD_MAGIC);
+
+       for (i = 0; i < AL_EXTENTS_PT + 1; i++)
+               xor_sum ^= be32_to_cpu(b->updates[i].extent);
+       rv &= (xor_sum == be32_to_cpu(b->xor_sum));
+
+       return rv;
+}
+
+/**
+ * drbd_al_read_log() - Restores the activity log from its on disk representation.
+ * @mdev:      DRBD device.
+ * @bdev:      Block device to read from.
+ *
+ * Returns 1 on success, returns 0 when reading the log failed due to IO errors.
+ */
+int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+{
+       struct al_transaction *buffer;
+       int i;
+       int rv;
+       int mx;
+       int active_extents = 0;
+       int transactions = 0;
+       int found_valid = 0;
+       int from = 0;
+       int to = 0;
+       u32 from_tnr = 0;
+       u32 to_tnr = 0;
+       u32 cnr;
+
+       mx = div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT);
+
+       /* lock out all other meta data io for now,
+        * and make sure the page is mapped.
+        */
+       mutex_lock(&mdev->md_io_mutex);
+       buffer = page_address(mdev->md_io_page);
+
+       /* Find the valid transaction in the log */
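+       /* The log is written cyclically, so the slot with the smallest
+        * transaction number is the oldest ("from") and the one with the
+        * largest is the newest ("to").  E.g. for on-disk numbers
+        * 8 9 10 4 5 6 7, "from" is the slot holding 4 and "to" the slot
+        * holding 10; replay below walks from "from" to "to", wrapping
+        * around at the end of the log. */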
+       for (i = 0; i <= mx; i++) {
+               rv = drbd_al_read_tr(mdev, bdev, buffer, i);
+               if (rv == 0)
+                       continue;
+               if (rv == -1) {
+                       mutex_unlock(&mdev->md_io_mutex);
+                       return 0;
+               }
+               cnr = be32_to_cpu(buffer->tr_number);
+
+               if (++found_valid == 1) {
+                       from = i;
+                       to = i;
+                       from_tnr = cnr;
+                       to_tnr = cnr;
+                       continue;
+               }
+               if ((int)cnr - (int)from_tnr < 0) {
+                       D_ASSERT(from_tnr - cnr + i - from == mx+1);
+                       from = i;
+                       from_tnr = cnr;
+               }
+               if ((int)cnr - (int)to_tnr > 0) {
+                       D_ASSERT(cnr - to_tnr == i - to);
+                       to = i;
+                       to_tnr = cnr;
+               }
+       }
+
+       if (!found_valid) {
+               dev_warn(DEV, "No usable activity log found.\n");
+               mutex_unlock(&mdev->md_io_mutex);
+               return 1;
+       }
+
+       /* Read the valid transactions.
+        * dev_info(DEV, "Reading from %d to %d.\n",from,to); */
+       i = from;
+       while (1) {
+               int j, pos;
+               unsigned int extent_nr;
+               unsigned int trn;
+
+               rv = drbd_al_read_tr(mdev, bdev, buffer, i);
+               ERR_IF(rv == 0) goto cancel;
+               if (rv == -1) {
+                       mutex_unlock(&mdev->md_io_mutex);
+                       return 0;
+               }
+
+               trn = be32_to_cpu(buffer->tr_number);
+
+               spin_lock_irq(&mdev->al_lock);
+
+               /* This loop runs backwards because the cyclically dumped
+                  slots may contain an older version of the element that
+                  was updated in slot 0.  Applying slot 0 last lets it
+                  overwrite those old versions. */
+               for (j = AL_EXTENTS_PT; j >= 0; j--) {
+                       pos = be32_to_cpu(buffer->updates[j].pos);
+                       extent_nr = be32_to_cpu(buffer->updates[j].extent);
+
+                       if (extent_nr == LC_FREE)
+                               continue;
+
+                       lc_set(mdev->act_log, extent_nr, pos);
+                       active_extents++;
+               }
+               spin_unlock_irq(&mdev->al_lock);
+
+               transactions++;
+
+cancel:
+               if (i == to)
+                       break;
+               i++;
+               if (i > mx)
+                       i = 0;
+       }
+
+       mdev->al_tr_number = to_tnr+1;
+       mdev->al_tr_pos = to;
+       if (++mdev->al_tr_pos >
+           div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
+               mdev->al_tr_pos = 0;
+
+       /* ok, we are done with it */
+       mutex_unlock(&mdev->md_io_mutex);
+
+       dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
+            transactions, active_extents);
+
+       return 1;
+}
+
+static void atodb_endio(struct bio *bio, int error)
+{
+       struct drbd_atodb_wait *wc = bio->bi_private;
+       struct drbd_conf *mdev = wc->mdev;
+       struct page *page;
+       int uptodate = bio_flagged(bio, BIO_UPTODATE);
+
+       /* strange behavior of some lower level drivers...
+        * fail the request by clearing the uptodate flag,
+        * but do not return any error?! */
+       if (!error && !uptodate)
+               error = -EIO;
+
+       drbd_chk_io_error(mdev, error, TRUE);
+       if (error && wc->error == 0)
+               wc->error = error;
+
+       if (atomic_dec_and_test(&wc->count))
+               complete(&wc->io_done);
+
+       page = bio->bi_io_vec[0].bv_page;
+       put_page(page);
+       bio_put(bio);
+       mdev->bm_writ_cnt++;
+       put_ldev(mdev);
+}
+
+#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
+/* activity log to on disk bitmap -- prepare bio unless that sector
+ * is already covered by previously prepared bios */
+static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
+                                       struct bio **bios,
+                                       unsigned int enr,
+                                       struct drbd_atodb_wait *wc) __must_hold(local)
+{
+       struct bio *bio;
+       struct page *page;
+       sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
+                                     + mdev->ldev->md.bm_offset;
+       unsigned int page_offset = PAGE_SIZE;
+       int offset;
+       int i = 0;
+       int err = -ENOMEM;
+
+       /* Check if that enr is already covered by an already created bio.
+        * Caution, bios[] is not NULL terminated,
+        * but only initialized to all NULL.
+        * For completely scattered activity log,
+        * the last invocation iterates over all bios,
+        * and finds the last NULL entry.
+        */
+       while ((bio = bios[i])) {
+               if (bio->bi_sector == on_disk_sector)
+                       return 0;
+               i++;
+       }
+       /* bios[i] == NULL, the next not yet used slot */
+
+       /* GFP_KERNEL, we are not in the write-out path */
+       bio = bio_alloc(GFP_KERNEL, 1);
+       if (bio == NULL)
+               return -ENOMEM;
+
+       if (i > 0) {
+               const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec;
+               page_offset = prev_bv->bv_offset + prev_bv->bv_len;
+               page = prev_bv->bv_page;
+       }
+       if (page_offset == PAGE_SIZE) {
+               page = alloc_page(__GFP_HIGHMEM);
+               if (page == NULL)
+                       goto out_bio_put;
+               page_offset = 0;
+       } else {
+               get_page(page);
+       }
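+       /* Consecutive calls thus pack their 512 byte bitmap sectors into the
+        * same page, up to PAGE_SIZE/MD_SECTOR_SIZE of them (8 with 4K pages),
+        * instead of allocating one page per bio. */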
+
+       offset = S2W(enr);
+       drbd_bm_get_lel(mdev, offset,
+                       min_t(size_t, S2W(1), drbd_bm_words(mdev) - offset),
+                       kmap(page) + page_offset);
+       kunmap(page);
+
+       bio->bi_private = wc;
+       bio->bi_end_io = atodb_endio;
+       bio->bi_bdev = mdev->ldev->md_bdev;
+       bio->bi_sector = on_disk_sector;
+
+       if (bio_add_page(bio, page, MD_SECTOR_SIZE, page_offset) != MD_SECTOR_SIZE)
+               goto out_put_page;
+
+       atomic_inc(&wc->count);
+       /* we already know that we may do this...
+        * get_ldev_if_state(mdev,D_ATTACHING);
+        * just get the extra reference, so that the local_cnt reflects
+        * the number of pending IO requests DRBD has at its backing device.
+        */
+       atomic_inc(&mdev->local_cnt);
+
+       bios[i] = bio;
+
+       return 0;
+
+out_put_page:
+       err = -EINVAL;
+       put_page(page);
+out_bio_put:
+       bio_put(bio);
+       return err;
+}
+
+/**
+ * drbd_al_to_on_disk_bm() - Writes bitmap parts covered by active AL extents
+ * @mdev:      DRBD device.
+ *
+ * Called when we detach (unconfigure) local storage,
+ * or when we go from R_PRIMARY to R_SECONDARY role.
+ */
+void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
+{
+       int i, nr_elements;
+       unsigned int enr;
+       struct bio **bios;
+       struct drbd_atodb_wait wc;
+
+       ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
+               return; /* sorry, I don't have any act_log etc... */
+
+       wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+
+       nr_elements = mdev->act_log->nr_elements;
+
+       /* GFP_KERNEL, we are not in anyone's write-out path */
+       bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
+       if (!bios)
+               goto submit_one_by_one;
+
+       atomic_set(&wc.count, 0);
+       init_completion(&wc.io_done);
+       wc.mdev = mdev;
+       wc.error = 0;
+
+       for (i = 0; i < nr_elements; i++) {
+               enr = lc_element_by_index(mdev->act_log, i)->lc_number;
+               if (enr == LC_FREE)
+                       continue;
+               /* next statement also does atomic_inc wc.count and local_cnt */
+               if (atodb_prepare_unless_covered(mdev, bios,
+                                               enr/AL_EXT_PER_BM_SECT,
+                                               &wc))
+                       goto free_bios_submit_one_by_one;
+       }
+
+       /* unnecessary optimization? */
+       lc_unlock(mdev->act_log);
+       wake_up(&mdev->al_wait);
+
+       /* all prepared, submit them */
+       for (i = 0; i < nr_elements; i++) {
+               if (bios[i] == NULL)
+                       break;
+               if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) {
+                       bios[i]->bi_rw = WRITE;
+                       bio_endio(bios[i], -EIO);
+               } else {
+                       submit_bio(WRITE, bios[i]);
+               }
+       }
+
+       drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
+
+       /* always (try to) flush bitmap to stable storage */
+       drbd_md_flush(mdev);
+
+       /* In case we did not submit a single IO, do not wait for
+        * them to complete (we would wait forever here).
+        *
+        * In case we had IOs and they are already complete, there
+        * is no point in waiting anyway.
+        * Therefore this if () ... */
+       if (atomic_read(&wc.count))
+               wait_for_completion(&wc.io_done);
+
+       put_ldev(mdev);
+
+       kfree(bios);
+       return;
+
+ free_bios_submit_one_by_one:
+       /* free everything by calling the endio callback directly. */
+       for (i = 0; i < nr_elements && bios[i]; i++)
+               bio_endio(bios[i], 0);
+
+       kfree(bios);
+
+ submit_one_by_one:
+       dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n");
+
+       for (i = 0; i < mdev->act_log->nr_elements; i++) {
+               enr = lc_element_by_index(mdev->act_log, i)->lc_number;
+               if (enr == LC_FREE)
+                       continue;
+               /* Really slow: if we have al-extents 16..19 active,
+                * sector 4 will be written four times! Synchronous! */
+               drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT);
+       }
+
+       lc_unlock(mdev->act_log);
+       wake_up(&mdev->al_wait);
+       put_ldev(mdev);
+}
+
+/**
+ * drbd_al_apply_to_bm() - Sets the bitmap to dirty (1) where covered by active AL extents
+ * @mdev:      DRBD device.
+ */
+void drbd_al_apply_to_bm(struct drbd_conf *mdev)
+{
+       unsigned int enr;
+       unsigned long add = 0;
+       char ppb[10];
+       int i;
+
+       wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+
+       for (i = 0; i < mdev->act_log->nr_elements; i++) {
+               enr = lc_element_by_index(mdev->act_log, i)->lc_number;
+               if (enr == LC_FREE)
+                       continue;
+               add += drbd_bm_ALe_set_all(mdev, enr);
+       }
+
+       lc_unlock(mdev->act_log);
+       wake_up(&mdev->al_wait);
+
+       dev_info(DEV, "Marked additional %s as out-of-sync based on AL.\n",
+            ppsize(ppb, Bit2KB(add)));
+}
+
+static int _try_lc_del(struct drbd_conf *mdev, struct lc_element *al_ext)
+{
+       int rv;
+
+       spin_lock_irq(&mdev->al_lock);
+       rv = (al_ext->refcnt == 0);
+       if (likely(rv))
+               lc_del(mdev->act_log, al_ext);
+       spin_unlock_irq(&mdev->al_lock);
+
+       return rv;
+}
+
+/**
+ * drbd_al_shrink() - Removes all active extents from the activity log
+ * @mdev:      DRBD device.
+ *
+ * Removes all active extents from the activity log, waiting until
+ * the reference count of each entry has dropped to 0 first, of course.
+ *
+ * You need to lock mdev->act_log with lc_try_lock() / lc_unlock()
+ */
+void drbd_al_shrink(struct drbd_conf *mdev)
+{
+       struct lc_element *al_ext;
+       int i;
+
+       D_ASSERT(test_bit(__LC_DIRTY, &mdev->act_log->flags));
+
+       for (i = 0; i < mdev->act_log->nr_elements; i++) {
+               al_ext = lc_element_by_index(mdev->act_log, i);
+               if (al_ext->lc_number == LC_FREE)
+                       continue;
+               wait_event(mdev->al_wait, _try_lc_del(mdev, al_ext));
+       }
+
+       wake_up(&mdev->al_wait);
+}
+
+static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+{
+       struct update_odbm_work *udw = container_of(w, struct update_odbm_work, w);
+
+       if (!get_ldev(mdev)) {
+               if (__ratelimit(&drbd_ratelimit_state))
+                       dev_warn(DEV, "Can not update on disk bitmap, local IO disabled.\n");
+               kfree(udw);
+               return 1;
+       }
+
+       drbd_bm_write_sect(mdev, udw->enr);
+       put_ldev(mdev);
+
+       kfree(udw);
+
+       if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) {
+               switch (mdev->state.conn) {
+               case C_SYNC_SOURCE:  case C_SYNC_TARGET:
+               case C_PAUSED_SYNC_S: case C_PAUSED_SYNC_T:
+                       drbd_resync_finished(mdev);
+               default:
+                       /* nothing to do */
+                       break;
+               }
+       }
+       drbd_bcast_sync_progress(mdev);
+
+       return 1;
+}
+
+
+/* ATTENTION. The AL's extents are 4MB each, while the extents in the
+ * resync LRU-cache are 16MB each.
+ * The caller of this function has to hold a get_ldev() reference.
+ *
+ * TODO will be obsoleted once we have a caching lru of the on disk bitmap
+ */
+static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
+                                     int count, int success)
+{
+       struct lc_element *e;
+       struct update_odbm_work *udw;
+
+       unsigned int enr;
+
+       D_ASSERT(atomic_read(&mdev->local_cnt));
+
+       /* I simply assume that a sector/size pair never crosses
+        * a 16 MB extent border. (Currently this is true...) */
+       enr = BM_SECT_TO_EXT(sector);
+
+       e = lc_get(mdev->resync, enr);
+       if (e) {
+               struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
+               if (ext->lce.lc_number == enr) {
+                       if (success)
+                               ext->rs_left -= count;
+                       else
+                               ext->rs_failed += count;
+                       if (ext->rs_left < ext->rs_failed) {
+                               dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
+                                   "rs_failed=%d count=%d\n",
+                                    (unsigned long long)sector,
+                                    ext->lce.lc_number, ext->rs_left,
+                                    ext->rs_failed, count);
+                               dump_stack();
+
+                               lc_put(mdev->resync, &ext->lce);
+                               drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+                               return;
+                       }
+               } else {
+                       /* Normally this element should be in the cache,
+                        * since drbd_rs_begin_io() pulled it already in.
+                        *
+                        * But maybe an application write finished, and we set
+                        * something outside the resync lru_cache in sync.
+                        */
+                       int rs_left = drbd_bm_e_weight(mdev, enr);
+                       if (ext->flags != 0) {
+                               dev_warn(DEV, "changing resync lce: %d[%u;%02lx]"
+                                    " -> %d[%u;00]\n",
+                                    ext->lce.lc_number, ext->rs_left,
+                                    ext->flags, enr, rs_left);
+                               ext->flags = 0;
+                       }
+                       if (ext->rs_failed) {
+                               dev_warn(DEV, "Kicking resync_lru element enr=%u "
+                                    "out with rs_failed=%d\n",
+                                    ext->lce.lc_number, ext->rs_failed);
+                               set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
+                       }
+                       ext->rs_left = rs_left;
+                       ext->rs_failed = success ? 0 : count;
+                       lc_changed(mdev->resync, &ext->lce);
+               }
+               lc_put(mdev->resync, &ext->lce);
+               /* no race, we are within the al_lock! */
+
+               if (ext->rs_left == ext->rs_failed) {
+                       ext->rs_failed = 0;
+
+                       udw = kmalloc(sizeof(*udw), GFP_ATOMIC);
+                       if (udw) {
+                               udw->enr = ext->lce.lc_number;
+                               udw->w.cb = w_update_odbm;
+                               drbd_queue_work_front(&mdev->data.work, &udw->w);
+                       } else {
+                               dev_warn(DEV, "Could not kmalloc an udw\n");
+                               set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
+                       }
+               }
+       } else {
+               dev_err(DEV, "lc_get() failed! locked=%d/%d flags=%lu\n",
+                   mdev->resync_locked,
+                   mdev->resync->nr_elements,
+                   mdev->resync->flags);
+       }
+}
+
+/* clear the bits corresponding to the piece of storage in question:
+ * size bytes of data starting from sector.  Only clear the bits of the
+ * affected one or more _aligned_ BM_BLOCK_SIZE blocks.
+ *
+ * called by worker on C_SYNC_TARGET and receiver on SyncSource.
+ *
+ */
+void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
+                      const char *file, const unsigned int line)
+{
+       /* Is called from worker and receiver context _only_ */
+       unsigned long sbnr, ebnr, lbnr;
+       unsigned long count = 0;
+       sector_t esector, nr_sectors;
+       int wake_up = 0;
+       unsigned long flags;
+
+       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+               dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
+                               (unsigned long long)sector, size);
+               return;
+       }
+       nr_sectors = drbd_get_capacity(mdev->this_bdev);
+       esector = sector + (size >> 9) - 1;
+
+       ERR_IF(sector >= nr_sectors) return;
+       ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);
+
+       lbnr = BM_SECT_TO_BIT(nr_sectors-1);
+
+       /* we clear it (in sync).
+        * round up start sector, round down end sector.  we make sure we only
+        * clear full, aligned, BM_BLOCK_SIZE (4K) blocks */
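+       /* Example (assuming the usual 4K bitmap granularity, 8 sectors per
+        * bit): sector=9, size=8192 covers sectors 9..24; sbnr and ebnr both
+        * become 2, so only the fully covered bit 2 (sectors 16..23) is
+        * cleared, the partially covered blocks at either end stay as-is. */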
+       if (unlikely(esector < BM_SECT_PER_BIT-1))
+               return;
+       if (unlikely(esector == (nr_sectors-1)))
+               ebnr = lbnr;
+       else
+               ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
+       sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
+
+       trace_drbd_resync(mdev, TRACE_LVL_METRICS,
+                         "drbd_set_in_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
+                         (unsigned long long)sector, size, sbnr, ebnr);
+
+       if (sbnr > ebnr)
+               return;
+
+       /*
+        * ok, (capacity & 7) != 0 sometimes, but who cares...
+        * we count rs_{total,left} in bits, not sectors.
+        */
+       spin_lock_irqsave(&mdev->al_lock, flags);
+       count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
+       if (count) {
+               /* we need the lock for drbd_try_clear_on_disk_bm */
+               if (jiffies - mdev->rs_mark_time > HZ*10) {
+                       /* should be rolling marks,
+                        * but we estimate only anyways. */
+                       if (mdev->rs_mark_left != drbd_bm_total_weight(mdev) &&
+                           mdev->state.conn != C_PAUSED_SYNC_T &&
+                           mdev->state.conn != C_PAUSED_SYNC_S) {
+                               mdev->rs_mark_time = jiffies;
+                               mdev->rs_mark_left = drbd_bm_total_weight(mdev);
+                       }
+               }
+               if (get_ldev(mdev)) {
+                       drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
+                       put_ldev(mdev);
+               }
+               /* just wake_up unconditionally now, for the various lc_changed(),
+                * lc_put() in drbd_try_clear_on_disk_bm(). */
+               wake_up = 1;
+       }
+       spin_unlock_irqrestore(&mdev->al_lock, flags);
+       if (wake_up)
+               wake_up(&mdev->al_wait);
+}
+
+/*
+ * this is intended to set one request worth of data out of sync.
+ * affects at least 1 bit,
+ * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits.
+ *
+ * called by tl_clear and drbd_send_dblock (==drbd_make_request).
+ * so this can be _any_ process.
+ */
+void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
+                           const char *file, const unsigned int line)
+{
+       unsigned long sbnr, ebnr, lbnr, flags;
+       sector_t esector, nr_sectors;
+       unsigned int enr, count;
+       struct lc_element *e;
+
+       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+               dev_err(DEV, "sector: %llus, size: %d\n",
+                       (unsigned long long)sector, size);
+               return;
+       }
+
+       if (!get_ldev(mdev))
+               return; /* no disk, no metadata, no bitmap to set bits in */
+
+       nr_sectors = drbd_get_capacity(mdev->this_bdev);
+       esector = sector + (size >> 9) - 1;
+
+       ERR_IF(sector >= nr_sectors)
+               goto out;
+       ERR_IF(esector >= nr_sectors)
+               esector = (nr_sectors-1);
+
+       lbnr = BM_SECT_TO_BIT(nr_sectors-1);
+
+       /* we set it out of sync,
+        * we do not need to round anything here */
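+       /* (unlike the in-sync path above, a BM_BLOCK_SIZE block that is only
+        * partially covered by the request is marked out of sync as a whole) */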
+       sbnr = BM_SECT_TO_BIT(sector);
+       ebnr = BM_SECT_TO_BIT(esector);
+
+       trace_drbd_resync(mdev, TRACE_LVL_METRICS,
+                         "drbd_set_out_of_sync: sector=%llus size=%u sbnr=%lu ebnr=%lu\n",
+                         (unsigned long long)sector, size, sbnr, ebnr);
+
+       /* ok, (capacity & 7) != 0 sometimes, but who cares...
+        * we count rs_{total,left} in bits, not sectors.  */
+       spin_lock_irqsave(&mdev->al_lock, flags);
+       count = drbd_bm_set_bits(mdev, sbnr, ebnr);
+
+       enr = BM_SECT_TO_EXT(sector);
+       e = lc_find(mdev->resync, enr);
+       if (e)
+               lc_entry(e, struct bm_extent, lce)->rs_left += count;
+       spin_unlock_irqrestore(&mdev->al_lock, flags);
+
+out:
+       put_ldev(mdev);
+}
+
+static
+struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
+{
+       struct lc_element *e;
+       struct bm_extent *bm_ext;
+       int wakeup = 0;
+       unsigned long rs_flags;
+
+       spin_lock_irq(&mdev->al_lock);
+       if (mdev->resync_locked > mdev->resync->nr_elements/2) {
+               spin_unlock_irq(&mdev->al_lock);
+               return NULL;
+       }
+       e = lc_get(mdev->resync, enr);
+       bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
+       if (bm_ext) {
+               if (bm_ext->lce.lc_number != enr) {
+                       bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
+                       bm_ext->rs_failed = 0;
+                       lc_changed(mdev->resync, &bm_ext->lce);
+                       wakeup = 1;
+               }
+               if (bm_ext->lce.refcnt == 1)
+                       mdev->resync_locked++;
+               set_bit(BME_NO_WRITES, &bm_ext->flags);
+       }
+       rs_flags = mdev->resync->flags;
+       spin_unlock_irq(&mdev->al_lock);
+       if (wakeup)
+               wake_up(&mdev->al_wait);
+
+       if (!bm_ext) {
+               if (rs_flags & LC_STARVING)
+                       dev_warn(DEV, "Have to wait for element"
+                            " (resync LRU too small?)\n");
+               BUG_ON(rs_flags & LC_DIRTY);
+       }
+
+       return bm_ext;
+}
+
+static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
+{
+       struct lc_element *al_ext;
+       int rv = 0;
+
+       spin_lock_irq(&mdev->al_lock);
+       if (unlikely(enr == mdev->act_log->new_number))
+               rv = 1;
+       else {
+               al_ext = lc_find(mdev->act_log, enr);
+               if (al_ext) {
+                       if (al_ext->refcnt)
+                               rv = 1;
+               }
+       }
+       spin_unlock_irq(&mdev->al_lock);
+
+       /*
+       if (unlikely(rv)) {
+               dev_info(DEV, "Delaying sync read until app's write is done\n");
+       }
+       */
+       return rv;
+}
+
+/**
+ * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
+ * @mdev:      DRBD device.
+ * @sector:    The sector number.
+ *
+ * This function sleeps on al_wait. Returns 1 on success, 0 if interrupted.
+ */
+int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
+{
+       unsigned int enr = BM_SECT_TO_EXT(sector);
+       struct bm_extent *bm_ext;
+       int i, sig;
+
+       trace_drbd_resync(mdev, TRACE_LVL_ALL,
+                         "drbd_rs_begin_io: sector=%llus (rs_end=%d)\n",
+                         (unsigned long long)sector, enr);
+
+       sig = wait_event_interruptible(mdev->al_wait,
+                       (bm_ext = _bme_get(mdev, enr)));
+       if (sig)
+               return 0;
+
+       if (test_bit(BME_LOCKED, &bm_ext->flags))
+               return 1;
+
+       for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
+               sig = wait_event_interruptible(mdev->al_wait,
+                               !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i));
+               if (sig) {
+                       spin_lock_irq(&mdev->al_lock);
+                       if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
+                               clear_bit(BME_NO_WRITES, &bm_ext->flags);
+                               mdev->resync_locked--;
+                               wake_up(&mdev->al_wait);
+                       }
+                       spin_unlock_irq(&mdev->al_lock);
+                       return 0;
+               }
+       }
+
+       set_bit(BME_LOCKED, &bm_ext->flags);
+
+       return 1;
+}
+
+/**
+ * drbd_try_rs_begin_io() - Gets an extent in the resync LRU cache, does not sleep
+ * @mdev:      DRBD device.
+ * @sector:    The sector number.
+ *
+ * Gets an extent in the resync LRU cache, sets it to BME_NO_WRITES, then
+ * tries to set it to BME_LOCKED. Returns 0 upon success, and -EAGAIN
+ * if there is still application IO going on in this area.
+ */
+int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
+{
+       unsigned int enr = BM_SECT_TO_EXT(sector);
+       const unsigned int al_enr = enr*AL_EXT_PER_BM_SECT;
+       struct lc_element *e;
+       struct bm_extent *bm_ext;
+       int i;
+
+       trace_drbd_resync(mdev, TRACE_LVL_ALL, "drbd_try_rs_begin_io: sector=%llus\n",
+                         (unsigned long long)sector);
+
+       spin_lock_irq(&mdev->al_lock);
+       if (mdev->resync_wenr != LC_FREE && mdev->resync_wenr != enr) {
+               /* in case you have very heavy scattered io, it may
+                * stall the syncer for an undefined time if we give up the
+                * refcount when we try again and requeue.
+                *
+                * if we don't give up the refcount, but the next time
+                * we are scheduled this extent has been "synced" by new
+                * application writes, we'd miss the lc_put on the
+                * extent we keep the refcount on.
+                * so we remembered which extent we had to try again, and
+                * if the next requested one is something else, we do
+                * the lc_put here...
+                * we also have to wake_up
+                */
+
+               trace_drbd_resync(mdev, TRACE_LVL_ALL,
+                                 "dropping %u, apparently got 'synced' by application io\n",
+                                 mdev->resync_wenr);
+
+               e = lc_find(mdev->resync, mdev->resync_wenr);
+               bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
+               if (bm_ext) {
+                       D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
+                       D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
+                       clear_bit(BME_NO_WRITES, &bm_ext->flags);
+                       mdev->resync_wenr = LC_FREE;
+                       if (lc_put(mdev->resync, &bm_ext->lce) == 0)
+                               mdev->resync_locked--;
+                       wake_up(&mdev->al_wait);
+               } else {
+                       dev_alert(DEV, "LOGIC BUG\n");
+               }
+       }
+       /* TRY. */
+       e = lc_try_get(mdev->resync, enr);
+       bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
+       if (bm_ext) {
+               if (test_bit(BME_LOCKED, &bm_ext->flags))
+                       goto proceed;
+               if (!test_and_set_bit(BME_NO_WRITES, &bm_ext->flags)) {
+                       mdev->resync_locked++;
+               } else {
+                       /* we did set the BME_NO_WRITES,
+                        * but then could not set BME_LOCKED,
+                        * so we tried again.
+                        * drop the extra reference. */
+                       trace_drbd_resync(mdev, TRACE_LVL_ALL,
+                                         "dropping extra reference on %u\n", enr);
+
+                       bm_ext->lce.refcnt--;
+                       D_ASSERT(bm_ext->lce.refcnt > 0);
+               }
+               goto check_al;
+       } else {
+               /* do we rather want to try later? */
+               if (mdev->resync_locked > mdev->resync->nr_elements-3) {
+                       trace_drbd_resync(mdev, TRACE_LVL_ALL,
+                                         "resync_locked = %u!\n", mdev->resync_locked);
+
+                       goto try_again;
+               }
+               /* Do or do not. There is no try. -- Yoda */
+               e = lc_get(mdev->resync, enr);
+               bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
+               if (!bm_ext) {
+                       const unsigned long rs_flags = mdev->resync->flags;
+                       if (rs_flags & LC_STARVING)
+                               dev_warn(DEV, "Have to wait for element"
+                                    " (resync LRU too small?)\n");
+                       BUG_ON(rs_flags & LC_DIRTY);
+                       goto try_again;
+               }
+               if (bm_ext->lce.lc_number != enr) {
+                       bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
+                       bm_ext->rs_failed = 0;
+                       lc_changed(mdev->resync, &bm_ext->lce);
+                       wake_up(&mdev->al_wait);
+                       D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
+               }
+               set_bit(BME_NO_WRITES, &bm_ext->flags);
+               D_ASSERT(bm_ext->lce.refcnt == 1);
+               mdev->resync_locked++;
+               goto check_al;
+       }
+check_al:
+       trace_drbd_resync(mdev, TRACE_LVL_ALL, "checking al for %u\n", enr);
+
+       for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
+               if (unlikely(al_enr+i == mdev->act_log->new_number))
+                       goto try_again;
+               if (lc_is_used(mdev->act_log, al_enr+i))
+                       goto try_again;
+       }
+       set_bit(BME_LOCKED, &bm_ext->flags);
+proceed:
+       mdev->resync_wenr = LC_FREE;
+       spin_unlock_irq(&mdev->al_lock);
+       return 0;
+
+try_again:
+       trace_drbd_resync(mdev, TRACE_LVL_ALL, "need to try again for %u\n", enr);
+       if (bm_ext)
+               mdev->resync_wenr = enr;
+       spin_unlock_irq(&mdev->al_lock);
+       return -EAGAIN;
+}
+
+void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
+{
+       unsigned int enr = BM_SECT_TO_EXT(sector);
+       struct lc_element *e;
+       struct bm_extent *bm_ext;
+       unsigned long flags;
+
+       trace_drbd_resync(mdev, TRACE_LVL_ALL,
+                         "drbd_rs_complete_io: sector=%llus (rs_enr=%d)\n",
+                         (long long)sector, enr);
+
+       spin_lock_irqsave(&mdev->al_lock, flags);
+       e = lc_find(mdev->resync, enr);
+       bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
+       if (!bm_ext) {
+               spin_unlock_irqrestore(&mdev->al_lock, flags);
+               if (__ratelimit(&drbd_ratelimit_state))
+                       dev_err(DEV, "drbd_rs_complete_io() called, but extent not found\n");
+               return;
+       }
+
+       if (bm_ext->lce.refcnt == 0) {
+               spin_unlock_irqrestore(&mdev->al_lock, flags);
+               dev_err(DEV, "drbd_rs_complete_io(,%llu [=%u]) called, "
+                   "but refcnt is 0!?\n",
+                   (unsigned long long)sector, enr);
+               return;
+       }
+
+       if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
+               clear_bit(BME_LOCKED, &bm_ext->flags);
+               clear_bit(BME_NO_WRITES, &bm_ext->flags);
+               mdev->resync_locked--;
+               wake_up(&mdev->al_wait);
+       }
+
+       spin_unlock_irqrestore(&mdev->al_lock, flags);
+}
+
+/**
+ * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
+ * @mdev:      DRBD device.
+ */
+void drbd_rs_cancel_all(struct drbd_conf *mdev)
+{
+       trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_cancel_all\n");
+
+       spin_lock_irq(&mdev->al_lock);
+
+       if (get_ldev_if_state(mdev, D_FAILED)) { /* Makes sure ->resync is there. */
+               lc_reset(mdev->resync);
+               put_ldev(mdev);
+       }
+       mdev->resync_locked = 0;
+       mdev->resync_wenr = LC_FREE;
+       spin_unlock_irq(&mdev->al_lock);
+       wake_up(&mdev->al_wait);
+}
+
+/**
+ * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
+ * @mdev:      DRBD device.
+ *
+ * Returns 0 upon success, -EAGAIN if at least one reference count was
+ * not zero.
+ */
+int drbd_rs_del_all(struct drbd_conf *mdev)
+{
+       struct lc_element *e;
+       struct bm_extent *bm_ext;
+       int i;
+
+       trace_drbd_resync(mdev, TRACE_LVL_METRICS, "drbd_rs_del_all\n");
+
+       spin_lock_irq(&mdev->al_lock);
+
+       if (get_ldev_if_state(mdev, D_FAILED)) {
+               /* ok, ->resync is there. */
+               for (i = 0; i < mdev->resync->nr_elements; i++) {
+                       e = lc_element_by_index(mdev->resync, i);
+                       bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
+                       if (bm_ext->lce.lc_number == LC_FREE)
+                               continue;
+                       if (bm_ext->lce.lc_number == mdev->resync_wenr) {
+                               dev_info(DEV, "dropping %u in drbd_rs_del_all, apparently"
+                                    " got 'synced' by application io\n",
+                                    mdev->resync_wenr);
+                               D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
+                               D_ASSERT(test_bit(BME_NO_WRITES, &bm_ext->flags));
+                               clear_bit(BME_NO_WRITES, &bm_ext->flags);
+                               mdev->resync_wenr = LC_FREE;
+                               lc_put(mdev->resync, &bm_ext->lce);
+                       }
+                       if (bm_ext->lce.refcnt != 0) {
+                               dev_info(DEV, "Retrying drbd_rs_del_all() later. "
+                                    "refcnt=%d\n", bm_ext->lce.refcnt);
+                               put_ldev(mdev);
+                               spin_unlock_irq(&mdev->al_lock);
+                               return -EAGAIN;
+                       }
+                       D_ASSERT(!test_bit(BME_LOCKED, &bm_ext->flags));
+                       D_ASSERT(!test_bit(BME_NO_WRITES, &bm_ext->flags));
+                       lc_del(mdev->resync, &bm_ext->lce);
+               }
+               D_ASSERT(mdev->resync->used == 0);
+               put_ldev(mdev);
+       }
+       spin_unlock_irq(&mdev->al_lock);
+
+       return 0;
+}
+
+/**
+ * drbd_rs_failed_io() - Record information on a failure to resync the specified blocks
+ * @mdev:      DRBD device.
+ * @sector:    The sector number.
+ * @size:      Size of failed IO operation, in bytes.
+ */
+void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
+{
+       /* Is called from worker and receiver context _only_ */
+       unsigned long sbnr, ebnr, lbnr;
+       unsigned long count;
+       sector_t esector, nr_sectors;
+       int wake_up = 0;
+
+       trace_drbd_resync(mdev, TRACE_LVL_SUMMARY,
+                         "drbd_rs_failed_io: sector=%llus, size=%u\n",
+                         (unsigned long long)sector, size);
+
+       if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+               dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
+                               (unsigned long long)sector, size);
+               return;
+       }
+       nr_sectors = drbd_get_capacity(mdev->this_bdev);
+       esector = sector + (size >> 9) - 1;
+
+       ERR_IF(sector >= nr_sectors) return;
+       ERR_IF(esector >= nr_sectors) esector = (nr_sectors-1);
+
+       lbnr = BM_SECT_TO_BIT(nr_sectors-1);
+
+       /*
+        * round up start sector, round down end sector.  we make sure we only
+        * handle full, aligned, BM_BLOCK_SIZE (4K) blocks */
+       if (unlikely(esector < BM_SECT_PER_BIT-1))
+               return;
+       if (unlikely(esector == (nr_sectors-1)))
+               ebnr = lbnr;
+       else
+               ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
+       sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
+
+       if (sbnr > ebnr)
+               return;
+
+       /*
+        * ok, (capacity & 7) != 0 sometimes, but who cares...
+        * we count rs_{total,left} in bits, not sectors.
+        */
+       spin_lock_irq(&mdev->al_lock);
+       count = drbd_bm_count_bits(mdev, sbnr, ebnr);
+       if (count) {
+               mdev->rs_failed += count;
+
+               if (get_ldev(mdev)) {
+                       drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
+                       put_ldev(mdev);
+               }
+
+               /* just wake_up unconditionally now, for the various lc_changed(),
+                * lc_put() in drbd_try_clear_on_disk_bm(). */
+               wake_up = 1;
+       }
+       spin_unlock_irq(&mdev->al_lock);
+       if (wake_up)
+               wake_up(&mdev->al_wait);
+}
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
new file mode 100644 (file)
index 0000000..b61057e
--- /dev/null
@@ -0,0 +1,1327 @@
+/*
+   drbd_bitmap.c
+
+   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+   Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
+   Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
+   Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
+
+   drbd is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+
+   drbd is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with drbd; see the file COPYING.  If not, write to
+   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/drbd.h>
+#include <asm/kmap_types.h>
+#include "drbd_int.h"
+
+/* OPAQUE outside this file!
+ * interface defined in drbd_int.h
+
+ * convention:
+ * function name drbd_bm_... => used elsewhere, "public".
+ * function name      bm_... => internal to implementation, "private".
+
+ * Note that since find_first_bit returns int, at the current granularity of
+ * the bitmap (4 KB per bit), this implementation "only" supports up to
+ * 1<<(32+12) == 16 TB...
+ */
+
+/*
+ * NOTE
+ *  Access to the *bm_pages is protected by bm_lock.
+ *  It is safe to read the other members within the lock.
+ *
+ *  drbd_bm_set_bits is called from bio_endio callbacks,
+ *  We may be called with irq already disabled,
+ *  so we need spin_lock_irqsave().
+ *  And we need the kmap_atomic.
+ */
+struct drbd_bitmap {
+       struct page **bm_pages;
+       spinlock_t bm_lock;
+       /* WARNING unsigned long bm_*:
+        * a 32 bit bit offset is just enough for a 512 MB bitmap.
+        * it will blow up if we make the bitmap bigger...
+        * not that it makes much sense to have a bitmap that large,
+        * rather change the granularity to 16k or 64k or something.
+        * (that implies other problems, however...)
+        */
+       unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
+       unsigned long bm_bits;
+       size_t   bm_words;
+       size_t   bm_number_of_pages;
+       sector_t bm_dev_capacity;
+       struct semaphore bm_change; /* serializes resize operations */
+
+       atomic_t bm_async_io;
+       wait_queue_head_t bm_io_wait;
+
+       unsigned long  bm_flags;
+
+       /* debugging aid, in case we are still racy somewhere */
+       char          *bm_why;
+       struct task_struct *bm_task;
+};
+
+/* definition of bits in bm_flags */
+#define BM_LOCKED       0
+#define BM_MD_IO_ERROR  1
+#define BM_P_VMALLOCED  2
+
+static int bm_is_locked(struct drbd_bitmap *b)
+{
+       return test_bit(BM_LOCKED, &b->bm_flags);
+}
+
+#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
+static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       if (!__ratelimit(&drbd_ratelimit_state))
+               return;
+       dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
+           current == mdev->receiver.task ? "receiver" :
+           current == mdev->asender.task  ? "asender"  :
+           current == mdev->worker.task   ? "worker"   : current->comm,
+           func, b->bm_why ?: "?",
+           b->bm_task == mdev->receiver.task ? "receiver" :
+           b->bm_task == mdev->asender.task  ? "asender"  :
+           b->bm_task == mdev->worker.task   ? "worker"   : "?");
+}
+
+void drbd_bm_lock(struct drbd_conf *mdev, char *why)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       int trylock_failed;
+
+       if (!b) {
+               dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
+               return;
+       }
+
+       trylock_failed = down_trylock(&b->bm_change);
+
+       if (trylock_failed) {
+               dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
+                   current == mdev->receiver.task ? "receiver" :
+                   current == mdev->asender.task  ? "asender"  :
+                   current == mdev->worker.task   ? "worker"   : current->comm,
+                   why, b->bm_why ?: "?",
+                   b->bm_task == mdev->receiver.task ? "receiver" :
+                   b->bm_task == mdev->asender.task  ? "asender"  :
+                   b->bm_task == mdev->worker.task   ? "worker"   : "?");
+               down(&b->bm_change);
+       }
+       if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
+               dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");
+
+       b->bm_why  = why;
+       b->bm_task = current;
+}
+
+void drbd_bm_unlock(struct drbd_conf *mdev)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       if (!b) {
+               dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
+               return;
+       }
+
+       if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags))
+               dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");
+
+       b->bm_why  = NULL;
+       b->bm_task = NULL;
+       up(&b->bm_change);
+}
+
+/* word offset to long pointer */
+static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km)
+{
+       struct page *page;
+       unsigned long page_nr;
+
+       /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
+       page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
+       BUG_ON(page_nr >= b->bm_number_of_pages);
+       page = b->bm_pages[page_nr];
+
+       return (unsigned long *) kmap_atomic(page, km);
+}
+
+static unsigned long *bm_map_paddr(struct drbd_bitmap *b, unsigned long offset)
+{
+       return __bm_map_paddr(b, offset, KM_IRQ1);
+}
+
+static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
+{
+       kunmap_atomic(p_addr, km);
+}
+
+static void bm_unmap(unsigned long *p_addr)
+{
+       __bm_unmap(p_addr, KM_IRQ1);
+}
+
+/* long word offset of _bitmap_ sector */
+#define S2W(s) ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
+/* word offset from start of bitmap to word number _in_page_,
+ * i.e. modulo longs per page
+ *   #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)))
+ * hm, well, Philipp thinks gcc might not optimize the % into & (... - 1),
+ * so do it explicitly:
+ */
+#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))
+
+/* Long words per page */
+#define LWPP (PAGE_SIZE/sizeof(long))
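+
+/* Worked example, assuming 4 KiB pages and 64 bit longs:
+ * LWPP == 4096/8 == 512, so bitmap word 1000 lives in page (1000 >> 9) == 1,
+ * at in-page index MLPP(1000) == 1000 & 511 == 488.
+ */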
+
+/*
+ * actually most functions herein should take a struct drbd_bitmap*, not a
+ * struct drbd_conf*, but for the debug macros I like to have the mdev around
+ * to be able to report device specific messages.
+ */
+
+static void bm_free_pages(struct page **pages, unsigned long number)
+{
+       unsigned long i;
+       if (!pages)
+               return;
+
+       for (i = 0; i < number; i++) {
+               if (!pages[i]) {
+                       printk(KERN_ALERT "drbd: bm_free_pages tried to free "
+                                         "a NULL pointer; i=%lu n=%lu\n",
+                                         i, number);
+                       continue;
+               }
+               __free_page(pages[i]);
+               pages[i] = NULL;
+       }
+}
+
+static void bm_vk_free(void *ptr, int v)
+{
+       if (v)
+               vfree(ptr);
+       else
+               kfree(ptr);
+}
+
+/*
+ * "have" and "want" are NUMBER OF PAGES.
+ */
+static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
+{
+       struct page **old_pages = b->bm_pages;
+       struct page **new_pages, *page;
+       unsigned int i, bytes, vmalloced = 0;
+       unsigned long have = b->bm_number_of_pages;
+
+       BUG_ON(have == 0 && old_pages != NULL);
+       BUG_ON(have != 0 && old_pages == NULL);
+
+       if (have == want)
+               return old_pages;
+
+       /* Trying kmalloc first, falling back to vmalloc.
+        * GFP_KERNEL is ok, as this is done when a lower level disk is
+        * "attached" to the drbd.  Context is receiver thread or cqueue
+        * thread.  As we have no disk yet, we are not in the IO path,
+        * not even the IO path of the peer. */
+       bytes = sizeof(struct page *)*want;
+       new_pages = kmalloc(bytes, GFP_KERNEL);
+       if (!new_pages) {
+               new_pages = vmalloc(bytes);
+               if (!new_pages)
+                       return NULL;
+               vmalloced = 1;
+       }
+
+       memset(new_pages, 0, bytes);
+       if (want >= have) {
+               for (i = 0; i < have; i++)
+                       new_pages[i] = old_pages[i];
+               for (; i < want; i++) {
+                       page = alloc_page(GFP_HIGHUSER);
+                       if (!page) {
+                               bm_free_pages(new_pages + have, i - have);
+                               bm_vk_free(new_pages, vmalloced);
+                               return NULL;
+                       }
+                       new_pages[i] = page;
+               }
+       } else {
+               for (i = 0; i < want; i++)
+                       new_pages[i] = old_pages[i];
+               /* NOT HERE, we are outside the spinlock!
+               bm_free_pages(old_pages + want, have - want);
+               */
+       }
+
+       if (vmalloced)
+               set_bit(BM_P_VMALLOCED, &b->bm_flags);
+       else
+               clear_bit(BM_P_VMALLOCED, &b->bm_flags);
+
+       return new_pages;
+}
+
+/*
+ * called on driver init only. TODO call when a device is created.
+ * allocates the drbd_bitmap, and stores it in mdev->bitmap.
+ */
+int drbd_bm_init(struct drbd_conf *mdev)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       WARN_ON(b != NULL);
+       b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
+       if (!b)
+               return -ENOMEM;
+       spin_lock_init(&b->bm_lock);
+       init_MUTEX(&b->bm_change);
+       init_waitqueue_head(&b->bm_io_wait);
+
+       mdev->bitmap = b;
+
+       return 0;
+}
+
+sector_t drbd_bm_capacity(struct drbd_conf *mdev)
+{
+       ERR_IF(!mdev->bitmap) return 0;
+       return mdev->bitmap->bm_dev_capacity;
+}
+
+/* called on driver unload. TODO: call when a device is destroyed.
+ */
+void drbd_bm_cleanup(struct drbd_conf *mdev)
+{
+       ERR_IF (!mdev->bitmap) return;
+       bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
+       bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags));
+       kfree(mdev->bitmap);
+       mdev->bitmap = NULL;
+}
+
+/*
+ * if (b->bm_bits % BITS_PER_LONG) != 0,
+ * this masks out the remaining bits.
+ * Returns the number of bits cleared.
+ */
+static int bm_clear_surplus(struct drbd_bitmap *b)
+{
+       const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
+       size_t w = b->bm_bits >> LN2_BPL;
+       int cleared = 0;
+       unsigned long *p_addr, *bm;
+
+       p_addr = bm_map_paddr(b, w);
+       bm = p_addr + MLPP(w);
+       if (w < b->bm_words) {
+               cleared = hweight_long(*bm & ~mask);
+               *bm &= mask;
+               w++; bm++;
+       }
+
+       if (w < b->bm_words) {
+               cleared += hweight_long(*bm);
+               *bm = 0;
+       }
+       bm_unmap(p_addr);
+       return cleared;
+}
+
+static void bm_set_surplus(struct drbd_bitmap *b)
+{
+       const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
+       size_t w = b->bm_bits >> LN2_BPL;
+       unsigned long *p_addr, *bm;
+
+       p_addr = bm_map_paddr(b, w);
+       bm = p_addr + MLPP(w);
+       if (w < b->bm_words) {
+               *bm |= ~mask;
+               bm++; w++;
+       }
+
+       if (w < b->bm_words) {
+               *bm = ~(0UL);
+       }
+       bm_unmap(p_addr);
+}
+
+static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endian)
+{
+       unsigned long *p_addr, *bm, offset = 0;
+       unsigned long bits = 0;
+       unsigned long i, do_now;
+
+       while (offset < b->bm_words) {
+               i = do_now = min_t(size_t, b->bm_words-offset, LWPP);
+               p_addr = __bm_map_paddr(b, offset, KM_USER0);
+               bm = p_addr + MLPP(offset);
+               while (i--) {
+#ifndef __LITTLE_ENDIAN
+                       if (swap_endian)
+                               *bm = lel_to_cpu(*bm);
+#endif
+                       bits += hweight_long(*bm++);
+               }
+               __bm_unmap(p_addr, KM_USER0);
+               offset += do_now;
+               cond_resched();
+       }
+
+       return bits;
+}
+
+static unsigned long bm_count_bits(struct drbd_bitmap *b)
+{
+       return __bm_count_bits(b, 0);
+}
+
+static unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b)
+{
+       return __bm_count_bits(b, 1);
+}
+
+/* offset and len in long words. */
+static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
+{
+       unsigned long *p_addr, *bm;
+       size_t do_now, end;
+
+#define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512)
+
+       end = offset + len;
+
+       if (end > b->bm_words) {
+               printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
+               return;
+       }
+
+       while (offset < end) {
+               do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
+               p_addr = bm_map_paddr(b, offset);
+               bm = p_addr + MLPP(offset);
+               if (bm+do_now > p_addr + LWPP) {
+                       printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
+                              p_addr, bm, (int)do_now);
+                       break; /* breaks to after catch_oob_access_end() only! */
+               }
+               memset(bm, c, do_now * sizeof(long));
+               bm_unmap(p_addr);
+               offset += do_now;
+       }
+}
+
+/*
+ * make sure the bitmap has enough room for the attached storage;
+ * if necessary, resize.
+ * called whenever we may have changed the device size.
+ * returns -ENOMEM if we could not allocate enough memory, 0 on success.
+ * In case this is actually a resize, we copy the old bitmap into the new one.
+ * Otherwise, the bitmap is initialized to all bits set.
+ */
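+/* Sizing sketch, assuming 4 KiB pages, 64 bit longs and the 4 KiB per bit
+ * granularity: a capacity of 1 TiB == 2^31 sectors gives
+ * bits  = 2^31 / 8  = 268435456 and
+ * words = bits / 64 = 4194304,
+ * i.e. roughly 8192 pages (32 MiB) of bitmap memory.
+ */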
+int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       unsigned long bits, words, owords, obits, *p_addr, *bm;
+       unsigned long want, have, onpages; /* number of pages */
+       struct page **npages, **opages = NULL;
+       int err = 0, growing;
+       int opages_vmalloced;
+
+       ERR_IF(!b) return -ENOMEM;
+
+       drbd_bm_lock(mdev, "resize");
+
+       dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
+                       (unsigned long long)capacity);
+
+       if (capacity == b->bm_dev_capacity)
+               goto out;
+
+       opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags);
+
+       if (capacity == 0) {
+               spin_lock_irq(&b->bm_lock);
+               opages = b->bm_pages;
+               onpages = b->bm_number_of_pages;
+               owords = b->bm_words;
+               b->bm_pages = NULL;
+               b->bm_number_of_pages =
+               b->bm_set   =
+               b->bm_bits  =
+               b->bm_words =
+               b->bm_dev_capacity = 0;
+               spin_unlock_irq(&b->bm_lock);
+               bm_free_pages(opages, onpages);
+               bm_vk_free(opages, opages_vmalloced);
+               goto out;
+       }
+       bits  = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));
+
+       /* if we would use
+          words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
+          a 32bit host could present the wrong number of words
+          to a 64bit host.
+       */
+       words = ALIGN(bits, 64) >> LN2_BPL;
+
+       if (get_ldev(mdev)) {
+               D_ASSERT((u64)bits <= (((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12));
+               put_ldev(mdev);
+       }
+
+       /* one extra long to catch off-by-one errors */
+       want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
+       have = b->bm_number_of_pages;
+       if (want == have) {
+               D_ASSERT(b->bm_pages != NULL);
+               npages = b->bm_pages;
+       } else {
+               if (FAULT_ACTIVE(mdev, DRBD_FAULT_BM_ALLOC))
+                       npages = NULL;
+               else
+                       npages = bm_realloc_pages(b, want);
+       }
+
+       if (!npages) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       spin_lock_irq(&b->bm_lock);
+       opages = b->bm_pages;
+       owords = b->bm_words;
+       obits  = b->bm_bits;
+
+       growing = bits > obits;
+       if (opages)
+               bm_set_surplus(b);
+
+       b->bm_pages = npages;
+       b->bm_number_of_pages = want;
+       b->bm_bits  = bits;
+       b->bm_words = words;
+       b->bm_dev_capacity = capacity;
+
+       if (growing) {
+               bm_memset(b, owords, 0xff, words-owords);
+               b->bm_set += bits - obits;
+       }
+
+       if (want < have) {
+               /* implicit: (opages != NULL) && (opages != npages) */
+               bm_free_pages(opages + want, have - want);
+       }
+
+       p_addr = bm_map_paddr(b, words);
+       bm = p_addr + MLPP(words);
+       *bm = DRBD_MAGIC;
+       bm_unmap(p_addr);
+
+       (void)bm_clear_surplus(b);
+
+       spin_unlock_irq(&b->bm_lock);
+       if (opages != npages)
+               bm_vk_free(opages, opages_vmalloced);
+       if (!growing)
+               b->bm_set = bm_count_bits(b);
+       dev_info(DEV, "resync bitmap: bits=%lu words=%lu\n", bits, words);
+
+ out:
+       drbd_bm_unlock(mdev);
+       return err;
+}
+
+/* inherently racy:
+ * if not protected by other means, return value may be out of date when
+ * leaving this function...
+ * we still need to lock it, since it is important that this returns
+ * bm_set == 0 precisely.
+ *
+ * maybe bm_set should be atomic_t ?
+ */
+static unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       unsigned long s;
+       unsigned long flags;
+
+       ERR_IF(!b) return 0;
+       ERR_IF(!b->bm_pages) return 0;
+
+       spin_lock_irqsave(&b->bm_lock, flags);
+       s = b->bm_set;
+       spin_unlock_irqrestore(&b->bm_lock, flags);
+
+       return s;
+}
+
+unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
+{
+       unsigned long s;
+       /* if I don't have a disk, I don't know about out-of-sync status */
+       if (!get_ldev_if_state(mdev, D_NEGOTIATING))
+               return 0;
+       s = _drbd_bm_total_weight(mdev);
+       put_ldev(mdev);
+       return s;
+}
+
+size_t drbd_bm_words(struct drbd_conf *mdev)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       ERR_IF(!b) return 0;
+       ERR_IF(!b->bm_pages) return 0;
+
+       return b->bm_words;
+}
+
+unsigned long drbd_bm_bits(struct drbd_conf *mdev)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       ERR_IF(!b) return 0;
+
+       return b->bm_bits;
+}
+
+/* merge number words from buffer into the bitmap starting at offset.
+ * buffer[i] is expected to be little endian unsigned long.
+ * bitmap must be locked by drbd_bm_lock.
+ * currently only used from receive_bitmap.
+ */
+void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
+                       unsigned long *buffer)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       unsigned long *p_addr, *bm;
+       unsigned long word, bits;
+       size_t end, do_now;
+
+       end = offset + number;
+
+       ERR_IF(!b) return;
+       ERR_IF(!b->bm_pages) return;
+       if (number == 0)
+               return;
+       WARN_ON(offset >= b->bm_words);
+       WARN_ON(end    >  b->bm_words);
+
+       spin_lock_irq(&b->bm_lock);
+       while (offset < end) {
+               do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
+               p_addr = bm_map_paddr(b, offset);
+               bm = p_addr + MLPP(offset);
+               offset += do_now;
+               while (do_now--) {
+                       bits = hweight_long(*bm);
+                       word = *bm | lel_to_cpu(*buffer++);
+                       *bm++ = word;
+                       b->bm_set += hweight_long(word) - bits;
+               }
+               bm_unmap(p_addr);
+       }
+       /* with 32bit <-> 64bit cross-platform connect
+        * this is only correct for current usage,
+        * where we _know_ that we are 64 bit aligned,
+        * and know that this function is used in this way, too...
+        */
+       if (end == b->bm_words)
+               b->bm_set -= bm_clear_surplus(b);
+
+       spin_unlock_irq(&b->bm_lock);
+}
+
+/* copy number words from the bitmap starting at offset into the buffer.
+ * buffer[i] will be little endian unsigned long.
+ */
+void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
+                    unsigned long *buffer)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       unsigned long *p_addr, *bm;
+       size_t end, do_now;
+
+       end = offset + number;
+
+       ERR_IF(!b) return;
+       ERR_IF(!b->bm_pages) return;
+
+       spin_lock_irq(&b->bm_lock);
+       if ((offset >= b->bm_words) ||
+           (end    >  b->bm_words) ||
+           (number <= 0))
+               dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
+                       (unsigned long) offset,
+                       (unsigned long) number,
+                       (unsigned long) b->bm_words);
+       else {
+               while (offset < end) {
+                       do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
+                       p_addr = bm_map_paddr(b, offset);
+                       bm = p_addr + MLPP(offset);
+                       offset += do_now;
+                       while (do_now--)
+                               *buffer++ = cpu_to_lel(*bm++);
+                       bm_unmap(p_addr);
+               }
+       }
+       spin_unlock_irq(&b->bm_lock);
+}
+
+/* set all bits in the bitmap */
+void drbd_bm_set_all(struct drbd_conf *mdev)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       ERR_IF(!b) return;
+       ERR_IF(!b->bm_pages) return;
+
+       spin_lock_irq(&b->bm_lock);
+       bm_memset(b, 0, 0xff, b->bm_words);
+       (void)bm_clear_surplus(b);
+       b->bm_set = b->bm_bits;
+       spin_unlock_irq(&b->bm_lock);
+}
+
+/* clear all bits in the bitmap */
+void drbd_bm_clear_all(struct drbd_conf *mdev)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       ERR_IF(!b) return;
+       ERR_IF(!b->bm_pages) return;
+
+       spin_lock_irq(&b->bm_lock);
+       bm_memset(b, 0, 0, b->bm_words);
+       b->bm_set = 0;
+       spin_unlock_irq(&b->bm_lock);
+}
+
+static void bm_async_io_complete(struct bio *bio, int error)
+{
+       struct drbd_bitmap *b = bio->bi_private;
+       int uptodate = bio_flagged(bio, BIO_UPTODATE);
+
+
+       /* strange behavior of some lower level drivers...
+        * fail the request by clearing the uptodate flag,
+        * but do not return any error?!
+        * do we want to WARN() on this? */
+       if (!error && !uptodate)
+               error = -EIO;
+
+       if (error) {
+               /* doh. what now?
+                * for now, set all bits, and flag MD_IO_ERROR */
+               __set_bit(BM_MD_IO_ERROR, &b->bm_flags);
+       }
+       if (atomic_dec_and_test(&b->bm_async_io))
+               wake_up(&b->bm_io_wait);
+
+       bio_put(bio);
+}
+
+static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw) __must_hold(local)
+{
+       /* we are process context. we always get a bio */
+       struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+       unsigned int len;
+       sector_t on_disk_sector =
+               mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
+       on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);
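+       /* e.g. with 4 KiB pages each bitmap page spans 1 << (12-9) == 8
+        * on-disk sectors; page_nr 3 starts 24 sectors into the bitmap area */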
+
+       /* this might happen with a very small
+        * flexible external meta data device */
+       len = min_t(unsigned int, PAGE_SIZE,
+               (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);
+
+       bio->bi_bdev = mdev->ldev->md_bdev;
+       bio->bi_sector = on_disk_sector;
+       bio_add_page(bio, b->bm_pages[page_nr], len, 0);
+       bio->bi_private = b;
+       bio->bi_end_io = bm_async_io_complete;
+
+       if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
+               bio->bi_rw |= rw;
+               bio_endio(bio, -EIO);
+       } else {
+               submit_bio(rw, bio);
+       }
+}
+
+# if defined(__LITTLE_ENDIAN)
+       /* nothing to do, on disk == in memory */
+# define bm_cpu_to_lel(x) ((void)0)
+# else
+void bm_cpu_to_lel(struct drbd_bitmap *b)
+{
+       /* need to cpu_to_lel all the pages ...
+        * this may be optimized by using
+        * cpu_to_lel(-1) == -1 and cpu_to_lel(0) == 0;
+        * the following is still not optimal, but better than nothing */
+       unsigned int i;
+       unsigned long *p_addr, *bm;
+       if (b->bm_set == 0) {
+               /* all bits are zero; no page needs swapping */
+               i = b->bm_number_of_pages;
+       } else if (b->bm_set == b->bm_bits) {
+               /* only the last page */
+               i = b->bm_number_of_pages - 1;
+       } else {
+               /* all pages */
+               i = 0;
+       }
+       for (; i < b->bm_number_of_pages; i++) {
+               p_addr = kmap_atomic(b->bm_pages[i], KM_USER0);
+               for (bm = p_addr; bm < p_addr + PAGE_SIZE/sizeof(long); bm++)
+                       *bm = cpu_to_lel(*bm);
+               kunmap_atomic(p_addr, KM_USER0);
+       }
+}
+# endif
+/* lel_to_cpu == cpu_to_lel */
+# define bm_lel_to_cpu(x) bm_cpu_to_lel(x)
+
+/*
+ * bm_rw: read/write the whole bitmap from/to its on disk location.
+ */
+static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       /* sector_t sector; */
+       int bm_words, num_pages, i;
+       unsigned long now;
+       char ppb[10];
+       int err = 0;
+
+       WARN_ON(!bm_is_locked(b));
+
+       /* no spinlock here, the drbd_bm_lock should be enough! */
+
+       bm_words  = drbd_bm_words(mdev);
+       num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+       /* on disk bitmap is little endian */
+       if (rw == WRITE)
+               bm_cpu_to_lel(b);
+
+       now = jiffies;
+       atomic_set(&b->bm_async_io, num_pages);
+       __clear_bit(BM_MD_IO_ERROR, &b->bm_flags);
+
+       /* let the layers below us try to merge these bios... */
+       for (i = 0; i < num_pages; i++)
+               bm_page_io_async(mdev, b, i, rw);
+
+       drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
+       wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
+
+       if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
+               dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
+               drbd_chk_io_error(mdev, 1, TRUE);
+               err = -EIO;
+       }
+
+       now = jiffies;
+       if (rw == WRITE) {
+               /* swap back endianness */
+               bm_lel_to_cpu(b);
+               /* flush bitmap to stable storage */
+               drbd_md_flush(mdev);
+       } else /* rw == READ */ {
+               /* just read, if necessary adjust endianness */
+               b->bm_set = bm_count_bits_swap_endian(b);
+               dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
+                    jiffies - now);
+       }
+       now = b->bm_set;
+
+       dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
+            ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
+
+       return err;
+}
+
+/**
+ * drbd_bm_read() - Read the whole bitmap from its on disk location.
+ * @mdev:      DRBD device.
+ */
+int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
+{
+       return bm_rw(mdev, READ);
+}
+
+/**
+ * drbd_bm_write() - Write the whole bitmap to its on disk location.
+ * @mdev:      DRBD device.
+ */
+int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
+{
+       return bm_rw(mdev, WRITE);
+}
+
+/**
+ * drbd_bm_write_sect() - Write a 512 (MD_SECTOR_SIZE) byte piece of the bitmap
+ * @mdev:      DRBD device.
+ * @enr:       Extent number in the resync lru (happens to be sector offset)
+ *
+ * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered
+ * by a single sector write. Therefore enr == sector offset from the
+ * start of the bitmap.
+ */
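+/* Worked example, assuming 4 KiB per bitmap bit and 64 bit longs:
+ * one 512 byte sector holds 4096 bits and thus covers 4096 * 4 KiB == 16 MiB
+ * of device; S2W(1) is then 512/8 == 64 words, the num_words copied below. */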
+int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local)
+{
+       sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
+                                     + mdev->ldev->md.bm_offset;
+       int bm_words, num_words, offset;
+       int err = 0;
+
+       mutex_lock(&mdev->md_io_mutex);
+       bm_words  = drbd_bm_words(mdev);
+       offset    = S2W(enr);   /* word offset into bitmap */
+       num_words = min(S2W(1), bm_words - offset);
+       if (num_words < S2W(1))
+               memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE);
+       drbd_bm_get_lel(mdev, offset, num_words,
+                       page_address(mdev->md_io_page));
+       if (!drbd_md_sync_page_io(mdev, mdev->ldev, on_disk_sector, WRITE)) {
+               int i;
+               err = -EIO;
+               dev_err(DEV, "IO ERROR writing bitmap sector %lu "
+                   "(meta-disk sector %llus)\n",
+                   enr, (unsigned long long)on_disk_sector);
+               drbd_chk_io_error(mdev, 1, TRUE);
+               for (i = 0; i < AL_EXT_PER_BM_SECT; i++)
+                       drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i);
+       }
+       mdev->bm_writ_cnt++;
+       mutex_unlock(&mdev->md_io_mutex);
+       return err;
+}
+
+/* NOTE
+ * find_first_bit returns int, we return unsigned long.
+ * should not make much difference anyway, but ...
+ *
+ * this returns a bit number, NOT a sector!
+ */
+#define BPP_MASK ((1UL << (PAGE_SHIFT+3)) - 1)
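+/* Worked example, assuming 4 KiB pages (32768 bits per page): BPP_MASK == 32767,
+ * so bm_fo == 40000 maps to page 1, and the search starts at
+ * in-page bit 40000 & 32767 == 7232. */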
+static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
+       const int find_zero_bit, const enum km_type km)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       unsigned long i = -1UL;
+       unsigned long *p_addr;
+       unsigned long bit_offset; /* bit offset of the mapped page. */
+
+       if (bm_fo > b->bm_bits) {
+               dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
+       } else {
+               while (bm_fo < b->bm_bits) {
+                       unsigned long offset;
+                       bit_offset = bm_fo & ~BPP_MASK; /* bit offset of the page */
+                       offset = bit_offset >> LN2_BPL;    /* word offset of the page */
+                       p_addr = __bm_map_paddr(b, offset, km);
+
+                       if (find_zero_bit)
+                               i = find_next_zero_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);
+                       else
+                               i = find_next_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);
+
+                       __bm_unmap(p_addr, km);
+                       if (i < PAGE_SIZE*8) {
+                               i = bit_offset + i;
+                               if (i >= b->bm_bits)
+                                       break;
+                               goto found;
+                       }
+                       bm_fo = bit_offset + PAGE_SIZE*8;
+               }
+               i = -1UL;
+       }
+ found:
+       return i;
+}
+
+static unsigned long bm_find_next(struct drbd_conf *mdev,
+       unsigned long bm_fo, const int find_zero_bit)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       unsigned long i = -1UL;
+
+       ERR_IF(!b) return i;
+       ERR_IF(!b->bm_pages) return i;
+
+       spin_lock_irq(&b->bm_lock);
+       if (bm_is_locked(b))
+               bm_print_lock_info(mdev);
+
+       i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
+
+       spin_unlock_irq(&b->bm_lock);
+       return i;
+}
+
+unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
+{
+       return bm_find_next(mdev, bm_fo, 0);
+}
+
+#if 0
+/* not yet needed for anything. */
+unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
+{
+       return bm_find_next(mdev, bm_fo, 1);
+}
+#endif
+
+/* does not spin_lock_irqsave.
+ * you must take drbd_bm_lock() first */
+unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
+{
+       /* WARN_ON(!bm_is_locked(mdev)); */
+       return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
+}
+
+unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
+{
+       /* WARN_ON(!bm_is_locked(mdev)); */
+       return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
+}
+
+/* returns number of bits actually changed.
+ * for val != 0, we change 0 -> 1, return code positive
+ * for val == 0, we change 1 -> 0, return code negative
+ * wants bitnr, not sector.
+ * expected to be called for only a few bits (e - s about BITS_PER_LONG).
+ * Must hold bitmap lock already. */
+int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+       unsigned long e, int val, const enum km_type km)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       unsigned long *p_addr = NULL;
+       unsigned long bitnr;
+       unsigned long last_page_nr = -1UL;
+       int c = 0;
+
+       if (e >= b->bm_bits) {
+               dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
+                               s, e, b->bm_bits);
+               e = b->bm_bits ? b->bm_bits -1 : 0;
+       }
+       for (bitnr = s; bitnr <= e; bitnr++) {
+               unsigned long offset = bitnr>>LN2_BPL;
+               unsigned long page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
+               if (page_nr != last_page_nr) {
+                       if (p_addr)
+                               __bm_unmap(p_addr, km);
+                       p_addr = __bm_map_paddr(b, offset, km);
+                       last_page_nr = page_nr;
+               }
+               if (val)
+                       c += (0 == __test_and_set_bit(bitnr & BPP_MASK, p_addr));
+               else
+                       c -= (0 != __test_and_clear_bit(bitnr & BPP_MASK, p_addr));
+       }
+       if (p_addr)
+               __bm_unmap(p_addr, km);
+       b->bm_set += c;
+       return c;
+}
+
+/* returns number of bits actually changed.
+ * for val != 0, we change 0 -> 1, return code positive
+ * for val == 0, we change 1 -> 0, return code negative
+ * wants bitnr, not sector */
+int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
+       const unsigned long e, int val)
+{
+       unsigned long flags;
+       struct drbd_bitmap *b = mdev->bitmap;
+       int c = 0;
+
+       ERR_IF(!b) return 1;
+       ERR_IF(!b->bm_pages) return 0;
+
+       spin_lock_irqsave(&b->bm_lock, flags);
+       if (bm_is_locked(b))
+               bm_print_lock_info(mdev);
+
+       c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
+
+       spin_unlock_irqrestore(&b->bm_lock, flags);
+       return c;
+}
+
+/* returns number of bits changed 0 -> 1 */
+int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+{
+       return bm_change_bits_to(mdev, s, e, 1);
+}
+
+/* returns number of bits changed 1 -> 0 */
+int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+{
+       return -bm_change_bits_to(mdev, s, e, 0);
+}
+
+/* sets all bits in full words,
+ * from first_word up to, but not including, last_word */
+static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
+               int page_nr, int first_word, int last_word)
+{
+       int i;
+       int bits;
+       unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
+       for (i = first_word; i < last_word; i++) {
+               bits = hweight_long(paddr[i]);
+               paddr[i] = ~0UL;
+               b->bm_set += BITS_PER_LONG - bits;
+       }
+       kunmap_atomic(paddr, KM_USER0);
+}
+
+/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
+ * You must first drbd_bm_lock().
+ * Can be called to set the whole bitmap in one go.
+ * Sets bits from s to e _inclusive_. */
+void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+{
+       /* First set_bit from the first bit (s)
+        * up to the next long boundary (sl),
+        * then assign full words up to the last long boundary (el),
+        * then set_bit up to and including the last bit (e).
+        *
+        * Do not use memset, because we must account for changes,
+        * so we need to loop over the words with hweight() anyways.
+        */
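+       /* Worked example, assuming BITS_PER_LONG == 64: for s == 10, e == 999
+        * we get sl == 64 and el == 960, so bits 10..63 and 960..999 are set
+        * bit by bit, while bits 64..959 are filled in as whole words. */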
+       unsigned long sl = ALIGN(s,BITS_PER_LONG);
+       unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
+       int first_page;
+       int last_page;
+       int page_nr;
+       int first_word;
+       int last_word;
+
+       if (e - s <= 3*BITS_PER_LONG) {
+               /* don't bother; el and sl may even be wrong. */
+               __bm_change_bits_to(mdev, s, e, 1, KM_USER0);
+               return;
+       }
+
+       /* difference is large enough that we can trust sl and el */
+
+       /* bits filling the current long */
+       if (sl)
+               __bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);
+
+       first_page = sl >> (3 + PAGE_SHIFT);
+       last_page = el >> (3 + PAGE_SHIFT);
+
+       /* MLPP: modulo longs per page */
+       /* LWPP: long words per page */
+       first_word = MLPP(sl >> LN2_BPL);
+       last_word = LWPP;
+
+       /* first and full pages, unless first page == last page */
+       for (page_nr = first_page; page_nr < last_page; page_nr++) {
+               bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
+               cond_resched();
+               first_word = 0;
+       }
+
+       /* last page (respectively only page, for first page == last page) */
+       last_word = MLPP(el >> LN2_BPL);
+       bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+       /* possibly trailing bits.
+        * example: (e & 63) == 63, el will be e+1.
+        * if that even was the very last bit,
+        * it would trigger an assert in __bm_change_bits_to()
+        */
+       if (el <= e)
+               __bm_change_bits_to(mdev, el, e, 1, KM_USER0);
+}
+
+/* returns bit state
+ * wants bitnr, NOT sector.
+ * inherently racy... area needs to be locked by means of {al,rs}_lru
+ *  1 ... bit set
+ *  0 ... bit not set
+ * -1 ... first out of bounds access, stop testing for bits!
+ */
+int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
+{
+       unsigned long flags;
+       struct drbd_bitmap *b = mdev->bitmap;
+       unsigned long *p_addr;
+       int i;
+
+       ERR_IF(!b) return 0;
+       ERR_IF(!b->bm_pages) return 0;
+
+       spin_lock_irqsave(&b->bm_lock, flags);
+       if (bm_is_locked(b))
+               bm_print_lock_info(mdev);
+       if (bitnr < b->bm_bits) {
+               unsigned long offset = bitnr>>LN2_BPL;
+               p_addr = bm_map_paddr(b, offset);
+               i = test_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0;
+               bm_unmap(p_addr);
+       } else if (bitnr == b->bm_bits) {
+               i = -1;
+       } else { /* (bitnr > b->bm_bits) */
+               dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
+               i = 0;
+       }
+
+       spin_unlock_irqrestore(&b->bm_lock, flags);
+       return i;
+}
+
+/* returns number of bits set in the range [s, e] */
+int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
+{
+       unsigned long flags;
+       struct drbd_bitmap *b = mdev->bitmap;
+       unsigned long *p_addr = NULL, page_nr = -1;
+       unsigned long bitnr;
+       int c = 0;
+       size_t w;
+
+       /* If this is called without a bitmap, that is a bug.  But just to be
+        * robust in case we screwed up elsewhere, in that case pretend there
+        * was one dirty bit in the requested area, so we won't try to do a
+        * local read there (no bitmap probably implies no disk) */
+       ERR_IF(!b) return 1;
+       ERR_IF(!b->bm_pages) return 1;
+
+       spin_lock_irqsave(&b->bm_lock, flags);
+       if (bm_is_locked(b))
+               bm_print_lock_info(mdev);
+       for (bitnr = s; bitnr <= e; bitnr++) {
+               w = bitnr >> LN2_BPL;
+               if (page_nr != w >> (PAGE_SHIFT - LN2_BPL + 3)) {
+                       page_nr = w >> (PAGE_SHIFT - LN2_BPL + 3);
+                       if (p_addr)
+                               bm_unmap(p_addr);
+                       p_addr = bm_map_paddr(b, w);
+               }
+               ERR_IF (bitnr >= b->bm_bits) {
+                       dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
+               } else {
+                       c += (0 != test_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
+               }
+       }
+       if (p_addr)
+               bm_unmap(p_addr);
+       spin_unlock_irqrestore(&b->bm_lock, flags);
+       return c;
+}
+
+
+/* inherently racy...
+ * return value may be already out-of-date when this function returns.
+ * but the general usage is that this is only used during a cstate when bits are
+ * only cleared, not set, and we typically only care about the case when the return
+ * value is zero, or we already "locked" this "bitmap extent" by other means.
+ *
+ * enr is bm-extent number, since we chose to name one sector (512 bytes)
+ * worth of the bitmap a "bitmap extent".
+ *
+ * TODO
+ * I think since we use it like a reference count, we should use the real
+ * reference count of some bitmap extent element from some lru instead...
+ *
+ */
+int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       int count, s, e;
+       unsigned long flags;
+       unsigned long *p_addr, *bm;
+
+       ERR_IF(!b) return 0;
+       ERR_IF(!b->bm_pages) return 0;
+
+       spin_lock_irqsave(&b->bm_lock, flags);
+       if (bm_is_locked(b))
+               bm_print_lock_info(mdev);
+
+       s = S2W(enr);
+       e = min((size_t)S2W(enr+1), b->bm_words);
+       count = 0;
+       if (s < b->bm_words) {
+               int n = e-s;
+               p_addr = bm_map_paddr(b, s);
+               bm = p_addr + MLPP(s);
+               while (n--)
+                       count += hweight_long(*bm++);
+               bm_unmap(p_addr);
+       } else {
+               dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
+       }
+       spin_unlock_irqrestore(&b->bm_lock, flags);
+       return count;
+}
+
+/* set all bits covered by the AL-extent al_enr */
+unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
+{
+       struct drbd_bitmap *b = mdev->bitmap;
+       unsigned long *p_addr, *bm;
+       unsigned long weight;
+       int count, s, e, i, do_now;
+       ERR_IF(!b) return 0;
+       ERR_IF(!b->bm_pages) return 0;
+
+       spin_lock_irq(&b->bm_lock);
+       if (bm_is_locked(b))
+               bm_print_lock_info(mdev);
+       weight = b->bm_set;
+
+       s = al_enr * BM_WORDS_PER_AL_EXT;
+       e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
+       /* assert that s and e are on the same page */
+       D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
+             ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
+       count = 0;
+       if (s < b->bm_words) {
+               i = do_now = e-s;
+               p_addr = bm_map_paddr(b, s);
+               bm = p_addr + MLPP(s);
+               while (i--) {
+                       count += hweight_long(*bm);
+                       *bm = -1UL;
+                       bm++;
+               }
+               bm_unmap(p_addr);
+               b->bm_set += do_now*BITS_PER_LONG - count;
+               if (e == b->bm_words)
+                       b->bm_set -= bm_clear_surplus(b);
+       } else {
+               dev_err(DEV, "start offset (%d) too large in drbd_bm_ALe_set_all\n", s);
+       }
+       weight = b->bm_set - weight;
+       spin_unlock_irq(&b->bm_lock);
+       return weight;
+}
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
new file mode 100644 (file)
index 0000000..8da602e
--- /dev/null
@@ -0,0 +1,2258 @@
+/*
+  drbd_int.h
+
+  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
+  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
+  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
+
+  drbd is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 2, or (at your option)
+  any later version.
+
+  drbd is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with drbd; see the file COPYING.  If not, write to
+  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+
+#ifndef _DRBD_INT_H
+#define _DRBD_INT_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <linux/tcp.h>
+#include <linux/mutex.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <net/tcp.h>
+#include <linux/lru_cache.h>
+
+#ifdef __CHECKER__
+# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
+# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
+# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
+# define __must_hold(x)       __attribute__((context(x,1,1), require_context(x,1,999,"call")))
+#else
+# define __protected_by(x)
+# define __protected_read_by(x)
+# define __protected_write_by(x)
+# define __must_hold(x)
+#endif
+
+#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)
+
+/* module parameter, defined in drbd_main.c */
+extern unsigned int minor_count;
+extern int disable_sendpage;
+extern int allow_oos;
+extern unsigned int cn_idx;
+
+#ifdef CONFIG_DRBD_FAULT_INJECTION
+extern int enable_faults;
+extern int fault_rate;
+extern int fault_devs;
+#endif
+
+extern char usermode_helper[];
+
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/* I don't remember why XCPU ...
+ * This is used to wake the asender,
+ * and to interrupt the sending task
+ * on disconnect.
+ */
+#define DRBD_SIG SIGXCPU
+
+/* This is used to stop/restart our threads.
+ * Cannot use SIGTERM nor SIGKILL, since these
+ * are sent out by init on runlevel changes.
+ * I chose SIGHUP for now.
+ */
+#define DRBD_SIGKILL SIGHUP
+
+/* All EEs on the free list should have ID_VACANT (== 0);
+ * freshly allocated EEs get !ID_VACANT (== 1),
+ * so if it says "cannot dereference null pointer at address 0x00000001",
+ * it is most likely one of these :( */
+
+#define ID_IN_SYNC      (4711ULL)
+#define ID_OUT_OF_SYNC  (4712ULL)
+
+#define ID_SYNCER (-1ULL)
+#define ID_VACANT 0
+#define is_syncer_block_id(id) ((id) == ID_SYNCER)
+
+struct drbd_conf;
+
+
+/* to shorten dev_warn(DEV, "msg"); and relatives statements */
+#define DEV (disk_to_dev(mdev->vdisk))
+
+#define D_ASSERT(exp)  if (!(exp)) \
+        dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
+
+#define ERR_IF(exp) if (({                             \
+       int _b = (exp) != 0;                            \
+       if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n",     \
+               __func__, #exp, __FILE__, __LINE__);    \
+        _b;                                            \
+       }))
+
+/* Defines to control fault insertion */
+enum {
+       DRBD_FAULT_MD_WR = 0,   /* meta data write */
+       DRBD_FAULT_MD_RD = 1,   /*           read  */
+       DRBD_FAULT_RS_WR = 2,   /* resync          */
+       DRBD_FAULT_RS_RD = 3,
+       DRBD_FAULT_DT_WR = 4,   /* data            */
+       DRBD_FAULT_DT_RD = 5,
+       DRBD_FAULT_DT_RA = 6,   /* data read ahead */
+       DRBD_FAULT_BM_ALLOC = 7,        /* bitmap allocation */
+       DRBD_FAULT_AL_EE = 8,   /* alloc ee */
+
+       DRBD_FAULT_MAX,
+};
+
+extern void trace_drbd_resync(struct drbd_conf *mdev, int level, const char *fmt, ...);
+
+#ifdef CONFIG_DRBD_FAULT_INJECTION
+extern unsigned int
+_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
+static inline int
+drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
+       return fault_rate &&
+               (enable_faults & (1<<type)) &&
+               _drbd_insert_fault(mdev, type);
+}
+#define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t)))
+
+#else
+#define FAULT_ACTIVE(_m, _t) (0)
+#endif
+
+/* integer division, round _UP_ to the next integer */
+#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
+/* usual integer division */
+#define div_floor(A, B) ((A)/(B))
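+/* e.g. div_ceil(7, 2) == 4, while div_floor(7, 2) == 3 */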
+
+/* drbd_meta-data.c (still in drbd_main.c) */
+/* 4th incarnation of the disk layout. */
+#define DRBD_MD_MAGIC (DRBD_MAGIC+4)
+
+extern struct drbd_conf **minor_table;
+extern struct ratelimit_state drbd_ratelimit_state;
+
+/* on the wire */
+enum drbd_packets {
+       /* receiver (data socket) */
+       P_DATA                = 0x00,
+       P_DATA_REPLY          = 0x01, /* Response to P_DATA_REQUEST */
+       P_RS_DATA_REPLY       = 0x02, /* Response to P_RS_DATA_REQUEST */
+       P_BARRIER             = 0x03,
+       P_BITMAP              = 0x04,
+       P_BECOME_SYNC_TARGET  = 0x05,
+       P_BECOME_SYNC_SOURCE  = 0x06,
+       P_UNPLUG_REMOTE       = 0x07, /* Used at various times to hint the peer */
+       P_DATA_REQUEST        = 0x08, /* Used to ask for a data block */
+       P_RS_DATA_REQUEST     = 0x09, /* Used to ask for a data block for resync */
+       P_SYNC_PARAM          = 0x0a,
+       P_PROTOCOL            = 0x0b,
+       P_UUIDS               = 0x0c,
+       P_SIZES               = 0x0d,
+       P_STATE               = 0x0e,
+       P_SYNC_UUID           = 0x0f,
+       P_AUTH_CHALLENGE      = 0x10,
+       P_AUTH_RESPONSE       = 0x11,
+       P_STATE_CHG_REQ       = 0x12,
+
+       /* asender (meta socket) */
+       P_PING                = 0x13,
+       P_PING_ACK            = 0x14,
+       P_RECV_ACK            = 0x15, /* Used in protocol B */
+       P_WRITE_ACK           = 0x16, /* Used in protocol C */
+       P_RS_WRITE_ACK        = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
+       P_DISCARD_ACK         = 0x18, /* Used in proto C, two-primaries conflict detection */
+       P_NEG_ACK             = 0x19, /* Sent if local disk is unusable */
+       P_NEG_DREPLY          = 0x1a, /* Local disk is broken... */
+       P_NEG_RS_DREPLY       = 0x1b, /* Local disk is broken... */
+       P_BARRIER_ACK         = 0x1c,
+       P_STATE_CHG_REPLY     = 0x1d,
+
+       /* "new" commands, no longer fitting into the ordering scheme above */
+
+       P_OV_REQUEST          = 0x1e, /* data socket */
+       P_OV_REPLY            = 0x1f,
+       P_OV_RESULT           = 0x20, /* meta socket */
+       P_CSUM_RS_REQUEST     = 0x21, /* data socket */
+       P_RS_IS_IN_SYNC       = 0x22, /* meta socket */
+       P_SYNC_PARAM89        = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
+       P_COMPRESSED_BITMAP   = 0x24, /* compressed or otherwise encoded bitmap transfer */
+
+       P_MAX_CMD             = 0x25,
+       P_MAY_IGNORE          = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
+       P_MAX_OPT_CMD         = 0x101,
+
+       /* special command ids for handshake */
+
+       P_HAND_SHAKE_M        = 0xfff1, /* First Packet on the MetaSock */
+       P_HAND_SHAKE_S        = 0xfff2, /* First Packet on the Socket */
+
+       P_HAND_SHAKE          = 0xfffe  /* FIXED for the next century! */
+};
+
+static inline const char *cmdname(enum drbd_packets cmd)
+{
+       /* THINK may need to become several global tables
+        * when we want to support more than
+        * one PRO_VERSION */
+       static const char *cmdnames[] = {
+               [P_DATA]                = "Data",
+               [P_DATA_REPLY]          = "DataReply",
+               [P_RS_DATA_REPLY]       = "RSDataReply",
+               [P_BARRIER]             = "Barrier",
+               [P_BITMAP]              = "ReportBitMap",
+               [P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
+               [P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
+               [P_UNPLUG_REMOTE]       = "UnplugRemote",
+               [P_DATA_REQUEST]        = "DataRequest",
+               [P_RS_DATA_REQUEST]     = "RSDataRequest",
+               [P_SYNC_PARAM]          = "SyncParam",
+               [P_SYNC_PARAM89]        = "SyncParam89",
+               [P_PROTOCOL]            = "ReportProtocol",
+               [P_UUIDS]               = "ReportUUIDs",
+               [P_SIZES]               = "ReportSizes",
+               [P_STATE]               = "ReportState",
+               [P_SYNC_UUID]           = "ReportSyncUUID",
+               [P_AUTH_CHALLENGE]      = "AuthChallenge",
+               [P_AUTH_RESPONSE]       = "AuthResponse",
+               [P_PING]                = "Ping",
+               [P_PING_ACK]            = "PingAck",
+               [P_RECV_ACK]            = "RecvAck",
+               [P_WRITE_ACK]           = "WriteAck",
+               [P_RS_WRITE_ACK]        = "RSWriteAck",
+               [P_DISCARD_ACK]         = "DiscardAck",
+               [P_NEG_ACK]             = "NegAck",
+               [P_NEG_DREPLY]          = "NegDReply",
+               [P_NEG_RS_DREPLY]       = "NegRSDReply",
+               [P_BARRIER_ACK]         = "BarrierAck",
+               [P_STATE_CHG_REQ]       = "StateChgRequest",
+               [P_STATE_CHG_REPLY]     = "StateChgReply",
+               [P_OV_REQUEST]          = "OVRequest",
+               [P_OV_REPLY]            = "OVReply",
+               [P_OV_RESULT]           = "OVResult",
+               [P_MAX_CMD]             = NULL,
+       };
+
+       if (cmd == P_HAND_SHAKE_M)
+               return "HandShakeM";
+       if (cmd == P_HAND_SHAKE_S)
+               return "HandShakeS";
+       if (cmd == P_HAND_SHAKE)
+               return "HandShake";
+       if (cmd >= P_MAX_CMD)
+               return "Unknown";
+       return cmdnames[cmd];
+}
+
+/* for sending/receiving the bitmap,
+ * possibly in some encoding scheme */
+struct bm_xfer_ctx {
+       /* "const"
+        * stores total bits and long words
+        * of the bitmap, so we don't need to
+        * call the accessor functions over and again. */
+       unsigned long bm_bits;
+       unsigned long bm_words;
+       /* during xfer, current position within the bitmap */
+       unsigned long bit_offset;
+       unsigned long word_offset;
+
+       /* statistics; index: (h->command == P_BITMAP) */
+       unsigned packets[2];
+       unsigned bytes[2];
+};
+
+extern void INFO_bm_xfer_stats(struct drbd_conf *mdev,
+               const char *direction, struct bm_xfer_ctx *c);
+
+static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
+{
+       /* word_offset counts "native long words" (32 or 64 bit),
+        * aligned at 64 bit.
+        * Encoded packet may end at an unaligned bit offset.
+        * In case a fallback clear text packet is transmitted in
+        * between, we adjust this offset back to the last 64bit
+        * aligned "native long word", which makes coding and decoding
+        * the plain text bitmap much more convenient.  */
+#if BITS_PER_LONG == 64
+       c->word_offset = c->bit_offset >> 6;
+#elif BITS_PER_LONG == 32
+       c->word_offset = c->bit_offset >> 5;
+       c->word_offset &= ~(1UL);
+#else
+# error "unsupported BITS_PER_LONG"
+#endif
+}
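+
+/* Worked example: for bit_offset == 100 this yields word_offset == 1 on a
+ * 64 bit host and word_offset == 2 on a 32 bit host; both refer to the same
+ * 64 bit aligned position (bit 64) in the bitmap. */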
+
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
+
+/* This is the layout for a packet on the wire.
+ * The byteorder is the network byte order.
+ *     (except for the block_id and barrier fields;
+ *     these are pointers to local structs
+ *     and have no relevance for the partner,
+ *     which just echoes them as received.)
+ *
+ * NOTE that the payload starts at a long aligned offset,
+ * regardless of 32 or 64 bit arch!
+ */
+struct p_header {
+       u32       magic;
+       u16       command;
+       u16       length;       /* bytes of data after this header */
+       u8        payload[0];
+} __packed;
+/* 8 bytes. packet FIXED for the next century! */
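+
+/* A minimal, hypothetical sketch (not used anywhere in the driver) of how
+ * such a header would be filled in for the wire, assuming the usual
+ * byteorder helpers:
+ *
+ *     static void example_prepare_header(struct p_header *h, u16 cmd, u16 len)
+ *     {
+ *             h->magic   = cpu_to_be32(DRBD_MAGIC);
+ *             h->command = cpu_to_be16(cmd);
+ *             h->length  = cpu_to_be16(len);
+ *     }
+ */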
+
+/*
+ * short commands, packets without payload, plain p_header:
+ *   P_PING
+ *   P_PING_ACK
+ *   P_BECOME_SYNC_TARGET
+ *   P_BECOME_SYNC_SOURCE
+ *   P_UNPLUG_REMOTE
+ */
+
+/*
+ * commands with out-of-struct payload:
+ *   P_BITMAP    (no additional fields)
+ *   P_DATA, P_DATA_REPLY (see p_data)
+ *   P_COMPRESSED_BITMAP (see receive_compressed_bitmap)
+ */
+
+/* these defines must not be changed without changing the protocol version */
+#define DP_HARDBARRIER       1
+#define DP_RW_SYNC           2
+#define DP_MAY_SET_IN_SYNC    4
+
+struct p_data {
+       struct p_header head;
+       u64         sector;    /* 64 bits sector number */
+       u64         block_id;  /* to identify the request in protocol B&C */
+       u32         seq_num;
+       u32         dp_flags;
+} __packed;
+
+/*
+ * commands which share a struct:
+ *  p_block_ack:
+ *   P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
+ *   P_DISCARD_ACK (proto C, two-primaries conflict detection)
+ *  p_block_req:
+ *   P_DATA_REQUEST, P_RS_DATA_REQUEST
+ */
+struct p_block_ack {
+       struct p_header head;
+       u64         sector;
+       u64         block_id;
+       u32         blksize;
+       u32         seq_num;
+} __packed;
+
+
+struct p_block_req {
+       struct p_header head;
+       u64 sector;
+       u64 block_id;
+       u32 blksize;
+       u32 pad;        /* to multiple of 8 Byte */
+} __packed;
+
+/*
+ * commands with their own struct for additional fields:
+ *   P_HAND_SHAKE
+ *   P_BARRIER
+ *   P_BARRIER_ACK
+ *   P_SYNC_PARAM
+ *   ReportParams
+ */
+
+struct p_handshake {
+       struct p_header head;   /* 8 bytes */
+       u32 protocol_min;
+       u32 feature_flags;
+       u32 protocol_max;
+
+       /* should be more than enough for future enhancements.
+        * For now, feature_flags and the reserverd array shall be zero.
+        */
+
+       u32 _pad;
+       u64 reserverd[7];
+} __packed;
+/* 80 bytes, FIXED for the next century */
+
+struct p_barrier {
+       struct p_header head;
+       u32 barrier;    /* barrier number _handle_ only */
+       u32 pad;        /* to multiple of 8 Byte */
+} __packed;
+
+struct p_barrier_ack {
+       struct p_header head;
+       u32 barrier;
+       u32 set_size;
+} __packed;
+
+struct p_rs_param {
+       struct p_header head;
+       u32 rate;
+
+             /* Since protocol version 88 and higher. */
+       char verify_alg[0];
+} __packed;
+
+struct p_rs_param_89 {
+       struct p_header head;
+       u32 rate;
+        /* protocol version 89: */
+       char verify_alg[SHARED_SECRET_MAX];
+       char csums_alg[SHARED_SECRET_MAX];
+} __packed;
+
+struct p_protocol {
+       struct p_header head;
+       u32 protocol;
+       u32 after_sb_0p;
+       u32 after_sb_1p;
+       u32 after_sb_2p;
+       u32 want_lose;
+       u32 two_primaries;
+
+              /* Since protocol version 87 and higher. */
+       char integrity_alg[0];
+
+} __packed;
+
+struct p_uuids {
+       struct p_header head;
+       u64 uuid[UI_EXTENDED_SIZE];
+} __packed;
+
+struct p_rs_uuid {
+       struct p_header head;
+       u64         uuid;
+} __packed;
+
+struct p_sizes {
+       struct p_header head;
+       u64         d_size;  /* size of disk */
+       u64         u_size;  /* user requested size */
+       u64         c_size;  /* current exported size */
+       u32         max_segment_size;  /* Maximal size of a BIO */
+       u32         queue_order_type;
+} __packed;
+
+struct p_state {
+       struct p_header head;
+       u32         state;
+} __packed;
+
+struct p_req_state {
+       struct p_header head;
+       u32         mask;
+       u32         val;
+} __packed;
+
+struct p_req_state_reply {
+       struct p_header head;
+       u32         retcode;
+} __packed;
+
+struct p_drbd06_param {
+       u64       size;
+       u32       state;
+       u32       blksize;
+       u32       protocol;
+       u32       version;
+       u32       gen_cnt[5];
+       u32       bit_map_gen[5];
+} __packed;
+
+struct p_discard {
+       struct p_header head;
+       u64         block_id;
+       u32         seq_num;
+       u32         pad;
+} __packed;
+
+/* Valid values for the encoding field.
+ * Bump proto version when changing this. */
+enum drbd_bitmap_code {
+       /* RLE_VLI_Bytes = 0,
+        * and other bit variants had been defined during
+        * algorithm evaluation. */
+       RLE_VLI_Bits = 2,
+};
+
+struct p_compressed_bm {
+       struct p_header head;
+       /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
+        * (encoding & 0x80): polarity (set/unset) of first runlength
+        * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
+        * used to pad up to head.length bytes
+        */
+       u8 encoding;
+
+       u8 code[0];
+} __packed;
+
+/* DCBP: Drbd Compressed Bitmap Packet ... */
+static inline enum drbd_bitmap_code
+DCBP_get_code(struct p_compressed_bm *p)
+{
+       return (enum drbd_bitmap_code)(p->encoding & 0x0f);
+}
+
+static inline void
+DCBP_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
+{
+       BUG_ON(code & ~0xf);
+       p->encoding = (p->encoding & ~0xf) | code;
+}
+
+static inline int
+DCBP_get_start(struct p_compressed_bm *p)
+{
+       return (p->encoding & 0x80) != 0;
+}
+
+static inline void
+DCBP_set_start(struct p_compressed_bm *p, int set)
+{
+       p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
+}
+
+static inline int
+DCBP_get_pad_bits(struct p_compressed_bm *p)
+{
+       return (p->encoding >> 4) & 0x7;
+}
+
+static inline void
+DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
+{
+       BUG_ON(n & ~0x7);
+       p->encoding = (p->encoding & ~(0x7 << 4)) | (n << 4);
+}
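+
+/* Illustrative assembly sketch (arbitrary values, not taken from the
+ * actual send path):
+ *
+ *     p->encoding = 0;
+ *     DCBP_set_pad_bits(p, 5);          // bits 4..6 = 5
+ *     DCBP_set_code(p, RLE_VLI_Bits);   // low nibble = 2
+ *     DCBP_set_start(p, 1);             // bit 7 = 1
+ *
+ * leaves p->encoding == 0x80 | (5 << 4) | 2 == 0xd2.
+ */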
+
+/* one bitmap packet, including the p_header,
+ * should fit within one _architecture independent_ page.
+ * so we need to use the fixed size 4KiB page size
+ * most architectures have used for a long time.
+ */
+#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header))
+#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
+#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
+#if (PAGE_SIZE < 4096)
+/* drbd_send_bitmap / receive_bitmap would break horribly */
+#error "PAGE_SIZE too small"
+#endif
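+
+/* Rough numbers (assuming the 8-byte p_header noted above):
+ * BM_PACKET_PAYLOAD_BYTES = 4096 - 8 = 4088 bytes,
+ * i.e. BM_PACKET_WORDS = 511 longs on 64-bit, 1022 longs on 32-bit. */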
+
+union p_polymorph {
+        struct p_header          header;
+        struct p_handshake       handshake;
+        struct p_data            data;
+        struct p_block_ack       block_ack;
+        struct p_barrier         barrier;
+        struct p_barrier_ack     barrier_ack;
+        struct p_rs_param_89     rs_param_89;
+        struct p_protocol        protocol;
+        struct p_sizes           sizes;
+        struct p_uuids           uuids;
+        struct p_state           state;
+        struct p_req_state       req_state;
+        struct p_req_state_reply req_state_reply;
+        struct p_block_req       block_req;
+} __packed;
+
+/**********************************************************************/
+enum drbd_thread_state {
+       None,
+       Running,
+       Exiting,
+       Restarting
+};
+
+struct drbd_thread {
+       spinlock_t t_lock;
+       struct task_struct *task;
+       struct completion stop;
+       enum drbd_thread_state t_state;
+       int (*function) (struct drbd_thread *);
+       struct drbd_conf *mdev;
+       int reset_cpu_mask;
+};
+
+static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
+{
+       /* THINK testing the t_state seems to be uncritical in all cases
+        * (but thread_{start,stop}), so we can read it *without* the lock.
+        *      --lge */
+
+       smp_rmb();
+       return thi->t_state;
+}
+
+
+/*
+ * Having this as the first member of a struct provides sort of "inheritance".
+ * "derived" structs can be "drbd_queue_work()"ed.
+ * The callback should know and cast back to the descendant struct.
+ * drbd_request and drbd_epoch_entry are descendants of drbd_work.
+ */
+struct drbd_work;
+typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
+struct drbd_work {
+       struct list_head list;
+       drbd_work_cb cb;
+};
+
+struct drbd_tl_epoch;
+struct drbd_request {
+       struct drbd_work w;
+       struct drbd_conf *mdev;
+
+       /* if local IO is not allowed, will be NULL.
+        * if local IO _is_ allowed, holds the locally submitted bio clone,
+        * or, after local IO completion, the ERR_PTR(error).
+        * see drbd_endio_pri(). */
+       struct bio *private_bio;
+
+       struct hlist_node colision;
+       sector_t sector;
+       unsigned int size;
+       unsigned int epoch; /* barrier_nr */
+
+       /* barrier_nr: used to check on "completion" whether this req was in
+        * the current epoch, and we therefore have to close it,
+        * starting a new epoch...
+        */
+
+       /* up to here, the struct layout is identical to drbd_epoch_entry;
+        * we might be able to use that to our advantage...  */
+
+       struct list_head tl_requests; /* ring list in the transfer log */
+       struct bio *master_bio;       /* master bio pointer */
+       unsigned long rq_state; /* see comments above _req_mod() */
+       int seq_num;
+       unsigned long start_time;
+};
+
+struct drbd_tl_epoch {
+       struct drbd_work w;
+       struct list_head requests; /* requests before */
+       struct drbd_tl_epoch *next; /* pointer to the next barrier */
+       unsigned int br_number;  /* the barriers identifier. */
+       int n_req;      /* number of requests attached before this barrier */
+};
+
+struct drbd_request;
+
+/* These Tl_epoch_entries may be in one of 6 lists:
+   active_ee .. data packet being written
+   sync_ee   .. syncer block being written
+   done_ee   .. block written, need to send P_WRITE_ACK
+   read_ee   .. [RS]P_DATA_REQUEST being read
+*/
+
+struct drbd_epoch {
+       struct list_head list;
+       unsigned int barrier_nr;
+       atomic_t epoch_size; /* increased on every request added. */
+       atomic_t active;     /* increased on every req. added, and dec on every finished. */
+       unsigned long flags;
+};
+
+/* drbd_epoch flag bits */
+enum {
+       DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
+       DE_BARRIER_IN_NEXT_EPOCH_DONE,
+       DE_CONTAINS_A_BARRIER,
+       DE_HAVE_BARRIER_NUMBER,
+       DE_IS_FINISHING,
+};
+
+enum epoch_event {
+       EV_PUT,
+       EV_GOT_BARRIER_NR,
+       EV_BARRIER_DONE,
+       EV_BECAME_LAST,
+       EV_TRACE_FLUSH,       /* TRACE_ are not real events, only used for tracing */
+       EV_TRACE_ADD_BARRIER, /* Doing the first write as a barrier write */
+       EV_TRACE_SETTING_BI,  /* Barrier is expressed with the first write of the next epoch */
+       EV_TRACE_ALLOC,
+       EV_TRACE_FREE,
+       EV_CLEANUP = 32, /* used as flag */
+};
+
+struct drbd_epoch_entry {
+       struct drbd_work    w;
+       struct drbd_conf *mdev;
+       struct bio *private_bio;
+       struct hlist_node colision;
+       sector_t sector;
+       unsigned int size;
+       struct drbd_epoch *epoch;
+
+       /* up to here, the struct layout is identical to drbd_request;
+        * we might be able to use that to our advantage...  */
+
+       unsigned int flags;
+       u64    block_id;
+};
+
+struct drbd_wq_barrier {
+       struct drbd_work w;
+       struct completion done;
+};
+
+struct digest_info {
+       int digest_size;
+       void *digest;
+};
+
+/* ee flag bits */
+enum {
+       __EE_CALL_AL_COMPLETE_IO,
+       __EE_CONFLICT_PENDING,
+       __EE_MAY_SET_IN_SYNC,
+       __EE_IS_BARRIER,
+};
+#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
+#define EE_CONFLICT_PENDING    (1<<__EE_CONFLICT_PENDING)
+#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
+#define EE_IS_BARRIER          (1<<__EE_IS_BARRIER)
+
+/* global flag bits */
+enum {
+       CREATE_BARRIER,         /* next P_DATA is preceded by a P_BARRIER */
+       SIGNAL_ASENDER,         /* whether asender wants to be interrupted */
+       SEND_PING,              /* whether asender should send a ping asap */
+
+       STOP_SYNC_TIMER,        /* tell timer to cancel itself */
+       UNPLUG_QUEUED,          /* only relevant with kernel 2.4 */
+       UNPLUG_REMOTE,          /* sending a "UnplugRemote" could help */
+       MD_DIRTY,               /* current uuids and flags not yet on disk */
+       DISCARD_CONCURRENT,     /* Set on one node, cleared on the peer! */
+       USE_DEGR_WFC_T,         /* degr-wfc-timeout instead of wfc-timeout. */
+       CLUSTER_ST_CHANGE,      /* Cluster wide state change going on... */
+       CL_ST_CHG_SUCCESS,
+       CL_ST_CHG_FAIL,
+       CRASHED_PRIMARY,        /* This node was a crashed primary.
+                                * Gets cleared when the state.conn
+                                * goes into C_CONNECTED state. */
+       WRITE_BM_AFTER_RESYNC,  /* A kmalloc() during resync failed */
+       NO_BARRIER_SUPP,        /* underlying block device doesn't implement barriers */
+       CONSIDER_RESYNC,
+
+       MD_NO_BARRIER,          /* meta data device does not support barriers,
+                                  so don't even try */
+       SUSPEND_IO,             /* suspend application io */
+       BITMAP_IO,              /* suspend application io;
+                                  once no more io in flight, start bitmap io */
+       BITMAP_IO_QUEUED,       /* Started bitmap IO */
+       RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
+       NET_CONGESTED,          /* The data socket is congested */
+
+       CONFIG_PENDING,         /* serialization of (re)configuration requests.
+                                * if set, also prevents the device from dying */
+       DEVICE_DYING,           /* device became unconfigured,
+                                * but worker thread is still handling the cleanup.
+                                * reconfiguring (nl_disk_conf, nl_net_conf) is disallowed,
+                                * while this is set. */
+       RESIZE_PENDING,         /* Size change detected locally, waiting for the response from
+                                * the peer, if it changed there as well. */
+};
+
+struct drbd_bitmap; /* opaque for drbd_conf */
+
+/* TODO sort members for performance
+ * MAYBE group them further */
+
+/* THINK maybe we actually want to use the default "event/%s" worker threads
+ * or similar in linux 2.6, which uses per cpu data and threads.
+ *
+ * To be general, this might need a spin_lock member.
+ * For now, please use the mdev->req_lock to protect list_head,
+ * see drbd_queue_work below.
+ */
+struct drbd_work_queue {
+       struct list_head q;
+       struct semaphore s; /* producers up it, worker down()s it */
+       spinlock_t q_lock;  /* to protect the list. */
+};
+
+struct drbd_socket {
+       struct drbd_work_queue work;
+       struct mutex mutex;
+       struct socket    *socket;
+       /* this way we get our
+        * send/receive buffers off the stack */
+       union p_polymorph sbuf;
+       union p_polymorph rbuf;
+};
+
+struct drbd_md {
+       u64 md_offset;          /* sector offset to 'super' block */
+
+       u64 la_size_sect;       /* last agreed size, unit sectors */
+       u64 uuid[UI_SIZE];
+       u64 device_uuid;
+       u32 flags;
+       u32 md_size_sect;
+
+       s32 al_offset;  /* signed relative sector offset to al area */
+       s32 bm_offset;  /* signed relative sector offset to bitmap */
+
+       /* u32 al_nr_extents;      important for restoring the AL
+        * is stored into  sync_conf.al_extents, which in turn
+        * gets applied to act_log->nr_elements
+        */
+};
+
+/* for sync_conf and other types... */
+#define NL_PACKET(name, number, fields) struct name { fields };
+#define NL_INTEGER(pn,pr,member) int member;
+#define NL_INT64(pn,pr,member) __u64 member;
+#define NL_BIT(pn,pr,member)   unsigned member:1;
+#define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
+#include "linux/drbd_nl.h"
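+
+/* Expansion sketch (hypothetical packet, for illustration only; the pn/pr
+ * arguments are ignored by the struct-generating definitions above):
+ *
+ *     NL_PACKET(example_conf, 99,
+ *             NL_INTEGER(1, pr, some_number)
+ *             NL_STRING(2, pr, some_name, 32)
+ *     )
+ *
+ * expands to
+ *
+ *     struct example_conf {
+ *             int some_number;
+ *             unsigned char some_name[32]; int some_name_len;
+ *     };
+ */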
+
+struct drbd_backing_dev {
+       struct block_device *backing_bdev;
+       struct block_device *md_bdev;
+       struct file *lo_file;
+       struct file *md_file;
+       struct drbd_md md;
+       struct disk_conf dc; /* The user provided config... */
+       sector_t known_size; /* last known size of that backing device */
+};
+
+struct drbd_md_io {
+       struct drbd_conf *mdev;
+       struct completion event;
+       int error;
+};
+
+struct bm_io_work {
+       struct drbd_work w;
+       char *why;
+       int (*io_fn)(struct drbd_conf *mdev);
+       void (*done)(struct drbd_conf *mdev, int rv);
+};
+
+enum write_ordering_e {
+       WO_none,
+       WO_drain_io,
+       WO_bdev_flush,
+       WO_bio_barrier
+};
+
+struct drbd_conf {
+       /* things that are stored as / read from meta data on disk */
+       unsigned long flags;
+
+       /* configured by drbdsetup */
+       struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
+       struct syncer_conf sync_conf;
+       struct drbd_backing_dev *ldev __protected_by(local);
+
+       sector_t p_size;     /* partner's disk size */
+       struct request_queue *rq_queue;
+       struct block_device *this_bdev;
+       struct gendisk      *vdisk;
+
+       struct drbd_socket data; /* data/barrier/cstate/parameter packets */
+       struct drbd_socket meta; /* ping/ack (metadata) packets */
+       int agreed_pro_version;  /* actually used protocol version */
+       unsigned long last_received; /* in jiffies, either socket */
+       unsigned int ko_count;
+       struct drbd_work  resync_work,
+                         unplug_work,
+                         md_sync_work;
+       struct timer_list resync_timer;
+       struct timer_list md_sync_timer;
+
+       /* Used after attach while negotiating new disk state. */
+       union drbd_state new_state_tmp;
+
+       union drbd_state state;
+       wait_queue_head_t misc_wait;
+       wait_queue_head_t state_wait;  /* upon each state change. */
+       unsigned int send_cnt;
+       unsigned int recv_cnt;
+       unsigned int read_cnt;
+       unsigned int writ_cnt;
+       unsigned int al_writ_cnt;
+       unsigned int bm_writ_cnt;
+       atomic_t ap_bio_cnt;     /* Requests we need to complete */
+       atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
+       atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
+       atomic_t unacked_cnt;    /* Need to send replies for */
+       atomic_t local_cnt;      /* Waiting for local completion */
+       atomic_t net_cnt;        /* Users of net_conf */
+       spinlock_t req_lock;
+       struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
+       struct drbd_tl_epoch *newest_tle;
+       struct drbd_tl_epoch *oldest_tle;
+       struct list_head out_of_sequence_requests;
+       struct hlist_head *tl_hash;
+       unsigned int tl_hash_s;
+
+       /* blocks to sync in this run [unit BM_BLOCK_SIZE] */
+       unsigned long rs_total;
+       /* number of sync IOs that failed in this run */
+       unsigned long rs_failed;
+       /* Syncer's start time [unit jiffies] */
+       unsigned long rs_start;
+       /* cumulated time in PausedSyncX state [unit jiffies] */
+       unsigned long rs_paused;
+       /* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
+       unsigned long rs_mark_left;
+       /* marks's time [unit jiffies] */
+       unsigned long rs_mark_time;
+       /* skipped because csum was equal [unit BM_BLOCK_SIZE] */
+       unsigned long rs_same_csum;
+
+       /* where does the admin want us to start? (sector) */
+       sector_t ov_start_sector;
+       /* where are we now? (sector) */
+       sector_t ov_position;
+       /* Start sector of out of sync range (to merge printk reporting). */
+       sector_t ov_last_oos_start;
+       /* size of out-of-sync range in sectors. */
+       sector_t ov_last_oos_size;
+       unsigned long ov_left; /* in bits */
+       struct crypto_hash *csums_tfm;
+       struct crypto_hash *verify_tfm;
+
+       struct drbd_thread receiver;
+       struct drbd_thread worker;
+       struct drbd_thread asender;
+       struct drbd_bitmap *bitmap;
+       unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */
+
+       /* Used to track operations of resync... */
+       struct lru_cache *resync;
+       /* Number of locked elements in resync LRU */
+       unsigned int resync_locked;
+       /* resync extent number waiting for application requests */
+       unsigned int resync_wenr;
+
+       int open_cnt;
+       u64 *p_uuid;
+       struct drbd_epoch *current_epoch;
+       spinlock_t epoch_lock;
+       unsigned int epochs;
+       enum write_ordering_e write_ordering;
+       struct list_head active_ee; /* IO in progress */
+       struct list_head sync_ee;   /* IO in progress */
+       struct list_head done_ee;   /* send ack */
+       struct list_head read_ee;   /* IO in progress */
+       struct list_head net_ee;    /* zero-copy network send in progress */
+       struct hlist_head *ee_hash; /* is protected by req_lock! */
+       unsigned int ee_hash_s;
+
+       /* this one is protected by ee_lock, single thread */
+       struct drbd_epoch_entry *last_write_w_barrier;
+
+       int next_barrier_nr;
+       struct hlist_head *app_reads_hash; /* is protected by req_lock */
+       struct list_head resync_reads;
+       atomic_t pp_in_use;
+       wait_queue_head_t ee_wait;
+       struct page *md_io_page;        /* one page buffer for md_io */
+       struct page *md_io_tmpp;        /* for logical_block_size != 512 */
+       struct mutex md_io_mutex;       /* protects the md_io_buffer */
+       spinlock_t al_lock;
+       wait_queue_head_t al_wait;
+       struct lru_cache *act_log;      /* activity log */
+       unsigned int al_tr_number;
+       int al_tr_cycle;
+       int al_tr_pos;   /* position of the next transaction in the journal */
+       struct crypto_hash *cram_hmac_tfm;
+       struct crypto_hash *integrity_w_tfm; /* to be used by the worker thread */
+       struct crypto_hash *integrity_r_tfm; /* to be used by the receiver thread */
+       void *int_dig_out;
+       void *int_dig_in;
+       void *int_dig_vv;
+       wait_queue_head_t seq_wait;
+       atomic_t packet_seq;
+       unsigned int peer_seq;
+       spinlock_t peer_seq_lock;
+       unsigned int minor;
+       unsigned long comm_bm_set; /* communicated number of set bits. */
+       cpumask_var_t cpu_mask;
+       struct bm_io_work bm_io_work;
+       u64 ed_uuid; /* UUID of the exposed data */
+       struct mutex state_mutex;
+       char congestion_reason;  /* Why we were congested... */
+};
+
+static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
+{
+       struct drbd_conf *mdev;
+
+       mdev = minor < minor_count ? minor_table[minor] : NULL;
+
+       return mdev;
+}
+
+static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
+{
+       return mdev->minor;
+}
+
+/* returns 1 if it was successful,
+ * returns 0 if there was no data socket.
+ * so wherever you are going to use the data.socket, e.g. do
+ *     if (!drbd_get_data_sock(mdev))
+ *             return 0;
+ *     CODE();
+ *     drbd_put_data_sock(mdev);
+ */
+static inline int drbd_get_data_sock(struct drbd_conf *mdev)
+{
+       mutex_lock(&mdev->data.mutex);
+       /* drbd_disconnect() could have called drbd_free_sock()
+        * while we were waiting in down()... */
+       if (unlikely(mdev->data.socket == NULL)) {
+               mutex_unlock(&mdev->data.mutex);
+               return 0;
+       }
+       return 1;
+}
+
+static inline void drbd_put_data_sock(struct drbd_conf *mdev)
+{
+       mutex_unlock(&mdev->data.mutex);
+}
+
+/*
+ * function declarations
+ *************************/
+
+/* drbd_main.c */
+
+enum chg_state_flags {
+       CS_HARD = 1,
+       CS_VERBOSE = 2,
+       CS_WAIT_COMPLETE = 4,
+       CS_SERIALIZE    = 8,
+       CS_ORDERED      = CS_WAIT_COMPLETE + CS_SERIALIZE,
+};
+
+extern void drbd_init_set_defaults(struct drbd_conf *mdev);
+extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
+                       union drbd_state mask, union drbd_state val);
+extern void drbd_force_state(struct drbd_conf *, union drbd_state,
+                       union drbd_state);
+extern int _drbd_request_state(struct drbd_conf *, union drbd_state,
+                       union drbd_state, enum chg_state_flags);
+extern int __drbd_set_state(struct drbd_conf *, union drbd_state,
+                           enum chg_state_flags, struct completion *done);
+extern void print_st_err(struct drbd_conf *, union drbd_state,
+                       union drbd_state, int);
+extern int  drbd_thread_start(struct drbd_thread *thi);
+extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
+#ifdef CONFIG_SMP
+extern void drbd_thread_current_set_cpu(struct drbd_conf *mdev);
+extern void drbd_calc_cpu_mask(struct drbd_conf *mdev);
+#else
+#define drbd_thread_current_set_cpu(A) ({})
+#define drbd_calc_cpu_mask(A) ({})
+#endif
+extern void drbd_free_resources(struct drbd_conf *mdev);
+extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
+                      unsigned int set_size);
+extern void tl_clear(struct drbd_conf *mdev);
+extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
+extern void drbd_free_sock(struct drbd_conf *mdev);
+extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
+                       void *buf, size_t size, unsigned msg_flags);
+extern int drbd_send_protocol(struct drbd_conf *mdev);
+extern int drbd_send_uuids(struct drbd_conf *mdev);
+extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
+extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
+extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply);
+extern int _drbd_send_state(struct drbd_conf *mdev);
+extern int drbd_send_state(struct drbd_conf *mdev);
+extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
+                       enum drbd_packets cmd, struct p_header *h,
+                       size_t size, unsigned msg_flags);
+#define USE_DATA_SOCKET 1
+#define USE_META_SOCKET 0
+extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
+                       enum drbd_packets cmd, struct p_header *h,
+                       size_t size);
+extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
+                       char *data, size_t size);
+extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
+extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
+                       u32 set_size);
+extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
+                       struct drbd_epoch_entry *e);
+extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
+                       struct p_block_req *rp);
+extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
+                       struct p_data *dp);
+extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
+                           sector_t sector, int blksize, u64 block_id);
+extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
+                          struct drbd_epoch_entry *e);
+extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
+extern int _drbd_send_barrier(struct drbd_conf *mdev,
+                       struct drbd_tl_epoch *barrier);
+extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
+                             sector_t sector, int size, u64 block_id);
+extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
+                                  sector_t sector,int size,
+                                  void *digest, int digest_size,
+                                  enum drbd_packets cmd);
+extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size);
+
+extern int drbd_send_bitmap(struct drbd_conf *mdev);
+extern int _drbd_send_bitmap(struct drbd_conf *mdev);
+extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode);
+extern void drbd_free_bc(struct drbd_backing_dev *ldev);
+extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
+
+/* drbd_meta-data.c (still in drbd_main.c) */
+extern void drbd_md_sync(struct drbd_conf *mdev);
+extern int  drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
+/* maybe define them below as inline? */
+extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
+extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
+extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
+extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
+extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
+extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
+extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
+extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
+extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
+extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
+                                int (*io_fn)(struct drbd_conf *),
+                                void (*done)(struct drbd_conf *, int),
+                                char *why);
+extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
+extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
+extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
+
+
+/* Meta data layout
+   We reserve a 128MB Block (4k aligned)
+   * either at the end of the backing device
+   * or on a separate meta data device. */
+
+#define MD_RESERVED_SECT (128LU << 11)  /* 128 MB, unit sectors */
+/* The following numbers are sectors */
+#define MD_AL_OFFSET 8     /* 8 Sectors after start of meta area */
+#define MD_AL_MAX_SIZE 64   /* = 32 kb LOG  ~ 3776 extents ~ 14 GB Storage */
+/* Allows up to about 3.8TB */
+#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)
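+
+/* Back-of-the-envelope sketch of the layout above:
+ * MD_RESERVED_SECT = 128 << 11 = 262144 sectors = 128 MiB,
+ * MD_BM_OFFSET     = 8 + 64    = 72 sectors,
+ * leaving ~262072 sectors for the bitmap.  One 512-byte bitmap sector
+ * holds 4096 bits, each bit covers 4 KiB (see BM_BLOCK_SHIFT below),
+ * so each bitmap sector describes 16 MiB and the reserved area can
+ * describe on the order of 4 TB of backing storage. */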
+
+/* Since the smallest IO unit is usually 512 bytes */
+#define MD_SECTOR_SHIFT         9
+#define MD_SECTOR_SIZE  (1<<MD_SECTOR_SHIFT)
+
+/* activity log */
+#define AL_EXTENTS_PT ((MD_SECTOR_SIZE-12)/8-1) /* 61 ; Extents per 512B sector */
+#define AL_EXTENT_SHIFT 22              /* One extent represents 4M Storage */
+#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
+
+#if BITS_PER_LONG == 32
+#define LN2_BPL 5
+#define cpu_to_lel(A) cpu_to_le32(A)
+#define lel_to_cpu(A) le32_to_cpu(A)
+#elif BITS_PER_LONG == 64
+#define LN2_BPL 6
+#define cpu_to_lel(A) cpu_to_le64(A)
+#define lel_to_cpu(A) le64_to_cpu(A)
+#else
+#error "LN2 of BITS_PER_LONG unknown!"
+#endif
+
+/* resync bitmap */
+/* 16MB sized 'bitmap extent' to track syncer usage */
+struct bm_extent {
+       int rs_left; /* number of bits set (out of sync) in this extent. */
+       int rs_failed; /* number of failed resync requests in this extent. */
+       unsigned long flags;
+       struct lc_element lce;
+};
+
+#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
+#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
+
+/* drbd_bitmap.c */
+/*
+ * We need to store one bit for a block.
+ * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
+ * Bit 0 ==> local node thinks this block is binary identical on both nodes
+ * Bit 1 ==> local node thinks this block needs to be synced.
+ */
+
+#define BM_BLOCK_SHIFT  12                      /* 4k per bit */
+#define BM_BLOCK_SIZE   (1<<BM_BLOCK_SHIFT)
+/* (9+3) : 512 bytes @ 8 bits; representing 16M storage
+ * per sector of on disk bitmap */
+#define BM_EXT_SHIFT    (BM_BLOCK_SHIFT + MD_SECTOR_SHIFT + 3)  /* = 24 */
+#define BM_EXT_SIZE     (1<<BM_EXT_SHIFT)
+
+#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
+#error "HAVE YOU FIXED drbdmeta AS WELL??"
+#endif
+
+/* this many _storage_ sectors are described by one bit */
+#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
+#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
+#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)
+
+/* conversion: bits to the kilobytes of storage they represent */
+#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
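+
+/* Worked example: with BM_BLOCK_SHIFT == 12 one bit covers 4 KiB == 8
+ * sectors, so BM_SECT_TO_BIT(x) == x >> 3, BM_BIT_TO_SECT(x) == x << 3
+ * and Bit2KB(1) == 4. */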
+
+/* in which _bitmap_ extent (resp. sector) the bit for a certain
+ * _storage_ sector is located in */
+#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
+
+/* how many _storage_ sectors we have per bitmap sector */
+#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
+#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
+
+/* in one sector of the bitmap, we have this many activity_log extents. */
+#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
+#define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
+
+#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
+#define BM_BLOCKS_PER_BM_EXT_MASK  ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)
+
+/* the extent in "PER_EXTENT" below is an activity log extent
+ * we need that many (long words/bytes) to store the bitmap
+ *                  of one AL_EXTENT_SIZE chunk of storage.
+ * we can store the bitmap for that many AL_EXTENTS within
+ * one sector of the _on_disk_ bitmap:
+ * bit  0        bit 37   bit 38            bit (512*8)-1
+ *          ...|........|........|.. // ..|........|
+ * sect. 0      `296     `304                     ^(512*8*8)-1
+ *
+#define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
+#define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
+#define BM_EXT_PER_SECT            ( 512 / BM_BYTES_PER_EXTENT )        //   4
+ */
+
+#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
+#define DRBD_MAX_SECTORS_BM \
+         ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
+#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
+#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
+#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
+#elif !defined(CONFIG_LBD) && BITS_PER_LONG == 32
+#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
+#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
+#else
+#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
+/* 16 TB in units of sectors */
+#if BITS_PER_LONG == 32
+/* adjust by one page worth of bitmap,
+ * so we won't wrap around in drbd_bm_find_next_bit.
+ * you should use a 64bit OS for that much storage, anyway. */
+#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
+#else
+#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32)
+#endif
+#endif
+
+/* Sector shift value for the "hash" functions of tl_hash and ee_hash tables.
+ * With a value of 6, all IO requests within one 32K block map to the same
+ * slot of the hash table. */
+#define HT_SHIFT 6
+#define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT))
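+
+/* i.e. DRBD_MAX_SEGMENT_SIZE == 1U << 15 == 32 KiB; sectors that differ
+ * only in their low HT_SHIFT bits (the hash presumably keys on
+ * sector >> HT_SHIFT) end up in the same slot. */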
+
+/* Number of elements in the app_reads_hash */
+#define APP_R_HSIZE 15
+
+extern int  drbd_bm_init(struct drbd_conf *mdev);
+extern int  drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors);
+extern void drbd_bm_cleanup(struct drbd_conf *mdev);
+extern void drbd_bm_set_all(struct drbd_conf *mdev);
+extern void drbd_bm_clear_all(struct drbd_conf *mdev);
+extern int  drbd_bm_set_bits(
+               struct drbd_conf *mdev, unsigned long s, unsigned long e);
+extern int  drbd_bm_clear_bits(
+               struct drbd_conf *mdev, unsigned long s, unsigned long e);
+/* bm_set_bits variant for use while holding drbd_bm_lock */
+extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
+               const unsigned long s, const unsigned long e);
+extern int  drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
+extern int  drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
+extern int  drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local);
+extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
+extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
+               unsigned long al_enr);
+extern size_t       drbd_bm_words(struct drbd_conf *mdev);
+extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
+extern sector_t      drbd_bm_capacity(struct drbd_conf *mdev);
+extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
+/* bm_find_next variants for use while you hold drbd_bm_lock() */
+extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
+extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo);
+extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
+extern int drbd_bm_rs_done(struct drbd_conf *mdev);
+/* for receive_bitmap */
+extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
+               size_t number, unsigned long *buffer);
+/* for _drbd_send_bitmap and drbd_bm_write_sect */
+extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
+               size_t number, unsigned long *buffer);
+
+extern void drbd_bm_lock(struct drbd_conf *mdev, char *why);
+extern void drbd_bm_unlock(struct drbd_conf *mdev);
+
+extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
+/* drbd_main.c */
+
+extern struct kmem_cache *drbd_request_cache;
+extern struct kmem_cache *drbd_ee_cache;       /* epoch entries */
+extern struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
+extern struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
+extern mempool_t *drbd_request_mempool;
+extern mempool_t *drbd_ee_mempool;
+
+extern struct page *drbd_pp_pool; /* drbd's page pool */
+extern spinlock_t   drbd_pp_lock;
+extern int         drbd_pp_vacant;
+extern wait_queue_head_t drbd_pp_wait;
+
+extern rwlock_t global_state_lock;
+
+extern struct drbd_conf *drbd_new_device(unsigned int minor);
+extern void drbd_free_mdev(struct drbd_conf *mdev);
+
+extern int proc_details;
+
+/* drbd_req */
+extern int drbd_make_request_26(struct request_queue *q, struct bio *bio);
+extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
+extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
+extern int is_valid_ar_handle(struct drbd_request *, sector_t);
+
+
+/* drbd_nl.c */
+extern void drbd_suspend_io(struct drbd_conf *mdev);
+extern void drbd_resume_io(struct drbd_conf *mdev);
+extern char *ppsize(char *buf, unsigned long long size);
+extern sector_t drbd_new_dev_size(struct drbd_conf *,
+               struct drbd_backing_dev *);
+enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *) __must_hold(local);
+extern void resync_after_online_grow(struct drbd_conf *);
+extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
+extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
+               int force);
+enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
+extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
+
+/* drbd_worker.c */
+extern int drbd_worker(struct drbd_thread *thi);
+extern int drbd_alter_sa(struct drbd_conf *mdev, int na);
+extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side);
+extern void resume_next_sg(struct drbd_conf *mdev);
+extern void suspend_other_sg(struct drbd_conf *mdev);
+extern int drbd_resync_finished(struct drbd_conf *mdev);
+/* maybe rather drbd_main.c ? */
+extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
+               struct drbd_backing_dev *bdev, sector_t sector, int rw);
+extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
+
+static inline void ov_oos_print(struct drbd_conf *mdev)
+{
+       if (mdev->ov_last_oos_size) {
+               dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n",
+                    (unsigned long long)mdev->ov_last_oos_start,
+                    (unsigned long)mdev->ov_last_oos_size);
+       }
+       mdev->ov_last_oos_size = 0;
+}
+
+
+extern void drbd_csum(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
+/* worker callbacks */
+extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
+extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
+extern int w_e_end_data_req(struct drbd_conf *, struct drbd_work *, int);
+extern int w_e_end_rsdata_req(struct drbd_conf *, struct drbd_work *, int);
+extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
+extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
+extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
+extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
+extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int);
+extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
+extern int w_io_error(struct drbd_conf *, struct drbd_work *, int);
+extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
+extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int);
+extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
+extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
+extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
+extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
+extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
+
+extern void resync_timer_fn(unsigned long data);
+
+/* drbd_receiver.c */
+extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
+extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
+                                           u64 id,
+                                           sector_t sector,
+                                           unsigned int data_size,
+                                           gfp_t gfp_mask) __must_hold(local);
+extern void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e);
+extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+               struct list_head *head);
+extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
+               struct list_head *head);
+extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
+extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
+extern void drbd_flush_workqueue(struct drbd_conf *mdev);
+
+/* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to
+ * mess with get_fs/set_fs, we know we are KERNEL_DS always. */
+static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
+                       char __user *optval, int optlen)
+{
+       int err;
+       if (level == SOL_SOCKET)
+               err = sock_setsockopt(sock, level, optname, optval, optlen);
+       else
+               err = sock->ops->setsockopt(sock, level, optname, optval,
+                                           optlen);
+       return err;
+}
+
+static inline void drbd_tcp_cork(struct socket *sock)
+{
+       int __user val = 1;
+       (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
+                       (char __user *)&val, sizeof(val));
+}
+
+static inline void drbd_tcp_uncork(struct socket *sock)
+{
+       int __user val = 0;
+       (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
+                       (char __user *)&val, sizeof(val));
+}
+
+static inline void drbd_tcp_nodelay(struct socket *sock)
+{
+       int __user val = 1;
+       (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
+                       (char __user *)&val, sizeof(val));
+}
+
+static inline void drbd_tcp_quickack(struct socket *sock)
+{
+       int __user val = 1;
+       (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
+                       (char __user *)&val, sizeof(val));
+}
+
+void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
+
+/* drbd_proc.c */
+extern struct proc_dir_entry *drbd_proc;
+extern struct file_operations drbd_proc_fops;
+extern const char *drbd_conn_str(enum drbd_conns s);
+extern const char *drbd_role_str(enum drbd_role s);
+
+/* drbd_actlog.c */
+extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector);
+extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector);
+extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
+extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
+extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
+extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
+extern int drbd_rs_del_all(struct drbd_conf *mdev);
+extern void drbd_rs_failed_io(struct drbd_conf *mdev,
+               sector_t sector, int size);
+extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
+extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
+               int size, const char *file, const unsigned int line);
+#define drbd_set_in_sync(mdev, sector, size) \
+       __drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
+extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
+               int size, const char *file, const unsigned int line);
+#define drbd_set_out_of_sync(mdev, sector, size) \
+       __drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
+extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
+extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev);
+extern void drbd_al_shrink(struct drbd_conf *mdev);
+
+
+/* drbd_nl.c */
+
+void drbd_nl_cleanup(void);
+int __init drbd_nl_init(void);
+void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
+void drbd_bcast_sync_progress(struct drbd_conf *mdev);
+void drbd_bcast_ee(struct drbd_conf *mdev,
+               const char *reason, const int dgs,
+               const char* seen_hash, const char* calc_hash,
+               const struct drbd_epoch_entry* e);
+
+
+/**
+ * DOC: DRBD State macros
+ *
+ * These macros are used to express state changes in easily readable form.
+ *
+ * The NS macros expand to a mask and a value that can be bit-ored onto the
+ * current state as soon as the spinlock (req_lock) has been taken.
+ *
+ * The _NS macros are used for state functions that get called with the
+ * spinlock held. These macros expand directly to the new state value.
+ *
+ * Besides the basic forms NS() and _NS() additional _?NS[23] are defined
+ * to express state changes that affect more than one aspect of the state.
+ *
+ * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
+ * means that the network connection was established and that the peer
+ * is in the secondary role.
+ */
+#define role_MASK R_MASK
+#define peer_MASK R_MASK
+#define disk_MASK D_MASK
+#define pdsk_MASK D_MASK
+#define conn_MASK C_MASK
+#define susp_MASK 1
+#define user_isp_MASK 1
+#define aftr_isp_MASK 1
+
+#define NS(T, S) \
+       ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
+       ({ union drbd_state val; val.i = 0; val.T = (S); val; })
+#define NS2(T1, S1, T2, S2) \
+       ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
+         mask.T2 = T2##_MASK; mask; }), \
+       ({ union drbd_state val; val.i = 0; val.T1 = (S1); \
+         val.T2 = (S2); val; })
+#define NS3(T1, S1, T2, S2, T3, S3) \
+       ({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
+         mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
+       ({ union drbd_state val;  val.i = 0; val.T1 = (S1); \
+         val.T2 = (S2); val.T3 = (S3); val; })
+
+#define _NS(D, T, S) \
+       D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T = (S); __ns; })
+#define _NS2(D, T1, S1, T2, S2) \
+       D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
+       __ns.T2 = (S2); __ns; })
+#define _NS3(D, T1, S1, T2, S2, T3, S3) \
+       D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
+       __ns.T2 = (S2); __ns.T3 = (S3); __ns; })
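+
+/* Usage sketch (illustrative, values chosen arbitrarily):
+ *
+ *     rv = drbd_request_state(mdev, NS(conn, C_DISCONNECTING));
+ *
+ * NS() expands to two arguments: a mask whose conn field is conn_MASK and
+ * a val whose conn field is C_DISCONNECTING, so only the connection aspect
+ * of the state changes.  The _NS*() variants instead start from the
+ * device's current state and are passed to functions that already hold the
+ * spinlock, e.g. _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL)
+ * as used in __drbd_chk_io_error_() below.
+ */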
+
+/*
+ * inline helper functions
+ *************************/
+
+static inline void drbd_state_lock(struct drbd_conf *mdev)
+{
+       wait_event(mdev->misc_wait,
+                  !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+}
+
+static inline void drbd_state_unlock(struct drbd_conf *mdev)
+{
+       clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
+       wake_up(&mdev->misc_wait);
+}
+
+static inline int _drbd_set_state(struct drbd_conf *mdev,
+                                  union drbd_state ns, enum chg_state_flags flags,
+                                  struct completion *done)
+{
+       int rv;
+
+       read_lock(&global_state_lock);
+       rv = __drbd_set_state(mdev, ns, flags, done);
+       read_unlock(&global_state_lock);
+
+       return rv;
+}
+
+/**
+ * drbd_request_state() - Request a state change
+ * @mdev:      DRBD device.
+ * @mask:      mask of state bits to change.
+ * @val:       value of new state bits.
+ *
+ * This is the most graceful way of requesting a state change. It is
+ * quite verbose in case the state change is not possible, and all those
+ * state changes are globally serialized.
+ */
+static inline int drbd_request_state(struct drbd_conf *mdev,
+                                    union drbd_state mask,
+                                    union drbd_state val)
+{
+       return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
+}
+
+#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
+static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
+{
+       switch (mdev->ldev->dc.on_io_error) {
+       case EP_PASS_ON:
+               if (!forcedetach) {
+                       if (printk_ratelimit())
+                               dev_err(DEV, "Local IO failed in %s. "
+                                            "Passing error on...\n", where);
+                       break;
+               }
+               /* NOTE fall through to detach case if forcedetach set */
+       case EP_DETACH:
+       case EP_CALL_HELPER:
+               if (mdev->state.disk > D_FAILED) {
+                       _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
+                       dev_err(DEV, "Local IO failed in %s. "
+                                    "Detaching...\n", where);
+               }
+               break;
+       }
+}
+
+/**
+ * drbd_chk_io_error() - Handle the on_io_error setting; should be called from all IO completion handlers
+ * @mdev:       DRBD device.
+ * @error:      Error code passed to the IO completion callback
+ * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
+ *
+ * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
+ */
+#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
+static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
+       int error, int forcedetach, const char *where)
+{
+       if (error) {
+               unsigned long flags;
+               spin_lock_irqsave(&mdev->req_lock, flags);
+               __drbd_chk_io_error_(mdev, forcedetach, where);
+               spin_unlock_irqrestore(&mdev->req_lock, flags);
+       }
+}
+
+
+/**
+ * drbd_md_first_sector() - Returns the first sector number of the meta data area
+ * @bdev:      Meta data block device.
+ *
+ * BTW, for internal meta data, this happens to be the maximum capacity
+ * we could agree upon with our peer node.
+ */
+static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
+{
+       switch (bdev->dc.meta_dev_idx) {
+       case DRBD_MD_INDEX_INTERNAL:
+       case DRBD_MD_INDEX_FLEX_INT:
+               return bdev->md.md_offset + bdev->md.bm_offset;
+       case DRBD_MD_INDEX_FLEX_EXT:
+       default:
+               return bdev->md.md_offset;
+       }
+}
+
+/**
+ * drbd_md_last_sector() - Return the last sector number of the meta data area
+ * @bdev:      Meta data block device.
+ */
+static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
+{
+       switch (bdev->dc.meta_dev_idx) {
+       case DRBD_MD_INDEX_INTERNAL:
+       case DRBD_MD_INDEX_FLEX_INT:
+               return bdev->md.md_offset + MD_AL_OFFSET - 1;
+       case DRBD_MD_INDEX_FLEX_EXT:
+       default:
+               return bdev->md.md_offset + bdev->md.md_size_sect;
+       }
+}
+
+/* Returns the number of 512 byte sectors of the device */
+static inline sector_t drbd_get_capacity(struct block_device *bdev)
+{
+       /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
+       return bdev ? bdev->bd_inode->i_size >> 9 : 0;
+}
+
+/**
+ * drbd_get_max_capacity() - Returns the capacity we announce to our peer
+ * @bdev:      Meta data block device.
+ *
+ * Returns the capacity we announce to our peer. We clip ourselves at the
+ * various MAX_SECTORS, because if we don't, the current implementation will
+ * oops sooner or later.
+ */
+static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
+{
+       sector_t s;
+       switch (bdev->dc.meta_dev_idx) {
+       case DRBD_MD_INDEX_INTERNAL:
+       case DRBD_MD_INDEX_FLEX_INT:
+               s = drbd_get_capacity(bdev->backing_bdev)
+                       ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
+                                       drbd_md_first_sector(bdev))
+                       : 0;
+               break;
+       case DRBD_MD_INDEX_FLEX_EXT:
+               s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
+                               drbd_get_capacity(bdev->backing_bdev));
+               /* clip at maximum size the meta device can support */
+               s = min_t(sector_t, s,
+                       BM_EXT_TO_SECT(bdev->md.md_size_sect
+                                    - bdev->md.bm_offset));
+               break;
+       default:
+               s = min_t(sector_t, DRBD_MAX_SECTORS,
+                               drbd_get_capacity(bdev->backing_bdev));
+       }
+       return s;
+}
+
+/**
+ * drbd_md_ss__() - Return the sector number of our meta data super block
+ * @mdev:      DRBD device.
+ * @bdev:      Meta data block device.
+ */
+static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
+                                   struct drbd_backing_dev *bdev)
+{
+       switch (bdev->dc.meta_dev_idx) {
+       default: /* external, some index */
+               return MD_RESERVED_SECT * bdev->dc.meta_dev_idx;
+       case DRBD_MD_INDEX_INTERNAL:
+               /* with drbd08, internal meta data is always "flexible" */
+       case DRBD_MD_INDEX_FLEX_INT:
+               /* sizeof(struct md_on_disk_07) == 4k
+                * position: last 4k aligned block of 4k size */
+               if (!bdev->backing_bdev) {
+                       if (__ratelimit(&drbd_ratelimit_state)) {
+                               dev_err(DEV, "bdev->backing_bdev==NULL\n");
+                               dump_stack();
+                       }
+                       return 0;
+               }
+               return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL)
+                       - MD_AL_OFFSET;
+       case DRBD_MD_INDEX_FLEX_EXT:
+               return 0;
+       }
+}
+
+static inline void
+_drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
+{
+       list_add_tail(&w->list, &q->q);
+       up(&q->s);
+}
+
+static inline void
+drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&q->q_lock, flags);
+       list_add(&w->list, &q->q);
+       up(&q->s); /* within the spinlock,
+                     see comment near end of drbd_worker() */
+       spin_unlock_irqrestore(&q->q_lock, flags);
+}
+
+static inline void
+drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&q->q_lock, flags);
+       list_add_tail(&w->list, &q->q);
+       up(&q->s); /* within the spinlock,
+                     see comment near end of drbd_worker() */
+       spin_unlock_irqrestore(&q->q_lock, flags);
+}
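+
+/* Consumer-side sketch (rough shape of what drbd_worker() presumably does;
+ * see the comments above, this is illustrative only):
+ *
+ *     down(&q->s);                    // wait for at least one work item
+ *     spin_lock_irq(&q->q_lock);
+ *     w = list_entry(q->q.next, struct drbd_work, list);
+ *     list_del_init(&w->list);
+ *     spin_unlock_irq(&q->q_lock);
+ *     w->cb(mdev, w, 0);              // cancel == 0
+ */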
+
+static inline void wake_asender(struct drbd_conf *mdev)
+{
+       if (test_bit(SIGNAL_ASENDER, &mdev->flags))
+               force_sig(DRBD_SIG, mdev->asender.task);
+}
+
+static inline void request_ping(struct drbd_conf *mdev)
+{
+       set_bit(SEND_PING, &mdev->flags);
+       wake_asender(mdev);
+}
+
+static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
+       enum drbd_packets cmd)
+{
+       struct p_header h;
+       return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
+}
+
+static inline int drbd_send_ping(struct drbd_conf *mdev)
+{
+       struct p_header h;
+       return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
+}
+
+static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
+{
+       struct p_header h;
+       return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
+}
+
+static inline void drbd_thread_stop(struct drbd_thread *thi)
+{
+       _drbd_thread_stop(thi, FALSE, TRUE);
+}
+
+static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
+{
+       _drbd_thread_stop(thi, FALSE, FALSE);
+}
+
+static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
+{
+       _drbd_thread_stop(thi, TRUE, FALSE);
+}
+
+/* counts how many answer packets we expect from our peer,
+ * for either explicit application requests,
+ * or implicit barrier packets as necessary.
+ * increased:
+ *  w_send_barrier
+ *  _req_mod(req, queue_for_net_write or queue_for_net_read);
+ *    it is much easier and equally valid to count what we queue for the
+ *    worker, even before it actually was queued or sent.
+ *    (drbd_make_request_common; recovery path on read io-error)
+ * decreased:
+ *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
+ *  _req_mod(req, data_received)
+ *     [from receive_DataReply]
+ *  _req_mod(req, write_acked_by_peer or recv_acked_by_peer or neg_acked)
+ *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
+ *     for some reason it is NOT decreased in got_NegAck,
+ *     but in the resulting cleanup code from report_params.
+ *     we should try to remember the reason for that...
+ *  _req_mod(req, send_failed or send_canceled)
+ *  _req_mod(req, connection_lost_while_pending)
+ *     [from tl_clear_barrier]
+ */
+static inline void inc_ap_pending(struct drbd_conf *mdev)
+{
+       atomic_inc(&mdev->ap_pending_cnt);
+}
+
+#define ERR_IF_CNT_IS_NEGATIVE(which)                          \
+       if (atomic_read(&mdev->which) < 0)                      \
+               dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n",       \
+                   __func__ , __LINE__ ,                       \
+                   atomic_read(&mdev->which))
+
+#define dec_ap_pending(mdev)   do {                            \
+       typecheck(struct drbd_conf *, mdev);                    \
+       if (atomic_dec_and_test(&mdev->ap_pending_cnt))         \
+               wake_up(&mdev->misc_wait);                      \
+       ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)
+
+/* counts how many resync-related answers we still expect from the peer
+ *                  increase                   decrease
+ * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
+ * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
+ *                                        (or P_NEG_ACK with ID_SYNCER)
+ */
+static inline void inc_rs_pending(struct drbd_conf *mdev)
+{
+       atomic_inc(&mdev->rs_pending_cnt);
+}
+
+#define dec_rs_pending(mdev)   do {                            \
+       typecheck(struct drbd_conf *, mdev);                    \
+       atomic_dec(&mdev->rs_pending_cnt);                      \
+       ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0)
+
+/* counts how many answers we still need to send to the peer.
+ * increased on
+ *  receive_Data       unless protocol A;
+ *                     we need to send a P_RECV_ACK (proto B)
+ *                     or P_WRITE_ACK (proto C)
+ *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
+ *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
+ *  receive_Barrier_*  we need to send a P_BARRIER_ACK
+ */
+static inline void inc_unacked(struct drbd_conf *mdev)
+{
+       atomic_inc(&mdev->unacked_cnt);
+}
+
+#define dec_unacked(mdev)      do {                            \
+       typecheck(struct drbd_conf *, mdev);                    \
+       atomic_dec(&mdev->unacked_cnt);                         \
+       ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
+
+#define sub_unacked(mdev, n)   do {                            \
+       typecheck(struct drbd_conf *, mdev);                    \
+       atomic_sub(n, &mdev->unacked_cnt);                      \
+       ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)
+
+
+static inline void put_net_conf(struct drbd_conf *mdev)
+{
+       if (atomic_dec_and_test(&mdev->net_cnt))
+               wake_up(&mdev->misc_wait);
+}
+
+/**
+ * get_net_conf() - Increase ref count on mdev->net_conf; Returns 0 if nothing there
+ * @mdev:      DRBD device.
+ *
+ * You have to call put_net_conf() when finished working with mdev->net_conf.
+ */
+static inline int get_net_conf(struct drbd_conf *mdev)
+{
+       int have_net_conf;
+
+       atomic_inc(&mdev->net_cnt);
+       have_net_conf = mdev->state.conn >= C_UNCONNECTED;
+       if (!have_net_conf)
+               put_net_conf(mdev);
+       return have_net_conf;
+}
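+
+/* Typical usage (illustrative sketch only; drbd_get_max_buffers() below
+ * follows the same pattern):
+ *
+ *     if (get_net_conf(mdev)) {
+ *             ... use mdev->net_conf ...
+ *             put_net_conf(mdev);
+ *     }
+ */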
+
+/**
+ * get_ldev() - Increase the ref count on mdev->ldev. Returns 0 if there is no ldev
+ * @M:         DRBD device.
+ *
+ * You have to call put_ldev() when finished working with mdev->ldev.
+ */
+#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
+#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))
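+
+/* Typical usage (illustrative; see e.g. drbd_kick_lo() further down):
+ *
+ *     if (get_ldev(mdev)) {
+ *             ... access mdev->ldev ...
+ *             put_ldev(mdev);
+ *     }
+ *
+ * get_ldev_if_state(mdev, <mins>) works the same, but only succeeds while
+ * the disk state is at least <mins>.
+ */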
+
+static inline void put_ldev(struct drbd_conf *mdev)
+{
+       __release(local);
+       if (atomic_dec_and_test(&mdev->local_cnt))
+               wake_up(&mdev->misc_wait);
+       D_ASSERT(atomic_read(&mdev->local_cnt) >= 0);
+}
+
+#ifndef __CHECKER__
+static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
+{
+       int io_allowed;
+
+       atomic_inc(&mdev->local_cnt);
+       io_allowed = (mdev->state.disk >= mins);
+       if (!io_allowed)
+               put_ldev(mdev);
+       return io_allowed;
+}
+#else
+extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
+#endif
+
+/* you must have a "get_ldev" reference */
+static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
+               unsigned long *bits_left, unsigned int *per_mil_done)
+{
+       /*
+        * this is to break it at compile time when we change that
+        * (we may feel 4TB maximum storage per drbd is not enough)
+        */
+       typecheck(unsigned long, mdev->rs_total);
+
+       /* note: both rs_total and rs_left are in bits, i.e. in
+        * units of BM_BLOCK_SIZE.
+        * for the percentage, we don't care. */
+
+       *bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
+       /* >> 10 to prevent overflow,
+        * +1 to prevent division by zero */
+       if (*bits_left > mdev->rs_total) {
+               /* doh. maybe a logic bug somewhere.
+                * may also be just a race condition
+                * between this and a disconnect during sync.
+                * for now, just prevent in-kernel buffer overflow.
+                */
+               smp_rmb();
+               dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
+                               drbd_conn_str(mdev->state.conn),
+                               *bits_left, mdev->rs_total, mdev->rs_failed);
+               *per_mil_done = 0;
+       } else {
+               /* make sure the calculation happens in long context */
+               unsigned long tmp = 1000UL -
+                               (*bits_left >> 10)*1000UL
+                               / ((mdev->rs_total >> 10) + 1UL);
+               *per_mil_done = tmp;
+       }
+}
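+
+/* Worked example (illustrative numbers): with rs_total = 1048576 bits and
+ * *bits_left = 262144 bits, the formula above yields
+ *     1000 - (262144 >> 10) * 1000 / ((1048576 >> 10) + 1)
+ *   = 1000 - 256000 / 1025
+ *   = 1000 - 249 = 751,
+ * i.e. *per_mil_done = 751, roughly 75.1% done. */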
+
+
+/* this throttles on-the-fly application requests
+ * according to max_buffers settings;
+ * maybe re-implement using semaphores? */
+static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
+{
+       int mxb = 1000000; /* arbitrary limit on open requests */
+       if (get_net_conf(mdev)) {
+               mxb = mdev->net_conf->max_buffers;
+               put_net_conf(mdev);
+       }
+       return mxb;
+}
+
+static inline int drbd_state_is_stable(union drbd_state s)
+{
+
+       /* DO NOT add a default clause, we want the compiler to warn us
+        * for any newly introduced state we may have forgotten to add here */
+
+       switch ((enum drbd_conns)s.conn) {
+       /* new io only accepted when there is no connection, ... */
+       case C_STANDALONE:
+       case C_WF_CONNECTION:
+       /* ... or there is a well established connection. */
+       case C_CONNECTED:
+       case C_SYNC_SOURCE:
+       case C_SYNC_TARGET:
+       case C_VERIFY_S:
+       case C_VERIFY_T:
+       case C_PAUSED_SYNC_S:
+       case C_PAUSED_SYNC_T:
+               /* maybe stable, look at the disk state */
+               break;
+
+       /* no new io accepted during transitional states
+        * like handshake or teardown */
+       case C_DISCONNECTING:
+       case C_UNCONNECTED:
+       case C_TIMEOUT:
+       case C_BROKEN_PIPE:
+       case C_NETWORK_FAILURE:
+       case C_PROTOCOL_ERROR:
+       case C_TEAR_DOWN:
+       case C_WF_REPORT_PARAMS:
+       case C_STARTING_SYNC_S:
+       case C_STARTING_SYNC_T:
+       case C_WF_BITMAP_S:
+       case C_WF_BITMAP_T:
+       case C_WF_SYNC_UUID:
+       case C_MASK:
+               /* not "stable" */
+               return 0;
+       }
+
+       switch ((enum drbd_disk_state)s.disk) {
+       case D_DISKLESS:
+       case D_INCONSISTENT:
+       case D_OUTDATED:
+       case D_CONSISTENT:
+       case D_UP_TO_DATE:
+               /* disk state is stable as well. */
+               break;
+
+       /* no new io accepted during transitional states */
+       case D_ATTACHING:
+       case D_FAILED:
+       case D_NEGOTIATING:
+       case D_UNKNOWN:
+       case D_MASK:
+               /* not "stable" */
+               return 0;
+       }
+
+       return 1;
+}
+
+static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
+{
+       int mxb = drbd_get_max_buffers(mdev);
+
+       if (mdev->state.susp)
+               return 0;
+       if (test_bit(SUSPEND_IO, &mdev->flags))
+               return 0;
+
+       /* to avoid potential deadlock or bitmap corruption,
+        * in various places, we only allow new application io
+        * to start during "stable" states. */
+
+       /* no new io accepted when attaching or detaching the disk */
+       if (!drbd_state_is_stable(mdev->state))
+               return 0;
+
+       /* since some older kernels don't have atomic_add_unless,
+        * and we are within the spinlock anyway, we have this workaround.  */
+       if (atomic_read(&mdev->ap_bio_cnt) > mxb)
+               return 0;
+       if (test_bit(BITMAP_IO, &mdev->flags))
+               return 0;
+       return 1;
+}
+
+/* I'd like to use wait_event_lock_irq,
+ * but I'm not sure when it got introduced,
+ * and not sure whether it takes 3 or 4 arguments */
+static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
+{
+       /* compare with after_state_ch,
+        * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */
+       DEFINE_WAIT(wait);
+
+       /* we wait here
+        *    as long as the device is suspended,
+        *    while the bitmap is still on the fly during the connection
+        *    handshake, and as long as we would exceed the max_buffer limit.
+        *
+        * to avoid races with the reconnect code,
+        * we need to atomic_inc within the spinlock. */
+
+       spin_lock_irq(&mdev->req_lock);
+       while (!__inc_ap_bio_cond(mdev)) {
+               prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
+               spin_unlock_irq(&mdev->req_lock);
+               schedule();
+               finish_wait(&mdev->misc_wait, &wait);
+               spin_lock_irq(&mdev->req_lock);
+       }
+       atomic_add(one_or_two, &mdev->ap_bio_cnt);
+       spin_unlock_irq(&mdev->req_lock);
+}
+
+static inline void dec_ap_bio(struct drbd_conf *mdev)
+{
+       int mxb = drbd_get_max_buffers(mdev);
+       int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
+
+       D_ASSERT(ap_bio >= 0);
+       /* this currently does wake_up for every dec_ap_bio!
+        * maybe rather introduce some type of hysteresis?
+        * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
+       if (ap_bio < mxb)
+               wake_up(&mdev->misc_wait);
+       if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
+               if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+                       drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
+       }
+}
+
+static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
+{
+       mdev->ed_uuid = val;
+}
+
+static inline int seq_cmp(u32 a, u32 b)
+{
+       /* we assume wrap around at 32bit.
+        * for wrap around at 24bit (old atomic_t),
+        * we'd have to
+        *  a <<= 8; b <<= 8;
+        */
+       return (s32)(a) - (s32)(b);
+}
+#define seq_lt(a, b) (seq_cmp((a), (b)) < 0)
+#define seq_gt(a, b) (seq_cmp((a), (b)) > 0)
+#define seq_ge(a, b) (seq_cmp((a), (b)) >= 0)
+#define seq_le(a, b) (seq_cmp((a), (b)) <= 0)
+/* CAUTION: please no side effects in arguments! */
+#define seq_max(a, b) ((u32)(seq_gt((a), (b)) ? (a) : (b)))
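+/* Illustrative example: across a 32bit wrap around, seq_gt(1, 0xffffffff)
+ * is still true, because (s32)(1 - 0xffffffff) == 2 > 0, so seq_max() keeps
+ * selecting the logically newer sequence number. */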
+
+static inline void update_peer_seq(struct drbd_conf *mdev, unsigned int new_seq)
+{
+       unsigned int m;
+       spin_lock(&mdev->peer_seq_lock);
+       m = seq_max(mdev->peer_seq, new_seq);
+       mdev->peer_seq = m;
+       spin_unlock(&mdev->peer_seq_lock);
+       if (m == new_seq)
+               wake_up(&mdev->seq_wait);
+}
+
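+/* Consider the data socket congested once its send queue holds more than
+ * 4/5 of the configured socket send buffer. */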
+static inline void drbd_update_congested(struct drbd_conf *mdev)
+{
+       struct sock *sk = mdev->data.socket->sk;
+       if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
+               set_bit(NET_CONGESTED, &mdev->flags);
+}
+
+static inline int drbd_queue_order_type(struct drbd_conf *mdev)
+{
+       /* sorry, we currently have no working implementation
+        * of distributed TCQ stuff */
+#ifndef QUEUE_ORDERED_NONE
+#define QUEUE_ORDERED_NONE 0
+#endif
+       return QUEUE_ORDERED_NONE;
+}
+
+static inline void drbd_blk_run_queue(struct request_queue *q)
+{
+       if (q && q->unplug_fn)
+               q->unplug_fn(q);
+}
+
+static inline void drbd_kick_lo(struct drbd_conf *mdev)
+{
+       if (get_ldev(mdev)) {
+               drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
+               put_ldev(mdev);
+       }
+}
+
+static inline void drbd_md_flush(struct drbd_conf *mdev)
+{
+       int r;
+
+       if (test_bit(MD_NO_BARRIER, &mdev->flags))
+               return;
+
+       r = blkdev_issue_flush(mdev->ldev->md_bdev, NULL);
+       if (r) {
+               set_bit(MD_NO_BARRIER, &mdev->flags);
+               dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
+       }
+}
+
+#endif
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
new file mode 100644 (file)
index 0000000..edf0b80
--- /dev/null
@@ -0,0 +1,3735 @@
+/*
+   drbd.c
+
+   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
+
+   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
+   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
+   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
+
+   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
+   from Logicworks, Inc. for making SDP replication support possible.
+
+   drbd is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+
+   drbd is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with drbd; see the file COPYING.  If not, write to
+   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+
+ */
+
+#include <linux/autoconf.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/drbd.h>
+#include <asm/uaccess.h>
+#include <asm/types.h>
+#include <net/sock.h>
+#include <linux/ctype.h>
+#include <linux/smp_lock.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
+#include <linux/mm_inline.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/reboot.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+
+#define __KERNEL_SYSCALLS__
+#include <linux/unistd.h>
+#include <linux/vmalloc.h>
+
+#include <linux/drbd_limits.h>
+#include "drbd_int.h"
+#include "drbd_tracing.h"
+#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
+
+#include "drbd_vli.h"
+
+struct after_state_chg_work {
+       struct drbd_work w;
+       union drbd_state os;
+       union drbd_state ns;
+       enum chg_state_flags flags;
+       struct completion *done;
+};
+
+int drbdd_init(struct drbd_thread *);
+int drbd_worker(struct drbd_thread *);
+int drbd_asender(struct drbd_thread *);
+
+int drbd_init(void);
+static int drbd_open(struct block_device *bdev, fmode_t mode);
+static int drbd_release(struct gendisk *gd, fmode_t mode);
+static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
+static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+                          union drbd_state ns, enum chg_state_flags flags);
+static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
+static void md_sync_timer_fn(unsigned long data);
+static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
+
+DEFINE_TRACE(drbd_unplug);
+DEFINE_TRACE(drbd_uuid);
+DEFINE_TRACE(drbd_ee);
+DEFINE_TRACE(drbd_packet);
+DEFINE_TRACE(drbd_md_io);
+DEFINE_TRACE(drbd_epoch);
+DEFINE_TRACE(drbd_netlink);
+DEFINE_TRACE(drbd_actlog);
+DEFINE_TRACE(drbd_bio);
+DEFINE_TRACE(_drbd_resync);
+DEFINE_TRACE(drbd_req);
+
+MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
+             "Lars Ellenberg <lars@linbit.com>");
+MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
+MODULE_VERSION(REL_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
+MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
+
+#include <linux/moduleparam.h>
+/* allow_open_on_secondary */
+MODULE_PARM_DESC(allow_oos, "DONT USE!");
+/* thanks to these macros, if compiled into the kernel (not-module),
+ * this becomes the boot parameter drbd.minor_count */
+module_param(minor_count, uint, 0444);
+module_param(disable_sendpage, bool, 0644);
+module_param(allow_oos, bool, 0);
+module_param(cn_idx, uint, 0444);
+module_param(proc_details, int, 0644);
+
+#ifdef CONFIG_DRBD_FAULT_INJECTION
+int enable_faults;
+int fault_rate;
+static int fault_count;
+int fault_devs;
+/* bitmap of enabled faults */
+module_param(enable_faults, int, 0664);
+/* fault rate % value - applies to all enabled faults */
+module_param(fault_rate, int, 0664);
+/* count of faults inserted */
+module_param(fault_count, int, 0664);
+/* bitmap of devices to insert faults on */
+module_param(fault_devs, int, 0644);
+#endif
+
+/* module parameter, defined */
+unsigned int minor_count = 32;
+int disable_sendpage;
+int allow_oos;
+unsigned int cn_idx = CN_IDX_DRBD;
+int proc_details;       /* Detail level in proc drbd */
+
+/* Module parameter for setting the user mode helper program
+ * to run. Default is /sbin/drbdadm */
+char usermode_helper[80] = "/sbin/drbdadm";
+
+module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
+
+/* in 2.6.x, our device mapping and config info contains our virtual gendisks
+ * as member "struct gendisk *vdisk;"
+ */
+struct drbd_conf **minor_table;
+
+struct kmem_cache *drbd_request_cache;
+struct kmem_cache *drbd_ee_cache;      /* epoch entries */
+struct kmem_cache *drbd_bm_ext_cache;  /* bitmap extents */
+struct kmem_cache *drbd_al_ext_cache;  /* activity log extents */
+mempool_t *drbd_request_mempool;
+mempool_t *drbd_ee_mempool;
+
+/* I do not use a standard mempool, because:
+   1) I want to hand out the pre-allocated objects first.
+   2) I want to be able to interrupt sleeping allocation with a signal.
+   Note: This is a singly linked list, the next pointer is the private
+        member of struct page.
+ */
+struct page *drbd_pp_pool;
+spinlock_t   drbd_pp_lock;
+int          drbd_pp_vacant;
+wait_queue_head_t drbd_pp_wait;
+
+DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
+
+static struct block_device_operations drbd_ops = {
+       .owner =   THIS_MODULE,
+       .open =    drbd_open,
+       .release = drbd_release,
+};
+
+#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
+
+#ifdef __CHECKER__
+/* When checking with sparse, and this is an inline function, sparse will
+   give tons of false positives. When this is a real function, sparse works.
+ */
+int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
+{
+       int io_allowed;
+
+       atomic_inc(&mdev->local_cnt);
+       io_allowed = (mdev->state.disk >= mins);
+       if (!io_allowed) {
+               if (atomic_dec_and_test(&mdev->local_cnt))
+                       wake_up(&mdev->misc_wait);
+       }
+       return io_allowed;
+}
+
+#endif
+
+/**
+ * DOC: The transfer log
+ *
+ * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
+ * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
+ * of the list. There is always at least one &struct drbd_tl_epoch object.
+ *
+ * Each &struct drbd_tl_epoch has a circular doubly linked list of requests
+ * attached.
+ */
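+
+/* Roughly (illustrative sketch):
+ *
+ *   oldest_tle -> [epoch] --next--> [epoch] --next--> ... --> [epoch] <- newest_tle
+ *                 requests          requests                  requests
+ *
+ * New barriers are appended at newest_tle (_tl_add_barrier); completed epochs
+ * are freed or recycled starting from oldest_tle (tl_release, tl_clear).
+ */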
+static int tl_init(struct drbd_conf *mdev)
+{
+       struct drbd_tl_epoch *b;
+
+       /* during device minor initialization, we may well use GFP_KERNEL */
+       b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
+       if (!b)
+               return 0;
+       INIT_LIST_HEAD(&b->requests);
+       INIT_LIST_HEAD(&b->w.list);
+       b->next = NULL;
+       b->br_number = 4711;
+       b->n_req = 0;
+       b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
+
+       mdev->oldest_tle = b;
+       mdev->newest_tle = b;
+       INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
+
+       mdev->tl_hash = NULL;
+       mdev->tl_hash_s = 0;
+
+       return 1;
+}
+
+static void tl_cleanup(struct drbd_conf *mdev)
+{
+       D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
+       D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
+       kfree(mdev->oldest_tle);
+       mdev->oldest_tle = NULL;
+       kfree(mdev->unused_spare_tle);
+       mdev->unused_spare_tle = NULL;
+       kfree(mdev->tl_hash);
+       mdev->tl_hash = NULL;
+       mdev->tl_hash_s = 0;
+}
+
+/**
+ * _tl_add_barrier() - Adds a barrier to the transfer log
+ * @mdev:      DRBD device.
+ * @new:       Barrier to be added before the current head of the TL.
+ *
+ * The caller must hold the req_lock.
+ */
+void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
+{
+       struct drbd_tl_epoch *newest_before;
+
+       INIT_LIST_HEAD(&new->requests);
+       INIT_LIST_HEAD(&new->w.list);
+       new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
+       new->next = NULL;
+       new->n_req = 0;
+
+       newest_before = mdev->newest_tle;
+       /* never send a barrier number == 0, because that is special-cased
+        * when using TCQ for our write ordering code */
+       new->br_number = (newest_before->br_number+1) ?: 1;
+       if (mdev->newest_tle != new) {
+               mdev->newest_tle->next = new;
+               mdev->newest_tle = new;
+       }
+}
+
+/**
+ * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
+ * @mdev:      DRBD device.
+ * @barrier_nr:        Expected identifier of the DRBD write barrier packet.
+ * @set_size:  Expected number of requests before that barrier.
+ *
+ * In case the passed barrier_nr or set_size does not match the oldest
+ * &struct drbd_tl_epoch objects this function will cause a termination
+ * of the connection.
+ */
+void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
+                      unsigned int set_size)
+{
+       struct drbd_tl_epoch *b, *nob; /* next old barrier */
+       struct list_head *le, *tle;
+       struct drbd_request *r;
+
+       spin_lock_irq(&mdev->req_lock);
+
+       b = mdev->oldest_tle;
+
+       /* first some paranoia code */
+       if (b == NULL) {
+               dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
+                       barrier_nr);
+               goto bail;
+       }
+       if (b->br_number != barrier_nr) {
+               dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
+                       barrier_nr, b->br_number);
+               goto bail;
+       }
+       if (b->n_req != set_size) {
+               dev_err(DEV, "BAD! BarrierAck #%u received with n_req=%u, expected n_req=%u!\n",
+                       barrier_nr, set_size, b->n_req);
+               goto bail;
+       }
+
+       /* Clean up list of requests processed during current epoch */
+       list_for_each_safe(le, tle, &b->requests) {
+               r = list_entry(le, struct drbd_request, tl_requests);
+               _req_mod(r, barrier_acked);
+       }
+       /* There could be requests on the list waiting for completion
+          of the write to the local disk. To avoid corruptions of
+          slab's data structures we have to remove the list's head.
+
+          Also there could have been a barrier ack out of sequence, overtaking
+          the write acks - which would be a bug and violate write ordering.
+          To not deadlock in case we lose connection while such requests are
+          still pending, we need some way to find them for the
+          _req_mod(connection_lost_while_pending).
+
+          These have been list_move'd to the out_of_sequence_requests list in
+          _req_mod(, barrier_acked) above.
+          */
+       list_del_init(&b->requests);
+
+       nob = b->next;
+       if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+               _tl_add_barrier(mdev, b);
+               if (nob)
+                       mdev->oldest_tle = nob;
+               /* if nob == NULL b was the only barrier, and becomes the new
+                  barrier. Therefore mdev->oldest_tle already points to b */
+       } else {
+               D_ASSERT(nob != NULL);
+               mdev->oldest_tle = nob;
+               kfree(b);
+       }
+
+       spin_unlock_irq(&mdev->req_lock);
+       dec_ap_pending(mdev);
+
+       return;
+
+bail:
+       spin_unlock_irq(&mdev->req_lock);
+       drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
+}
+
+
+/**
+ * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
+ * @mdev:      DRBD device.
+ *
+ * This is called after the connection to the peer was lost. The storage covered
+ * by the requests on the transfer log gets marked as out of sync. Called from
+ * the receiver thread and the worker thread.
+ */
+void tl_clear(struct drbd_conf *mdev)
+{
+       struct drbd_tl_epoch *b, *tmp;
+       struct list_head *le, *tle;
+       struct drbd_request *r;
+       int new_initial_bnr = net_random();
+
+       spin_lock_irq(&mdev->req_lock);
+
+       b = mdev->oldest_tle;
+       while (b) {
+               list_for_each_safe(le, tle, &b->requests) {
+                       r = list_entry(le, struct drbd_request, tl_requests);
+                       /* It would be nice to complete outside of spinlock.
+                        * But this is easier for now. */
+                       _req_mod(r, connection_lost_while_pending);
+               }
+               tmp = b->next;
+
+               /* there could still be requests on that ring list,
+                * in case local io is still pending */
+               list_del(&b->requests);
+
+               /* dec_ap_pending corresponding to queue_barrier.
+                * the newest barrier may not have been queued yet,
+                * in which case w.cb is still NULL. */
+               if (b->w.cb != NULL)
+                       dec_ap_pending(mdev);
+
+               if (b == mdev->newest_tle) {
+                       /* recycle, but reinit! */
+                       D_ASSERT(tmp == NULL);
+                       INIT_LIST_HEAD(&b->requests);
+                       INIT_LIST_HEAD(&b->w.list);
+                       b->w.cb = NULL;
+                       b->br_number = new_initial_bnr;
+                       b->n_req = 0;
+
+                       mdev->oldest_tle = b;
+                       break;
+               }
+               kfree(b);
+               b = tmp;
+       }
+
+       /* we expect this list to be empty. */
+       D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
+
+       /* but just in case, clean it up anyway! */
+       list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
+               r = list_entry(le, struct drbd_request, tl_requests);
+               /* It would be nice to complete outside of spinlock.
+                * But this is easier for now. */
+               _req_mod(r, connection_lost_while_pending);
+       }
+
+       /* ensure bit indicating barrier is required is clear */
+       clear_bit(CREATE_BARRIER, &mdev->flags);
+
+       spin_unlock_irq(&mdev->req_lock);
+}
+
+/**
+ * cl_wide_st_chg() - TRUE if the state change is a cluster wide one
+ * @mdev:      DRBD device.
+ * @os:                old (current) state.
+ * @ns:                new (wanted) state.
+ */
+static int cl_wide_st_chg(struct drbd_conf *mdev,
+                         union drbd_state os, union drbd_state ns)
+{
+       return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
+                ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
+                 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
+                 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
+                 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
+               (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
+               (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
+}
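+/* Illustrative examples: promoting to R_PRIMARY, entering C_STARTING_SYNC_*,
+ * or letting the disk become D_DISKLESS while both sides are connected needs
+ * the peer's consent and is therefore cluster wide; the same changes while
+ * C_STANDALONE are purely local. */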
+
+int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
+                     union drbd_state mask, union drbd_state val)
+{
+       unsigned long flags;
+       union drbd_state os, ns;
+       int rv;
+
+       spin_lock_irqsave(&mdev->req_lock, flags);
+       os = mdev->state;
+       ns.i = (os.i & ~mask.i) | val.i;
+       rv = _drbd_set_state(mdev, ns, f, NULL);
+       ns = mdev->state;
+       spin_unlock_irqrestore(&mdev->req_lock, flags);
+
+       return rv;
+}
+
+/**
+ * drbd_force_state() - Impose a change which happens outside our control on our state
+ * @mdev:      DRBD device.
+ * @mask:      mask of state bits to change.
+ * @val:       value of new state bits.
+ */
+void drbd_force_state(struct drbd_conf *mdev,
+       union drbd_state mask, union drbd_state val)
+{
+       drbd_change_state(mdev, CS_HARD, mask, val);
+}
+
+static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
+static int is_valid_state_transition(struct drbd_conf *,
+                                    union drbd_state, union drbd_state);
+static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
+                                      union drbd_state ns, int *warn_sync_abort);
+int drbd_send_state_req(struct drbd_conf *,
+                       union drbd_state, union drbd_state);
+
+static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
+                                   union drbd_state mask, union drbd_state val)
+{
+       union drbd_state os, ns;
+       unsigned long flags;
+       int rv;
+
+       if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
+               return SS_CW_SUCCESS;
+
+       if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
+               return SS_CW_FAILED_BY_PEER;
+
+       rv = 0;
+       spin_lock_irqsave(&mdev->req_lock, flags);
+       os = mdev->state;
+       ns.i = (os.i & ~mask.i) | val.i;
+       ns = sanitize_state(mdev, os, ns, NULL);
+
+       if (!cl_wide_st_chg(mdev, os, ns))
+               rv = SS_CW_NO_NEED;
+       if (!rv) {
+               rv = is_valid_state(mdev, ns);
+               if (rv == SS_SUCCESS) {
+                       rv = is_valid_state_transition(mdev, ns, os);
+                       if (rv == SS_SUCCESS)
+                               rv = 0; /* cont waiting, otherwise fail. */
+               }
+       }
+       spin_unlock_irqrestore(&mdev->req_lock, flags);
+
+       return rv;
+}
+
+/**
+ * drbd_req_state() - Perform a possibly cluster-wide state change
+ * @mdev:      DRBD device.
+ * @mask:      mask of state bits to change.
+ * @val:       value of new state bits.
+ * @f:         flags
+ *
+ * Should not be called directly, use drbd_request_state() or
+ * _drbd_request_state().
+ */
+static int drbd_req_state(struct drbd_conf *mdev,
+                         union drbd_state mask, union drbd_state val,
+                         enum chg_state_flags f)
+{
+       struct completion done;
+       unsigned long flags;
+       union drbd_state os, ns;
+       int rv;
+
+       init_completion(&done);
+
+       if (f & CS_SERIALIZE)
+               mutex_lock(&mdev->state_mutex);
+
+       spin_lock_irqsave(&mdev->req_lock, flags);
+       os = mdev->state;
+       ns.i = (os.i & ~mask.i) | val.i;
+       ns = sanitize_state(mdev, os, ns, NULL);
+
+       if (cl_wide_st_chg(mdev, os, ns)) {
+               rv = is_valid_state(mdev, ns);
+               if (rv == SS_SUCCESS)
+                       rv = is_valid_state_transition(mdev, ns, os);
+               spin_unlock_irqrestore(&mdev->req_lock, flags);
+
+               if (rv < SS_SUCCESS) {
+                       if (f & CS_VERBOSE)
+                               print_st_err(mdev, os, ns, rv);
+                       goto abort;
+               }
+
+               drbd_state_lock(mdev);
+               if (!drbd_send_state_req(mdev, mask, val)) {
+                       drbd_state_unlock(mdev);
+                       rv = SS_CW_FAILED_BY_PEER;
+                       if (f & CS_VERBOSE)
+                               print_st_err(mdev, os, ns, rv);
+                       goto abort;
+               }
+
+               wait_event(mdev->state_wait,
+                       (rv = _req_st_cond(mdev, mask, val)));
+
+               if (rv < SS_SUCCESS) {
+                       drbd_state_unlock(mdev);
+                       if (f & CS_VERBOSE)
+                               print_st_err(mdev, os, ns, rv);
+                       goto abort;
+               }
+               spin_lock_irqsave(&mdev->req_lock, flags);
+               os = mdev->state;
+               ns.i = (os.i & ~mask.i) | val.i;
+               rv = _drbd_set_state(mdev, ns, f, &done);
+               drbd_state_unlock(mdev);
+       } else {
+               rv = _drbd_set_state(mdev, ns, f, &done);
+       }
+
+       spin_unlock_irqrestore(&mdev->req_lock, flags);
+
+       if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
+               D_ASSERT(current != mdev->worker.task);
+               wait_for_completion(&done);
+       }
+
+abort:
+       if (f & CS_SERIALIZE)
+               mutex_unlock(&mdev->state_mutex);
+
+       return rv;
+}
+
+/**
+ * _drbd_request_state() - Request a state change (with flags)
+ * @mdev:      DRBD device.
+ * @mask:      mask of state bits to change.
+ * @val:       value of new state bits.
+ * @f:         flags
+ *
+ * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
+ * flag, or when logging of failed state change requests is not desired.
+ */
+int _drbd_request_state(struct drbd_conf *mdev,        union drbd_state mask,
+                       union drbd_state val,   enum chg_state_flags f)
+{
+       int rv;
+
+       wait_event(mdev->state_wait,
+                  (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
+
+       return rv;
+}
+
+static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
+{
+       dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
+           name,
+           drbd_conn_str(ns.conn),
+           drbd_role_str(ns.role),
+           drbd_role_str(ns.peer),
+           drbd_disk_str(ns.disk),
+           drbd_disk_str(ns.pdsk),
+           ns.susp ? 's' : 'r',
+           ns.aftr_isp ? 'a' : '-',
+           ns.peer_isp ? 'p' : '-',
+           ns.user_isp ? 'u' : '-'
+           );
+}
+
+void print_st_err(struct drbd_conf *mdev,
+       union drbd_state os, union drbd_state ns, int err)
+{
+       if (err == SS_IN_TRANSIENT_STATE)
+               return;
+       dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
+       print_st(mdev, " state", os);
+       print_st(mdev, "wanted", ns);
+}
+
+
+#define drbd_peer_str drbd_role_str
+#define drbd_pdsk_str drbd_disk_str
+
+#define drbd_susp_str(A)     ((A) ? "1" : "0")
+#define drbd_aftr_isp_str(A) ((A) ? "1" : "0")
+#define drbd_peer_isp_str(A) ((A) ? "1" : "0")
+#define drbd_user_isp_str(A) ((A) ? "1" : "0")
+
+#define PSC(A) \
+       ({ if (ns.A != os.A) { \
+               pbp += sprintf(pbp, #A "( %s -> %s ) ", \
+                             drbd_##A##_str(os.A), \
+                             drbd_##A##_str(ns.A)); \
+       } })
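+/* PSC() appends one "field( old -> new ) " chunk per changed field to the
+ * state change log line, e.g. something like
+ * "conn( WFConnection -> Connected ) disk( Attaching -> UpToDate ) "
+ * (illustrative; the exact names come from the drbd_*_str() helpers). */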
+
+/**
+ * is_valid_state() - Returns an SS_ error code if ns is not valid
+ * @mdev:      DRBD device.
+ * @ns:                State to consider.
+ */
+static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
+{
+       /* See drbd_state_sw_errors in drbd_strings.c */
+
+       enum drbd_fencing_p fp;
+       int rv = SS_SUCCESS;
+
+       fp = FP_DONT_CARE;
+       if (get_ldev(mdev)) {
+               fp = mdev->ldev->dc.fencing;
+               put_ldev(mdev);
+       }
+
+       if (get_net_conf(mdev)) {
+               if (!mdev->net_conf->two_primaries &&
+                   ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
+                       rv = SS_TWO_PRIMARIES;
+               put_net_conf(mdev);
+       }
+
+       if (rv <= 0)
+               /* already found a reason to abort */;
+       else if (ns.role == R_SECONDARY && mdev->open_cnt)
+               rv = SS_DEVICE_IN_USE;
+
+       else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
+               rv = SS_NO_UP_TO_DATE_DISK;
+
+       else if (fp >= FP_RESOURCE &&
+                ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
+               rv = SS_PRIMARY_NOP;
+
+       else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
+               rv = SS_NO_UP_TO_DATE_DISK;
+
+       else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
+               rv = SS_NO_LOCAL_DISK;
+
+       else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
+               rv = SS_NO_REMOTE_DISK;
+
+       else if ((ns.conn == C_CONNECTED ||
+                 ns.conn == C_WF_BITMAP_S ||
+                 ns.conn == C_SYNC_SOURCE ||
+                 ns.conn == C_PAUSED_SYNC_S) &&
+                 ns.disk == D_OUTDATED)
+               rv = SS_CONNECTED_OUTDATES;
+
+       else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+                (mdev->sync_conf.verify_alg[0] == 0))
+               rv = SS_NO_VERIFY_ALG;
+
+       else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+                 mdev->agreed_pro_version < 88)
+               rv = SS_NOT_SUPPORTED;
+
+       return rv;
+}
+
+/**
+ * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
+ * @mdev:      DRBD device.
+ * @ns:                new state.
+ * @os:                old state.
+ */
+static int is_valid_state_transition(struct drbd_conf *mdev,
+                                    union drbd_state ns, union drbd_state os)
+{
+       int rv = SS_SUCCESS;
+
+       if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
+           os.conn > C_CONNECTED)
+               rv = SS_RESYNC_RUNNING;
+
+       if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
+               rv = SS_ALREADY_STANDALONE;
+
+       if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
+               rv = SS_IS_DISKLESS;
+
+       if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
+               rv = SS_NO_NET_CONFIG;
+
+       if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
+               rv = SS_LOWER_THAN_OUTDATED;
+
+       if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
+               rv = SS_IN_TRANSIENT_STATE;
+
+       if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
+               rv = SS_IN_TRANSIENT_STATE;
+
+       if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
+               rv = SS_NEED_CONNECTION;
+
+       if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
+           ns.conn != os.conn && os.conn > C_CONNECTED)
+               rv = SS_RESYNC_RUNNING;
+
+       if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
+           os.conn < C_CONNECTED)
+               rv = SS_NEED_CONNECTION;
+
+       return rv;
+}
+
+/**
+ * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
+ * @mdev:      DRBD device.
+ * @os:                old state.
+ * @ns:                new state.
+ * @warn_sync_abort: gets set if the state change implies aborting a running resync.
+ *
+ * When we lose connection, we have to set the state of the peer's disk (pdsk)
+ * to D_UNKNOWN. This rule and many more along those lines are in this function.
+ */
+static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
+                                      union drbd_state ns, int *warn_sync_abort)
+{
+       enum drbd_fencing_p fp;
+
+       fp = FP_DONT_CARE;
+       if (get_ldev(mdev)) {
+               fp = mdev->ldev->dc.fencing;
+               put_ldev(mdev);
+       }
+
+       /* Do not allow a network error state while the network part is not even configured */
+       if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
+           os.conn <= C_DISCONNECTING)
+               ns.conn = os.conn;
+
+       /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow */
+       if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
+           ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING)
+               ns.conn = os.conn;
+
+       /* After C_DISCONNECTING only C_STANDALONE may follow */
+       if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
+               ns.conn = os.conn;
+
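+       /* Without a connection the peer's role is unknown, and its disk state
+        * can at best be D_UNKNOWN; only D_INCONSISTENT and D_OUTDATED are
+        * preserved. */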
+       if (ns.conn < C_CONNECTED) {
+               ns.peer_isp = 0;
+               ns.peer = R_UNKNOWN;
+               if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
+                       ns.pdsk = D_UNKNOWN;
+       }
+
+       /* Clear the aftr_isp when becoming unconfigured */
+       if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
+               ns.aftr_isp = 0;
+
+       if (ns.conn <= C_DISCONNECTING && ns.disk == D_DISKLESS)
+               ns.pdsk = D_UNKNOWN;
+
+       /* Abort resync if a disk fails/detaches */
+       if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
+           (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
+               if (warn_sync_abort)
+                       *warn_sync_abort = 1;
+               ns.conn = C_CONNECTED;
+       }
+
+       if (ns.conn >= C_CONNECTED &&
+           ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) ||
+            (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) {
+               switch (ns.conn) {
+               case C_WF_BITMAP_T:
+               case C_PAUSED_SYNC_T:
+                       ns.disk = D_OUTDATED;
+                       break;
+               case C_CONNECTED:
+               case C_WF_BITMAP_S:
+               case C_SYNC_SOURCE:
+               case C_PAUSED_SYNC_S:
+                       ns.disk = D_UP_TO_DATE;
+                       break;
+               case C_SYNC_TARGET:
+                       ns.disk = D_INCONSISTENT;
+                       dev_warn(DEV, "Implicitly set disk state Inconsistent!\n");
+                       break;
+               }
+               if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE)
+                       dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n");
+       }
+
+       if (ns.conn >= C_CONNECTED &&
+           (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) {
+               switch (ns.conn) {
+               case C_CONNECTED:
+               case C_WF_BITMAP_T:
+               case C_PAUSED_SYNC_T:
+               case C_SYNC_TARGET:
+                       ns.pdsk = D_UP_TO_DATE;
+                       break;
+               case C_WF_BITMAP_S:
+               case C_PAUSED_SYNC_S:
+                       ns.pdsk = D_OUTDATED;
+                       break;
+               case C_SYNC_SOURCE:
+                       ns.pdsk = D_INCONSISTENT;
+                       dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n");
+                       break;
+               }
+               if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE)
+                       dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n");
+       }
+
+       /* Connection breaks down before we finished "Negotiating" */
+       if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
+           get_ldev_if_state(mdev, D_NEGOTIATING)) {
+               if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
+                       ns.disk = mdev->new_state_tmp.disk;
+                       ns.pdsk = mdev->new_state_tmp.pdsk;
+               } else {
+                       dev_alert(DEV, "Connection lost while negotiating, no data!\n");
+                       ns.disk = D_DISKLESS;
+                       ns.pdsk = D_UNKNOWN;
+               }
+               put_ldev(mdev);
+       }
+
+       if (fp == FP_STONITH &&
+           (ns.role == R_PRIMARY &&
+            ns.conn < C_CONNECTED &&
+            ns.pdsk > D_OUTDATED))
+                       ns.susp = 1;
+
+       if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
+               if (ns.conn == C_SYNC_SOURCE)
+                       ns.conn = C_PAUSED_SYNC_S;
+               if (ns.conn == C_SYNC_TARGET)
+                       ns.conn = C_PAUSED_SYNC_T;
+       } else {
+               if (ns.conn == C_PAUSED_SYNC_S)
+                       ns.conn = C_SYNC_SOURCE;
+               if (ns.conn == C_PAUSED_SYNC_T)
+                       ns.conn = C_SYNC_TARGET;
+       }
+
+       return ns;
+}
+
+/* helper for __drbd_set_state */
+static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
+{
+       if (cs == C_VERIFY_T) {
+               /* starting online verify from an arbitrary position
+                * does not fit well into the existing protocol.
+                * on C_VERIFY_T, we initialize ov_left and friends
+                * implicitly in receive_DataRequest once the
+                * first P_OV_REQUEST is received */
+               mdev->ov_start_sector = ~(sector_t)0;
+       } else {
+               unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
+               if (bit >= mdev->rs_total)
+                       mdev->ov_start_sector =
+                               BM_BIT_TO_SECT(mdev->rs_total - 1);
+               mdev->ov_position = mdev->ov_start_sector;
+       }
+}
+
+/**
+ * __drbd_set_state() - Set a new DRBD state
+ * @mdev:      DRBD device.
+ * @ns:                new state.
+ * @flags:     Flags
+ * @done:      Optional completion, that will get completed after the after_state_ch() finished
+ *
+ * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
+ */
+int __drbd_set_state(struct drbd_conf *mdev,
+                   union drbd_state ns, enum chg_state_flags flags,
+                   struct completion *done)
+{
+       union drbd_state os;
+       int rv = SS_SUCCESS;
+       int warn_sync_abort = 0;
+       struct after_state_chg_work *ascw;
+
+       os = mdev->state;
+
+       ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
+
+       if (ns.i == os.i)
+               return SS_NOTHING_TO_DO;
+
+       if (!(flags & CS_HARD)) {
+               /* pre-state-change checks; only look at ns */
+               /* See drbd_state_sw_errors in drbd_strings.c */
+
+               rv = is_valid_state(mdev, ns);
+               if (rv < SS_SUCCESS) {
+                       /* If the old state was illegal as well, then let
+                          this happen...*/
+
+                       if (is_valid_state(mdev, os) == rv) {
+                               dev_err(DEV, "Considering state change from bad state. "
+                                   "Error would be: '%s'\n",
+                                   drbd_set_st_err_str(rv));
+                               print_st(mdev, "old", os);
+                               print_st(mdev, "new", ns);
+                               rv = is_valid_state_transition(mdev, ns, os);
+                       }
+               } else
+                       rv = is_valid_state_transition(mdev, ns, os);
+       }
+
+       if (rv < SS_SUCCESS) {
+               if (flags & CS_VERBOSE)
+                       print_st_err(mdev, os, ns, rv);
+               return rv;
+       }
+
+       if (warn_sync_abort)
+               dev_warn(DEV, "Resync aborted.\n");
+
+       {
+               char *pbp, pb[300];
+               pbp = pb;
+               *pbp = 0;
+               PSC(role);
+               PSC(peer);
+               PSC(conn);
+               PSC(disk);
+               PSC(pdsk);
+               PSC(susp);
+               PSC(aftr_isp);
+               PSC(peer_isp);
+               PSC(user_isp);
+               dev_info(DEV, "%s\n", pb);
+       }
+
+       /* solve the race between becoming unconfigured,
+        * worker doing the cleanup, and
+        * admin reconfiguring us:
+        * on (re)configure, first set CONFIG_PENDING,
+        * then wait for a potentially exiting worker,
+        * start the worker, and schedule one no_op.
+        * then proceed with configuration.
+        */
+       if (ns.disk == D_DISKLESS &&
+           ns.conn == C_STANDALONE &&
+           ns.role == R_SECONDARY &&
+           !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
+               set_bit(DEVICE_DYING, &mdev->flags);
+
+       mdev->state.i = ns.i;
+       wake_up(&mdev->misc_wait);
+       wake_up(&mdev->state_wait);
+
+       /*   post-state-change actions   */
+       if (os.conn >= C_SYNC_SOURCE   && ns.conn <= C_CONNECTED) {
+               set_bit(STOP_SYNC_TIMER, &mdev->flags);
+               mod_timer(&mdev->resync_timer, jiffies);
+       }
+
+       /* aborted verify run. log the last position */
+       if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
+           ns.conn < C_CONNECTED) {
+               mdev->ov_start_sector =
+                       BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left);
+               dev_info(DEV, "Online Verify reached sector %llu\n",
+                       (unsigned long long)mdev->ov_start_sector);
+       }
+
+       if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
+           (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
+               dev_info(DEV, "Syncer continues.\n");
+               mdev->rs_paused += (long)jiffies-(long)mdev->rs_mark_time;
+               if (ns.conn == C_SYNC_TARGET) {
+                       if (!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))
+                               mod_timer(&mdev->resync_timer, jiffies);
+                       /* This if (!test_bit) is only needed for the case
+                          that a device that has ceased to use its timer,
+                          i.e. it is already in drbd_resync_finished(), gets
+                          paused and resumed. */
+               }
+       }
+
+       if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
+           (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
+               dev_info(DEV, "Resync suspended\n");
+               mdev->rs_mark_time = jiffies;
+               if (ns.conn == C_PAUSED_SYNC_T)
+                       set_bit(STOP_SYNC_TIMER, &mdev->flags);
+       }
+
+       if (os.conn == C_CONNECTED &&
+           (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
+               mdev->ov_position = 0;
+               mdev->rs_total =
+               mdev->rs_mark_left = drbd_bm_bits(mdev);
+               if (mdev->agreed_pro_version >= 90)
+                       set_ov_position(mdev, ns.conn);
+               else
+                       mdev->ov_start_sector = 0;
+               mdev->ov_left = mdev->rs_total
+                             - BM_SECT_TO_BIT(mdev->ov_position);
+               mdev->rs_start     =
+               mdev->rs_mark_time = jiffies;
+               mdev->ov_last_oos_size = 0;
+               mdev->ov_last_oos_start = 0;
+
+               if (ns.conn == C_VERIFY_S) {
+                       dev_info(DEV, "Starting Online Verify from sector %llu\n",
+                                       (unsigned long long)mdev->ov_position);
+                       mod_timer(&mdev->resync_timer, jiffies);
+               }
+       }
+
+       if (get_ldev(mdev)) {
+               u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
+                                                MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
+                                                MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
+
+               if (test_bit(CRASHED_PRIMARY, &mdev->flags))
+                       mdf |= MDF_CRASHED_PRIMARY;
+               if (mdev->state.role == R_PRIMARY ||
+                   (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
+                       mdf |= MDF_PRIMARY_IND;
+               if (mdev->state.conn > C_WF_REPORT_PARAMS)
+                       mdf |= MDF_CONNECTED_IND;
+               if (mdev->state.disk > D_INCONSISTENT)
+                       mdf |= MDF_CONSISTENT;
+               if (mdev->state.disk > D_OUTDATED)
+                       mdf |= MDF_WAS_UP_TO_DATE;
+               if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
+                       mdf |= MDF_PEER_OUT_DATED;
+               if (mdf != mdev->ldev->md.flags) {
+                       mdev->ldev->md.flags = mdf;
+                       drbd_md_mark_dirty(mdev);
+               }
+               if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
+                       drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
+               put_ldev(mdev);
+       }
+
+       /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider resyncing */
+       if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
+           os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
+               set_bit(CONSIDER_RESYNC, &mdev->flags);
+
+       /* Receiver should clean up itself */
+       if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
+               drbd_thread_stop_nowait(&mdev->receiver);
+
+       /* Now the receiver finished cleaning up itself, it should die */
+       if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
+               drbd_thread_stop_nowait(&mdev->receiver);
+
+       /* Upon network failure, we need to restart the receiver. */
+       if (os.conn > C_TEAR_DOWN &&
+           ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
+               drbd_thread_restart_nowait(&mdev->receiver);
+
+       ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
+       if (ascw) {
+               ascw->os = os;
+               ascw->ns = ns;
+               ascw->flags = flags;
+               ascw->w.cb = w_after_state_ch;
+               ascw->done = done;
+               drbd_queue_work(&mdev->data.work, &ascw->w);
+       } else {
+               dev_warn(DEV, "Could not kmalloc an ascw\n");
+       }
+
+       return rv;
+}
+
+static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+{
+       struct after_state_chg_work *ascw =
+               container_of(w, struct after_state_chg_work, w);
+       after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
+       if (ascw->flags & CS_WAIT_COMPLETE) {
+               D_ASSERT(ascw->done != NULL);
+               complete(ascw->done);
+       }
+       kfree(ascw);
+
+       return 1;
+}
+
+static void abw_start_sync(struct drbd_conf *mdev, int rv)
+{
+       if (rv) {
+               dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
+               _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
+               return;
+       }
+
+       switch (mdev->state.conn) {
+       case C_STARTING_SYNC_T:
+               _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
+               break;
+       case C_STARTING_SYNC_S:
+               drbd_start_resync(mdev, C_SYNC_SOURCE);
+               break;
+       }
+}
+
+/**
+ * after_state_ch() - Perform after state change actions that may sleep
+ * @mdev:      DRBD device.
+ * @os:                old state.
+ * @ns:                new state.
+ * @flags:     Flags
+ */
+static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
+                          union drbd_state ns, enum chg_state_flags flags)
+{
+       enum drbd_fencing_p fp;
+
+       if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
+               clear_bit(CRASHED_PRIMARY, &mdev->flags);
+               if (mdev->p_uuid)
+                       mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
+       }
+
+       fp = FP_DONT_CARE;
+       if (get_ldev(mdev)) {
+               fp = mdev->ldev->dc.fencing;
+               put_ldev(mdev);
+       }
+
+       /* Inform userspace about the change... */
+       drbd_bcast_state(mdev, ns);
+
+       if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
+           (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
+               drbd_khelper(mdev, "pri-on-incon-degr");
+
+       /* Here we have the actions that are performed after a
+          state change. This function might sleep */
+
+       if (fp == FP_STONITH && ns.susp) {
+               /* case1: The outdate peer handler is successful:
+                * case2: The connection was established again: */
+               if ((os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) ||
+                   (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)) {
+                       tl_clear(mdev);
+                       spin_lock_irq(&mdev->req_lock);
+                       _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
+                       spin_unlock_irq(&mdev->req_lock);
+               }
+       }
+       /* Do not change the order of the if above and the two below... */
+       if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
+               drbd_send_uuids(mdev);
+               drbd_send_state(mdev);
+       }
+       if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
+               drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");
+
+       /* Lost contact to peer's copy of the data */
+       if ((os.pdsk >= D_INCONSISTENT &&
+            os.pdsk != D_UNKNOWN &&
+            os.pdsk != D_OUTDATED)
+       &&  (ns.pdsk < D_INCONSISTENT ||
+            ns.pdsk == D_UNKNOWN ||
+            ns.pdsk == D_OUTDATED)) {
+               kfree(mdev->p_uuid);
+               mdev->p_uuid = NULL;
+               if (get_ldev(mdev)) {
+                       if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
+                           mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
+                               drbd_uuid_new_current(mdev);
+                               drbd_send_uuids(mdev);
+                       }
+                       put_ldev(mdev);
+               }
+       }
+
+       if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
+               if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0)
+                       drbd_uuid_new_current(mdev);
+
+               /* D_DISKLESS Peer becomes secondary */
+               if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
+                       drbd_al_to_on_disk_bm(mdev);
+               put_ldev(mdev);
+       }
+
+       /* Last part of the attaching process ... */
+       if (ns.conn >= C_CONNECTED &&
+           os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
+               kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
+               mdev->p_uuid = NULL; /* ...so we don't use the old ones in the meantime */
+               drbd_send_sizes(mdev, 0);  /* to start sync... */
+               drbd_send_uuids(mdev);
+               drbd_send_state(mdev);
+       }
+
+       /* We want to pause/continue resync, tell peer. */
+       if (ns.conn >= C_CONNECTED &&
+            ((os.aftr_isp != ns.aftr_isp) ||
+             (os.user_isp != ns.user_isp)))
+               drbd_send_state(mdev);
+
+       /* In case one of the isp bits got set, suspend other devices. */
+       if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
+           (ns.aftr_isp || ns.peer_isp || ns.user_isp))
+               suspend_other_sg(mdev);
+
+       /* Make sure the peer gets informed about any state changes
+          (ISP bits) that happened while we were in WFReportParams. */
+       if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
+               drbd_send_state(mdev);
+
+       /* We are in the process of starting a full sync... */
+       if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
+           (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
+               drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");
+
+       /* We are invalidating ourselves... */
+       if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
+           os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
+               drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
+
+       if (os.disk > D_FAILED && ns.disk == D_FAILED) {
+               enum drbd_io_error_p eh;
+
+               eh = EP_PASS_ON;
+               if (get_ldev_if_state(mdev, D_FAILED)) {
+                       eh = mdev->ldev->dc.on_io_error;
+                       put_ldev(mdev);
+               }
+
+               drbd_rs_cancel_all(mdev);
+               /* since get_ldev() only works as long as disk >= D_INCONSISTENT,
+                  and the disk is below that here, local_cnt can only go down;
+                  it cannot increase and will eventually reach zero */
+               wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+               mdev->rs_total = 0;
+               mdev->rs_failed = 0;
+               atomic_set(&mdev->rs_pending_cnt, 0);
+
+               spin_lock_irq(&mdev->req_lock);
+               _drbd_set_state(_NS(mdev, disk, D_DISKLESS), CS_HARD, NULL);
+               spin_unlock_irq(&mdev->req_lock);
+
+               if (eh == EP_CALL_HELPER)
+                       drbd_khelper(mdev, "local-io-error");
+       }
+
+       if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
+
+               if (os.disk == D_FAILED) /* && ns.disk == D_DISKLESS */ {
+                       if (drbd_send_state(mdev))
+                               dev_warn(DEV, "Notified peer that my disk is broken.\n");
+                       else
+                               dev_err(DEV, "Sending state in drbd_io_error() failed\n");
+               }
+
+               lc_destroy(mdev->resync);
+               mdev->resync = NULL;
+               lc_destroy(mdev->act_log);
+               mdev->act_log = NULL;
+               __no_warn(local,
+                       drbd_free_bc(mdev->ldev);
+                       mdev->ldev = NULL;);
+
+               if (mdev->md_io_tmpp)
+                       __free_page(mdev->md_io_tmpp);
+       }
+
+       /* Disks got bigger while they were detached */
+       if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
+           test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
+               if (ns.conn == C_CONNECTED)
+                       resync_after_online_grow(mdev);
+       }
+
+       /* A resync finished or aborted, wake paused devices... */
+       if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
+           (os.peer_isp && !ns.peer_isp) ||
+           (os.user_isp && !ns.user_isp))
+               resume_next_sg(mdev);
+
+       /* Upon network connection, we need to start the receiver */
+       if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
+               drbd_thread_start(&mdev->receiver);
+
+       /* Terminate worker thread if we are unconfigured - it will be
+          restarted as needed... */
+       if (ns.disk == D_DISKLESS &&
+           ns.conn == C_STANDALONE &&
+           ns.role == R_SECONDARY) {
+               if (os.aftr_isp != ns.aftr_isp)
+                       resume_next_sg(mdev);
+               /* set in __drbd_set_state, unless CONFIG_PENDING was set */
+               if (test_bit(DEVICE_DYING, &mdev->flags))
+                       drbd_thread_stop_nowait(&mdev->worker);
+       }
+
+       drbd_md_sync(mdev);
+}
+
+
+static int drbd_thread_setup(void *arg)
+{
+       struct drbd_thread *thi = (struct drbd_thread *) arg;
+       struct drbd_conf *mdev = thi->mdev;
+       unsigned long flags;
+       int retval;
+
+restart:
+       retval = thi->function(thi);
+
+       spin_lock_irqsave(&thi->t_lock, flags);
+
+       /* if the receiver has been "Exiting", the last thing it did
+        * was set the conn state to "StandAlone",
+        * if now a re-connect request comes in, the conn state goes to C_UNCONNECTED,
+        * and receiver thread will be "started".
+        * drbd_thread_start needs to set "Restarting" in that case.
+        * t_state check and assignment needs to be within the same spinlock,
+        * so either thread_start sees Exiting, and can remap to Restarting,
+        * or thread_start sees None, and can proceed as normal.
+        */
+
+       if (thi->t_state == Restarting) {
+               dev_info(DEV, "Restarting %s\n", current->comm);
+               thi->t_state = Running;
+               spin_unlock_irqrestore(&thi->t_lock, flags);
+               goto restart;
+       }
+
+       thi->task = NULL;
+       thi->t_state = None;
+       smp_mb();
+       complete(&thi->stop);
+       spin_unlock_irqrestore(&thi->t_lock, flags);
+
+       dev_info(DEV, "Terminating %s\n", current->comm);
+
+       /* Release mod reference taken when thread was started */
+       module_put(THIS_MODULE);
+       return retval;
+}
+
+static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
+                     int (*func) (struct drbd_thread *))
+{
+       spin_lock_init(&thi->t_lock);
+       thi->task    = NULL;
+       thi->t_state = None;
+       thi->function = func;
+       thi->mdev = mdev;
+}
+
+int drbd_thread_start(struct drbd_thread *thi)
+{
+       struct drbd_conf *mdev = thi->mdev;
+       struct task_struct *nt;
+       unsigned long flags;
+
+       const char *me =
+               thi == &mdev->receiver ? "receiver" :
+               thi == &mdev->asender  ? "asender"  :
+               thi == &mdev->worker   ? "worker"   : "NONSENSE";
+
+       /* is used from state engine doing drbd_thread_stop_nowait,
+        * while holding the req lock irqsave */
+       spin_lock_irqsave(&thi->t_lock, flags);
+
+       switch (thi->t_state) {
+       case None:
+               dev_info(DEV, "Starting %s thread (from %s [%d])\n",
+                               me, current->comm, current->pid);
+
+               /* Get ref on module for thread - this is released when thread exits */
+               if (!try_module_get(THIS_MODULE)) {
+                       dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
+                       spin_unlock_irqrestore(&thi->t_lock, flags);
+                       return FALSE;
+               }
+
+               init_completion(&thi->stop);
+               D_ASSERT(thi->task == NULL);
+               thi->reset_cpu_mask = 1;
+               thi->t_state = Running;
+               spin_unlock_irqrestore(&thi->t_lock, flags);
+               flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */
+
+               nt = kthread_create(drbd_thread_setup, (void *) thi,
+                                   "drbd%d_%s", mdev_to_minor(mdev), me);
+
+               if (IS_ERR(nt)) {
+                       dev_err(DEV, "Couldn't start thread\n");
+
+                       module_put(THIS_MODULE);
+                       return FALSE;
+               }
+               spin_lock_irqsave(&thi->t_lock, flags);
+               thi->task = nt;
+               thi->t_state = Running;
+               spin_unlock_irqrestore(&thi->t_lock, flags);
+               wake_up_process(nt);
+               break;
+       case Exiting:
+               thi->t_state = Restarting;
+               dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
+                               me, current->comm, current->pid);
+               /* fall through */
+       case Running:
+       case Restarting:
+       default:
+               spin_unlock_irqrestore(&thi->t_lock, flags);
+               break;
+       }
+
+       return TRUE;
+}
+
+
+void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
+{
+       unsigned long flags;
+
+       enum drbd_thread_state ns = restart ? Restarting : Exiting;
+
+       /* may be called from state engine, holding the req lock irqsave */
+       spin_lock_irqsave(&thi->t_lock, flags);
+
+       if (thi->t_state == None) {
+               spin_unlock_irqrestore(&thi->t_lock, flags);
+               if (restart)
+                       drbd_thread_start(thi);
+               return;
+       }
+
+       if (thi->t_state != ns) {
+               if (thi->task == NULL) {
+                       spin_unlock_irqrestore(&thi->t_lock, flags);
+                       return;
+               }
+
+               thi->t_state = ns;
+               smp_mb();
+               init_completion(&thi->stop);
+               if (thi->task != current)
+                       force_sig(DRBD_SIGKILL, thi->task);
+
+       }
+
+       spin_unlock_irqrestore(&thi->t_lock, flags);
+
+       if (wait)
+               wait_for_completion(&thi->stop);
+}
+
+#ifdef CONFIG_SMP
+/**
+ * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
+ * @mdev:      DRBD device.
+ *
+ * Forces all threads of a device onto the same CPU. This is beneficial for
+ * DRBD's performance. May be overridden by the user's configuration.
+ */
+void drbd_calc_cpu_mask(struct drbd_conf *mdev)
+{
+       int ord, cpu;
+
+       /* user override. */
+       if (cpumask_weight(mdev->cpu_mask))
+               return;
+
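+       /* no mask configured by the user: pick a single online CPU,
+        * round-robin by device minor number */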
+       ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
+       for_each_online_cpu(cpu) {
+               if (ord-- == 0) {
+                       cpumask_set_cpu(cpu, mdev->cpu_mask);
+                       return;
+               }
+       }
+       /* should not be reached */
+       cpumask_setall(mdev->cpu_mask);
+}
+
+/**
+ * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
+ * @mdev:      DRBD device.
+ *
+ * Call this in the "main loop" of _all_ threads; no mutex is needed, since
+ * current won't die prematurely.
+ */
+void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
+{
+       struct task_struct *p = current;
+       struct drbd_thread *thi =
+               p == mdev->asender.task  ? &mdev->asender  :
+               p == mdev->receiver.task ? &mdev->receiver :
+               p == mdev->worker.task   ? &mdev->worker   :
+               NULL;
+       ERR_IF(thi == NULL)
+               return;
+       if (!thi->reset_cpu_mask)
+               return;
+       thi->reset_cpu_mask = 0;
+       set_cpus_allowed_ptr(p, mdev->cpu_mask);
+}
+#endif
+
+/* the appropriate socket mutex must be held already */
+int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
+                         enum drbd_packets cmd, struct p_header *h,
+                         size_t size, unsigned msg_flags)
+{
+       int sent, ok;
+
+       ERR_IF(!h) return FALSE;
+       ERR_IF(!size) return FALSE;
+
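+       /* fill in the on-wire header: magic, 16-bit command, and the 16-bit
+        * length of the payload that follows the header */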
+       h->magic   = BE_DRBD_MAGIC;
+       h->command = cpu_to_be16(cmd);
+       h->length  = cpu_to_be16(size-sizeof(struct p_header));
+
+       trace_drbd_packet(mdev, sock, 0, (void *)h, __FILE__, __LINE__);
+       sent = drbd_send(mdev, sock, h, size, msg_flags);
+
+       ok = (sent == size);
+       if (!ok)
+               dev_err(DEV, "short sent %s size=%d sent=%d\n",
+                   cmdname(cmd), (int)size, sent);
+       return ok;
+}
+
+/* Don't pass the socket as an argument: we may only look at it
+ * while we hold the appropriate socket mutex.
+ */
+int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
+                 enum drbd_packets cmd, struct p_header *h, size_t size)
+{
+       int ok = 0;
+       struct socket *sock;
+
+       if (use_data_socket) {
+               mutex_lock(&mdev->data.mutex);
+               sock = mdev->data.socket;
+       } else {
+               mutex_lock(&mdev->meta.mutex);
+               sock = mdev->meta.socket;
+       }
+
+       /* drbd_disconnect() could have called drbd_free_sock()
+        * while we were waiting on the mutex... */
+       if (likely(sock != NULL))
+               ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
+
+       if (use_data_socket)
+               mutex_unlock(&mdev->data.mutex);
+       else
+               mutex_unlock(&mdev->meta.mutex);
+       return ok;
+}
+
+int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
+                  size_t size)
+{
+       struct p_header h;
+       int ok;
+
+       h.magic   = BE_DRBD_MAGIC;
+       h.command = cpu_to_be16(cmd);
+       h.length  = cpu_to_be16(size);
+
+       if (!drbd_get_data_sock(mdev))
+               return 0;
+
+       trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&h, __FILE__, __LINE__);
+
+       ok = (sizeof(h) ==
+               drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
+       ok = ok && (size ==
+               drbd_send(mdev, mdev->data.socket, data, size, 0));
+
+       drbd_put_data_sock(mdev);
+
+       return ok;
+}
+
+int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
+{
+       struct p_rs_param_89 *p;
+       struct socket *sock;
+       int size, rv;
+       const int apv = mdev->agreed_pro_version;
+
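+       /* the packet size depends on the agreed protocol version: <= 87 sends
+        * plain parameters, 88 appends verify_alg, >= 89 uses p_rs_param_89
+        * which also carries csums_alg */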
+       size = apv <= 87 ? sizeof(struct p_rs_param)
+               : apv == 88 ? sizeof(struct p_rs_param)
+                       + strlen(mdev->sync_conf.verify_alg) + 1
+               : /* 89 */    sizeof(struct p_rs_param_89);
+
+       /* used from admin command context and from receiver/worker context.
+        * To avoid a kmalloc, grab the data socket right here
+        * and use its pre-allocated send buffer (sbuf). */
+       mutex_lock(&mdev->data.mutex);
+       sock = mdev->data.socket;
+
+       if (likely(sock != NULL)) {
+               enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
+
+               p = &mdev->data.sbuf.rs_param_89;
+
+               /* initialize verify_alg and csums_alg */
+               memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
+
+               p->rate = cpu_to_be32(sc->rate);
+
+               if (apv >= 88)
+                       strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
+               if (apv >= 89)
+                       strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
+
+               rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
+       } else
+               rv = 0; /* not ok */
+
+       mutex_unlock(&mdev->data.mutex);
+
+       return rv;
+}
+
+int drbd_send_protocol(struct drbd_conf *mdev)
+{
+       struct p_protocol *p;
+       int size, rv;
+
+       size = sizeof(struct p_protocol);
+
+       if (mdev->agreed_pro_version >= 87)
+               size += strlen(mdev->net_conf->integrity_alg) + 1;
+
+       /* we must not recurse into our own queue,
+        * as that is blocked during handshake */
+       p = kmalloc(size, GFP_NOIO);
+       if (p == NULL)
+               return 0;
+
+       p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
+       p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
+       p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
+       p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
+       p->want_lose     = cpu_to_be32(mdev->net_conf->want_lose);
+       p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
+
+       if (mdev->agreed_pro_version >= 87)
+               strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
+
+       rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
+                          (struct p_header *)p, size);
+       kfree(p);
+       return rv;
+}
+
+int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
+{
+       struct p_uuids p;
+       int i;
+
+       if (!get_ldev_if_state(mdev, D_NEGOTIATING))
+               return 1;
+
+       for (i = UI_CURRENT; i < UI_SIZE; i++)
+               p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
+
+       mdev->comm_bm_set = drbd_bm_total_weight(mdev);
+       p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
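+       /* uuid_flags on the wire: bit 0 = want_lose, bit 1 = we were a crashed
+        * primary, bit 2 = the disk state we are about to assume is D_INCONSISTENT */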
+       uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
+       uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
+       uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
+       p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
+
+       put_ldev(mdev);
+
+       return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
+                            (struct p_header *)&p, sizeof(p));
+}
+
+int drbd_send_uuids(struct drbd_conf *mdev)
+{
+       return _drbd_send_uuids(mdev, 0);
+}
+
+int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
+{
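+       /* uuid_flags bit 3 (value 8) tells the peer that the initial sync
+        * may be skipped */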
+       return _drbd_send_uuids(mdev, 8);
+}
+
+
+int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
+{
+       struct p_rs_uuid p;
+
+       p.uuid = cpu_to_be64(val);
+
+       return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
+                            (struct p_header *)&p, sizeof(p));
+}
+
+int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
+{
+       struct p_sizes p;
+       sector_t d_size, u_size;
+       int q_order_type;
+       int ok;
+
+       if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
+               D_ASSERT(mdev->ldev->backing_bdev);
+               d_size = drbd_get_max_capacity(mdev->ldev);
+               u_size = mdev->ldev->dc.disk_size;
+               q_order_type = drbd_queue_order_type(mdev);
+               p.queue_order_type = cpu_to_be32(drbd_queue_order_type(mdev));
+               put_ldev(mdev);
+       } else {
+               d_size = 0;
+               u_size = 0;
+               q_order_type = QUEUE_ORDERED_NONE;
+       }
+
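+       /* d_size: maximum capacity of our backing device,
+        * u_size: size requested by the user in the disk config,
+        * c_size: current capacity of the DRBD device
+        *         (sent as 0 if we only want to trigger a reply from the peer) */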
+       p.d_size = cpu_to_be64(d_size);
+       p.u_size = cpu_to_be64(u_size);
+       p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
+       p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
+       p.queue_order_type = cpu_to_be32(q_order_type);
+
+       ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
+                          (struct p_header *)&p, sizeof(p));
+       return ok;
+}
+
+/**
+ * drbd_send_state() - Sends the drbd state to the peer
+ * @mdev:      DRBD device.
+ */
+int drbd_send_state(struct drbd_conf *mdev)
+{
+       struct socket *sock;
+       struct p_state p;
+       int ok = 0;
+
+       /* Grab the state lock so we won't send state if we're in the middle
+        * of a cluster-wide state change on another thread */
+       drbd_state_lock(mdev);
+
+       mutex_lock(&mdev->data.mutex);
+
+       p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
+       sock = mdev->data.socket;
+
+       if (likely(sock != NULL)) {
+               ok = _drbd_send_cmd(mdev, sock, P_STATE,
+                                   (struct p_header *)&p, sizeof(p), 0);
+       }
+
+       mutex_unlock(&mdev->data.mutex);
+
+       drbd_state_unlock(mdev);
+       return ok;
+}
+
+int drbd_send_state_req(struct drbd_conf *mdev,
+       union drbd_state mask, union drbd_state val)
+{
+       struct p_req_state p;
+
+       p.mask    = cpu_to_be32(mask.i);
+       p.val     = cpu_to_be32(val.i);
+
+       return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
+                            (struct p_header *)&p, sizeof(p));
+}
+
+int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
+{
+       struct p_req_state_reply p;
+
+       p.retcode    = cpu_to_be32(retcode);
+
+       return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
+                            (struct p_header *)&p, sizeof(p));
+}
+
+int fill_bitmap_rle_bits(struct drbd_conf *mdev,
+       struct p_compressed_bm *p,
+       struct bm_xfer_ctx *c)
+{
+       struct bitstream bs;
+       unsigned long plain_bits;
+       unsigned long tmp;
+       unsigned long rl;
+       unsigned len;
+       unsigned toggle;
+       int bits;
+
+       /* may we use this feature? */
+       if ((mdev->sync_conf.use_rle == 0) ||
+               (mdev->agreed_pro_version < 90))
+                       return 0;
+
+       if (c->bit_offset >= c->bm_bits)
+               return 0; /* nothing to do. */
+
+       /* use at most this many bytes */
+       bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
+       memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
+       /* plain bits covered in this code string */
+       plain_bits = 0;
+
+       /* p->encoding & 0x80 stores whether the first run is a run of set bits.
+        * The bit offset is implicit.
+        * Start with toggle == 2 to be able to tell the first iteration apart. */
+       toggle = 2;
+
+       /* see how many plain bits we can stuff into one packet
+        * using RLE and VLI. */
+       do {
+               tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
+                                   : _drbd_bm_find_next(mdev, c->bit_offset);
+               if (tmp == -1UL)
+                       tmp = c->bm_bits;
+               rl = tmp - c->bit_offset;
+
+               if (toggle == 2) { /* first iteration */
+                       if (rl == 0) {
+                               /* the first checked bit was set,
+                                * store start value, */
+                               DCBP_set_start(p, 1);
+                               /* but skip encoding of zero run length */
+                               toggle = !toggle;
+                               continue;
+                       }
+                       DCBP_set_start(p, 0);
+               }
+
+               /* paranoia: catch zero runlength.
+                * can only happen if bitmap is modified while we scan it. */
+               if (rl == 0) {
+                       dev_err(DEV, "unexpected zero runlength while encoding bitmap "
+                           "t:%u bo:%lu\n", toggle, c->bit_offset);
+                       return -1;
+               }
+
+               bits = vli_encode_bits(&bs, rl);
+               if (bits == -ENOBUFS) /* buffer full */
+                       break;
+               if (bits <= 0) {
+                       dev_err(DEV, "error while encoding bitmap: %d\n", bits);
+                       return 0;
+               }
+
+               toggle = !toggle;
+               plain_bits += rl;
+               c->bit_offset = tmp;
+       } while (c->bit_offset < c->bm_bits);
+
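+       /* bytes used by the code string: the full bytes written so far, plus
+        * one more if the last byte is only partially filled */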
+       len = bs.cur.b - p->code + !!bs.cur.bit;
+
+       if (plain_bits < (len << 3)) {
+               /* incompressible with this method.
+                * we need to rewind both word and bit position. */
+               c->bit_offset -= plain_bits;
+               bm_xfer_ctx_bit_to_word_offset(c);
+               c->bit_offset = c->word_offset * BITS_PER_LONG;
+               return 0;
+       }
+
+       /* RLE + VLI was able to compress it just fine.
+        * update c->word_offset. */
+       bm_xfer_ctx_bit_to_word_offset(c);
+
+       /* store pad_bits */
+       DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
+
+       return len;
+}
+
+enum { OK, FAILED, DONE }
+send_bitmap_rle_or_plain(struct drbd_conf *mdev,
+       struct p_header *h, struct bm_xfer_ctx *c)
+{
+       struct p_compressed_bm *p = (void *)h;
+       unsigned long num_words;
+       int len;
+       int ok;
+
+       len = fill_bitmap_rle_bits(mdev, p, c);
+
+       if (len < 0)
+               return FAILED;
+
+       if (len) {
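+               /* compression paid off: send the RLE+VLI code in a
+                * P_COMPRESSED_BITMAP packet */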
+               DCBP_set_code(p, RLE_VLI_Bits);
+               ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
+                       sizeof(*p) + len, 0);
+
+               c->packets[0]++;
+               c->bytes[0] += sizeof(*p) + len;
+
+               if (c->bit_offset >= c->bm_bits)
+                       len = 0; /* DONE */
+       } else {
+               /* was not compressible.
+                * send a buffer full of plain text bits instead. */
+               num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
+               len = num_words * sizeof(long);
+               if (len)
+                       drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
+               ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
+                                  h, sizeof(struct p_header) + len, 0);
+               c->word_offset += num_words;
+               c->bit_offset = c->word_offset * BITS_PER_LONG;
+
+               c->packets[1]++;
+               c->bytes[1] += sizeof(struct p_header) + len;
+
+               if (c->bit_offset > c->bm_bits)
+                       c->bit_offset = c->bm_bits;
+       }
+       ok = ok ? ((len == 0) ? DONE : OK) : FAILED;
+
+       if (ok == DONE)
+               INFO_bm_xfer_stats(mdev, "send", c);
+       return ok;
+}
+
+/* See the comment at receive_bitmap() */
+int _drbd_send_bitmap(struct drbd_conf *mdev)
+{
+       struct bm_xfer_ctx c;
+       struct p_header *p;
+       int ret;
+
+       ERR_IF(!mdev->bitmap) return FALSE;
+
+       /* maybe we should use some per thread scratch page,
+        * and allocate that during initial device creation? */
+       p = (struct p_header *) __get_free_page(GFP_NOIO);
+       if (!p) {
+               dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
+               return FALSE;
+       }
+
+       if (get_ldev(mdev)) {
+               if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
+                       dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
+                       drbd_bm_set_all(mdev);
+                       if (drbd_bm_write(mdev)) {
+                               /* write_bm failed! Leave the full sync flag set in the meta data,
+                                * but otherwise proceed as normal - we need to tell the other
+                                * side that a full resync is required! */
+                               dev_err(DEV, "Failed to write bitmap to disk!\n");
+                       } else {
+                               drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
+                               drbd_md_sync(mdev);
+                       }
+               }
+               put_ldev(mdev);
+       }
+
+       c = (struct bm_xfer_ctx) {
+               .bm_bits = drbd_bm_bits(mdev),
+               .bm_words = drbd_bm_words(mdev),
+       };
+
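+       /* push out the whole bitmap, one packet at a time,
+        * compressed where that pays off, plain otherwise */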
+       do {
+               ret = send_bitmap_rle_or_plain(mdev, p, &c);
+       } while (ret == OK);
+
+       free_page((unsigned long) p);
+       return (ret == DONE);
+}
+
+int drbd_send_bitmap(struct drbd_conf *mdev)
+{
+       int err;
+
+       if (!drbd_get_data_sock(mdev))
+               return -1;
+       err = !_drbd_send_bitmap(mdev);
+       drbd_put_data_sock(mdev);
+       return err;
+}
+
+int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
+{
+       int ok;
+       struct p_barrier_ack p;
+
+       p.barrier  = barrier_nr;
+       p.set_size = cpu_to_be32(set_size);
+
+       if (mdev->state.conn < C_CONNECTED)
+               return FALSE;
+       ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
+                       (struct p_header *)&p, sizeof(p));
+       return ok;
+}
+
+/**
+ * _drbd_send_ack() - Sends an ack packet
+ * @mdev:      DRBD device.
+ * @cmd:       Packet command code.
+ * @sector:    sector, needs to be in big endian byte order
+ * @blksize:   size in byte, needs to be in big endian byte order
+ * @block_id:  Id, big endian byte order
+ */
+static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
+                         u64 sector,
+                         u32 blksize,
+                         u64 block_id)
+{
+       int ok;
+       struct p_block_ack p;
+
+       p.sector   = sector;
+       p.block_id = block_id;
+       p.blksize  = blksize;
+       p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
+
+       if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
+               return FALSE;
+       ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
+                               (struct p_header *)&p, sizeof(p));
+       return ok;
+}
+
+int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
+                    struct p_data *dp)
+{
+       const int header_size = sizeof(struct p_data)
+                             - sizeof(struct p_header);
+       int data_size  = ((struct p_header *)dp)->length - header_size;
+
+       return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
+                             dp->block_id);
+}
+
+int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
+                    struct p_block_req *rp)
+{
+       return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
+}
+
+/**
+ * drbd_send_ack() - Sends an ack packet
+ * @mdev:      DRBD device.
+ * @cmd:       Packet command code.
+ * @e:         Epoch entry.
+ */
+int drbd_send_ack(struct drbd_conf *mdev,
+       enum drbd_packets cmd, struct drbd_epoch_entry *e)
+{
+       return _drbd_send_ack(mdev, cmd,
+                             cpu_to_be64(e->sector),
+                             cpu_to_be32(e->size),
+                             e->block_id);
+}
+
+/* This function misuses the block_id field to signal if the blocks
+ * are in sync or not. */
+int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
+                    sector_t sector, int blksize, u64 block_id)
+{
+       return _drbd_send_ack(mdev, cmd,
+                             cpu_to_be64(sector),
+                             cpu_to_be32(blksize),
+                             cpu_to_be64(block_id));
+}
+
+int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
+                      sector_t sector, int size, u64 block_id)
+{
+       int ok;
+       struct p_block_req p;
+
+       p.sector   = cpu_to_be64(sector);
+       p.block_id = block_id;
+       p.blksize  = cpu_to_be32(size);
+
+       ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
+                               (struct p_header *)&p, sizeof(p));
+       return ok;
+}
+
+int drbd_send_drequest_csum(struct drbd_conf *mdev,
+                           sector_t sector, int size,
+                           void *digest, int digest_size,
+                           enum drbd_packets cmd)
+{
+       int ok;
+       struct p_block_req p;
+
+       p.sector   = cpu_to_be64(sector);
+       p.block_id = BE_DRBD_MAGIC + 0xbeef;
+       p.blksize  = cpu_to_be32(size);
+
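+       /* build the header by hand: the length covers the request body plus
+        * the digest that is sent right after it on the data socket */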
+       p.head.magic   = BE_DRBD_MAGIC;
+       p.head.command = cpu_to_be16(cmd);
+       p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header) + digest_size);
+
+       mutex_lock(&mdev->data.mutex);
+
+       ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
+       ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
+
+       mutex_unlock(&mdev->data.mutex);
+
+       return ok;
+}
+
+int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
+{
+       int ok;
+       struct p_block_req p;
+
+       p.sector   = cpu_to_be64(sector);
+       p.block_id = BE_DRBD_MAGIC + 0xbabe;
+       p.blksize  = cpu_to_be32(size);
+
+       ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
+                          (struct p_header *)&p, sizeof(p));
+       return ok;
+}
+
+/* called on sndtimeo
+ * returns FALSE if we should retry,
+ * TRUE if we think the connection is dead
+ */
+static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
+{
+       int drop_it;
+       /* long elapsed = (long)(jiffies - mdev->last_received); */
+
+       drop_it =   mdev->meta.socket == sock
+               || !mdev->asender.task
+               || get_t_state(&mdev->asender) != Running
+               || mdev->state.conn < C_CONNECTED;
+
+       if (drop_it)
+               return TRUE;
+
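+       /* otherwise only give up after ko_count expired send timeouts;
+        * until then, complain and ask the peer for a ping */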
+       drop_it = !--mdev->ko_count;
+       if (!drop_it) {
+               dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
+                      current->comm, current->pid, mdev->ko_count);
+               request_ping(mdev);
+       }
+
+       return drop_it; /* && (mdev->state == R_PRIMARY) */;
+}
+
+/* The idea of sendpage seems to be to put some kind of reference
+ * to the page into the skb, and to hand it over to the NIC. In
+ * this process get_page() gets called.
+ *
+ * As soon as the page was really sent over the network put_page()
+ * gets called by some part of the network layer. [ NIC driver? ]
+ *
+ * [ get_page() / put_page() increment/decrement the count. If count
+ *   reaches 0 the page will be freed. ]
+ *
+ * This works nicely with pages from FSs.
+ * But this means that in protocol A we might signal IO completion too early!
+ *
+ * In order not to corrupt data during a resync we must make sure
+ * that we do not reuse our own buffer pages (EEs) too early, which is
+ * why we have the net_ee list.
+ *
+ * XFS still seems to have problems with this: it submits pages with page_count == 0!
+ * As a workaround, we disable sendpage on pages
+ * with page_count == 0 or PageSlab.
+ */
+static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
+                  int offset, size_t size)
+{
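+       /* fallback path: kmap() the page and push its contents through
+        * drbd_send() instead of using sendpage */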
+       int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, 0);
+       kunmap(page);
+       if (sent == size)
+               mdev->send_cnt += size>>9;
+       return sent == size;
+}
+
+static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
+                   int offset, size_t size)
+{
+       mm_segment_t oldfs = get_fs();
+       int sent, ok;
+       int len = size;
+
+       /* e.g. XFS meta- & log-data is in slab pages, which have a
+        * page_count of 0 and/or have PageSlab() set.
+        * we cannot use send_page for those, as that does get_page();
+        * put_page(); and would cause either a VM_BUG directly, or a
+        * __page_cache_release() of a page that is actually still referenced
+        * by someone, leading to some obscure, delayed Oops somewhere else. */
+       if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
+               return _drbd_no_send_page(mdev, page, offset, size);
+
+       drbd_update_congested(mdev);
+       set_fs(KERNEL_DS);
+       do {
+               sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
+                                                       offset, len,
+                                                       MSG_NOSIGNAL);
+               if (sent == -EAGAIN) {
+                       if (we_should_drop_the_connection(mdev,
+                                                   &n